#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import json
import math
import optparse
import os
import os.path
import re
import sys
from PIL import Image, ImageDraw
from itertools import izip_longest
parser = optparse.OptionParser(usage='%prog [options] output_dir')
parser.add_option("-i", "--input",
dest="input",
default=None,
action="store",
help="input file (default stdin)")
parser.add_option("-p", "--page-size",
dest="page_size",
default="490x318",
action="store",
help="page size (mm)")
parser.add_option("-d", "--dpi",
dest="dpi",
default=300,
action="store",
type="int",
help="dpi")
parser.add_option("-r", "--resize",
dest="resize",
default=None,
action="store",
type="float",
help="resize factor (if any)")
parser.add_option("-n", "--per-page",
dest="per_page",
default=9,
action="store",
type="int",
help="badge per page")
parser.add_option("-m", "--no-marker",
dest="no_marker",
default=False,
action="store_true",
help="don't draw the crop marks")
parser.add_option("-c", "--conf",
dest="conf",
default="conf.py",
action="store",
help="configuration script")
parser.add_option("-e", "--empty-pages",
dest="empty_pages",
default="0",
action="store",
help="prepare x empty pages")
opts, args = parser.parse_args()
try:
output_dir = args[0]
except IndexError:
    parser.print_usage()
    sys.exit(1)
conf = {}
os.chdir(os.path.dirname(os.path.abspath(opts.conf)))
execfile(os.path.basename(opts.conf), conf)
MM2INCH = 0.03937
tickets = conf['tickets']
ticket = conf['ticket']
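# The configuration script loaded above is expected to provide two callables:
#   tickets(data) -> {group_type: {'image': <PIL image>, 'attendees': [...]}}
#   ticket(image, attendee, utils=...) -> <PIL image for one badge>
# It may also define WASTE and PAGE_MARGIN (both in mm), read below.
# A minimal, hypothetical conf.py sketch:
#
#   from PIL import Image
#
#   def tickets(data):
#       return {'attendee': {'image': Image.open('badge.png'), 'attendees': data}}
#
#   def ticket(image, attendee, utils=None):
#       return image.copy()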
DPI = opts.dpi
WASTE = conf.get('WASTE', 0) * MM2INCH * DPI
PAGE_MARGIN = int(conf.get('PAGE_MARGIN', 10) * MM2INCH * DPI)
if opts.page_size == 'A3':
psize = "420x297"
elif opts.page_size == 'A4':
psize = "297x210"
else:
psize = opts.page_size
PAGE_SIZE = map(lambda x: int(int(x) * MM2INCH * DPI), psize.split('x'))
if opts.input:
    with open(opts.input) as f:
        data = json.load(f)
else:
    data = json.loads(sys.stdin.read())
groups = tickets(data)
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
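# wrap_text() implements a greedy word wrap: words are appended to the current
# line while the rendered width still fits, and any line that remains too wide
# (e.g. a single long word) is then hard-truncated character by character.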
def wrap_text(font, text, width):
words = re.split(' ', text)
lines = []
while words:
word = words.pop(0).strip()
if not word:
continue
if not lines:
lines.append(word)
else:
line = lines[-1]
w, h = font.getsize(line + ' ' + word)
if w <= width:
lines[-1] += ' ' + word
else:
lines.append(word)
for ix, line in enumerate(lines):
line = line.strip()
while True:
w, h = font.getsize(line)
if w <= width:
break
line = line[:-1]
lines[ix] = line
return lines
def draw_info(image, max_width, text, pos, font, color, line_offset=8):
d = ImageDraw.Draw(image)
cx = pos[0]
cy = pos[1] - font.getsize(text)[1]
lines = wrap_text(font, text, max_width)
for l in lines:
d.text((cx, cy), l, font = font, fill = color)
cy += font.getsize(l)[1] + line_offset
def assemble_page(images):
#cols = rows = int(math.ceil(math.sqrt(len(images))))
#w0, h0 = images[0].size
#x0 = (PAGE_SIZE[0] - w0 * cols) / 2
#y0 = (PAGE_SIZE[1] - h0 * rows) / 2
page = Image.new('RGBA', PAGE_SIZE, (255, 255, 255, 255))
x = y = PAGE_MARGIN
limits = PAGE_SIZE[0] - 2*PAGE_MARGIN, PAGE_SIZE[1] - 2*PAGE_MARGIN
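    # Flow layout: paste badges left to right and wrap to a new row as soon as
    # the next badge would cross the right-hand margin.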
for img in images:
size = img.size
if x + size[0] > limits[0]:
x = PAGE_MARGIN
y += size[1]
elif y + size[1] > limits[1]:
y += size[1]
page.paste(img, (x, y), img)
x += size[0]
return page
# TODO: reimplement the handling of the crop marks
# if not opts.no_marker and WASTE:
# draw = ImageDraw.Draw(page)
# a = WASTE
# line_width = int(0.5 * MM2INCH * DPI)
# def m(p0, p1):
# p0 = tuple(map(int, p0))
# p1 = tuple(map(int, p1))
# draw.line((p0, p1), fill = (0, 0, 0), width = line_width)
# for ix, i in enumerate(images):
# col = ix % cols
# row = ix / rows
# x1 = x0 + col * w0
# y1 = y0 + row * h0
# x2 = x1 + w0
# y2 = y1 + h0
#
# m((x1+a, y1), (x1+a, y1+a/2))
# m((x1, y1+a), (x1+a/2, y1+a))
#
# m((x2-a, y1), (x2-a, y1+a/2))
# m((x2, y1+a), (x2-a/2, y1+a))
#
# m((x2, y2-a), (x2-a/2, y2-a))
# m((x2-a, y2), (x2-a, y2-a/2))
#
# m((x1, y2-a), (x1+a/2, y2-a))
# m((x1+a, y2), (x1+a, y2-a/2))
# return page
def add_page(name, page):
with file(os.path.join(output_dir, name), 'w') as out:
page.save(out, 'TIFF', dpi=(DPI, DPI))
def render_badge(image, attendee, utils, resize_factor=None):
i = ticket(image, attendee, utils=utils)
if resize_factor:
        nsize = int(i.size[0] * resize_factor), int(i.size[1] * resize_factor)
i = i.resize(nsize, Image.ANTIALIAS)
return i
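# Main loop: one output page per group of `per_page` badges. grouper() pads the
# final block with None entries, which ticket() is expected to render as blank
# badges (the same convention used for the explicit empty pages below).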
for group_type, data in sorted(groups.items()):
image = data['image']
attendees = data['attendees']
pages = len(attendees) / opts.per_page
if len(attendees) % opts.per_page:
pages += 1
utils = {
'wrap_text': wrap_text,
'draw_info': draw_info,
}
count = 1
for block in grouper(opts.per_page, attendees):
if block:
images = []
for a in block:
badge = render_badge(image, a, utils=utils, resize_factor=opts.resize)
images.append(badge)
page = assemble_page(images)
name = '[%s] pag %s-%s.tif' % (group_type, str(count).zfill(2), str(pages).zfill(2))
print >>sys.stderr, name
add_page(name, page)
count += 1
if opts.empty_pages.endswith('%'):
additional = int(math.ceil(pages * float(opts.empty_pages[:-1]) / 100 ))
else:
additional = int(opts.empty_pages)
for ix in range(additional):
name = '[%s][vuoti] pag %s-%s.tif' % (group_type, str(ix+1).zfill(2), str(additional).zfill(2))
images = [ render_badge(image, None, utils=utils, resize_factor=opts.resize) for x in range(opts.per_page) ]
add_page(name, assemble_page(images))
|
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import pytest
import numpy as np
import pandas as pd
import json
from math import isnan
from fluids.numerics import linspace, assert_close, derivative, assert_close1d
from chemicals.vapor_pressure import *
from thermo.vapor_pressure import *
from thermo.vapor_pressure import SANJARI, EDALAT, AMBROSE_WALTON, LEE_KESLER_PSAT, BOILING_CRITICAL, COOLPROP, VDI_PPDS, VDI_TABULAR, WAGNER_MCGARRY, ANTOINE_EXTENDED_POLING, ANTOINE_POLING, WAGNER_POLING, DIPPR_PERRY_8E, ANTOINE_WEBBOOK
from thermo.utils import TDependentProperty
from chemicals.identifiers import check_CAS
import chemicals
from thermo.coolprop import has_CoolProp
from math import *
@pytest.mark.CoolProp
@pytest.mark.meta_T_dept
@pytest.mark.skipif(not has_CoolProp(), reason='CoolProp is missing')
def test_VaporPressure_CoolProp():
EtOH = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
assert_close(EtOH.calculate(305.0, COOLPROP), 11592.205263402893, rtol=1e-7)
@pytest.mark.meta_T_dept
def test_VaporPressure_ethanol():
    # Ethanol, test as many methods as possible at once
EtOH = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
    # Check that assigning a bad method does not change the object and raises a ValueError
h0 = hash(EtOH)
with pytest.raises(ValueError):
EtOH.method = 'NOTAMETHOD'
assert hash(EtOH) == h0
# Check valid_methods call
assert set(EtOH.valid_methods()) == EtOH.all_methods
# For Ethanol, all methods are valid around 300 K
assert EtOH.valid_methods(365) == EtOH.valid_methods()
# Check test_property_validity
assert not EtOH.test_property_validity(1j)
# Check can't calculate with a bad method
with pytest.raises(ValueError):
EtOH.calculate(300, 'NOTAMETHOD')
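    # Compare the remaining methods against hard-coded values; VDI_TABULAR is
    # interpolated and checked separately below with a looser tolerance, and
    # CoolProp is an optional dependency, so both are excluded here.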
methods = list(EtOH.all_methods)
methods.remove(VDI_TABULAR)
if COOLPROP in methods:
methods.remove(COOLPROP)
EtOH.extrapolation = 'nolimit'
Psats_expected = {WAGNER_MCGARRY: 11579.634014300127,
WAGNER_POLING: 11590.408779316374,
ANTOINE_POLING: 11593.661615921257,
DIPPR_PERRY_8E: 11659.154222044575,
VDI_PPDS: 11698.02742876088,
BOILING_CRITICAL: 14088.453409816764,
LEE_KESLER_PSAT: 11350.156640503357,
AMBROSE_WALTON: 11612.378633936816,
SANJARI: 9210.262000640221,
EDALAT: 12081.738947110121,
ANTOINE_WEBBOOK: 10827.523813517917,
}
T = 305.0
Psat_calcs = {}
for i in methods:
EtOH.method = i
Psat_calcs[i] = EtOH.T_dependent_property(T)
Tmin, Tmax = EtOH.T_limits[i]
if i not in (ANTOINE_WEBBOOK,):
assert Tmin < T < Tmax
for k, v in Psats_expected.items():
assert_close(v, Psat_calcs[k], rtol=1e-11)
    assert len(Psats_expected) == len(Psat_calcs)
assert_close(EtOH.calculate(305, VDI_TABULAR), 11690.81660829924, rtol=1E-4)
s = EtOH.as_json()
assert 'json_version' in s
obj2 = VaporPressure.from_json(s)
assert EtOH == obj2
    # Test that all methods return None far above their temperature limits when extrapolation is disabled
EtOH = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
EtOH.extrapolation = None
for i in list(EtOH.all_methods):
EtOH.method = i
assert EtOH.T_dependent_property(5000) is None
@pytest.mark.meta_T_dept
def test_VaporPressure_extended_poling():
# Use another chemical to get in ANTOINE_EXTENDED_POLING
a = VaporPressure(CASRN='589-81-1')
Psat_calcs = []
for i in list(a.all_methods):
a.method = i
Psat_calcs.append(a.T_dependent_property(410))
Psat_exp = [162944.82134710113, 162870.44794192078, 170508.47471278594, 162865.5380455795]
assert_close1d(sorted(Psat_calcs), sorted(Psat_exp))
s = a.as_json()
obj2 = VaporPressure.from_json(s)
assert a == obj2
@pytest.mark.meta_T_dept
def test_VaporPressure_water():
# Test interpolation, extrapolation
w = VaporPressure(Tb=373.124, Tc=647.14, Pc=22048320.0, omega=0.344, CASRN='7732-18-5')
Ts = linspace(300, 350, 10)
Ps = [3533.918074415897, 4865.419832056078, 6612.2351036034115, 8876.854141719203, 11780.097759775277, 15462.98385942125, 20088.570250257424, 25843.747665059742, 32940.95821687677, 41619.81654904555]
w.add_tabular_data(Ts=Ts, properties=Ps)
assert_close(w.T_dependent_property(305.), 4715.122890601165)
w.extrapolation = 'interp1d'
assert_close(w.T_dependent_property(200.), 0.5364148240126076)
Ts_bad = [300, 325, 350]
Ps_bad = [1, -1, 1j]
with pytest.raises(ValueError):
w.add_tabular_data(Ts=Ts_bad, properties=Ps_bad)
Ts_rev = list(reversed(Ts))
with pytest.raises(ValueError):
w.add_tabular_data(Ts=Ts_rev, properties=Ps)
@pytest.mark.meta_T_dept
def test_VaporPressure_cycloheptane():
# Get a check for Antoine Extended
cycloheptane = VaporPressure(Tb=391.95, Tc=604.2, Pc=3820000.0, omega=0.2384, CASRN='291-64-5')
cycloheptane.method = ('ANTOINE_EXTENDED_POLING')
cycloheptane.extrapolation = None
assert_close(cycloheptane.T_dependent_property(410), 161647.35219882353)
    assert cycloheptane.T_dependent_property(400) is None
with pytest.raises(Exception):
cycloheptane.test_method_validity(300, 'BADMETHOD')
obj = VaporPressure(CASRN="71-43-2", Tb=353.23, Tc=562.05, Pc=4895000.0, omega=0.212, extrapolation="AntoineAB|DIPPR101_ABC", method="WAGNER_MCGARRY")
assert_close(obj.T_dependent_property_derivative(600.0), 2379682.4349338813, rtol=1e-4)
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting0():
obj = VaporPressure(CASRN='13838-16-9')
Tmin, Tmax = obj.WAGNER_POLING_Tmin, obj.WAGNER_POLING_Tmax
Ts = linspace(Tmin, Tmax, 10)
Ps = [obj(T) for T in Ts]
Tc, Pc = obj.WAGNER_POLING_Tc, obj.WAGNER_POLING_Pc
fitted = obj.fit_data_to_model(Ts=Ts, data=Ps, model='Wagner', use_numba=False,
model_kwargs={'Tc': obj.WAGNER_POLING_Tc, 'Pc': obj.WAGNER_POLING_Pc})
res = fitted
assert 'Tc' in res
assert 'Pc' in res
assert_close(res['a'], obj.WAGNER_POLING_coefs[0])
assert_close(res['b'], obj.WAGNER_POLING_coefs[1])
assert_close(res['c'], obj.WAGNER_POLING_coefs[2])
assert_close(res['d'], obj.WAGNER_POLING_coefs[3])
# Heavy compound fit
Ts = linspace(179.15, 237.15, 5)
props_calc = [Antoine(T, A=138., B=520200.0, C=3670.0) for T in Ts]
res, stats = TDependentProperty.fit_data_to_model(Ts=Ts, data=props_calc, model='Antoine',
do_statistics=True, use_numba=False, model_kwargs={'base':10.0},
fit_method='lm')
assert stats['MAE'] < 1e-5
# Fit with very low range and no C
Ts = linspace(374, 377.0, 5)
props_calc = [Antoine(T, A=12.852103, B=2942.98, C=0.0) for T in Ts]
res, stats = TDependentProperty.fit_data_to_model(Ts=Ts, data=props_calc, model='Antoine',
do_statistics=True, use_numba=False, model_kwargs={'base':10.0},
fit_method='lm')
assert stats['MAE'] < 1e-5
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting1():
# Ammonia data fitting from chemsep
ammonia_Ts_Psats = [191.24, 202.546, 213.852, 225.157, 236.463, 239.82, 247.769, 259.075, 270.381, 281.686, 292.992, 304.298, 315.604, 326.909, 338.215, 349.521, 360.827, 372.133, 383.438, 394.744, 406.05]
ammonia_Psats = [4376.24, 10607.70, 23135.70, 46190.70, 85593.30, 101505.00, 148872.00, 245284.00, 385761.00, 582794.00, 850310.00, 1203550.00, 1658980.00, 2234280.00, 2948340.00, 3821410.00, 4875270.00, 6133440.00, 7621610.00, 9367940.00, 11403600.00]
res, stats = VaporPressure.fit_data_to_model(Ts=ammonia_Ts_Psats, data=ammonia_Psats, model='DIPPR101',
do_statistics=True, use_numba=False, fit_method='lm')
assert stats['MAE'] < 1e-4
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting2_dippr():
pts = 10
fit_issue_CASs = ['75-07-0', '107-02-8', '108-38-3', '7732-18-5', '85-44-9',
'67-64-1', '78-87-5', '624-72-6', '118-96-7', '124-18-5',
# '526-73-8'
]
for CAS in fit_issue_CASs:
obj = VaporPressure(CASRN=CAS)
Ts = linspace(obj.Perrys2_8_Tmin, obj.Perrys2_8_Tmax, pts)
props_calc = [obj.calculate(T, DIPPR_PERRY_8E) for T in Ts]
res, stats = obj.fit_data_to_model(Ts=Ts, data=props_calc, model='DIPPR101',
do_statistics=True, use_numba=False, multiple_tries=True, fit_method='lm')
assert stats['MAE'] < 1e-5
@pytest.mark.slow
@pytest.mark.fuzz
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting3_WagnerMcGarry():
for i, CAS in enumerate(chemicals.vapor_pressure.Psat_data_WagnerMcGarry.index):
obj = VaporPressure(CASRN=CAS)
Ts = linspace(obj.WAGNER_MCGARRY_Tmin, obj.WAGNER_MCGARRY_Tc, 10)
props_calc = [obj.calculate(T, WAGNER_MCGARRY) for T in Ts]
res, stats = obj.fit_data_to_model(Ts=Ts, data=props_calc, model='Wagner_original',
do_statistics=True, use_numba=False,
fit_method='lm', model_kwargs={'Tc': obj.WAGNER_MCGARRY_Tc, 'Pc': obj.WAGNER_MCGARRY_Pc})
assert stats['MAE'] < 1e-7
@pytest.mark.slow
@pytest.mark.fuzz
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting4_WagnerPoling():
for i, CAS in enumerate(chemicals.vapor_pressure.Psat_data_WagnerPoling.index):
obj = VaporPressure(CASRN=CAS)
Ts = linspace(obj.WAGNER_POLING_Tmin, obj.WAGNER_POLING_Tc, 10)
props_calc = [obj.calculate(T, WAGNER_POLING) for T in Ts]
res, stats = obj.fit_data_to_model(Ts=Ts, data=props_calc, model='Wagner',
do_statistics=True, use_numba=False,
fit_method='lm', model_kwargs={'Tc': obj.WAGNER_POLING_Tc, 'Pc': obj.WAGNER_POLING_Pc})
assert stats['MAE'] < 1e-7
@pytest.mark.slow
@pytest.mark.fuzz
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting5_AntoinePoling():
for i, CAS in enumerate(chemicals.vapor_pressure.Psat_data_AntoinePoling.index):
obj = VaporPressure(CASRN=CAS)
Ts = linspace(obj.ANTOINE_POLING_Tmin, obj.ANTOINE_POLING_Tmax, 10)
props_calc = [obj.calculate(T, ANTOINE_POLING) for T in Ts]
res, stats = obj.fit_data_to_model(Ts=Ts, data=props_calc, model='Antoine',
do_statistics=True, use_numba=False,
multiple_tries=True,
model_kwargs={'base': 10.0}, fit_method='lm')
assert stats['MAE'] < 1e-7
@pytest.mark.slow
@pytest.mark.fuzz
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting6_VDI_PPDS():
for i, CAS in enumerate(chemicals.vapor_pressure.Psat_data_VDI_PPDS_3.index):
obj = VaporPressure(CASRN=CAS)
Ts = linspace(obj.VDI_PPDS_Tm, obj.VDI_PPDS_Tc, 10)
props_calc = [obj.calculate(T, VDI_PPDS) for T in Ts]
res, stats = obj.fit_data_to_model(Ts=Ts, data=props_calc, model='Wagner',
do_statistics=True, use_numba=False,
fit_method='lm', model_kwargs={'Tc': obj.VDI_PPDS_Tc, 'Pc': obj.VDI_PPDS_Pc})
assert stats['MAE'] < 1e-7
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting7_reduced_fit_params_with_jac():
obj = VaporPressure(CASRN='13838-16-9')
Tmin, Tmax = obj.WAGNER_POLING_Tmin, obj.WAGNER_POLING_Tmax
Ts = linspace(Tmin, Tmax, 10)
Ps = [obj(T) for T in Ts]
fit = obj.fit_data_to_model(Ts=Ts, data=Ps, model='Wagner', use_numba=False,
model_kwargs={'Tc': obj.WAGNER_POLING_Tc, 'Pc': obj.WAGNER_POLING_Pc, 'd': -4.60})
assert fit['d'] == -4.6
fit = obj.fit_data_to_model(Ts=Ts, data=Ps, model='Wagner', use_numba=False,
model_kwargs={'Tc': obj.WAGNER_POLING_Tc, 'Pc': obj.WAGNER_POLING_Pc, 'b': 2.4})
assert fit['b'] == 2.4
fit = obj.fit_data_to_model(Ts=Ts, data=Ps, model='Wagner', use_numba=False,
model_kwargs={'Tc': obj.WAGNER_POLING_Tc, 'Pc': obj.WAGNER_POLING_Pc, 'd': -4.60, 'a': -8.329, 'b': 2.4})
assert fit['a'] == -8.329
assert fit['b'] == 2.4
assert fit['d'] == -4.6
@pytest.mark.slow
@pytest.mark.fuzz
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting8_TRC_AntoineExtended():
hard_CASs = frozenset(['110-82-7'])
for i, CAS in enumerate(chemicals.vapor_pressure.Psat_data_AntoineExtended.index):
obj = VaporPressure(CASRN=CAS)
Ts = linspace(obj.ANTOINE_EXTENDED_POLING_Tmin, obj.ANTOINE_EXTENDED_POLING_Tmax, 10)
props_calc = [obj.calculate(T, ANTOINE_EXTENDED_POLING) for T in Ts]
res, stats = obj.fit_data_to_model(Ts=Ts, data=props_calc, model='TRC_Antoine_extended',
do_statistics=True, use_numba=False, multiple_tries=CAS in hard_CASs,# multiple_tries_max_err=1e-4,
fit_method='lm', model_kwargs={'Tc': obj.ANTOINE_EXTENDED_POLING_coefs[0],
'to': obj.ANTOINE_EXTENDED_POLING_coefs[1]})
assert stats['MAE'] < 1e-4
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting9_Yaws_Psat():
A, B, C, D, E, Tmin, Tmax = 53.93890302013294, -788.24, -22.734, 0.051225, 6.1896e-11, 68.15, 132.92
Ts = linspace(Tmin, Tmax, 10)
props_calc = [Yaws_Psat(T, A, B, C, D, E) for T in Ts]
res, stats = VaporPressure.fit_data_to_model(Ts=Ts, data=props_calc, model='Yaws_Psat',
do_statistics=True, use_numba=False,
fit_method='lm')
assert stats['MAE'] < 1e-5
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting10_example():
Ts = [203.65, 209.55, 212.45, 234.05, 237.04, 243.25, 249.35, 253.34, 257.25, 262.12, 264.5, 267.05, 268.95, 269.74, 272.95, 273.46, 275.97, 276.61, 277.23, 282.03, 283.06, 288.94, 291.49, 293.15, 293.15, 293.85, 294.25, 294.45, 294.6, 294.63, 294.85, 297.05, 297.45, 298.15, 298.15, 298.15, 298.15, 298.15, 299.86, 300.75, 301.35, 303.15, 303.15, 304.35, 304.85, 305.45, 306.25, 308.15, 308.15, 308.15, 308.22, 308.35, 308.45, 308.85, 309.05, 311.65, 311.85, 311.85, 311.95, 312.25, 314.68, 314.85, 317.75, 317.85, 318.05, 318.15, 318.66, 320.35, 320.35, 320.45, 320.65, 322.55, 322.65, 322.85, 322.95, 322.95, 323.35, 323.55, 324.65, 324.75, 324.85, 324.85, 325.15, 327.05, 327.15, 327.2, 327.25, 327.35, 328.22, 328.75, 328.85, 333.73, 338.95]
Psats = [58.93, 94.4, 118.52, 797.1, 996.5, 1581.2, 2365, 3480, 3893, 5182, 6041, 6853, 7442, 7935, 9290, 9639, 10983, 11283, 13014, 14775, 15559, 20364, 22883, 24478, 24598, 25131, 25665, 25931, 25998, 26079, 26264, 29064, 29598, 30397, 30544, 30611, 30784, 30851, 32636, 33931, 34864, 37637, 37824, 39330, 40130, 41063, 42396, 45996, 46090, 46356, 45462, 46263, 46396, 47129, 47396, 52996, 52929, 53262, 53062, 53796, 58169, 59328, 66395, 66461, 67461, 67661, 67424, 72927, 73127, 73061, 73927, 79127, 79527, 80393, 79927, 80127, 81993, 80175, 85393, 85660, 85993, 86260, 86660, 92726, 92992, 92992, 93126, 93326, 94366, 98325, 98592, 113737, 136626]
res, stats = TDependentProperty.fit_data_to_model(Ts=Ts, data=Psats, model='Antoine', do_statistics=True, multiple_tries=True, model_kwargs={'base': 10.0})
assert stats['MAE'] < 0.014
@pytest.mark.fitting
@pytest.mark.meta_T_dept
def test_VaporPressure_fitting11_bad_method():
Ts = [203.65, 209.55, 212.45, 234.05, 237.04, 243.25, 249.35, 253.34, 257.25, 262.12, 264.5, 267.05, 268.95, 269.74, 272.95, 273.46, 275.97, 276.61, 277.23, 282.03, 283.06, 288.94, 291.49, 293.15, 293.15, 293.85, 294.25, 294.45, 294.6, 294.63, 294.85, 297.05, 297.45, 298.15, 298.15, 298.15, 298.15, 298.15, 299.86, 300.75, 301.35, 303.15, 303.15, 304.35, 304.85, 305.45, 306.25, 308.15, 308.15, 308.15, 308.22, 308.35, 308.45, 308.85, 309.05, 311.65, 311.85, 311.85, 311.95, 312.25, 314.68, 314.85, 317.75, 317.85, 318.05, 318.15, 318.66, 320.35, 320.35, 320.45, 320.65, 322.55, 322.65, 322.85, 322.95, 322.95, 323.35, 323.55, 324.65, 324.75, 324.85, 324.85, 325.15, 327.05, 327.15, 327.2, 327.25, 327.35, 328.22, 328.75, 328.85, 333.73, 338.95]
Psats = [58.93, 94.4, 118.52, 797.1, 996.5, 1581.2, 2365, 3480, 3893, 5182, 6041, 6853, 7442, 7935, 9290, 9639, 10983, 11283, 13014, 14775, 15559, 20364, 22883, 24478, 24598, 25131, 25665, 25931, 25998, 26079, 26264, 29064, 29598, 30397, 30544, 30611, 30784, 30851, 32636, 33931, 34864, 37637, 37824, 39330, 40130, 41063, 42396, 45996, 46090, 46356, 45462, 46263, 46396, 47129, 47396, 52996, 52929, 53262, 53062, 53796, 58169, 59328, 66395, 66461, 67461, 67661, 67424, 72927, 73127, 73061, 73927, 79127, 79527, 80393, 79927, 80127, 81993, 80175, 85393, 85660, 85993, 86260, 86660, 92726, 92992, 92992, 93126, 93326, 94366, 98325, 98592, 113737, 136626]
with pytest.raises(ValueError):
TDependentProperty.fit_data_to_model(Ts=Ts, data=Psats, model='NOTAMETHOD')
@pytest.mark.meta_T_dept
def test_VaporPressure_analytical_derivatives():
Psat = VaporPressure(CASRN="108-38-3", Tb=412.25, Tc=617.0, Pc=3541000.0, omega=0.331,
extrapolation="AntoineAB|DIPPR101_ABC", method=WAGNER_MCGARRY)
assert_close(Psat.calculate_derivative(T=400.0, method=WAGNER_MCGARRY), 2075.9195652247963, rtol=1e-13)
assert_close(Psat.calculate_derivative(T=400.0, method=WAGNER_MCGARRY, order=2), 47.61112509616565, rtol=1e-13)
assert_close(Psat.calculate_derivative(T=400.0, method=WAGNER_POLING), 2073.565462948561, rtol=1e-13)
assert_close(Psat.calculate_derivative(T=400.0, method=WAGNER_POLING, order=2), 47.60007499952595, rtol=1e-13)
assert_close(Psat.calculate_derivative(T=400.0, method=DIPPR_PERRY_8E), 2075.1783812355125, rtol=1e-13)
assert_close(Psat.calculate_derivative(T=400.0, method=DIPPR_PERRY_8E, order=2), 47.566696599306596, rtol=1e-13)
assert_close(Psat.calculate_derivative(T=400.0, method=VDI_PPDS), 2073.5972901257196, rtol=1e-13)
assert_close(Psat.calculate_derivative(T=400.0, method=VDI_PPDS, order=2), 47.489535848986364, rtol=1e-13)
cycloheptane = VaporPressure(Tb=391.95, Tc=604.2, Pc=3820000.0, omega=0.2384, CASRN='291-64-5')
cycloheptane.method = ANTOINE_EXTENDED_POLING
assert_close(cycloheptane.calculate_derivative(T=500.0, method=ANTOINE_EXTENDED_POLING, order=2), 176.89903538855853, rtol=1e-13)
assert_close(cycloheptane.calculate_derivative(T=500.0, method=ANTOINE_EXTENDED_POLING, order=1), 15046.47337900798, rtol=1e-13)
cycloheptane.method = ANTOINE_POLING
assert_close(cycloheptane.calculate_derivative(T=400.0, method=ANTOINE_POLING, order=1), 3265.237029987264, rtol=1e-13)
assert_close(cycloheptane.calculate_derivative(T=400.0, method=ANTOINE_POLING, order=2), 65.83298769903531, rtol=1e-13)
def test_VaporPressure_no_isnan():
assert not isnan(VaporPressure(CASRN='4390-04-9').Tmin)
def test_VaporPressure_linear_extrapolation_non_negative():
ethanol_psat = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
# Make sure the constants are set to guard against future changes to defaults
ethanol_psat.method = WAGNER_MCGARRY
ethanol_psat.interpolation_T = (lambda T: 1/T)
ethanol_psat.interpolation_property = (lambda P: log(P))
ethanol_psat.interpolation_property_inv = (lambda P: exp(P))
ethanol_psat.extrapolation = 'linear'
assert_close(ethanol_psat(700), 59005875.32878946, rtol=1e-4)
assert_close(ethanol_psat(100), 1.0475828451230242e-11, rtol=1e-4)
assert ethanol_psat.T_limits['WAGNER_MCGARRY'][0] == ethanol_psat.WAGNER_MCGARRY_Tmin
assert ethanol_psat.T_limits['WAGNER_MCGARRY'][1] == ethanol_psat.WAGNER_MCGARRY_Tc
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc-1e-6))
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tc+1e-6))
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin-1e-6))
assert_close(ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin),
ethanol_psat.T_dependent_property(ethanol_psat.WAGNER_MCGARRY_Tmin+1e-6))
Tmin = ethanol_psat.T_limits[ethanol_psat.method][0]
Ts = linspace(0.7*Tmin, Tmin*(1-1e-10), 10)
Ps = [ethanol_psat(T) for T in Ts]
# Confirms it's linear
# plt.plot(1/np.array(Ts), np.log(Ps))
# plt.show()
# def rsquared(x, y):
# import scipy.stats
# _, _, r_value, _, _ = scipy.stats.linregress(x, y)
# return r_value*r_value
# assert_close(rsquared(1/np.array(Ts), np.log(Ps)), 1, atol=1e-5)
assert abs(np.polyfit(1/np.array(Ts), np.log(Ps), 1, full=True)[1][0]) < 1e-13
# TODO make work with different interpolation methods
# assert ethanol_psat == VaporPressure.from_json(ethanol_psat.as_json())
@pytest.mark.meta_T_dept
def test_VaporPressure_extrapolation_solve_prop():
cycloheptane = VaporPressure(Tb=391.95, Tc=604.2, Pc=3820000.0, omega=0.2384, CASRN='291-64-5')
cycloheptane.method = 'ANTOINE_EXTENDED_POLING'
cycloheptane.extrapolation = 'AntoineAB|DIPPR101_ABC'
cycloheptane.T_dependent_property(T=4000)
assert_close(cycloheptane.solve_property(1), 187.25621087267422)
assert_close(cycloheptane.solve_property(1e-20), 60.677576120119156)
assert_close(cycloheptane.solve_property(1e5), 391.3576035137979)
assert_close(cycloheptane.solve_property(1e6), 503.31772463155266)
assert_close(cycloheptane.solve_property(1e7), 711.8047771523733)
assert_close(cycloheptane.solve_property(3e7), 979.2026813626704)
def test_VaporPressure_bestfit_derivatives():
obj = VaporPressure(exp_poly_fit=(175.7, 512.49, [-1.446088049406911e-19, 4.565038519454878e-16, -6.278051259204248e-13, 4.935674274379539e-10,
-2.443464113936029e-07, 7.893819658700523e-05, -0.016615779444332356, 2.1842496316772264, -134.19766175812708]))
assert_close(obj.T_dependent_property(300), 18601.061401014867, rtol=1e-11)
assert_close(obj.T_dependent_property_derivative(300), 954.1652489206775, rtol=1e-11)
assert_close(obj.T_dependent_property_derivative(300, order=2), 41.8787546283273, rtol=1e-11)
assert_close(derivative(obj.T_dependent_property, 300, dx=300*1e-7), obj.T_dependent_property_derivative(300))
assert_close(derivative(obj.T_dependent_property_derivative, 300, dx=300*1e-7), obj.T_dependent_property_derivative(300, order=2))
@pytest.mark.meta_T_dept
def test_VaporPressure_extrapolation_AB():
obj = VaporPressure(Tb=309.21, Tc=469.7, Pc=3370000.0, omega=0.251, CASRN='109-66-0', load_data=True, extrapolation='AntoineAB')
obj.method = WAGNER_MCGARRY
obj.calculate_derivative(300, WAGNER_MCGARRY)
for extrapolation in ('AntoineAB', 'DIPPR101_ABC', 'AntoineAB|AntoineAB', 'DIPPR101_ABC|DIPPR101_ABC',
'DIPPR101_ABC|AntoineAB', 'AntoineAB|DIPPR101_ABC'):
obj.extrapolation = extrapolation
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc-1e-6))
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tc+1e-6))
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin-1e-6))
assert_close(obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin),
obj.T_dependent_property(obj.WAGNER_MCGARRY_Tmin+1e-6))
# @pytest.mark.meta_T_dept
# def test_VaporPressure_fast_Psat_poly_fit():
# corr = VaporPressure(exp_poly_fit=(273.17, 647.086, [-2.8478502840358144e-21, 1.7295186670575222e-17, -4.034229148562168e-14, 5.0588958391215855e-11, -3.861625996277003e-08, 1.886271475957639e-05, -0.005928371869421494, 1.1494956887882308, -96.74302379151317]))
# # Low transition
# P_trans = corr.exp_poly_fit_Tmin_value
# assert_close(corr.solve_property(P_trans), corr.solve_property_exp_poly_fit(P_trans), rtol=1e-10)
# assert_close(corr.solve_property(P_trans + 1e-7), corr.solve_property_exp_poly_fit(P_trans + 1e-7), rtol=1e-10)
# # High transition
# P_trans = corr.exp_poly_fit_Tmax_value
# assert_close(corr.solve_property(P_trans), corr.solve_property_exp_poly_fit(P_trans), rtol=1e-10)
# assert_close(corr.solve_property(P_trans + 1e-7), corr.solve_property_exp_poly_fit(P_trans + 1e-7), rtol=1e-10)
# # Low temperature values - up to 612 Pa
# assert_close(corr.solve_property(1e-5), corr.solve_property_exp_poly_fit(1e-5), rtol=1e-10)
# assert_close(corr.solve_property(1), corr.solve_property_exp_poly_fit(1), rtol=1e-10)
# assert_close(corr.solve_property(100), corr.solve_property_exp_poly_fit(100), rtol=1e-10)
# # Solver region
# assert_close(corr.solve_property(1e5), corr.solve_property_exp_poly_fit(1e5), rtol=1e-10)
# assert_close(corr.solve_property(1e7), corr.solve_property_exp_poly_fit(1e7), rtol=1e-10)
# # High T
# assert_close(corr.solve_property(1e8), corr.solve_property_exp_poly_fit(1e8), rtol=1e-10)
# # Extrapolation
# from thermo.vapor_pressure import POLY_FIT, BEST_FIT_AB, BEST_FIT_ABC
# obj = VaporPressure(poly_fit=(178.01, 591.74, [-8.638045111752356e-20, 2.995512203611858e-16, -4.5148088801006036e-13, 3.8761537879200513e-10, -2.0856828984716705e-07, 7.279010846673517e-05, -0.01641020023565049, 2.2758331029405516, -146.04484159879843]))
# assert_close(obj.calculate(1000, BEST_FIT_AB), 78666155.90418352, rtol=1e-10)
# assert_close(obj.calculate(1000, BEST_FIT_ABC), 156467764.5930495, rtol=1e-10)
# assert_close(obj.calculate(400, POLY_FIT), 157199.6909849476, rtol=1e-10)
# assert_close(obj.calculate(400, BEST_FIT_AB), 157199.6909849476, rtol=1e-10)
# assert_close(obj.calculate(400, BEST_FIT_ABC), 157199.6909849476, rtol=1e-10)
@pytest.mark.meta_T_dept
def test_VaporPressure_generic_polynomial_exp_parameters():
coeffs = [-1.446088049406911e-19, 4.565038519454878e-16, -6.278051259204248e-13, 4.935674274379539e-10,
-2.443464113936029e-07, 7.893819658700523e-05, -0.016615779444332356, 2.1842496316772264, -134.19766175812708]
obj_bestfit = VaporPressure(exp_poly_fit=(175.7, 512.49, coeffs))
obj_polynomial = VaporPressure(exp_polynomial_parameters={'test': {'coeffs': coeffs,
'Tmin': 175.7, 'Tmax': 512.49}})
assert_close(obj_bestfit.T_dependent_property(300), 18601.061401014867, rtol=1e-11)
assert_close(obj_polynomial(300), obj_bestfit.T_dependent_property(300), rtol=1e-13)
assert VaporPressure.from_json(obj_bestfit.as_json()) == obj_bestfit
assert eval(str(obj_bestfit)) == obj_bestfit
assert VaporPressure.from_json(obj_polynomial.as_json()) == obj_polynomial
assert eval(str(obj_polynomial)) == obj_polynomial
@pytest.mark.meta_T_dept
def test_VaporPressure_generic_polynomial_exp_parameters_complicated():
coeffs = [-1.446088049406911e-19, 4.565038519454878e-16, -6.278051259204248e-13, 4.935674274379539e-10,
-2.443464113936029e-07, 7.893819658700523e-05, -0.016615779444332356, 2.1842496316772264, -134.19766175812708]
T = 300.0
obj2 = VaporPressure(exp_poly_fit=(175.7, 512.49, coeffs))
assert_close(obj2(T), 18601.061401014867, rtol=1e-13)
assert_close(obj2.T_dependent_property_derivative(T), 954.1652489206775, rtol=1e-14)
assert_close(obj2.T_dependent_property_derivative(T, order=2), 41.8787546283273, rtol=1e-14)
assert_close(obj2.T_dependent_property_derivative(T, order=3), 1.496803960985584, rtol=1e-13)
@pytest.mark.meta_T_dept
def test_VaporPressure_exp_stablepoly_fit():
obj2 = VaporPressure(Tc=591.72, exp_stablepoly_fit=((309.0, 591.72, [0.008603558174828078, 0.007358688688856427, -0.016890323025782954, -0.005289197721114957, -0.0028824712174469625, 0.05130960832946553, -0.12709896610233662, 0.37774977659528036, -0.9595325030688526, 2.7931528759840174, 13.10149649770156])))
assert_close(obj2(400), 157191.01706242564, rtol=1e-13)
assert_close(obj2.T_dependent_property_derivative(400, order=1), 4056.436943642117, rtol=1e-13)
assert_close(obj2.T_dependent_property_derivative(400, order=2), 81.32645570045084, rtol=1e-13)
assert_close(obj2.T_dependent_property_derivative(400, order=3), 1.103603650822488, rtol=1e-13)
@pytest.mark.meta_T_dept
def test_VaporPressure_exp_cheb_fit():
obj2 = VaporPressure(Tc=591.72, exp_cheb_fit=((309.0, 591.72, [12.570668791524573, 3.1092695610681673, -0.5485217707981505, 0.11115875762247596, -0.01809803938553478, 0.003674911307077089, -0.00037626163070525465, 0.0001962813915017403, 6.120764548889213e-05, 3.602752453735203e-05])))
assert_close(obj2(300), 4186.189338463003, rtol=1e-13)
assert_close(obj2.T_dependent_property_derivative(400, order=1), 4056.277312107932, rtol=1e-13)
assert_close(obj2.T_dependent_property_derivative(400, order=2), 81.34302144188977, rtol=1e-13)
assert_close(obj2.T_dependent_property_derivative(400, order=3),1.105438780935656, rtol=1e-13)
@pytest.mark.meta_T_dept
def test_VaporPressure_extrapolation_no_validation():
N2 = VaporPressure(CASRN='7727-37-9', extrapolation='DIPPR101_ABC')
N2.method = WAGNER_MCGARRY
assert N2(298.15) is not None
assert N2(1000.15) is not None
@pytest.mark.meta_T_dept
def test_VaporPressure_fast_Psat_poly_fit_extrapolation():
obj = VaporPressure(exp_poly_fit=(175.7, 512.49, [-1.446088049406911e-19, 4.565038519454878e-16, -6.278051259204248e-13, 4.935674274379539e-10,
-2.443464113936029e-07, 7.893819658700523e-05, -0.016615779444332356, 2.1842496316772264, -134.19766175812708]))
obj.extrapolation = 'AntoineAB|DIPPR101_ABC'
assert_close(obj.solve_property(1e-13), 88.65839225764933)
assert_close(obj.solve_property(300), 237.7793675652309)
assert_close(1e8, obj.extrapolate(obj.solve_property(1e8), 'EXP_POLY_FIT'))
assert_close(obj.extrapolate(800, 'EXP_POLY_FIT'), 404793143.0358333)
@pytest.mark.meta_T_dept
def test_VaporPressure_Antoine_inputs():
obj = VaporPressure()
obj.add_correlation(name='WebBook', model='Antoine', Tmin=177.70, Tmax=264.93, A=3.45604+5, B=1044.038, C=-53.893)
assert_close(obj(200), 20.432980367117192, rtol=1e-12)
# json
hash0 = hash(obj)
obj2 = VaporPressure.from_json(json.loads(json.dumps(obj.as_json())))
assert obj == obj2
assert hash(obj) == hash0
assert hash(obj2) == hash0
obj2 = eval(str(obj))
assert obj == obj2
assert hash(obj) == hash0
assert hash(obj2) == hash0
obj2 = VaporPressure(Antoine_parameters={'WebBook': {'A': 8.45604, 'B': 1044.038, 'C': -53.893, 'Tmin': 177.7, 'Tmax': 264.93}})
assert_close(obj2(200), 20.432980367117192, rtol=1e-12)
assert obj == obj2
with pytest.raises(ValueError):
obj.add_correlation(name='WebBook2', model='Antoine', Tmin=177.70, Tmax=264.93, A=3.45604+5, B=1044.038)
with pytest.raises(ValueError):
obj.add_correlation(name='WebBook', model='Antoine', Tmin=177.70, Tmax=264.93, A=3.45604+5, B=1044.038, C=-53.893)
with pytest.raises(ValueError):
obj.add_correlation(name='WebBook4', model='NOTAMODEL', Tmin=177.70, Tmax=264.93, A=3.45604+5, B=1044.038, C=-53.893)
# Test with the new 'coefficients' input method
obj = VaporPressure(Antoine_parameters={'WebBook': {'coefficients': [8.45604, 1044.038, -53.893],
'Tmin': 177.7, 'Tmax': 264.93}})
assert_close(obj(220), 148.15143004993493, rtol=1e-13)
@pytest.mark.meta_T_dept
def test_VaporPressure_DIPPR101_inputs():
obj = VaporPressure()
# From Perry's 8th edition
obj.add_correlation(name='Eq101test', model='DIPPR101', Tmin=175.47, Tmax=512.5, A=82.718, B=-6904.5, C=-8.8622, D=7.4664E-6, E=2.0)
assert_close(obj(298.15), 16825.750567754883, rtol=1e-13)
assert_close(obj.T_dependent_property_derivative(298.15, order=1), 881.6678722199089, rtol=1e-12)
assert_close(obj.T_dependent_property_derivative(298.15, order=2), 39.36139219676838, rtol=1e-12)
assert_close(obj.T_dependent_property_derivative(298.15, order=3), 1.4228777458080808, rtol=1e-12)
# json
hash0 = hash(obj)
obj2 = VaporPressure.from_json(json.loads(json.dumps(obj.as_json())))
assert obj == obj2
assert hash(obj) == hash0
assert hash(obj2) == hash0
obj2 = eval(str(obj))
assert obj == obj2
assert hash(obj) == hash0
assert hash(obj2) == hash0
@pytest.mark.meta_T_dept
def test_VaporPressure_extrapolate_derivatives():
obj = VaporPressure(extrapolation='DIPPR101_ABC|AntoineAB', exp_poly_fit=(273.17, 647.086, [-2.8478502840358144e-21, 1.7295186670575222e-17, -4.034229148562168e-14, 5.0588958391215855e-11, -3.861625996277003e-08, 1.886271475957639e-05, -0.005928371869421494, 1.1494956887882308, -96.74302379151317]))
assert_close(obj.T_dependent_property_derivative(230), 1.4571835115958078, rtol=1e-11)
assert_close(obj.T_dependent_property_derivative(230, order=2), 0.14109259717547848, rtol=1e-11)
assert_close(obj.T_dependent_property_derivative(230, order=3), 0.012236116569167774, rtol=1e-11)
assert_close(obj.T_dependent_property_derivative(700, order=1), 403088.9468522063, rtol=1e-8)
assert_close(obj.T_dependent_property_derivative(700, order=2), 2957.4520772886904, rtol=1e-8)
@pytest.mark.meta_T_dept
def test_VaporPressure_weird_signatures():
from thermo.utils import PROPERTY_TRANSFORM_LN, PROPERTY_TRANSFORM_DLN, PROPERTY_TRANSFORM_D2LN, PROPERTY_TRANSFORM_D_X, PROPERTY_TRANSFORM_D2_X
obj = VaporPressure(extrapolation='DIPPR101_ABC|AntoineAB', exp_poly_fit=(273.17, 647.086, [-2.8478502840358144e-21, 1.7295186670575222e-17, -4.034229148562168e-14, 5.0588958391215855e-11, -3.861625996277003e-08, 1.886271475957639e-05, -0.005928371869421494, 1.1494956887882308, -96.74302379151317]))
# Within range
assert_close(obj.T_dependent_property_transform(300, PROPERTY_TRANSFORM_LN),
log(obj.T_dependent_property(300)))
assert_close(obj.T_dependent_property_transform(300, PROPERTY_TRANSFORM_D_X),
obj.T_dependent_property_derivative(300)/obj.T_dependent_property(300))
assert_close(obj.T_dependent_property_transform(300, PROPERTY_TRANSFORM_D2_X),
obj.T_dependent_property_derivative(300, 2)/obj.T_dependent_property(300))
dln = derivative(lambda T: log(obj(T)), 300, dx=300*1e-6)
assert_close(dln, obj.T_dependent_property_transform(300, PROPERTY_TRANSFORM_DLN))
dln = derivative(lambda T: log(obj(T)), 300, n=2, dx=300*1e-5)
assert_close(dln, obj.T_dependent_property_transform(300, PROPERTY_TRANSFORM_D2LN), rtol=1e-5)
# Extrapolations
for extrapolation in ('interp1d', 'DIPPR101_ABC', 'AntoineAB'):
obj.extrapolation = extrapolation
for T in (100, 1000):
assert_close(obj.T_dependent_property_transform(T, PROPERTY_TRANSFORM_LN),
log(obj.T_dependent_property(T)))
assert_close(obj.T_dependent_property_transform(T, PROPERTY_TRANSFORM_D_X),
obj.T_dependent_property_derivative(T)/obj.T_dependent_property(T))
assert_close(obj.T_dependent_property_transform(T, PROPERTY_TRANSFORM_D2_X),
obj.T_dependent_property_derivative(T, 2)/obj.T_dependent_property(T))
dln = derivative(lambda T: log(obj(T)), T, dx=T*1e-6)
assert_close(dln, obj.T_dependent_property_transform(T, PROPERTY_TRANSFORM_DLN))
dln = derivative(lambda T: log(obj(T)), T, n=2, dx=T*1e-5)
assert_close(dln, obj.T_dependent_property_transform(T, PROPERTY_TRANSFORM_D2LN), rtol=4e-4)
@pytest.mark.meta_T_dept
def test_VaporPressure_WebBook():
obj = VaporPressure(CASRN='7440-57-5')
obj.method = 'ANTOINE_WEBBOOK'
assert_close(obj(3000), 36784.98996094166, rtol=1e-13)
|
|
import json
import yaml
from .conditions import LogicEvaluator
from .dictobj import DictObject
from .language import Translator
class Rule(object):
"""
Base rule
"""
name = None
def should_trigger(self, context):
return True
def perform(self, context):
raise NotImplementedError
def record(self, context, result):
context._executed.append((self.ruleid, result))
@property
def ruleid(self):
return self.name or self.__class__.__name__.rsplit('.', 1)[-1]
class ConditionalRule(Rule):
"""
    ConditionalRule receives two functions as parameters: condition and
    action.
@param condition: lambda context: <some condition returning True or False>
@param action: lambda context: <return a dict to update the context with>
Example:
>>> rule = ConditionalRule(
... condition=lambda context: True,
... action=lambda context: {'result': 5})
"""
def __init__(self, condition=None, action=None):
self._condition = condition
self._action = action
def condition(self, context):
"""
Condition for executing this rule.
Override in subclasses if necessary. Should return boolean value that
determines if rule is used.
"""
return self._condition(self, context)
def action(self, context):
"""
Action for executing this rule.
Override in subclasses if necessary. Should return dictionary with
results that will be added to context.
"""
return self._action(self, context)
def should_trigger(self, context):
return self.condition(context)
def perform(self, context):
result = self.action(context)
context.update(result)
return result
class TableRule(Rule):
"""
A table rule is created from a list of dict objects of the following format:
[
{
'if' : {'logic': '1 | 2', 'conditions': ['foo', {'bar': 10}]},
'then' : ['action1', ...],
'target' : ['target1', ...]
},
...
]
Each rule is only executed if all conditions are met. In actions, use
'context.' to reference variables. Targets and conditions implicitly reference
'context.' (target 'xy' means 'context.xy'). Logic can be omitted, which
would imply "&" operation for all conditions. Condition can be a dictionary or
a single value, so 'value' is equivalent to {'value': True}
The result of the nth 'then' action is stored in the nth 'context.variable'
as defined in target.
"""
def __init__(self, rules, name=None):
self.rules = self._load_data({'rules': rules})
if name:
self.name = name
self._current_ruleid = None
self._evaluators = []
for rule in self.rules:
evaluator = LogicEvaluator(
rule['if'].get('logic'), rule['if']['conditions'])
self._evaluators.append(evaluator)
def perform(self, context):
count = 0
for evaluator, rule in zip(self._evaluators, self.rules):
if evaluator.evaluate(context):
count = count + 1
self._current_ruleid = rule.get('rule') or count
for action, target in zip(rule['then'], rule['target']):
result = \
context[target.replace('context.', '').strip()] = (
eval(action, {'context': context})
if isinstance(action, basestring)
else action)
self.record(context, result)
else:
continue
else:
self._current_ruleid = None
return True
return False
@property
def ruleid(self):
if self._current_ruleid:
return "%s.%s" % (super(TableRule, self).ruleid, self._current_ruleid)
return super(TableRule, self).ruleid
@classmethod
def from_yaml(cls, text):
        return cls._from_data(yaml.safe_load(text))
@classmethod
def from_json(cls, text):
return cls._from_data(json.loads(text))
@classmethod
def _from_data(cls, data):
rules = cls._load_data(data)
return cls(rules, name=data.get('ruleset'))
@staticmethod
def _load_data(data):
"""
        Rule data is preprocessed here, e.g. to convert brief rule
        definitions into the detailed format.
"""
# We have to convert non-string data in clauses back to strings,
# because they will be eval-ed
rules = []
for rule in data['rules']:
obj = {
'rule': rule.get('rule'),
'then': rule['then'],
'target': rule['target']}
if_clause = {}
# Convert conditions to dictionaries, i.e. "foo" becomes {"foo": True}
if isinstance(rule['if'], list):
logic = None
conditions = rule['if']
else:
conditions = rule['if'].get('conditions', [])
# Get logic string. If it's not specified, generate string like
# "1 & 2 & .. & N", where N is number of conditions.
logic = rule['if'].get('logic')
obj['if'] = {'logic': logic, 'conditions': conditions}
rules.append(obj)
return rules
class SequencedRuleset(Rule):
"""
A set of Rules, guaranteed to run in sequence
"""
def __init__(self, rules):
self.rules = rules or []
def should_trigger(self, context):
return True
def perform(self, context):
for rule in self.rules:
if rule.should_trigger(context):
result = rule.perform(context)
rule.record(context, result)
return True
class NaturalLanguageRule(TableRule):
"""
A natural language rule given as a text.
TODO implement this
"""
def __init__(self, translations):
if translations:
translator = Translator(translations)
for rule in self.rules:
for key in rule.keys():
rule[key] = [translator.replace(item) for item in rule[key]]
translator = Translator(translations)
def should_trigger(self, context):
pass
|
|
#------------------------------------------------------------------------------
#
# GLOBAL SETTINGS
#
# Defines: MongoDB connection settings, SERVER_NAME, and the default API access methods
#
#------------------------------------------------------------------------------
import os
# FIGURE OUT WHERE WE ARE RUNNING... ON HEROKU, OR LOCALLY?
if os.environ.get('PORT'):
# We're hosted on Heroku! Use the MongoHQ Sandbox as our backend
# Set API entry point (for heroku):
# Heroku environmental variables must be set using:
# > heroku config:set key=value
MONGO_HOST = os.getenv('MONGO_HOST')
MONGO_PORT = os.getenv('MONGO_PORT')
MONGO_USERNAME = os.getenv('MONGO_USERNAME')
MONGO_PASSWORD = os.getenv('MONGO_PASSWORD')
MONGO_DBNAME = os.getenv('MONGO_DBNAME')
SERVER_NAME = os.getenv('SERVER_NAME')
else:
# Run locally, using a different port than the local gateway app
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DBNAME = 'evepod'
SERVER_NAME = '0.0.0.0:3000'
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST']
# Enable reads (GET), edits (PATCH) and deletes of individual items
# (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH']
# Set the public methods for the read-only API.
# Only authorized users can write, edit and delete
# PUBLIC_METHODS = ['GET']
# PUBLIC_ITEM_METHODS = ['GET']
#------------------------------------------------------------------------------
#
# RESOURCE SCHEMAS
#
# Defines: data_schema, user_schema, pod_schema, sensor_schema
#
#------------------------------------------------------------------------------
data_schema = {
# Schema definition, based on Cerberus grammar. Check the Cerberus project
# (https://github.com/nicolaiarocci/cerberus) for details.
# Note: using short variable names to save space in MongoDB.
't':{'type':'datetime','required':True}, # datetime
'v':{'type':'float','required':True}, # value
'p':{'type':'string','required':True}, # pod
's':{'type':'string','required':True}, # sensor id (SID)
'pod':{
'type':'objectid',
'data_relation': {
'resource' :'pods',
'field': '_id',
'embeddable':True,
},
},
'sensor':{
'type':'objectid',
'data_relation': {
'resource': 'sensors',
'field': '_id',
'embeddable': True
},
}
}
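# Illustrative (hypothetical) document matching data_schema:
#   {'t': <datetime>, 'v': 23.5, 'p': 'pod-url-id', 's': '12',
#    'pod': '<pod ObjectId>', 'sensor': '<sensor ObjectId>'}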
user_schema = {
# Schema definition, based on Cerberus grammar. Check the Cerberus project
    # (https://github.com/nicolaiarocci/cerberus) for details.
# Only keys are stored on evepod. All user information is stored on stormpath
'keys': {'type': 'list','items':[{'type':'string'}]},
}
pod_schema = {
# Schema definition, based on Cerberus grammar. Check the Cerberus project
# (https://github.com/nicolaiarocci/cerberus) for details.
# Sensor text ID for use in URLs and in API data queries/submissions
'urlid' : { # Pod URL name
'type': 'string',
'minlength': 1,
'maxlength': 20,
'required': True,
},
'pid' : { # Pod ID (usually phone number)
'type':'string',
'minlength':10,
'maxlength':15,
'required':True,
},
    'imei':{ # IMEI address of cellular radio, acts as Serial Number
'type':'string', # Need to define a MAC address type
'unique':True,
'required':True,
'minlength':15,
'maxlength':20,
},
'firmware':{
'type':'integer',
'minlength':1,
'maxlength':1,
},
'status': {
'type': 'string',
'allowed': ['dead','deployed','provisioned','active','unknown'],
'required':True,
},
'last': {
'type':'datetime',
},
'owner': {
'type':'string',
},
'public': {
'type':'boolean',
'required': True,
'default': True
},
'voltage':{
'type':'number',
'required':True,
'default':0
}
}
sensor_schema = {
# Schema definition, based on Cerberus grammar. Check the Cerberus project
# (https://github.com/nicolaiarocci/cerberus) for details.
# Sensor text ID for use in URLs and in API data queries/submissions
'urlid' : {
'type': 'string',
'minlength': 1,
'maxlength': 16,
'required': True,
},
# Unique sensor ID. SID will be referenced in the PUD but should NOT be used elsewhere
'sid' : {
'type': 'integer',
'minlength': 1,
'maxlength': 3,
'required': True,
'unique': True,
},
# Number of bytes required for each piece of sensor data
'nbytes' : {
'type':'integer',
'required':True,
},
    # Format of data values, based on the struct library http://docs.python.org/2/library/struct.html
'fmt' : {
'type':'string',
'required':True,
'minlength':1,
'maxlength':1,
'allowed': ['x','c','b','B','?','h','H','i','I','l','L','q','Q','f','d','s','p','P'],
},
    # Byte order of data values, based on the struct library http://docs.python.org/2/library/struct.html
'byteorder' : {
'type':'string',
'required':False,
'minlength':1,
'maxlength':1,
'allowed': ['@','=','<','>','!'],
'default':'<',
},
# Sensor info: A text string that provides summary info for each sensor
'info' : {
'type':'string',
'required':False,
'minlength':1,
'maxlength':256,
'default':'no additional information is available for this sensor',
},
# Magnitude: A multiplier for sensor values
'magnitude' : {
'type':'float',
'required':False,
'maxlength':100,
'default':1.0,
},
# Units: A text string that identifies the units for sensor values
'units' : {
'type':'string',
'required':False,
'maxlength':100,
},
}
#------------------------------------------------------------------------------
#
# RESOURCE DEFINITIONS
#
# Defines: pods, data, users, sensors
#
#------------------------------------------------------------------------------
pods = {
# 'title' tag used in item links. Defaults to the resource title minus
# the final, plural 's' (works fine in most cases but not for 'people')
# 'item_title': 'p',
# by default the standard item entry point is defined as
# '/<item_title>/<ObjectId>/'. We leave it untouched, and we also enable an
# additional read-only entry point. This way consumers can also perform
# GET requests at '/<item_title>/<urlname>/'.
'additional_lookup': {
'url' : 'regex("[\w]+")',
'field': 'urlid'
},
# 'datasource': {
# 'projection': { 'owner': 0,
# 'firmware': 0,
# },
# },
# We choose to override global cache-control directives for this resource.
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
# most global settings can be overridden at resource level
'resource_methods': ['GET', 'POST'],
'item_methods': ['GET','PATCH'],
# Public read-only access:
# 'public_methods': ['GET'],
# 'public_item_methods': ['GET'],
'schema': pod_schema
}
data = {
# most global settings can be overridden at resource level
'resource_methods': ['GET', 'POST'],
'schema': data_schema
}
users = {
# 'title' tag used in item links. Defaults to the resource title minus
# the final, plural 's' (works fine in most cases but not for 'people')
# 'item_title': 'f',
# by default the standard item entry point is defined as
# '/<item_title>/<ObjectId>/'. We leave it untouched, and we also enable an
# additional read-only entry point. This way consumers can also perform
# GET requests at '/<item_title>/<username>/'.
# We choose to override global cache-control directives for this resource.
'cache_control': '',
'cache_expires': 0,
# Resource security:
# No public methods on users
# #'public_methods': [],
# 'public_item_methods': [],
# Only allow superusers and admin
# 'allowed_roles': ['superuser', 'admin'],
# most global settings can be overridden at resource level
'resource_methods': ['GET', 'POST', 'DELETE'],
'schema': user_schema
}
sensors = {
# 'title' tag used in item links. Defaults to the resource title minus
# the final, plural 's' (works fine in most cases but not for 'people')
# 'item_title': 'f',
# by default the standard item entry point is defined as
# '/<item_title>/<ObjectId>/'. We leave it untouched, and we also enable an
# additional read-only entry point. This way consumers can also perform
# GET requests at '/<item_title>/<lastname>/'.
'additional_lookup': {
'url' : 'regex("[\w]+")',
'field': 'urlid'
},
# We choose to override global cache-control directives for this resource.
'cache_control': 'max-age=10,must-revalidate',
'cache_expires': 10,
# Public read-only access:
# 'public_methods': ['GET'],
# 'public_item_methods': ['GET'],
# most global settings can be overridden at resource level
'resource_methods': ['GET', 'POST'],
'schema': sensor_schema
}
#------------------------------------------------------------------------------
#
# DOMAINS
#
# Uses: pods, users, sensors, data
#
#------------------------------------------------------------------------------
DOMAIN = {
'pods': pods,
'users':users,
'sensors':sensors,
'data':data,
}
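# This module is read by Eve as its settings file. A minimal launcher sketch
# (not part of this settings module, shown for context only):
#
#   from eve import Eve
#   app = Eve()                 # loads settings.py from the working directory
#   app.run(host='0.0.0.0', port=3000)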
|
|
import logging
from collections import Counter
from django.contrib.auth.models import User
from django.db import connection, transaction
from django.template.loader import render_to_string
from django.utils import timezone
from pontoon.administration.vcs import CommitToRepositoryException
from pontoon.base.models import (
ChangedEntityLocale,
Entity,
Resource,
update_stats
)
from pontoon.sync.changeset import ChangeSet
from pontoon.sync.vcs_models import VCSProject
log = logging.getLogger(__name__)
@transaction.atomic
def sync_project(db_project, no_pull=False, no_commit=False):
"""
Update the database with the current state of resources in version
control and write any submitted translations from the database back
to version control.
"""
# Mark "now" at the start of sync to avoid messing with
# translations submitted during sync.
now = timezone.now()
# Pull changes from VCS and update what we know about the files.
if not no_pull:
repos_changed = pull_changes(db_project)
else:
repos_changed = True # Assume changed.
# If the repos haven't changed since the last sync and there are
# no Pontoon-side changes for this project, quit early.
if not repos_changed and not db_project.needs_sync:
log.info('Skipping project {0}, no changes detected.'.format(db_project.slug))
return
vcs_project = VCSProject(db_project)
update_resources(db_project, vcs_project)
# Collect all entities across VCS and the database and get their
# keys so we can match up matching entities.
vcs_entities = get_vcs_entities(vcs_project)
db_entities = get_db_entities(db_project)
entity_keys = set().union(db_entities.keys(), vcs_entities.keys())
changeset = ChangeSet(db_project, vcs_project, now)
for key in entity_keys:
db_entity = db_entities.get(key, None)
vcs_entity = vcs_entities.get(key, None)
handle_entity(changeset, db_project, key, db_entity, vcs_entity)
# Apply the changeset to the files, commit them, and update stats
# entries in the DB.
changeset.execute()
if not no_commit:
commit_changes(db_project, vcs_project, changeset)
update_project_stats(db_project, vcs_project, changeset)
# Clear out the "has_changed" markers now that we've finished
# syncing.
(ChangedEntityLocale.objects
.filter(entity__resource__project=db_project, when__lte=now)
.delete())
db_project.has_changed = False
db_project.save()
# Clean up any duplicate approvals at the end of sync right
# before we commit the transaction to avoid race conditions.
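    # The query below un-approves every translation that is not the most
    # recently approved one for its (entity, locale, plural form) group.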
with connection.cursor() as cursor:
cursor.execute("""
UPDATE base_translation AS b
SET approved = FALSE, approved_date = NULL
WHERE approved_date !=
(SELECT max(approved_date)
FROM base_translation
WHERE entity_id = b.entity_id
AND locale_id = b.locale_id
AND (plural_form = b.plural_form OR plural_form IS NULL));
""")
log.info(u'Synced project {0}'.format(db_project.slug))
def handle_entity(changeset, db_project, key, db_entity, vcs_entity):
"""
Determine what needs to be synced between the database and VCS versions
of a single entity and log what needs to be changed in the changeset.
"""
if vcs_entity is None:
if db_entity is None:
# This should never happen. What? Hard abort.
raise ValueError('No entities found for key {0}'.format(key))
else:
# VCS no longer has the entity, remove it from Pontoon.
changeset.obsolete_db_entity(db_entity)
elif db_entity is None:
# New VCS entities are added to Pontoon.
changeset.create_db_entity(vcs_entity)
else:
for locale in db_project.locales.all():
if not vcs_entity.has_translation_for(locale.code):
# VCS lacks an entity for this locale, so we can't
# pull updates nor edit it. Skip it!
continue
if db_entity.has_changed(locale):
# Pontoon changes overwrite whatever VCS has.
changeset.update_vcs_entity(locale.code, db_entity, vcs_entity)
else:
# If Pontoon has nothing or has not changed, and the VCS
# still has the entity, update Pontoon with whatever may
# have changed.
changeset.update_db_entity(locale.code, db_entity, vcs_entity)
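# Illustrative sketch, not part of the original module: the decision table of
# handle_entity, shown with minimal stand-ins that only record which changeset
# method would be called. Everything below is hypothetical and never executed.
def _example_handle_entity_decisions():
    calls = []

    class StubChangeSet(object):
        def obsolete_db_entity(self, db_entity):
            calls.append('obsolete_db_entity')

        def create_db_entity(self, vcs_entity):
            calls.append('create_db_entity')

    # Entity only in the database -> obsoleted; only in VCS -> created.
    # db_project is not consulted in either of these branches, so None is enough.
    handle_entity(StubChangeSet(), None, 'path:key', object(), None)
    handle_entity(StubChangeSet(), None, 'path:key', None, object())
    return calls  # ['obsolete_db_entity', 'create_db_entity']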
def update_resources(db_project, vcs_project):
"""Update the database on what resource files exist in VCS."""
relative_paths = vcs_project.resources.keys()
db_project.resources.exclude(path__in=relative_paths).delete()
for relative_path, vcs_resource in vcs_project.resources.items():
resource, created = db_project.resources.get_or_create(path=relative_path)
resource.format = Resource.get_path_format(relative_path)
resource.entity_count = len(vcs_resource.entities)
resource.save()
def update_project_stats(db_project, vcs_project, changeset):
"""Update the Stats entries in the database."""
for resource in db_project.resources.all():
for locale in db_project.locales.all():
# We only want to create/update the stats object if the resource
# exists in the current locale, UNLESS the file is asymmetric.
vcs_resource = vcs_project.resources[resource.path]
resource_exists = vcs_resource.files.get(locale) is not None
if resource_exists or resource.is_asymmetric:
update_stats(resource, locale)
def get_vcs_entities(vcs_project):
return {entity_key(entity): entity for entity in vcs_project.entities}
def get_db_entities(db_project):
entities = (Entity.objects
.select_related('resource')
.prefetch_related('changed_locales')
.filter(resource__project=db_project, obsolete=False))
return {entity_key(entity): entity for entity in entities}
def entity_key(entity):
"""
Generate a key for the given entity that is unique within the
project.
"""
key = entity.key or entity.string
return ':'.join([entity.resource.path, key])
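# Illustrative sketch, not part of the original module: entity_key joins the
# resource path and the entity key (falling back to the string) to build a
# project-unique key. The stand-in objects below are hypothetical.
def _example_entity_key():
    class Stub(object):
        pass

    resource = Stub()
    resource.path = 'locale/app.po'
    entity = Stub()
    entity.resource = resource
    entity.key = ''
    entity.string = 'Hello, world!'
    # With an empty key the string is used: 'locale/app.po:Hello, world!'
    return entity_key(entity)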
def pull_changes(db_project):
"""
Update the local files with changes from the VCS. Returns
whether any of the updated repos have changed since the last
sync, based on the revision numbers.
"""
changed = False
for repo in db_project.repositories.all():
repo_revisions = repo.pull()
# If any revision is None, we can't be sure if a change
# happened or not, so we default to assuming it did.
unsure_change = None in repo_revisions.values()
if unsure_change or repo_revisions != repo.last_synced_revisions:
changed = True
repo.last_synced_revisions = repo_revisions
repo.save()
return changed
def commit_changes(db_project, vcs_project, changeset):
"""Commit the changes we've made back to the VCS."""
for locale in db_project.locales.all():
authors = changeset.commit_authors_per_locale.get(locale.code, [])
# Use the top translator for this batch as commit author, or
# the fake Pontoon user if there are no authors.
if len(authors) > 0:
commit_author = Counter(authors).most_common(1)[0][0]
else:
commit_author = User(first_name="Pontoon", email="[email protected]")
commit_message = render_to_string('commit_message.jinja', {
'locale': locale,
'project': db_project,
'authors': authors
})
locale_path = vcs_project.locale_directory_path(locale.code)
try:
repo = db_project.repository_for_path(locale_path)
result = repo.commit(commit_message, commit_author, locale_path)
except (CommitToRepositoryException, ValueError) as err:
result = {'message': unicode(err)}
if result is not None:
msg = (u'Committing project {project.name} for {locale.name} '
u'({locale.code}) failed: {reason}')
log.info(msg.format(
project=db_project,
locale=locale,
reason=result['message']
))
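# Illustrative sketch, not part of the original module: how sync_project might
# be driven for every project, e.g. from a management command. The Project
# import and the error handling below are assumptions, not Pontoon's actual
# entry point.
def _example_sync_all_projects(no_pull=False, no_commit=False):
    from pontoon.base.models import Project  # assumed to live alongside Entity/Resource
    for project in Project.objects.all():
        try:
            sync_project(project, no_pull=no_pull, no_commit=no_commit)
        except Exception:
            log.exception(u'Sync failed for project {0}'.format(project.slug))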
|
|
import json
import logging
import os
import shutil
import sys
import time
import urllib2
import warnings
# Dropping a table inexplicably produces a warning despite
# the 'IF EXISTS' clause. Squelch these warnings.
warnings.simplefilter('ignore')
import MySQLdb
import environment
import utils
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
tablet_cell_map = {
62344: 'nj',
62044: 'nj',
41983: 'nj',
31981: 'ny',
}
def get_backup_storage_flags():
return ['-backup_storage_implementation', 'file',
'-file_backup_storage_root',
os.path.join(environment.tmproot, 'backupstorage')]
def get_all_extra_my_cnf(extra_my_cnf):
all_extra_my_cnf = [environment.vttop + '/config/mycnf/default-fast.cnf']
flavor_my_cnf = mysql_flavor().extra_my_cnf()
if flavor_my_cnf:
all_extra_my_cnf.append(flavor_my_cnf)
if extra_my_cnf:
all_extra_my_cnf.append(extra_my_cnf)
return all_extra_my_cnf
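# Illustrative sketch, not part of the original file: mysqlctl/mysqlctld pass
# the collected cnf paths to the child process via EXTRA_MY_CNF, joined with
# ':'. The file name below is made up.
def _example_extra_my_cnf_env():
    cnf_files = get_all_extra_my_cnf('/tmp/test-extra.cnf')  # hypothetical extra cnf
    return {'EXTRA_MY_CNF': ':'.join(cnf_files)}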
class Tablet(object):
"""This class helps manage a vttablet instance.
To use it for vttablet, you need to use init_tablet and/or
start_vttablet.
"""
default_uid = 62344
seq = 0
tablets_running = 0
default_db_config = {
'app': {
'uname': 'vt_app',
'charset': 'utf8'
},
'dba': {
'uname': 'vt_dba',
'charset': 'utf8'
},
'filtered': {
'uname': 'vt_filtered',
'charset': 'utf8'
},
'repl': {
'uname': 'vt_repl',
'charset': 'utf8'
}
}
# this will eventually be coming from the proto3
tablet_type_value = {
'UNKNOWN': 0,
'MASTER': 1,
'REPLICA': 2,
'RDONLY': 3,
'BATCH': 3,
'SPARE': 4,
'EXPERIMENTAL': 5,
'BACKUP': 6,
'RESTORE': 7,
'WORKER': 8,
}
def __init__(self, tablet_uid=None, port=None, mysql_port=None, cell=None,
use_mysqlctld=False):
self.tablet_uid = tablet_uid or (Tablet.default_uid + Tablet.seq)
self.port = port or (environment.reserve_ports(1))
self.mysql_port = mysql_port or (environment.reserve_ports(1))
self.grpc_port = environment.reserve_ports(1)
self.use_mysqlctld = use_mysqlctld
Tablet.seq += 1
if cell:
self.cell = cell
else:
self.cell = tablet_cell_map.get(tablet_uid, 'nj')
self.proc = None
# filled in during init_tablet
self.keyspace = None
self.shard = None
# utility variables
self.tablet_alias = 'test_%s-%010d' % (self.cell, self.tablet_uid)
self.zk_tablet_path = (
'/zk/test_%s/vt/tablets/%010d' % (self.cell, self.tablet_uid))
def update_stream_python_endpoint(self):
protocol = protocols_flavor().binlog_player_python_protocol()
port = self.port
if protocol == 'gorpc':
from vtdb import gorpc_update_stream
elif protocol == 'grpc':
# import the grpc update stream client implementation, change the port
from vtdb import grpc_update_stream
port = self.grpc_port
return (protocol, 'localhost:%d' % port)
def mysqlctl(self, cmd, extra_my_cnf=None, with_ports=False, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctl') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid)]
if self.use_mysqlctld:
args.extend(
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if with_ports:
args.extend(['-port', str(self.port),
'-mysql_port', str(self.mysql_port)])
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def mysqlctld(self, cmd, extra_my_cnf=None, verbose=False):
extra_env = {}
all_extra_my_cnf = get_all_extra_my_cnf(extra_my_cnf)
if all_extra_my_cnf:
extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf)
args = environment.binary_args('mysqlctld') + [
'-log_dir', environment.vtlogroot,
'-tablet_uid', str(self.tablet_uid),
'-mysql_port', str(self.mysql_port),
'-socket_file', os.path.join(self.tablet_dir, 'mysqlctl.sock')]
self._add_dbconfigs(args)
if verbose:
args.append('-alsologtostderr')
args.extend(cmd)
return utils.run_bg(args, extra_env=extra_env)
def init_mysql(self, extra_my_cnf=None):
if self.use_mysqlctld:
return self.mysqlctld(
['-bootstrap_archive', mysql_flavor().bootstrap_archive()],
extra_my_cnf=extra_my_cnf)
else:
return self.mysqlctl(
['init', '-bootstrap_archive', mysql_flavor().bootstrap_archive()],
extra_my_cnf=extra_my_cnf, with_ports=True)
def start_mysql(self):
return self.mysqlctl(['start'], with_ports=True)
def shutdown_mysql(self):
return self.mysqlctl(['shutdown'], with_ports=True)
def teardown_mysql(self):
if utils.options.keep_logs:
return self.shutdown_mysql()
return self.mysqlctl(['teardown', '-force'])
def remove_tree(self):
if utils.options.keep_logs:
return
try:
shutil.rmtree(self.tablet_dir)
except OSError as e:
if utils.options.verbose == 2:
print >> sys.stderr, e, self.tablet_dir
def mysql_connection_parameters(self, dbname, user='vt_dba'):
return dict(user=user,
unix_socket=self.tablet_dir + '/mysql.sock',
db=dbname)
def connect(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, conn.cursor()
def connect_dict(self, dbname='', user='vt_dba', **params):
params.update(self.mysql_connection_parameters(dbname, user))
conn = MySQLdb.Connect(**params)
return conn, MySQLdb.cursors.DictCursor(conn)
# Query the MySQL instance directly
def mquery(
self, dbname, query, write=False, user='vt_dba', conn_params=None):
if conn_params is None:
conn_params = {}
conn, cursor = self.connect(dbname, user=user, **conn_params)
if write:
conn.begin()
if isinstance(query, basestring):
query = [query]
for q in query:
# logging.debug('mysql(%s,%s): %s', self.tablet_uid, dbname, q)
cursor.execute(q)
if write:
conn.commit()
try:
return cursor.fetchall()
finally:
conn.close()
def assert_table_count(self, dbname, table, n, where=''):
result = self.mquery(dbname, 'select count(*) from ' + table + ' ' + where)
if result[0][0] != n:
raise utils.TestError('expected %d rows in %s' % (n, table), result)
def reset_replication(self):
self.mquery('', mysql_flavor().reset_replication_commands())
def populate(self, dbname, create_sql, insert_sqls=()):
self.create_db(dbname)
if isinstance(create_sql, basestring):
create_sql = [create_sql]
for q in create_sql:
self.mquery(dbname, q)
for q in insert_sqls:
self.mquery(dbname, q, write=True)
def has_db(self, name):
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname == name:
return True
return False
def drop_db(self, name):
self.mquery('', 'drop database if exists %s' % name)
while self.has_db(name):
logging.debug('%s sleeping while waiting for database drop: %s',
self.tablet_alias, name)
time.sleep(0.3)
self.mquery('', 'drop database if exists %s' % name)
def create_db(self, name):
self.drop_db(name)
self.mquery('', 'create database %s' % name)
def clean_dbs(self):
logging.debug('mysql(%s): removing all databases', self.tablet_uid)
rows = self.mquery('', 'show databases')
for row in rows:
dbname = row[0]
if dbname in ['information_schema', 'mysql']:
continue
self.drop_db(dbname)
def wait_check_db_var(self, name, value):
for _ in range(3):
try:
return self.check_db_var(name, value)
except utils.TestError as e:
print >> sys.stderr, 'WARNING: ', e
time.sleep(1.0)
raise e
def check_db_var(self, name, value):
row = self.get_db_var(name)
if row != (name, value):
raise utils.TestError('variable not set correctly', name, row)
def get_db_var(self, name):
conn, cursor = self.connect()
try:
cursor.execute("show variables like '%s'" % name)
return cursor.fetchone()
finally:
conn.close()
def update_addrs(self):
args = [
'UpdateTabletAddrs',
'-hostname', 'localhost',
'-ip-addr', '127.0.0.1',
'-mysql-port', '%d' % self.mysql_port,
'-vt-port', '%d' % self.port,
self.tablet_alias
]
return utils.run_vtctl(args)
def init_tablet(self, tablet_type, keyspace, shard,
start=False, dbname=None, parent=True, wait_for_start=True,
include_mysql_port=True, **kwargs):
self.tablet_type = tablet_type
self.keyspace = keyspace
self.shard = shard
self.dbname = dbname or ('vt_' + self.keyspace)
args = ['InitTablet',
'-hostname', 'localhost',
'-port', str(self.port)]
if include_mysql_port:
args.extend(['-mysql_port', str(self.mysql_port)])
if parent:
args.append('-parent')
if dbname:
args.extend(['-db_name_override', dbname])
if keyspace:
args.extend(['-keyspace', keyspace])
if shard:
args.extend(['-shard', shard])
args.extend([self.tablet_alias, tablet_type])
utils.run_vtctl(args)
if start:
if not wait_for_start:
expected_state = None
elif (tablet_type == 'master' or tablet_type == 'replica' or
tablet_type == 'rdonly' or tablet_type == 'batch'):
expected_state = 'SERVING'
else:
expected_state = 'NOT_SERVING'
self.start_vttablet(wait_for_state=expected_state, **kwargs)
@property
def tablet_dir(self):
return '%s/vt_%010d' % (environment.vtdataroot, self.tablet_uid)
def grpc_enabled(self):
return (
protocols_flavor().tabletconn_protocol() == 'grpc' or
protocols_flavor().tablet_manager_protocol() == 'grpc' or
protocols_flavor().binlog_player_protocol() == 'grpc')
def flush(self):
utils.curl('http://localhost:%s%s' %
(self.port, environment.flush_logs_url),
stderr=utils.devnull, stdout=utils.devnull)
def start_vttablet(
self, port=None, memcache=False,
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
schema_override=None,
repl_extra_flags=None, table_acl_config=None,
lameduck_period=None, security_policy=None,
target_tablet_type=None, full_mycnf_args=False,
extra_args=None, extra_env=None, include_mysql_port=True,
init_tablet_type=None, init_keyspace=None,
init_shard=None, init_db_name_override=None,
supports_backups=False):
"""Starts a vttablet process, and returns it.
The process is also saved in self.proc, so it's easy to kill as well.
"""
args = environment.binary_args('vttablet')
# Use 'localhost' as hostname because Travis CI worker hostnames
# are too long for MySQL replication.
args.extend(['-tablet_hostname', 'localhost'])
args.extend(['-tablet-path', self.tablet_alias])
args.extend(environment.topo_server().flags())
args.extend(['-binlog_player_protocol',
protocols_flavor().binlog_player_protocol()])
args.extend(['-tablet_manager_protocol',
protocols_flavor().tablet_manager_protocol()])
args.extend(['-pid_file', os.path.join(self.tablet_dir, 'vttablet.pid')])
if self.use_mysqlctld:
args.extend(
['-mysqlctl_socket', os.path.join(self.tablet_dir, 'mysqlctl.sock')])
if full_mycnf_args:
# this flag is used to specify all the mycnf_ flags, to make sure
# that code path works and can still fork actions.
relay_log_path = os.path.join(self.tablet_dir, 'relay-logs',
'vt-%010d-relay-bin' % self.tablet_uid)
args.extend([
'-mycnf_server_id', str(self.tablet_uid),
'-mycnf_data_dir', os.path.join(self.tablet_dir, 'data'),
'-mycnf_innodb_data_home_dir', os.path.join(self.tablet_dir,
'innodb', 'data'),
'-mycnf_innodb_log_group_home_dir', os.path.join(self.tablet_dir,
'innodb', 'logs'),
'-mycnf_socket_file', os.path.join(self.tablet_dir, 'mysql.sock'),
'-mycnf_error_log_path', os.path.join(self.tablet_dir, 'error.log'),
'-mycnf_slow_log_path', os.path.join(self.tablet_dir,
'slow-query.log'),
'-mycnf_relay_log_path', relay_log_path,
'-mycnf_relay_log_index_path', relay_log_path + '.index',
'-mycnf_relay_log_info_path', os.path.join(self.tablet_dir,
'relay-logs',
'relay-log.info'),
'-mycnf_bin_log_path', os.path.join(
self.tablet_dir, 'bin-logs', 'vt-%010d-bin' % self.tablet_uid),
'-mycnf_master_info_file', os.path.join(self.tablet_dir,
'master.info'),
'-mycnf_pid_file', os.path.join(self.tablet_dir, 'mysql.pid'),
'-mycnf_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
'-mycnf_slave_load_tmp_dir', os.path.join(self.tablet_dir, 'tmp'),
])
if include_mysql_port:
args.extend(['-mycnf_mysql_port', str(self.mysql_port)])
if target_tablet_type:
self.tablet_type = target_tablet_type
args.extend(['-target_tablet_type', target_tablet_type,
'-health_check_interval', '2s',
'-enable_replication_lag_check',
'-degraded_threshold', '5s'])
# this is used to run InitTablet as part of the vttablet startup
if init_tablet_type:
self.tablet_type = init_tablet_type
args.extend(['-init_tablet_type', init_tablet_type])
if init_keyspace:
self.keyspace = init_keyspace
self.shard = init_shard
args.extend(['-init_keyspace', init_keyspace,
'-init_shard', init_shard])
if init_db_name_override:
self.dbname = init_db_name_override
args.extend(['-init_db_name_override', init_db_name_override])
else:
self.dbname = 'vt_' + init_keyspace
if supports_backups:
args.extend(['-restore_from_backup'] + get_backup_storage_flags())
if extra_args:
args.extend(extra_args)
args.extend(['-port', '%s' % (port or self.port),
'-log_dir', environment.vtlogroot])
self._add_dbconfigs(args, repl_extra_flags)
if memcache:
args.extend(['-rowcache-bin', environment.memcached_bin()])
memcache_socket = os.path.join(self.tablet_dir, 'memcache.sock')
args.extend(['-rowcache-socket', memcache_socket])
args.extend(['-enable-rowcache'])
if filecustomrules:
args.extend(['-filecustomrules', filecustomrules])
if zkcustomrules:
args.extend(['-zkcustomrules', zkcustomrules])
if schema_override:
args.extend(['-schema-override', schema_override])
if table_acl_config:
args.extend(['-table-acl-config', table_acl_config])
args.extend(['-queryserver-config-strict-table-acl'])
if protocols_flavor().service_map():
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
if self.grpc_enabled():
args.extend(['-grpc_port', str(self.grpc_port)])
if lameduck_period:
args.extend(['-lameduck-period', lameduck_period])
if security_policy:
args.extend(['-security_policy', security_policy])
if extra_args:
args.extend(extra_args)
args.extend(['-enable-autocommit'])
stderr_fd = open(
os.path.join(environment.vtlogroot, 'vttablet-%d.stderr' %
self.tablet_uid), 'w')
# increment count only the first time
if not self.proc:
Tablet.tablets_running += 1
self.proc = utils.run_bg(args, stderr=stderr_fd, extra_env=extra_env)
log_message = (
'Started vttablet: %s (%s) with pid: %s - Log files: '
'%s/vttablet.*.{INFO,WARNING,ERROR,FATAL}.*.%s' %
(self.tablet_uid, self.tablet_alias, self.proc.pid,
environment.vtlogroot, self.proc.pid))
# This may race with the stderr output from the process (though
# that's usually empty).
stderr_fd.write(log_message + '\n')
stderr_fd.close()
logging.debug(log_message)
# wait for query service to be in the right state
if wait_for_state:
self.wait_for_vttablet_state(wait_for_state, port=port)
return self.proc
def wait_for_vttablet_state(self, expected, timeout=60.0, port=None):
while True:
v = utils.get_vars(port or self.port)
last_seen_state = '?'
if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for state %s' % expected)
logging.debug(
' vttablet %s not answering at /debug/vars, waiting...',
self.tablet_alias)
else:
if 'TabletStateName' not in v:
logging.debug(
' vttablet %s not exporting TabletStateName, waiting...',
self.tablet_alias)
else:
s = v['TabletStateName']
last_seen_state = s
if s != expected:
logging.debug(
' vttablet %s in state %s != %s', self.tablet_alias, s,
expected)
else:
break
timeout = utils.wait_step(
'waiting for state %s (last seen state: %s)' %
(expected, last_seen_state),
timeout, sleep_time=0.1)
def wait_for_mysqlctl_socket(self, timeout=30.0):
mysql_sock = os.path.join(self.tablet_dir, 'mysql.sock')
mysqlctl_sock = os.path.join(self.tablet_dir, 'mysqlctl.sock')
while True:
if os.path.exists(mysql_sock) and os.path.exists(mysqlctl_sock):
return
timeout = utils.wait_step(
'waiting for mysql and mysqlctl socket files: %s %s' %
(mysql_sock, mysqlctl_sock), timeout)
def _add_dbconfigs(self, args, repl_extra_flags=None):
if repl_extra_flags is None:
repl_extra_flags = {}
config = dict(self.default_db_config)
if self.keyspace:
config['app']['dbname'] = self.dbname
config['repl']['dbname'] = self.dbname
config['repl'].update(repl_extra_flags)
for key1 in config:
for key2 in config[key1]:
args.extend(['-db-config-' + key1 + '-' + key2, config[key1][key2]])
def get_status(self):
return utils.get_status(self.port)
def get_healthz(self):
return urllib2.urlopen('http://localhost:%d/healthz' % self.port).read()
def kill_vttablet(self, wait=True):
logging.debug('killing vttablet: %s, wait: %s', self.tablet_alias,
str(wait))
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.terminate()
if wait:
self.proc.wait()
self.proc = None
def hard_kill_vttablet(self):
logging.debug('hard killing vttablet: %s', self.tablet_alias)
if self.proc is not None:
Tablet.tablets_running -= 1
if self.proc.poll() is None:
self.proc.kill()
self.proc.wait()
self.proc = None
def wait_for_binlog_server_state(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for binlog state %s' %
expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'UpdateStreamState' not in v:
logging.debug(
' vttablet not exporting UpdateStreamState, waiting...')
else:
s = v['UpdateStreamState']
if s != expected:
logging.debug(" vttablet's binlog server in state %s != %s", s,
expected)
else:
break
timeout = utils.wait_step(
'waiting for binlog server state %s' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog service is in state %s',
self.tablet_alias, expected)
def wait_for_binlog_player_count(self, expected, timeout=30.0):
while True:
v = utils.get_vars(self.port)
if v is None:
if self.proc.poll() is not None:
raise utils.TestError(
'vttablet died while test waiting for binlog count %s' %
expected)
logging.debug(' vttablet not answering at /debug/vars, waiting...')
else:
if 'BinlogPlayerMapSize' not in v:
logging.debug(
' vttablet not exporting BinlogPlayerMapSize, waiting...')
else:
s = v['BinlogPlayerMapSize']
if s != expected:
logging.debug(" vttablet's binlog player map has count %d != %d",
s, expected)
else:
break
timeout = utils.wait_step(
'waiting for binlog player count %d' % expected,
timeout, sleep_time=0.5)
logging.debug('tablet %s binlog player has %d players',
self.tablet_alias, expected)
@classmethod
def check_vttablet_count(klass):
if Tablet.tablets_running > 0:
raise utils.TestError('This test is not killing all its vttablets')
def execute(self, sql, bindvars=None, transaction_id=None, auto_log=True):
"""execute uses 'vtctl VtTabletExecute' to execute a command.
"""
args = [
'VtTabletExecute',
'-keyspace', self.keyspace,
'-shard', self.shard,
]
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
if transaction_id:
args.extend(['-transaction_id', str(transaction_id)])
args.extend([self.tablet_alias, sql])
return utils.run_vtctl_json(args, auto_log=auto_log)
def begin(self, auto_log=True):
"""begin uses 'vtctl VtTabletBegin' to start a transaction.
"""
args = [
'VtTabletBegin',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
]
result = utils.run_vtctl_json(args, auto_log=auto_log)
return result['transaction_id']
def commit(self, transaction_id, auto_log=True):
"""commit uses 'vtctl VtTabletCommit' to commit a transaction.
"""
args = [
'VtTabletCommit',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def rollback(self, transaction_id, auto_log=True):
"""rollback uses 'vtctl VtTabletRollback' to rollback a transaction.
"""
args = [
'VtTabletRollback',
'-keyspace', self.keyspace,
'-shard', self.shard,
self.tablet_alias,
str(transaction_id),
]
return utils.run_vtctl(args, auto_log=auto_log)
def kill_tablets(tablets):
for t in tablets:
logging.debug('killing vttablet: %s', t.tablet_alias)
if t.proc is not None:
Tablet.tablets_running -= 1
t.proc.terminate()
for t in tablets:
if t.proc is not None:
t.proc.wait()
t.proc = None
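# Illustrative sketch, not part of the original module: the usual lifecycle of
# a Tablet in a test. The keyspace and shard names are made up, and the
# function is never called here.
def _example_tablet_lifecycle():
    tablet = Tablet()
    # Bring MySQL up first, then register the tablet and start vttablet.
    tablet.init_mysql().wait()
    tablet.init_tablet('master', 'test_keyspace', '0', start=True)
    # ... test logic would go through tablet.mquery() / tablet.execute() ...
    tablet.kill_vttablet()
    tablet.teardown_mysql().wait()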
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#============================================================================
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import random
import uuid
from xml.sax import saxutils
import zlib
import pprint
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
text = msg or ""
content = pprint.pformat(_db_content)
LOG.debug(_("%(text)s: _db_content => %(content)s"),
{'text': text, 'content': content})
def reset():
for c in _CLASSES:
_db_content[c] = {}
host = create_host('fake')
create_vm('fake',
'Running',
is_a_template=False,
is_control_domain=True,
resident_on=host)
def reset_table(table):
if table not in _CLASSES:
return
_db_content[table] = {}
def _create_pool(name_label):
return _create_object('pool',
{'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
host_ref = _create_object('host',
{'name_label': name_label,
'hostname': hostname,
'address': address})
host_default_sr_ref = _create_local_srs(host_ref)
_create_local_pif(host_ref)
# Create a pool if we don't have one already
if len(_db_content['pool']) == 0:
pool_ref = _create_pool('')
_db_content['pool'][pool_ref]['master'] = host_ref
_db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
_db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
def create_network(name_label, bridge):
return _create_object('network',
{'name_label': name_label,
'bridge': bridge})
def create_vm(name_label, status, **kwargs):
domid = status == 'Running' and random.randrange(1, 1 << 16) or -1
vm_rec = kwargs.copy()
vm_rec.update({'name_label': name_label,
'domid': domid,
'power_state': status})
vm_ref = _create_object('VM', vm_rec)
after_VM_create(vm_ref, vm_rec)
return vm_ref
def destroy_vm(vm_ref):
vm_rec = _db_content['VM'][vm_ref]
vbd_refs = vm_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
vbd_rec = _db_content['VBD'][vbd_ref]
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].remove(vbd_ref)
vdi_ref = vbd_rec['VDI']
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].remove(vbd_ref)
del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
vdi_rec = _db_content['VDI'][vdi_ref]
vbd_refs = vdi_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
vdi_rec = {
'SR': sr_ref,
'read_only': False,
'type': '',
'name_label': name_label,
'name_description': '',
'sharable': False,
'other_config': {},
'location': '',
'xenstore_data': {},
'sm_config': {'vhd-parent': None},
'physical_utilisation': '123',
'managed': True,
}
vdi_rec.update(kwargs)
vdi_ref = _create_object('VDI', vdi_rec)
after_VDI_create(vdi_ref, vdi_rec)
return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': str(userdevice),
'currently_attached': False}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
"""Create read-only fields and backref from VM and VDI to VBD when VBD
is created.
"""
vbd_rec['currently_attached'] = False
vbd_rec['device'] = ''
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].append(vbd_ref)
vm_name_label = _db_content['VM'][vm_ref]['name_label']
vbd_rec['vm_name_label'] = vm_name_label
vdi_ref = vbd_rec['VDI']
if vdi_ref and vdi_ref != "OpaqueRef:NULL":
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].append(vbd_ref)
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('memory_static_max', str(8 * 1024 * 1024 * 1024))
vm_rec.setdefault('memory_dynamic_max', str(8 * 1024 * 1024 * 1024))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('resident_on', '')
def create_pbd(host_ref, sr_ref, attached):
config = {'path': '/var/run/sr-mount/%s' % sr_ref}
return _create_object('PBD',
{'device_config': config,
'host': host_ref,
'SR': sr_ref,
'currently_attached': attached})
def create_task(name_label):
return _create_object('task',
{'name_label': name_label,
'status': 'pending'})
def _create_local_srs(host_ref):
"""Create an SR that looks like the one created on the local disk by
default by the XenServer installer. Also, fake the installation of
an ISO SR.
"""
create_sr(name_label='Local storage ISO',
type='iso',
other_config={'i18n-original-value-name_label':
'Local storage ISO',
'i18n-key': 'local-storage-iso'},
physical_size=80000,
physical_utilisation=40000,
virtual_allocation=80000,
host_ref=host_ref)
return create_sr(name_label='Local storage',
type='ext',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
physical_size=40000,
physical_utilisation=20000,
virtual_allocation=10000,
host_ref=host_ref)
def create_sr(**kwargs):
sr_ref = _create_object(
'SR',
{'name_label': kwargs.get('name_label'),
'type': kwargs.get('type'),
'content_type': kwargs.get('type', 'user'),
'shared': kwargs.get('shared', False),
'physical_size': kwargs.get('physical_size', str(1 << 30)),
'physical_utilisation': str(
kwargs.get('physical_utilisation', 0)),
'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
'other_config': kwargs.get('other_config', {}),
'VDIs': kwargs.get('VDIs', [])})
pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': -1,
'device': 'fake0',
'host_uuid': host_ref,
'network': '',
'management': 'true'})
return pif_ref
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
_db_content[table][ref] = obj
return ref
def _create_sr(table, obj):
sr_type = obj[6]
# The fake only supports the iscsi and nfs SR types
if sr_type != 'iscsi' and sr_type != 'nfs':
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
host_ref = _db_content['host'].keys()[0]
sr_ref = _create_object(table, obj[2])
if sr_type == 'iscsi':
vdi_ref = create_vdi('', sr_ref)
pbd_ref = create_pbd(host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
_db_content['PBD'][pbd_ref]['SR'] = sr_ref
return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
pif_rec = get_record('PIF', pif_ref)
vlan_pif_ref = _create_object('PIF',
{'name-label': 'Fake VLAN PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': vlan_num,
'device': pif_rec['device'],
'host_uuid': pif_rec['host_uuid']})
return _create_object('VLAN',
{'tagged-pif': pif_ref,
'untagged-pif': vlan_pif_ref,
'tag': vlan_num})
def get_all(table):
return _db_content[table].keys()
def get_all_records(table):
return _db_content[table]
def _query_matches(record, query):
# Simple support for the XenServer query language:
# 'field "host"="<uuid>" and field "SR"="<sr uuid>"'
# Tested through existing tests (e.g. calls to find_network_with_bridge)
and_clauses = query.split(" and ")
if len(and_clauses) > 1:
matches = True
for clause in and_clauses:
matches = matches and _query_matches(record, clause)
return matches
or_clauses = query.split(" or ")
if len(or_clauses) > 1:
matches = False
for clause in or_clauses:
matches = matches or _query_matches(record, clause)
return matches
if query[:4] == 'not ':
return not _query_matches(record, query[4:])
# Now it must be a single field - bad queries never match
if query[:5] != 'field':
return False
(field, value) = query[6:].split('=', 1)
# Some fields (e.g. name_label, memory_overhead) have double
# underscores in the DB, but only single underscores when querying
field = field.replace("__", "_").strip(" \"'")
value = value.strip(" \"'")
return record[field] == value
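# Illustrative sketch, not part of the original module: evaluating the mini
# query language against a plain record dict. The refs below are made up.
def _example_query_matches():
    record = {'host': 'OpaqueRef:host1', 'SR': 'OpaqueRef:sr1'}
    query = 'field "host"="OpaqueRef:host1" and field "SR"="OpaqueRef:sr1"'
    return _query_matches(record, query)  # True for this record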
def get_all_records_where(table_name, query):
matching_records = {}
table = _db_content[table_name]
for record in table:
if _query_matches(table[record], query):
matching_records[record] = table[record]
return matching_records
def get_record(table, ref):
if ref in _db_content[table]:
return _db_content[table].get(ref)
else:
raise Failure(['HANDLE_INVALID', table, ref])
def check_for_session_leaks():
if len(_db_content['session']) > 0:
raise exception.NovaException('Sessions have leaked: %s' %
_db_content['session'])
def as_value(s):
"""Helper function for simulating XenAPI plugin responses. It
escapes and wraps the given argument.
"""
return '<value>%s</value>' % saxutils.escape(s)
def as_json(*args, **kwargs):
"""Helper function for simulating XenAPI plugin responses for those
that are returning JSON. If this function is given plain arguments,
then these are rendered as a JSON list. If it's given keyword
arguments then these are rendered as a JSON dict.
"""
arg = args or kwargs
return jsonutils.dumps(arg)
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
return dict([(str(i), self.details[i])
for i in range(len(self.details))])
class SessionBase(object):
"""
Base class for Fake Sessions
"""
def __init__(self, uri):
self._session = None
def pool_get_default_SR(self, _1, pool_ref):
return _db_content['pool'].values()[0]['default-SR']
def VBD_insert(self, _1, vbd_ref, vdi_ref):
vbd_rec = get_record('VBD', vbd_ref)
get_record('VDI', vdi_ref)
vbd_rec['empty'] = False
vbd_rec['VDI'] = vdi_ref
def VBD_plug(self, _1, ref):
rec = get_record('VBD', ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
rec['device'] = rec['userdevice']
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', ref])
rec['currently_attached'] = False
rec['device'] = ''
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
vbd_ref, key])
db_ref['other_config'][key] = value
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
def PBD_create(self, _1, pbd_rec):
pbd_ref = _create_object('PBD', pbd_rec)
_db_content['PBD'][pbd_ref]['currently_attached'] = False
return pbd_ref
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
shared, sm_config):
ref = None
rec = None
for ref, rec in _db_content['SR'].iteritems():
if rec.get('uuid') == sr_uuid:
# make forgotten = 0 and return ref
_db_content['SR'][ref]['forgotten'] = 0
return ref
# SR not found in db, so we create one
params = {'sr_uuid': sr_uuid,
'label': label,
'desc': desc,
'type': type,
'content_type': content_type,
'shared': shared,
'sm_config': sm_config}
sr_ref = _create_object('SR', params)
_db_content['SR'][sr_ref]['uuid'] = sr_uuid
_db_content['SR'][sr_ref]['forgotten'] = 0
vdi_per_lun = False
if type in ('iscsi',):
# Just to be clear
vdi_per_lun = True
if vdi_per_lun:
# we need to create a vdi because this introduce
# is likely meant for a single vdi
vdi_ref = create_vdi('', sr_ref)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
return sr_ref
def SR_forget(self, _1, sr_ref):
_db_content['SR'][sr_ref]['forgotten'] = 1
def SR_scan(self, _1, sr_ref):
return
def VM_get_xenstore_data(self, _1, vm_ref):
return _db_content['VM'][vm_ref].get('xenstore_data', {})
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
pass
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
vdi_ref, key])
db_ref['other_config'][key] = value
def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
db_ref = _db_content['VDI'][vdi_to_copy_ref]
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
other_config = db_ref['other_config'].copy()
return create_vdi(name_label, sr_ref, sharable=sharable,
read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
sr_ref = db_ref['SR']
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
def _plugin_agent_password(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_inject_file(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_agentupdate(self, method, args):
url = args["url"]
md5 = args["md5sum"]
message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
md5=md5)
return as_json(returncode='0', message=message)
def _plugin_noop(self, method, args):
return ''
def _plugin_pickle_noop(self, method, args):
return pickle.dumps(None)
def _plugin_migration_transfer_vhd(self, method, args):
kwargs = pickle.loads(args['params'])['kwargs']
vdi_ref = self.xenapi_request('VDI.get_by_uuid',
(kwargs['vdi_uuid'], ))
assert vdi_ref
return pickle.dumps(None)
_plugin_glance_upload_vhd = _plugin_pickle_noop
_plugin_kernel_copy_vdi = _plugin_noop
_plugin_kernel_create_kernel_ramdisk = _plugin_noop
_plugin_kernel_remove_kernel_ramdisk = _plugin_noop
_plugin_migration_move_vhds_into_sr = _plugin_noop
def _plugin_xenhost_host_data(self, method, args):
return jsonutils.dumps({'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40},
'host_hostname': 'fake-xenhost',
})
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
_plugin_xenhost_host_reboot = _plugin_poweraction
_plugin_xenhost_host_startup = _plugin_poweraction
_plugin_xenhost_host_shutdown = _plugin_poweraction
def _plugin_xenhost_set_host_enabled(self, method, args):
enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
return jsonutils.dumps({"status": enabled})
def _plugin_xenhost_host_uptime(self, method, args):
return jsonutils.dumps({"uptime": "fake uptime"})
def _plugin_console_get_console_log(self, method, args):
dom_id = args["dom_id"]
if dom_id == 0:
raise Failure('Guest does not have a console')
return base64.b64encode(zlib.compress("dom_id: %s" % dom_id))
def host_call_plugin(self, _1, _2, plugin, method, args):
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %
(plugin, method))
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * 1024 * 1024 * 1024
def VDI_resize_online(self, *args):
return 'derp'
VDI_resize = VDI_resize_online
def _VM_reboot(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
if db_ref['power_state'] != 'Running':
raise Failure(['VM_BAD_POWER_STATE',
'fake-opaque-ref', db_ref['power_state'].lower(), 'halted'])
db_ref['power_state'] = 'Running'
def VM_clean_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Halted'
VM_clean_shutdown = VM_hard_shutdown
def VM_suspend(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Suspended'
def VM_pause(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Paused'
def pool_eject(self, session, host_ref):
pass
def pool_join(self, session, hostname, username, password):
pass
def pool_set_name_label(self, session, pool_ref, name):
pass
def host_migrate_receive(self, session, destref, nwref, options):
return "fake_migrate_data"
def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
vdi_map, vif_map, options):
pass
def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
vif_map, options):
pass
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
def _login(self, method, params):
self._session = str(uuid.uuid4())
_session_info = {'uuid': str(uuid.uuid4()),
'this_host': _db_content['host'].keys()[0]}
_db_content['session'][self._session] = _session_info
def _logout(self):
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
"Logging out a session that is invalid or already logged "
"out: %s" % s)
del _db_content['session'][s]
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
elif name.startswith('Async'):
return lambda *params: self._async(name, params)
elif '.' in name:
impl = getattr(self, name.replace('.', '_'))
if impl is not None:
def callit(*params):
LOG.debug(_('Calling %(name)s %(impl)s'),
{'name': name, 'impl': impl})
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
LOG.debug(_('Calling getter %s'), name)
return lambda *params: self._getter(name, params)
elif self._is_gettersetter(name, False):
LOG.debug(_('Calling setter %s'), name)
return lambda *params: self._setter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
elif name == 'XenAPI':
return FakeXenAPI()
else:
return None
def _is_gettersetter(self, name, getter):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
return self._is_method(name, 'create')
def _is_destroy(self, name):
return self._is_method(name, 'destroy')
def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1] == meth)
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
if func == 'get_all_records':
self._check_arg_count(params, 1)
return get_all_records(cls)
if func == 'get_all_records_where':
self._check_arg_count(params, 2)
return get_all_records_where(cls, params[1])
if func == 'get_record':
self._check_arg_count(params, 2)
return get_record(cls, params[1])
if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
_db_content[cls], func[len('get_by_'):], params[1],
return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
if (ref in _db_content[cls]):
if (field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
else:
raise Failure(['HANDLE_INVALID', cls, ref])
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if len(params) == 3:
field = func[len('set_'):]
ref = params[1]
val = params[2]
if (ref in _db_content[cls] and
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
return
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
'is missing that field' % name)
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = (is_sr_create and
_create_sr(cls, params) or
is_vlan_create and
_create_vlan(params[1], params[2], params[3]) or
_create_object(cls, params[1]))
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
if after_hook in globals():
globals()[after_hook](ref, params[1])
obj = get_record(cls, ref)
# Add RO fields
if cls == 'VM':
obj['power_state'] = 'Halted'
return ref
def _destroy(self, name, params):
self._check_session(params)
self._check_arg_count(params, 2)
table = name.split('.')[0]
ref = params[1]
if ref not in _db_content[table]:
raise Failure(['HANDLE_INVALID', table, ref])
# Call destroy function (if exists)
destroy_func = globals().get('destroy_%s' % table.lower())
if destroy_func:
destroy_func(ref)
else:
del _db_content[table][ref]
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
func = name[len('Async.'):]
try:
result = self.xenapi_request(func, params[1:])
if result:
result = as_value(result)
task['result'] = result
task['status'] = 'success'
except Failure as exc:
task['error_info'] = exc.details
task['status'] = 'failed'
task['finished'] = timeutils.utcnow()
return task_ref
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
actual = len(params)
if actual != expected:
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
raise Failure(['UUID_INVALID', v, result, recs, k])
return result
class FakeXenAPI(object):
def __init__(self):
self.Failure = Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
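# Illustrative sketch, not part of the original module: typical use of the fake
# in a test -- reset the in-memory "database", create a VM and read it back.
# The VM name below is made up.
def _example_fake_usage():
    reset()
    vm_ref = create_vm('fake-test-vm', 'Halted')
    vm_rec = get_record('VM', vm_ref)
    return vm_rec['power_state']  # 'Halted'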
|
|
# encoding: utf-8
"""
validation.py
Created by Thomas Mangin on 2013-03-18.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
__all__ = ["validation", "ValidationError"]
FORMAT = 3
DEBUG = False
from collections import deque
from collections import OrderedDict
from exabgp.data import check
TYPE=check.TYPE
PRESENCE=check.PRESENCE
class ValidationError (Exception):
internal_error = 'invalid configuration definition (internal error)'
mandatory_error = 'missing mandatory configuration field'
type_error = 'the data for this configuration option is not what was expected'
configuration_error = 'the configuration is missing this information'
conflicting_error = 'the configuration has conflicting information'
def __init__ (self,location,message):
self.location = location
self.message = message
def __str__ (self):
location = ','.join(self.location) if self.location else 'root'
return 'location ' + location + ' : ' + self.message
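# Illustrative sketch, not part of the original file: how a ValidationError
# renders its location. The location used below is made up.
def _example_validation_error ():
    try:
        raise ValidationError(['neighbor', 'tcp', 'bind'], ValidationError.mandatory_error)
    except ValidationError as exc:
        # 'location neighbor,tcp,bind : missing mandatory configuration field'
        return str(exc)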
_attributes = OrderedDict((
('next-hop', (TYPE.string, PRESENCE.optional, '', check.ipv4)),
('origin' , (TYPE.string, PRESENCE.optional, '', ['igp','egp','incomplete'])),
('as-path' , (TYPE.array, PRESENCE.optional, '', check.aspath)),
('as-sequence' , (TYPE.array, PRESENCE.optional, '', check.assequence)),
('local-preference', (TYPE.integer, PRESENCE.optional, '', check.localpreference)),
('med', (TYPE.integer, PRESENCE.optional, '', check.med)),
('aggregator' , (TYPE.string , PRESENCE.optional, '', check.ipv4)),
('aggregator-id' , (TYPE.string , PRESENCE.optional, '', check.ipv4)),
('atomic-aggregate' , (TYPE.boolean , PRESENCE.optional, '', check.nop)),
('community' , (TYPE.array , PRESENCE.optional, '', check.community)),
('extended-community' , (TYPE.array , PRESENCE.optional, '', check.extendedcommunity)),
('aigp', (TYPE.integer, PRESENCE.optional, '', check.aigp)),
('label' , (TYPE.array , PRESENCE.optional, '', check.label)),
('cluster-list' , (TYPE.array , PRESENCE.optional, '', check.clusterlist)),
('originator-id' , (TYPE.string , PRESENCE.optional, '', check.originator)),
('path-information' , (TYPE.string|TYPE.integer , PRESENCE.optional, '', check.pathinformation)),
('route-distinguisher' , (TYPE.string , PRESENCE.optional, '', check.distinguisher)),
('split' , (TYPE.integer , PRESENCE.optional, '', check.split)),
('watchdog' , (TYPE.string , PRESENCE.optional, '', check.watchdog)),
('withdrawn' , (TYPE.boolean , PRESENCE.optional, '', check.nop)),
))
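# Illustrative sketch, not part of the original file: every entry above is a
# (type, presence, reference, check) 4-tuple; this helper just unpacks one.
def _example_attribute_entry ():
    kind, presence, reference, checker = _attributes['next-hop']
    return kind == TYPE.string and presence == PRESENCE.optional and checker is check.ipv4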
_definition = (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('exabgp' , (TYPE.integer, PRESENCE.mandatory, '', [FORMAT,])),
('neighbor' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('<*>' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('tcp' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('bind' , (TYPE.string, PRESENCE.mandatory, '', check.ip)),
('connect' , (TYPE.string, PRESENCE.mandatory, '', check.ip)),
('ttl-security' , (TYPE.integer, PRESENCE.optional, '', check.uint8)),
('md5' , (TYPE.string, PRESENCE.optional, '', check.md5)),
('passive' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
)))),
('api' , (TYPE.object, PRESENCE.optional, 'api', OrderedDict((
('<*>' , (TYPE.array, PRESENCE.mandatory, '', ['neighbor-changes','send-packets','receive-packets','receive-routes'])),
)))),
('session' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('router-id' , (TYPE.string, PRESENCE.mandatory, '', check.ipv4)),
('hold-time' , (TYPE.integer, PRESENCE.mandatory, '', check.uint16)),
('asn' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('local' , (TYPE.integer, PRESENCE.mandatory, '', check.uint32)),
('peer' , (TYPE.integer, PRESENCE.mandatory, '', check.uint32)),
)))),
('feature' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('updates' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('group' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
('flush' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
)))),
('rib' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('adj-rib-out' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
)))),
)))),
('capability' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('family' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict((
('ipv4' , (TYPE.array, PRESENCE.optional, '', ['unicast','multicast','nlri-mpls','mpls-vpn','flow-vpn','flow'])),
('ipv6' , (TYPE.array, PRESENCE.optional, '', ['unicast','flow'])),
('alias' , (TYPE.string, PRESENCE.optional, '', ['all','minimal'])),
)))),
('asn4' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
('route-refresh' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
('graceful-restart' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
('multi-session' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
('add-path' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
('aigp' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
)))),
)))),
('announce' , (TYPE.array, PRESENCE.optional, ['update,prefix','update,flow'], check.string)),
)))),
)))),
('api' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('<*>' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('encoder' , (TYPE.string, PRESENCE.optional, '', ['json','text'])),
('program' , (TYPE.string, PRESENCE.mandatory, '', check.nop)),
)))),
)))),
('attribute' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('<*>' , (TYPE.object, PRESENCE.optional, '', _attributes)),
)))),
('flow' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('filtering-condition' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('<*>' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('source' , (TYPE.array|TYPE.string, PRESENCE.optional, '', check.flow_ipv4_range)),
('destination' , (TYPE.array|TYPE.string, PRESENCE.optional, '', check.flow_ipv4_range)),
('port' , (TYPE.array, PRESENCE.optional, '', check.flow_port)),
('source-port' , (TYPE.array, PRESENCE.optional, '', check.flow_port)),
('destination-port' , (TYPE.array, PRESENCE.optional, '', check.flow_port)),
('protocol' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['udp','tcp'])), # and value of protocols ...
('packet-length' , (TYPE.array, PRESENCE.optional, '', check.flow_length)),
('packet-fragment' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['not-a-fragment', 'dont-fragment', 'is-fragment', 'first-fragment', 'last-fragment'])),
('icmp-type' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['unreachable', 'echo-request', 'echo-reply'])),
# TODO : missing type
('icmp-code' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['host-unreachable', 'network-unreachable'])),
# TODO : missing code
('tcp-flags' , (TYPE.array|TYPE.string, PRESENCE.optional, '', ['fin', 'syn', 'rst', 'push', 'ack', 'urgent'])),
('dscp' , (TYPE.array|TYPE.integer, PRESENCE.optional, '', check.dscp)),
# TODO: MISSING SOME MORE ?
)))),
)))),
('filtering-action' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('<*>' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('rate-limit' , (TYPE.integer, PRESENCE.optional, '', check.float)),
('discard' , (TYPE.boolean, PRESENCE.optional, '', check.nop)),
('redirect' , (TYPE.string, PRESENCE.optional, '', check.redirect)),
('community' , (TYPE.array , PRESENCE.optional, '', check.community)),
('extended-community' , (TYPE.array , PRESENCE.optional, '', check.extendedcommunity)),
)))),
)))),
)))),
('update' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('prefix' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('<*>' , (TYPE.object, PRESENCE.optional, 'attribute', OrderedDict(( # name of route
('<*>' , (TYPE.object, PRESENCE.mandatory, '', OrderedDict(( # name of attributes referenced
('<*>' , (TYPE.object, PRESENCE.optional, '', _attributes)), # prefix
)))),
)))),
)))),
('flow' , (TYPE.object, PRESENCE.optional, '', OrderedDict((
('<*>' , (TYPE.object, PRESENCE.optional, 'flow,filtering-condition', OrderedDict(( # name of the dos
('<*>' , (TYPE.string, PRESENCE.mandatory, 'flow,filtering-action', check.nop)),
)))),
)))),
)))),
)))
# Look up in the definition all the keywords we use, to make sure that users can not use them
# This allows us to index on those words and to be sure of the underlying data
_reserved_keywords = set()
def _reserved (reserved_keywords,definition):
kind,_,_,od = definition
if kind & TYPE.object:
for key in od:
reserved_keywords.update([key])
_reserved(reserved_keywords,od[key])
_reserved(_reserved_keywords,_definition)
# Names are long strings and cause high memory usage; use integers instead
# regenerate the _definition with indexes
_indexes_byname = dict()
_indexes_byid = dict()
for index,name in enumerate(_reserved_keywords):
_indexes_byname[name] = index
    _indexes_byid[index] = name
# TODO: now need to rewrite the whole definition to use the indexes
# TODO: and update the references to do the lookup in _indexes_by...
# check that the configuration has the reference
def _reference (root,references,json,location):
if not references:
return
ref = references if check.array(references) else [references,]
jsn = json if check.array(json) else json.keys() if check.object(json) else [json,]
valid = []
for reference in ref:
compare = root
for path in reference.split(','):
compare = compare.get(path,{})
# prevent name conflict where we can not resolve which object is referenced.
add = compare.keys()
for k in add:
if k in valid:
                raise ValidationError(location, "duplicate reference in %s" % ', '.join(references))
# return False
valid.extend(add)
for option in jsn:
if not option in valid:
destination = ' or '.join(references) if type(references) == type ([]) else references
raise ValidationError(location, "the referenced data in %s is not present" % destination)
return True
def _validate (root,json,definition,location=[]):
kind,presence,references,contextual = definition
# ignore missing optional elements
if not json:
if presence == PRESENCE.mandatory:
raise ValidationError(location, ValidationError.mandatory_error)
return
    # check that the value is of the right type
if not check.kind(kind,json):
raise ValidationError(location, ValidationError.type_error)
# for object check all the elements inside
if kind & TYPE.object and check.object(json):
subdefinition = contextual
keys = deque(subdefinition.keys())
while keys:
key = keys.popleft()
if DEBUG: print " "*len(location) + key
if key.startswith('_'):
continue
if type(json) != type({}):
raise ValidationError(location, ValidationError.type_error)
if key == '<*>':
keys.extendleft(json.keys())
continue
_reference (root,references,json,location)
star = subdefinition.get('<*>',None)
subtest = subdefinition.get(key,star)
if subtest is None:
raise ValidationError(location, ValidationError.configuration_error)
_validate(root,json.get(key,None),subtest,location + [key])
    # for lists check all the elements inside
elif kind & TYPE.array and check.array(json):
test = contextual
# This is a function
if hasattr(test, '__call__'):
for data in json:
if not test(data):
raise ValidationError(location, ValidationError.type_error)
        # This is a list of valid options
elif type(test) == type([]):
for data in json:
if not data in test:
raise ValidationError(location, ValidationError.type_error)
# no idea what the data is - so something is wrong with the program
else:
raise ValidationError(location,ValidationError.internal_error)
# for non container object check the value
else:
test = contextual
        # check the value of the data
if hasattr(test, '__call__'):
if not test(json):
raise ValidationError(location, ValidationError.type_error)
        # a list of valid options
elif type(test) == type([]):
if not json in test:
raise ValidationError(location, ValidationError.type_error)
else:
raise ValidationError(location,ValidationError.internal_error)
_reference (root,references,json,location)
def _inet (json):
conflicts = {
'alias': ['inet','inet4','inet6'],
'inet': ['inet4','inet6']
}
for name in json['neighbor']:
inet = [_ for _ in json['neighbor'][name]['session']['capability']['family'].keys() if not _.startswith('_')]
for conflict in conflicts:
if conflict in inet:
raise ValidationError(['neighbor',name,'session','capability','family'], ValidationError.conflicting_error)
def validation (json):
_validate(json,json,_definition)
_inet(json)
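# Illustrative usage sketch (not part of the original module): the validator is
# typically driven by loading a JSON configuration and handing it to
# validation(), which raises ValidationError on the first offending key.
# The file name below is a hypothetical example.
import json as _json
def _example_validate_file (path='exabgp.json'):
    # Load the candidate configuration and run the schema walk defined above.
    with open(path) as handle:
        config = _json.load(handle)
    try:
        validation(config)
    except ValidationError:
        return False
    return True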
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
    :type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
                            program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls; without
            #               it, making two execute calls in a row hangs the second one
greenthread.sleep(0)
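# Illustrative sketch (not part of the original module): typical execute()
# usage. Wrapped in a function so nothing runs at import time; the commands
# and the root_helper value shown here are hypothetical examples.
def _example_execute_usage():
    # Run a command, accepting exit codes 0 and 1 and retrying up to 3 times.
    stdout, stderr = execute('ip', 'link', 'show',
                             check_exit_code=[0, 1],
                             attempts=3)
    # Run through a root wrapper; requires run_as_root plus a root_helper.
    stdout, stderr = execute('ovs-vsctl', 'show',
                             run_as_root=True,
                             root_helper='sudo neutron-rootwrap '
                                         '/etc/neutron/rootwrap.conf')
    return stdout, stderr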
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
    :param discard_warnings: True | False. Defaults to False. If set to True,
                             then for succeeding commands, stderr is cleared.
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
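# Illustrative sketch (not part of the original module): trycmd() reports
# failures through the returned err string instead of raising. The command
# shown is a hypothetical example.
def _example_trycmd_usage():
    out, err = trycmd('ovs-vsctl', 'list-br', discard_warnings=True)
    failed = bool(err)
    return out, failed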
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
|
|
########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from mock import patch, PropertyMock, DEFAULT
from .test_base import CliCommandTest
from cloudify_rest_client.client import CLOUDIFY_TENANT_HEADER
from cloudify_rest_client.executions import ExecutionsClient
from cloudify_rest_client.events import EventsClient
from cloudify_rest_client.node_instances import NodeInstance
from cloudify_rest_client.deployments import Deployment
from cloudify_rest_client.nodes import Node
from cloudify_rest_client.executions import Execution
from cloudify_rest_client.responses import ListResponse, Metadata
from cloudify_cli.cli import cfy
from cloudify_cli.exceptions import CloudifyCliError
from cloudify_cli.commands.agents import (
get_filters_map,
get_deployments_and_run_workers)
DEFAULT_TENANT_NAME = 'tenant0'
def _node_instance(tenant_name, ni_id, node_id, dep_id,
state='started'):
return NodeInstance({
'tenant_name': tenant_name,
'id': ni_id,
'host_id': ni_id,
'node_id': node_id,
'deployment_id': dep_id,
'state': state
})
class AgentsTests(CliCommandTest):
def setUp(self):
super(AgentsTests, self).setUp()
self.use_manager()
@staticmethod
def _agent_filters(node_ids=None, node_instance_ids=None,
deployment_ids=None, install_methods=None):
return {cfy.AGENT_FILTER_NODE_IDS: node_ids,
cfy.AGENT_FILTER_NODE_INSTANCE_IDS: node_instance_ids,
cfy.AGENT_FILTER_DEPLOYMENT_ID: deployment_ids,
cfy.AGENT_FILTER_INSTALL_METHODS: install_methods}
DEFAULT_TOPOLOGY = [
_node_instance(DEFAULT_TENANT_NAME, 't0d0node1_1', 'node1', 'd0'),
_node_instance(DEFAULT_TENANT_NAME, 't0d0node1_2', 'node1', 'd0'),
_node_instance(DEFAULT_TENANT_NAME, 't0d0node2_1', 'node2', 'd0'),
_node_instance(DEFAULT_TENANT_NAME, 't0d1node1_1', 'node1', 'd1'),
_node_instance(DEFAULT_TENANT_NAME, 't0d1node1_2', 'node1', 'd1'),
_node_instance(DEFAULT_TENANT_NAME, 't0d1node3_1', 'node3', 'd1'),
_node_instance('other_tenant', 't1d0node1_1', 'node1', 'd0'),
_node_instance('other_tenant', 't1d0node1_2', 'node1', 'd0'),
_node_instance('other_tenant', 't1d1node3_1', 'node3', 'd1'),
_node_instance('other_tenant', 't1d2node4_1', 'node4', 'd2'),
]
def mock_client(self, topology):
def _topology_filter(predicate, **kwargs):
tenant_name = self.client._client.headers.get(
CLOUDIFY_TENANT_HEADER)
if not tenant_name:
tenant_name = DEFAULT_TENANT_NAME
results = list()
all_tenants = kwargs.get('_all_tenants', False)
for node_instance in topology:
ni_tenant_name = node_instance['tenant_name']
if (all_tenants or ni_tenant_name == tenant_name) \
and predicate(node_instance):
results.append(node_instance)
return results
def list_node_instances(**kwargs):
def _matcher(node_instance):
ni_id = node_instance['id']
ni_node_id = node_instance['node_id']
ni_dep_id = node_instance['deployment_id']
return ni_id in kwargs.get('id', [ni_id]) and \
ni_node_id in kwargs.get('node_id', [ni_node_id]) and \
ni_dep_id in kwargs.get('deployment_id', [ni_dep_id])
instances = _topology_filter(_matcher, **kwargs)
total = len(instances)
offset, size = kwargs.get('_offset', 0), kwargs.get('_size', 1000)
instances = instances[offset:offset + size]
return ListResponse(
instances, {
'pagination': {
'size': size,
'offset': offset,
'total': total
}
})
def list_deployments(**kwargs):
tenant_name = self.client._client.headers.get(
CLOUDIFY_TENANT_HEADER)
if not tenant_name:
tenant_name = DEFAULT_TENANT_NAME
all_node_instances = _topology_filter(lambda x: True, **kwargs)
deployments = {(x['tenant_name'], x['deployment_id'])
for x in all_node_instances}
deployments = [Deployment({'id': b, 'tenant_name': a}) for a, b in
deployments]
results = list()
searched_ids = kwargs['id']
for dep in deployments:
if (not searched_ids) or dep.id in searched_ids:
results.append(dep)
return ListResponse(results, {})
def list_nodes(**kwargs):
node_ids = kwargs.get('id')
all_node_instances = _topology_filter(lambda x: True, **kwargs)
nodes = {(x['tenant_name'], x['deployment_id'], x['node_id'])
for x in all_node_instances}
nodes = [Node({'id': c, 'deployment_id': b, 'tenant_name': a}) for
(a, b, c) in nodes]
            if node_ids is not None:
                nodes = [x for x in nodes if x['id'] in node_ids]
return ListResponse(nodes, {})
self.client.node_instances.list = list_node_instances
self.client.deployments.list = list_deployments
self.client.nodes.list = list_nodes
def assert_execution_started(self, client_mock, deployment_id,
filters):
self.assertIn(
((deployment_id, 'workflow', filters), {
'allow_custom_parameters': True
}), client_mock.call_args_list)
    # Tests for get_filters_map
def test_parameters_error(self):
self.mock_client({})
self.assertRaises(
CloudifyCliError,
get_filters_map,
self.client,
self.logger,
AgentsTests._agent_filters(
node_instance_ids=['a1'],
deployment_ids=['d1']
),
[DEFAULT_TENANT_NAME])
def test_filters_map_empty(self):
self.mock_client({})
results = get_filters_map(
self.client, self.logger, AgentsTests._agent_filters(), False)
self.assertFalse(results)
def test_filters_map_empty_node_instances(self):
self.mock_client({})
self.assertRaises(
CloudifyCliError,
get_filters_map,
self.client,
self.logger,
AgentsTests._agent_filters(node_instance_ids=['t0d0node1_1']),
False)
def test_filters_map_empty_deployment_ids(self):
self.mock_client({})
self.assertRaises(
CloudifyCliError,
get_filters_map,
self.client,
self.logger,
AgentsTests._agent_filters(deployment_ids=['d0']),
False)
def test_filters_map_all(self):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
results = get_filters_map(
self.client, self.logger, AgentsTests._agent_filters(),
True)
self.assertEquals({
DEFAULT_TENANT_NAME: {
'd0': {},
'd1': {}
},
'other_tenant': {
'd0': {},
'd1': {},
'd2': {}
}
}, results)
def test_filters_map_node_id_single_tenant(self):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
results = get_filters_map(
self.client, self.logger, AgentsTests._agent_filters(
node_ids=['node1']), False)
self.assertEquals({
DEFAULT_TENANT_NAME: {
'd0': {'node_ids': ['node1']},
'd1': {'node_ids': ['node1']}
}
}, results)
def test_filters_map_node_id_all_tenants(self):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
results = get_filters_map(
self.client, self.logger, AgentsTests._agent_filters(
node_ids=['node1']), True)
self.assertEquals({
DEFAULT_TENANT_NAME: {
'd0': {
'node_ids': ['node1']
},
'd1': {
'node_ids': ['node1']
}
},
'other_tenant': {
'd0': {
'node_ids': ['node1']
},
# filtering by just node-id, still returns deployments
# that don't have that node; unfortunate but this is an
# optimization for the common case
'd1': {
'node_ids': ['node1']
},
'd2': {
'node_ids': ['node1']
}
}
}, results)
def test_filters_map_dep_id_single_tenant(self):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
results = get_filters_map(
self.client, self.logger, AgentsTests._agent_filters(
deployment_ids=['d0']), False)
self.assertEquals({
DEFAULT_TENANT_NAME: {
'd0': {}
}
}, results)
def test_filters_map_dep_id_all_tenants(self):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
results = get_filters_map(
self.client, self.logger, AgentsTests._agent_filters(
deployment_ids=['d0']), True)
self.assertEquals({
DEFAULT_TENANT_NAME: {
'd0': {}
},
'other_tenant': {
'd0': {}
}
}, results)
def test_filters_map_bad_dep_id(self):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
self.assertRaises(
CloudifyCliError,
get_filters_map,
self.client,
self.logger,
AgentsTests._agent_filters(deployment_ids=['error']),
False)
def test_filters_node_instance_pagination(self):
# with 2000 node-instances, the deployment with only uninitialized
# instances is skipped
self.mock_client([
_node_instance(
DEFAULT_TENANT_NAME,
'ni1_{0}'.format(i),
'node1',
'd0')
for i in range(2000)
] + [
_node_instance(DEFAULT_TENANT_NAME, 'ni2_1', 'node2', 'd1'),
_node_instance(DEFAULT_TENANT_NAME, 'ni3_1', 'node3', 'd2',
state='uninitialized')
])
filters = get_filters_map(
self.client,
self.logger,
AgentsTests._agent_filters(),
False
)
self.assertIn('d0', filters[DEFAULT_TENANT_NAME])
self.assertIn('d1', filters[DEFAULT_TENANT_NAME])
self.assertNotIn('d2', filters[DEFAULT_TENANT_NAME])
# Tests for get_deployments_and_run_workers
def test_empty_node_instances_map(self):
self.mock_client({})
self.assertRaises(
CloudifyCliError,
get_deployments_and_run_workers,
self.client,
self._agent_filters(),
[],
self.logger,
'',
False)
@patch.object(ExecutionsClient, 'start')
def test_full_topology(self, exec_client_mock):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
get_deployments_and_run_workers(
self.client, self._agent_filters(),
True, self.logger, 'workflow', False
)
self.assert_execution_started(exec_client_mock, 'd1', {})
self.assert_execution_started(exec_client_mock, 'd0', {})
self.assert_execution_started(exec_client_mock, 'd2', {})
self.assertEquals(len(exec_client_mock.call_args_list), 5)
@patch.object(ExecutionsClient, 'start')
def test_full_topology_one_nonstarted(self, exec_client_mock):
topology = list(AgentsTests.DEFAULT_TOPOLOGY)
topology.append(_node_instance(DEFAULT_TENANT_NAME, 't0d1node4_1',
'node4', 'd1', 'creating'))
self.mock_client(topology)
get_deployments_and_run_workers(
self.client, self._agent_filters(),
True, self.logger, 'workflow', False
)
self.assertEquals(len(exec_client_mock.call_args_list), 4)
@patch.object(ExecutionsClient, 'start')
def test_node_instances_map_none(self, exec_client_mock):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
get_deployments_and_run_workers(
self.client, self._agent_filters(install_methods=['provided']),
True, self.logger, 'workflow', False
)
self.assertEquals(exec_client_mock.call_count, 5)
for call in exec_client_mock.call_args_list:
self.assertTrue(call[0][2]['install_methods'] == ['provided'])
@patch.object(ExecutionsClient, 'get',
return_value=Execution({'status': 'terminated'}))
@patch.object(EventsClient, 'list',
return_value=ListResponse(
[],
Metadata({'pagination': {
'total': 0,
'offset': 0,
'size': 10}})))
def test_execution_tracking(self, events_list_mock, exec_get_mock):
self.mock_client(AgentsTests.DEFAULT_TOPOLOGY)
def _mock_execution_start(*args, **kwargs):
tenant_name = args[0].api.headers.get(CLOUDIFY_TENANT_HEADER)
deployment_id = args[1]
return Execution({'id': str(uuid.uuid4()),
'deployment_id': deployment_id,
'tenant_name': tenant_name})
def _wait_side_effect(*args, **kwargs):
client_tenant = args[0]._client.headers[CLOUDIFY_TENANT_HEADER]
execution = args[1]
self.assertEquals(client_tenant, execution['tenant_name'])
return DEFAULT
with patch('cloudify_cli.commands.agents.wait_for_execution',
return_value=PropertyMock(error=False),
side_effect=_wait_side_effect), \
patch.object(ExecutionsClient, 'start',
_mock_execution_start), \
patch('cloudify_cli.commands.agents.time.sleep'):
get_deployments_and_run_workers(
self.client, self._agent_filters(), True, self.logger,
'workflow', True)
|
|
"""
This module provides a large set of colormaps, functions for
registering new colormaps and for getting a colormap by name,
and a mixin class for adding color mapping functionality.
"""
from __future__ import print_function, division
import os
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import datad
from matplotlib._cm import cubehelix
cmap_d = dict()
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def _reverser(f):
def freversed(x):
return f(1-x)
return freversed
def revcmap(data):
"""Can only handle specification *data* in dictionary format."""
data_r = {}
for key, val in data.iteritems():
if callable(val):
valnew = _reverser(val)
# This doesn't work: lambda x: val(1-x)
# The same "val" (the first one) is used
# each time, so the colors are identical
# and the result is shades of gray.
else:
# Flip x and exchange the y values facing x = 0 and x = 1.
valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]
data_r[key] = valnew
return data_r
def _reverse_cmap_spec(spec):
"""Reverses cmap specification *spec*, can handle both dict and tuple
type specs."""
if 'red' in spec:
return revcmap(spec)
else:
revspec = list(reversed(spec))
if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))
revspec = [(1.0 - a, b) for a, b in revspec]
return revspec
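# Illustrative sketch (not part of the original module): reversing a
# tuple-form spec with _reverse_cmap_spec, using made-up two-anchor data.
def _example_reverse_spec():
    spec = [(0.0, '#000000'), (1.0, '#ffffff')]
    # Anchors are mirrored around x = 0.5, giving
    # [(0.0, '#ffffff'), (1.0, '#000000')].
    return _reverse_cmap_spec(spec)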
def _generate_cmap(name, lutsize):
    """Generates the requested cmap from its name *name*. The lut size is
    *lutsize*."""
spec = datad[name]
# Generate the colormap object.
if 'red' in spec:
return colors.LinearSegmentedColormap(name, spec, lutsize)
else:
return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
LUTSIZE = mpl.rcParams['image.lut']
_cmapnames = datad.keys() # need this list because datad is changed in loop
# Generate the reversed specifications ...
for cmapname in _cmapnames:
spec = datad[cmapname]
spec_reversed = _reverse_cmap_spec(spec)
datad[cmapname + '_r'] = spec_reversed
# Precache the cmaps with ``lutsize = LUTSIZE`` ...
# Use datad.keys() to also add the reversed ones added in the section above:
for cmapname in datad.iterkeys():
cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)
locals().update(cmap_d)
# Continue with definitions ...
def register_cmap(name=None, cmap=None, data=None, lut=None):
"""
Add a colormap to the set recognized by :func:`get_cmap`.
It can be used in two ways::
register_cmap(name='swirly', cmap=swirly_cmap)
register_cmap(name='choppy', data=choppydata, lut=128)
In the first case, *cmap* must be a :class:`colors.Colormap`
instance. The *name* is optional; if absent, the name will
be the :attr:`name` attribute of the *cmap*.
In the second case, the three arguments are passed to
the :class:`colors.LinearSegmentedColormap` initializer,
and the resulting colormap is registered.
"""
if name is None:
try:
name = cmap.name
except AttributeError:
raise ValueError("Arguments must include a name or a Colormap")
if not cbook.is_string_like(name):
raise ValueError("Colormap name must be a string")
if isinstance(cmap, colors.Colormap):
cmap_d[name] = cmap
return
# For the remainder, let exceptions propagate.
if lut is None:
lut = mpl.rcParams['image.lut']
cmap = colors.LinearSegmentedColormap(name, data, lut)
cmap_d[name] = cmap
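# Illustrative sketch (not part of the original module): the two registration
# forms described in the docstring above. The colormap names and segment data
# are made-up examples.
def _example_register_cmap_usage():
    # 1) Register an existing Colormap instance; the name is taken from it.
    swirly_cmap = colors.LinearSegmentedColormap.from_list(
        'swirly', ['#000000', '#ff00ff', '#ffffff'])
    register_cmap(cmap=swirly_cmap)
    # 2) Build one from segment data with an explicit lookup-table size.
    choppydata = {'red':   [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
                  'green': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
                  'blue':  [(0.0, 1.0, 1.0), (1.0, 0.0, 0.0)]}
    register_cmap(name='choppy', data=choppydata, lut=128)
    return get_cmap('swirly'), get_cmap('choppy')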
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None.
Colormaps added with :func:`register_cmap` take precedence over
builtin colormaps.
If *name* is a :class:`colors.Colormap` instance, it will be
returned.
If *lut* is not None it must be an integer giving the number of
entries desired in the lookup table, and *name* must be a
standard mpl colormap name with a corresponding data dictionary
in *datad*.
"""
if name is None:
name = mpl.rcParams['image.cmap']
if isinstance(name, colors.Colormap):
return name
if name in cmap_d:
if lut is None:
return cmap_d[name]
elif name in datad:
return _generate_cmap(name, lut)
raise ValueError("Colormap %s is not recognized" % name)
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry()
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = get_cmap(cmap)
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=None, bytes=False):
"""
Return a normalized rgba array corresponding to *x*.
In the normal case, *x* is a 1-D or 2-D sequence of scalars, and
the corresponding ndarray of rgba values will be returned,
based on the norm and colormap set for this ScalarMappable.
There is one special case, for handling images that are already
rgb or rgba, such as might have been read from an image file.
If *x* is an ndarray with 3 dimensions,
and the last dimension is either 3 or 4, then it will be
treated as an rgb or rgba array, and no mapping will be done.
If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
will be used to fill in the transparency. If the last dimension
is 4, the *alpha* kwarg is ignored; it does not
replace the pre-existing alpha. A ValueError will be raised
if the third dimension is other than 3 or 4.
In either case, if *bytes* is *False* (default), the rgba
array will be floats in the 0-1 range; if it is *True*,
the returned rgba array will be uint8 in the 0 to 255 range.
Note: this method assumes the input is well-behaved; it does
not check for anomalies such as *x* being a masked rgba
array, or being an integer type other than uint8, or being
a floating point rgba array with values outside the 0-1 range.
"""
# First check for special case, image input:
try:
if x.ndim == 3:
if x.shape[2] == 3:
if alpha is None:
alpha = 1
if x.dtype == np.uint8:
alpha = np.uint8(alpha * 255)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
if not bytes and xx.dtype == np.uint8:
xx = xx.astype(float) / 255
return xx
except AttributeError:
# e.g., x is not an ndarray; so try mapping it
pass
# This is the normal case, mapping a scalar array:
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
        set the norm limits for image scaling; if *vmin* is a length-2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap or registered colormap name
"""
cmap = get_cmap(cmap)
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
        callbacksSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
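# Illustrative sketch (not part of the original module): minimal standalone use
# of the ScalarMappable mixin, mapping a scalar array to RGBA bytes.
def _example_scalar_mappable_usage():
    sm = ScalarMappable(norm=colors.Normalize(vmin=0.0, vmax=1.0),
                        cmap=get_cmap('jet'))
    data = np.linspace(0.0, 1.0, 5)
    sm.set_array(data)
    # Returns a (5, 4) uint8 array of RGBA values in the 0-255 range.
    return sm.to_rgba(data, bytes=True)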
|
|
from itertools import chain
import os
import socket
import sys
from ledis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs)
from ledis.exceptions import (
LedisError,
ConnectionError,
BusyLoadingError,
ResponseError,
InvalidResponse,
ExecAbortError,
)
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_LF = b('\n')
class PythonParser(object):
"Plain Python parsing class"
MAX_READ_LENGTH = 1000000
encoding = None
EXCEPTION_CLASSES = {
'ERR': ResponseError,
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
}
def __init__(self):
self._fp = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
"Called when the socket connects"
self._fp = connection._sock.makefile('rb')
if connection.decode_responses:
self.encoding = connection.encoding
def on_disconnect(self):
"Called when the socket disconnects"
if self._fp is not None:
self._fp.close()
self._fp = None
def read(self, length=None):
"""
Read a line from the socket if no length is specified,
otherwise read ``length`` bytes. Always strip away the newlines.
"""
try:
if length is not None:
bytes_left = length + 2 # read the line ending
if length > self.MAX_READ_LENGTH:
# apparently reading more than 1MB or so from a windows
# socket can cause MemoryErrors. See:
# https://github.com/andymccurdy/redis-py/issues/205
# read smaller chunks at a time to work around this
try:
buf = BytesIO()
while bytes_left > 0:
read_len = min(bytes_left, self.MAX_READ_LENGTH)
buf.write(self._fp.read(read_len))
bytes_left -= read_len
buf.seek(0)
return buf.read(length)
finally:
buf.close()
return self._fp.read(bytes_left)[:-2]
# no length, read a full line
return self._fp.readline()[:-2]
except (socket.error, socket.timeout):
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
return self.EXCEPTION_CLASSES[error_code](response)
return ResponseError(response)
def read_response(self):
response = self.read()
if not response:
raise ConnectionError("Socket closed on remote end")
byte, response = byte_to_chr(response[0]), response[1:]
if byte not in ('-', '+', ':', '$', '*'):
raise InvalidResponse("Protocol Error")
# server returned an error
if byte == '-':
response = nativestr(response)
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == '+':
pass
# int value
elif byte == ':':
response = long(response)
# bulk response
elif byte == '$':
length = int(response)
if length == -1:
return None
response = self.read(length)
# multi-bulk response
elif byte == '*':
length = int(response)
if length == -1:
return None
response = [self.read_response() for i in xrange(length)]
if isinstance(response, bytes) and self.encoding:
response = response.decode(self.encoding)
return response
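# Illustrative sketch (not part of the original module): the reply prefixes
# read_response() dispatches on, with made-up raw payloads for each kind.
_EXAMPLE_REPLIES = {
    '+': b('+OK\r\n'),                       # status reply
    '-': b('-ERR unknown command\r\n'),      # error reply
    ':': b(':42\r\n'),                       # integer reply
    '$': b('$5\r\nhello\r\n'),               # bulk reply
    '*': b('*2\r\n$1\r\na\r\n$1\r\nb\r\n'),  # multi-bulk reply
}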
DefaultParser = PythonParser
class Connection(object):
"Manages TCP communication to and from a Ledis server"
def __init__(self, host='localhost', port=6380, db=0,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser):
self.pid = os.getpid()
self.host = host
self.port = port
self.db = db
self.socket_timeout = socket_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class()
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def connect(self):
"Connects to the Ledis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
self.on_connect()
def _connect(self):
"Create a TCP socket connection"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect((self.host, self.port))
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
# if a database is specified, switch to it
if self.db:
self.send_command('SELECT', self.db)
if nativestr(self.read_response()) != 'OK':
raise ConnectionError('Invalid Database')
def disconnect(self):
"Disconnects from the Ledis server"
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.close()
except socket.error:
pass
self._sock = None
def send_packed_command(self, command):
"Send an already packed command to the Ledis server"
if not self._sock:
self.connect()
try:
self._sock.sendall(command)
except socket.error:
e = sys.exc_info()[1]
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
raise ConnectionError("Error %s while writing to socket. %s." %
(_errno, errmsg))
except Exception:
self.disconnect()
raise
def send_command(self, *args):
"Pack and send a command to the Ledis server"
self.send_packed_command(self.pack_command(*args))
def read_response(self):
"Read the response from a previously sent command"
try:
response = self._parser.read_response()
except Exception:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
return response
def encode(self, value):
"Return a bytestring representation of the value"
if isinstance(value, bytes):
return value
if isinstance(value, float):
value = repr(value)
if not isinstance(value, basestring):
value = str(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
    def pack_command(self, *args):
        "Pack a series of arguments into a valid Ledis command"
output = SYM_STAR + b(str(len(args))) + SYM_CRLF
for enc_value in imap(self.encode, args):
output += SYM_DOLLAR
output += b(str(len(enc_value)))
output += SYM_CRLF
output += enc_value
output += SYM_CRLF
return output
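# Illustrative sketch (not part of the original module): what pack_command()
# produces for a simple command, using the unified request protocol
# ("*<argc>\r\n" followed by "$<len>\r\n<arg>\r\n" for each argument).
def _example_pack_command():
    conn = Connection()  # no socket is opened until connect() is called
    wire = conn.pack_command('SET', 'mykey', 'value')
    # wire == '*3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$5\r\nvalue\r\n'
    return wire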
class UnixDomainSocketConnection(Connection):
def __init__(self, path='', db=0, socket_timeout=None, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser):
self.pid = os.getpid()
self.path = path
self.db = db
self.socket_timeout = socket_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class()
def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to unix socket: %s. %s." % \
(self.path, exception.args[0])
else:
return "Error %s connecting to unix socket: %s. %s." % \
(exception.args[0], self.path, exception.args[1])
# TODO: add ability to block waiting on a connection to be released
class ConnectionPool(object):
"Generic connection pool"
@classmethod
def from_url(cls, url, db=None, **kwargs):
"""
Return a connection pool configured from the given URL.
For example::
ledis://localhost:6380/0
unix:///path/to/socket.sock?db=0
        Two URL schemes are supported:
ledis:// creates a normal TCP socket connection
unix:// creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. ledis://localhost?db=0
2. If using the ledis:// scheme, the path argument of the url, e.g.
ledis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
url_string = url
url = urlparse(url)
qs = ''
        # In Python 2.6, custom URL schemes don't recognize querystring values;
        # they're left as part of the url.path.
if '?' in url.path and not url.query:
# chop the querystring including the ? off the end of the url
# and reparse it.
qs = url.path.split('?', 1)[1]
url = urlparse(url_string[:-(len(qs) + 1)])
else:
qs = url.query
url_options = {}
for name, value in iteritems(parse_qs(qs)):
if value and len(value) > 0:
url_options[name] = value[0]
# We only support ledis:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'path': url.path,
'connection_class': UnixDomainSocketConnection,
})
else:
url_options.update({
'host': url.hostname,
'port': int(url.port or 6380),
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and url.path:
try:
url_options['db'] = int(url.path.replace('/', ''))
except (AttributeError, ValueError):
pass
# last shot at the db value
url_options['db'] = int(url_options.get('db', db or 0))
# update the arguments from the URL values
kwargs.update(url_options)
return cls(**kwargs)
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
self.pid = os.getpid()
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections or 2 ** 31
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
def _checkpid(self):
if self.pid != os.getpid():
self.disconnect()
self.__init__(self.connection_class, self.max_connections,
**self.connection_kwargs)
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
self._checkpid()
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"Releases the connection back to the pool"
self._checkpid()
if connection.pid == self.pid:
self._in_use_connections.remove(connection)
self._available_connections.append(connection)
def disconnect(self):
"Disconnects all connections in the pool"
all_conns = chain(self._available_connections,
self._in_use_connections)
for connection in all_conns:
connection.disconnect()
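# Illustrative sketch (not part of the original module): building pools from
# the URL forms described in from_url() above. Host, port, and socket path
# are hypothetical examples.
def _example_from_url_usage():
    tcp_pool = ConnectionPool.from_url('ledis://localhost:6380/0')
    unix_pool = ConnectionPool.from_url('unix:///tmp/ledis.sock?db=1')
    return tcp_pool, unix_pool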
class BlockingConnectionPool(object):
"""
Thread-safe blocking connection pool::
>>> from ledis.client import Ledis
>>> client = Ledis(connection_pool=BlockingConnectionPool())
    It performs the same function as the default
    :py:class:`~ledis.connection.ConnectionPool` implementation, in that
    it maintains a pool of reusable connections that can be shared by
    multiple ledis clients (safely across threads if required).
    The difference is that, in the event that a client tries to get a
    connection from the pool when all of the connections are in use, rather
    than raising a :py:class:`~ledis.exceptions.ConnectionError` (as the
    default :py:class:`~ledis.connection.ConnectionPool` implementation
    does), it makes the client wait ("blocks") for a specified number of
    seconds until a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
# Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
# Raise a ``ConnectionError`` after five seconds if a connection is
# not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(self, max_connections=50, timeout=20, connection_class=None,
queue_class=None, **connection_kwargs):
"Compose and assign values."
# Compose.
if connection_class is None:
connection_class = Connection
if queue_class is None:
queue_class = LifoQueue
# Assign.
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.queue_class = queue_class
self.max_connections = max_connections
self.timeout = timeout
# Validate the ``max_connections``. With the "fill up the queue"
# algorithm we use, it must be a positive integer.
is_valid = isinstance(max_connections, int) and max_connections > 0
if not is_valid:
raise ValueError('``max_connections`` must be a positive integer')
# Get the current process id, so we can disconnect and reinstantiate if
# it changes.
self.pid = os.getpid()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
def _checkpid(self):
"""
Check the current process id. If it has changed, disconnect and
re-instantiate this connection pool instance.
"""
# Get the current process id.
pid = os.getpid()
# If it hasn't changed since we were instantiated, then we're fine, so
# just exit, remaining connected.
if self.pid == pid:
return
# If it has changed, then disconnect and re-instantiate.
self.disconnect()
self.reinstantiate()
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
        If the connection returned is ``None`` then a new connection is created.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
connection = self.pool.get(block=True, timeout=self.timeout)
except Empty:
            # Note that this is not caught by the ledis client and will be
            # raised unless handled by application code.
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Releases the connection back to the pool."
# Make sure we haven't changed process.
self._checkpid()
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except Full:
# This shouldn't normally happen but might perhaps happen after a
# reinstantiation. So, we can handle the exception by not putting
# the connection back on the pool, because we definitely do not
# want to reuse it.
pass
def disconnect(self):
"Disconnects all connections in the pool."
for connection in self._connections:
connection.disconnect()
def reinstantiate(self):
"""
        Reinstantiate this instance within a new process with a new connection
pool set.
"""
self.__init__(max_connections=self.max_connections,
timeout=self.timeout,
connection_class=self.connection_class,
queue_class=self.queue_class, **self.connection_kwargs)
class Token(object):
"""
Literal strings in Redis commands, such as the command names and any
    hard-coded arguments, are wrapped in this class so we know not to apply
    any encoding rules to them.
"""
def __init__(self, value):
if isinstance(value, Token):
value = value.value
self.value = value
def __repr__(self):
return self.value
def __str__(self):
return self.value
|
|
'''
Translates a source file using a translation model.
'''
import argparse
import copy
import numpy
import theano
import cPickle as pkl
from nmt import (build_sampler, init_params)
from mixer import *
from multiprocessing import Process, Queue
def gen_sample(tparams, f_inits, f_nexts, x, options, trng=None,
k=1, maxlen=500, stochastic=True, argmax=False):
# k is the beam size we have
if k > 1:
assert not stochastic, \
'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
# get initial state of decoder rnn and encoder context
rets = []
next_state_chars = []
next_state_words = []
next_bound_chars = []
next_bound_words = []
ctx0s = []
for f_init in f_inits:
ret = f_init(x)
next_state_chars.append(ret[0])
next_state_words.append(ret[1])
ctx0s.append(ret[2])
next_bound_chars.append(numpy.zeros((1, options['dec_dim'])).astype('float32'))
next_bound_words.append(numpy.zeros((1, options['dec_dim'])).astype('float32'))
next_w = -1 * numpy.ones((1,)).astype('int64') # bos indicator
num_models = len(f_inits)
for ii in xrange(maxlen):
temp_next_state_char = []
temp_next_state_word = []
temp_next_bound_char = []
temp_next_bound_word = []
temp_next_p = []
for i in xrange(num_models):
ctx = numpy.tile(ctx0s[i], [live_k, 1])
inps = [next_w, ctx, next_state_chars[i], next_state_words[i], next_bound_chars[i], next_bound_words[i]]
ret = f_nexts[i](*inps)
next_p, _, next_state_char, next_state_word, next_bound_char, next_bound_word = ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]
temp_next_p.append(next_p)
temp_next_state_char.append(next_state_char)
temp_next_state_word.append(next_state_word)
temp_next_bound_char.append(next_bound_char)
temp_next_bound_word.append(next_bound_word)
#next_p = numpy.log(numpy.array(temp_next_p)).sum(axis=0) / num_models
next_p = numpy.log(numpy.array(temp_next_p).mean(axis=0))
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score += next_p[0, nw]
if nw == 0:
break
else:
cand_scores = hyp_scores[:, None] - next_p
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states_chars = []
new_hyp_states_words = []
new_hyp_bounds_chars = []
new_hyp_bounds_words = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
for i in xrange(num_models):
new_hyp_states_char = []
new_hyp_states_word = []
new_hyp_bounds_char = []
new_hyp_bounds_word = []
for ti in trans_indices:
new_hyp_states_char.append(copy.copy(temp_next_state_char[i][ti]))
new_hyp_states_word.append(copy.copy(temp_next_state_word[i][ti]))
new_hyp_bounds_char.append(copy.copy(temp_next_bound_char[i][ti]))
new_hyp_bounds_word.append(copy.copy(temp_next_bound_word[i][ti]))
new_hyp_states_chars.append(new_hyp_states_char)
new_hyp_states_words.append(new_hyp_states_word)
new_hyp_bounds_chars.append(new_hyp_bounds_char)
new_hyp_bounds_words.append(new_hyp_bounds_word)
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
for i in xrange(num_models):
hyp_states_char = []
hyp_states_word = []
hyp_bounds_char = []
hyp_bounds_word = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] != 0:
hyp_states_char.append(new_hyp_states_chars[i][idx])
hyp_states_word.append(new_hyp_states_words[i][idx])
hyp_bounds_char.append(new_hyp_bounds_chars[i][idx])
hyp_bounds_word.append(new_hyp_bounds_words[i][idx])
next_state_chars[i] = numpy.array(hyp_states_char)
next_state_words[i] = numpy.array(hyp_states_word)
next_bound_chars[i] = numpy.array(hyp_bounds_char)
next_bound_words[i] = numpy.array(hyp_bounds_word)
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
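# Illustrative sketch (not part of the original script): the flatten/argsort
# bookkeeping used by the beam search above, on a tiny made-up example with
# live_k=2 hypotheses and a 3-word vocabulary.
def _example_beam_bookkeeping():
    hyp_scores = numpy.array([0.1, 0.4], dtype='float32')  # costs so far
    next_p = numpy.log(numpy.array([[0.7, 0.2, 0.1],
                                    [0.3, 0.3, 0.4]], dtype='float32'))
    cand_scores = hyp_scores[:, None] - next_p              # lower is better
    cand_flat = cand_scores.flatten()
    ranks_flat = cand_flat.argsort()[:2]                    # keep the k=2 best
    voc_size = next_p.shape[1]
    trans_indices = ranks_flat // voc_size                  # which hypothesis
    word_indices = ranks_flat % voc_size                    # which word
    return trans_indices, word_indices, cand_flat[ranks_flat]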
def translate_model(queue, rqueue, pid, models, options, k, normalize):
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
trng = RandomStreams(1234)
# allocate model parameters
params = []
for i in xrange(len(models)):
params.append(init_params(options))
# load model parameters and set theano shared variables
tparams = []
for i in xrange(len(params)):
params[i] = load_params(models[i], params[i])
tparams.append(init_tparams(params[i]))
# word index
use_noise = theano.shared(numpy.float32(0.))
f_inits = []
f_nexts = []
for i in xrange(len(tparams)):
f_init, f_next = build_sampler(tparams[i], options, trng, use_noise)
f_inits.append(f_init)
f_nexts.append(f_next)
def _translate(seq):
use_noise.set_value(0.)
# sample given an input sequence and obtain scores
sample, score = gen_sample(tparams, f_inits, f_nexts,
numpy.array(seq).reshape([len(seq), 1]),
options, trng=trng, k=k, maxlen=500,
stochastic=False, argmax=False)
# normalize scores according to sequence lengths
if normalize:
lengths = numpy.array([len(s) for s in sample])
score = score / lengths
sidx = numpy.argmin(score)
return sample[sidx]
while True:
req = queue.get()
if req is None:
break
idx, x = req[0], req[1]
print pid, '-', idx
seq = _translate(x)
rqueue.put((idx, seq))
return
def main(models, dictionary, dictionary_target, source_file, saveto, k=5,
normalize=False, n_process=5, encoder_chr_level=False,
decoder_chr_level=False, utf8=False):
    # load model options
pkl_file = models[0].split('.')[0] + '.pkl'
with open(pkl_file, 'rb') as f:
options = pkl.load(f)
# load source dictionary and invert
with open(dictionary, 'rb') as f:
word_dict = pkl.load(f)
word_idict = dict()
for kk, vv in word_dict.iteritems():
word_idict[vv] = kk
word_idict[0] = '<eos>'
word_idict[1] = 'UNK'
# load target dictionary and invert
with open(dictionary_target, 'rb') as f:
word_dict_trg = pkl.load(f)
word_idict_trg = dict()
for kk, vv in word_dict_trg.iteritems():
word_idict_trg[vv] = kk
word_idict_trg[0] = '<eos>'
word_idict_trg[1] = 'UNK'
# create input and output queues for processes
queue = Queue()
rqueue = Queue()
processes = [None] * n_process
for midx in xrange(n_process):
processes[midx] = Process(
target=translate_model,
args=(queue, rqueue, midx, models, options, k, normalize))
processes[midx].start()
# utility function
def _seqs2words(caps):
capsw = []
for cc in caps:
ww = []
for w in cc:
if w == 0:
break
if utf8:
ww.append(word_idict_trg[w].encode('utf-8'))
else:
ww.append(word_idict_trg[w])
if decoder_chr_level:
capsw.append(''.join(ww))
else:
capsw.append(' '.join(ww))
return capsw
def _send_jobs(fname):
with open(fname, 'r') as f:
for idx, line in enumerate(f):
if encoder_chr_level:
words = list(line.decode('utf-8').strip())
else:
words = line.strip().split()
x = map(lambda w: word_dict[w] if w in word_dict else 1, words)
x = map(lambda ii: ii if ii < options['n_words_src'] else 1, x)
x += [0]
#print '=============================='
#print line
#print '------------------------------'
#print ' '.join([word_idict[wx] for wx in x])
#print '=============================='
queue.put((idx, x))
return idx+1
def _finish_processes():
for midx in xrange(n_process):
queue.put(None)
def _retrieve_jobs(n_samples):
trans = [None] * n_samples
for idx in xrange(n_samples):
resp = rqueue.get()
trans[resp[0]] = resp[1]
if numpy.mod(idx, 10) == 0:
print 'Sample ', (idx+1), '/', n_samples, ' Done'
return trans
print 'Translating ', source_file, '...'
n_samples = _send_jobs(source_file)
trans = _seqs2words(_retrieve_jobs(n_samples))
_finish_processes()
with open(saveto, 'w') as f:
print >>f, '\n'.join(trans)
print 'Done'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-k', type=int, default=5)
parser.add_argument('-p', type=int, default=5)
parser.add_argument('-n', action="store_true", default=False)
parser.add_argument('-enc_c', action="store_true", default=False)
parser.add_argument('-dec_c', action="store_true", default=False)
parser.add_argument('-utf8', action="store_true", default=False)
parser.add_argument('saveto', type=str)
model_path = '/misc/kcgscratch1/ChoGroup/junyoung_exp/acl2016/wmt15/fien/bpe2char_seg_gru_decoder/0209/'
model1 = model_path + 'bpe2char_seg_gru_decoder_adam_en1.260000.npz'
model2 = model_path + 'bpe2char_seg_gru_decoder_adam_en2.270000.npz'
model3 = model_path + 'bpe2char_seg_gru_decoder_adam_en3.280000.npz'
model4 = model_path + 'bpe2char_seg_gru_decoder_adam_en4.265000.npz'
model5 = model_path + 'bpe2char_seg_gru_decoder_adam_en5.125000.npz'
model6 = model_path + 'bpe2char_seg_gru_decoder_adam_en6.265000.npz'
model7 = model_path + 'bpe2char_seg_gru_decoder_adam_en7.225000.npz'
model8 = model_path + 'bpe2char_seg_gru_decoder_adam.165000.npz'
models = [model1, model2, model3, model4, model5, model6, model7, model8]
dictionary = '/misc/kcgscratch1/ChoGroup/junyoung_exp/wmt15/fien/train/all_fi-en.en.tok.bpe.word.pkl'
dictionary_target = '/misc/kcgscratch1/ChoGroup/junyoung_exp/wmt15/fien/train/all_fi-en.fi.tok.300.pkl'
source = '/misc/kcgscratch1/ChoGroup/junyoung_exp/wmt15/fien/dev/newsdev2015-enfi-src.en.tok.bpe'
#source = '/misc/kcgscratch1/ChoGroup/junyoung_exp/wmt15/fien/test/newstest2015-fien-src.en.tok.bpe'
args = parser.parse_args()
main(models, dictionary, dictionary_target, source,
args.saveto, k=args.k, normalize=args.n, n_process=args.p,
encoder_chr_level=args.enc_c,
decoder_chr_level=args.dec_c,
utf8=args.utf8)
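    # Illustrative invocation (hypothetical script name and output path): beam
    # width 12, 8 worker processes, length-normalised scores, character-level
    # decoder output, UTF-8 target text, translations written to out.txt:
    #   python translate_bpe2char.py -k 12 -p 8 -n -dec_c -utf8 out.txt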
|
|
import numpy as np
from keras.models import Sequential, Model, model_from_json
from keras.layers import Input
from keras.layers.core import Dense, Activation, Dropout, Merge, RepeatVector
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.utils.visualize_util import plot
import keras.backend as K
import json
import random
#from IPython.display import SVG
#from keras.utils.visualize_util import plot
from pymongo import MongoClient
from math import ceil
#fix random number for repeatability
#np.random.seed(17)
input_dim = 3 #input dimensions of fused sensor data
nb_timesteps = 2000 #maximum amount of samples/timesteps per shot
nb_output_class = 3 #slap, wrist, snap (a "none" class is currently unused)
nb_batch_size = 36 #number of samples per batch
train_test_ratio = 0.6
nb_input_multi = 6
model_filename = 'LSTM_silkymitties.json'
weights_filename = 'LSTM_silkymitties_weights.h5'
def load_training_data():
client = MongoClient('localhost', 27017)
db = client['restdb']
handedness = []
speed = []
accuracy = []
shotTypes = []
y = []
labelled_values = db['testlblfusedshots'].find()
nb_input_samples = labelled_values.count()
x_upper = np.empty(shape=[nb_input_samples,nb_timesteps,input_dim])
x_lower = np.empty(shape=[nb_input_samples,nb_timesteps,input_dim])
handedness = np.zeros(shape=[nb_input_samples,2], dtype='float')
#speed = np.empty(shape=[nb_input_samples])
#accuracy = np.empty(shape=[nb_input_samples])
shotTypes = np.zeros(shape=[nb_input_samples,nb_output_class], dtype='float')
#slapshotTypes = np.empty(shape=[nb_input_samples])
#y = np.empty(shape=[nb_input_samples,nb_output_class])
index = 0
for target in labelled_values:
upperTheta = np.vstack((target['upperTheta']['uthetaX'], target['upperTheta']['uthetaY'], target['upperTheta']['uthetaZ']))
lowerTheta = np.vstack((target['lowerTheta']['lthetaX'], target['lowerTheta']['lthetaY'], target['lowerTheta']['lthetaZ']))
normalizedUpperTheta = upperTheta / 180.0
normalizedLowerTheta = lowerTheta / 180.0
x_upper[index] = sequence.pad_sequences(normalizedUpperTheta, maxlen=nb_timesteps, dtype='float', padding='post', truncating='post', value=0.).T
x_lower[index] = sequence.pad_sequences(normalizedLowerTheta, maxlen=nb_timesteps, dtype='float', padding='post', truncating='post', value=0.).T
shotTypes[index,shotTypeToInt(target['shotType'])] = 1.0
#slapshotTypes[index] = isSlapShot(target['shotType'])
handedness[index,isLeftHanded(target['handedness'])] = 1.0
#speed = nb.append(speed, target['speed']], axis=0)
#accuracy = nb.append(accuracy, ['accuracy'], axis=0)
index+=1
#for size in range(20, nb_input_samples, 20):
# trainIndex = round(size * train_test_ratio)
# nb_epochs = ceil(size / nb_batch_size)
# trainShotTypeCompileFit(nb_epochs, handedness[0:trainIndex], handedness[trainIndex:size], \
# x_upper[0:trainIndex], x_lower[0:trainIndex], shotTypes[0:trainIndex,:], \
# x_upper[trainIndex:size], x_lower[trainIndex:size], shotTypes[trainIndex:size,:])
#Shuffle the samples in unison to decrease data clustering
s_handedness, s_x_upper, s_x_lower, s_shotTypes = unison_shuffle(handedness,
x_upper, x_lower, shotTypes)
    trainIndex = int(round(nb_input_samples * train_test_ratio))
    nb_epochs = int(ceil(float(nb_input_samples) / nb_batch_size))
#trainSlapShotCompileFit(nb_epochs, handedness[0:trainIndex], handedness[trainIndex:], \
# x_upper[0:trainIndex], x_lower[0:trainIndex], slapshotTypes[0:trainIndex], \
# x_upper[trainIndex:], x_lower[trainIndex:], slapshotTypes[trainIndex:])
trainShotTypeCompileFit(nb_epochs, s_handedness[0:trainIndex], s_handedness[trainIndex:], \
s_x_upper[0:trainIndex], s_x_lower[0:trainIndex], s_shotTypes[0:trainIndex], \
s_x_upper[trainIndex:], s_x_lower[trainIndex:], s_shotTypes[trainIndex:])
def unison_shuffle(a, b, c, d):
p = np.random.permutation(len(a))
return a[p], b[p], c[p], d[p]
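def _unison_shuffle_example():
    # Minimal sketch (not called by the training pipeline): shows that
    # unison_shuffle keeps row i of every array pointing at the same sample
    # after shuffling. The toy arrays below are made up for illustration.
    labels = np.array([[1, 0], [0, 1], [1, 0]])
    upper = np.arange(3 * 2).reshape(3, 2)
    lower = upper * 10
    targets = np.array([0, 1, 2])
    s_labels, s_upper, s_lower, s_targets = unison_shuffle(labels, upper, lower, targets)
    return s_labels, s_upper, s_lower, s_targets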
def trainSlapShotCompileFit(epoch_count, train_handedness, test_handedness,
x_train_upper, x_train_lower, y_train, x_test_upper, x_test_lower, y_test):
#Upper hand LSTM input
encoder_a = Sequential()
encoder_a.add(LSTM(output_dim=nb_input_multi*input_dim,
batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
#Lower hand LSTM input
encoder_b = Sequential()
encoder_b.add(LSTM(output_dim=nb_input_multi*input_dim,
batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
encoded_handedness = Sequential()
    encoded_handedness.add(RepeatVector(nb_timesteps))
#Merge both LSTM units with concatenation
merged = Merge([encoded_handedness, encoder_a, encoder_b], mode='concat')
decoder = Sequential()
decoder.add(merged)
decoder.add(Dropout(0.1))
decoder.add(Dense(input_dim*nb_input_multi*2 + 2, activation='relu'))
decoder.add(Dropout(0.2))
decoder.add(Dense(input_dim*2, activation='relu'))
decoder.add(Dropout(0.3))
#sigmoid activation instead of softmax to avoid normalizing to 1.0
#1 output signal for the binary classification likelihood
decoder.add(Dense(1, activation='sigmoid'))
decoder.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
decoder.fit([train_handedness, x_train_upper, x_train_lower], y_train,
batch_size=nb_batch_size, nb_epoch=epoch_count,
validation_data=([test_handedness, x_test_upper, x_test_lower], y_test))
printSummary(decoder,test_handedness,x_test_upper, x_test_lower, y_test)
return decoder
def trainShotTypeCompileFit(epoch_count, train_handedness, test_handedness,
x_train_upper, x_train_lower, y_train, x_test_upper, x_test_lower, y_test):
#Upper hand LSTM input
encoder_a = Sequential()
encoder_a.add(LSTM(4*input_dim,
batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid',
return_sequences=True))
encoder_a.add(Dropout(0.2))
encoder_a.add(LSTM(4*input_dim, return_sequences=True,
activation='sigmoid', inner_activation='hard_sigmoid'))
encoder_a.add(Dropout(0.25))
encoder_a.add(LSTM(8*input_dim, batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
#Lower hand LSTM input
encoder_b = Sequential()
encoder_b.add(LSTM(4*input_dim,
batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid',
return_sequences=True))
encoder_b.add(Dropout(0.2))
encoder_b.add(LSTM(4*input_dim, return_sequences=True,
activation='sigmoid', inner_activation='hard_sigmoid'))
encoder_b.add(Dropout(0.25))
encoder_b.add(LSTM(8*input_dim, batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
encoded_handedness = Sequential()
encoded_handedness.add(Dense(2, batch_input_shape=(nb_batch_size, 2)))
#Merge both LSTM units with concatenation
merged = Merge([encoded_handedness, encoder_a, encoder_b], mode='concat')
decoder = Sequential()
decoder.add(merged)
#decoder.add(Dropout(0.25))
    #Use dense layers to expand then shrink to the desired output signal
decoder.add(Dropout(0.25))
decoder.add(Dense(input_dim*8, activation='sigmoid'))
decoder.add(Dropout(0.25))
decoder.add(Dense(output_dim=(2*nb_output_class), activation='sigmoid'))
decoder.add(Dropout(0.3))
decoder.add(Dense(nb_output_class, activation='softmax'))
decoder.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
decoder.fit([train_handedness, x_train_upper, x_train_lower], y_train,
batch_size=nb_batch_size, nb_epoch=epoch_count,
validation_data=([test_handedness, x_test_upper, x_test_lower], y_test))
printSummary(decoder, test_handedness, x_test_upper, x_test_lower, y_test)
saveCompiledShotTypeModel(decoder)
return decoder
def saveCompiledShotTypeModel(decoder):
saved_model = decoder.to_json()
with open('LSTM_silkymitties_ShotType.json', 'w') as outfile:
json.dump(saved_model, outfile)
decoder.save_weights('LSTM_silkymitties_ShotType_weights.h5')
def loadCompiledShotTypeModel():
with open('LSTM_silkymitties_ShotType.json', 'r') as infile:
architecture = json.load(infile)
model = model_from_json(architecture)
model.load_weights('LSTM_silkymitties_ShotType_weights.h5')
return model
def predictShotTypeResult(fusedShotID):
client = MongoClient('localhost', 27017)
db = client['restdb']
shot = db['fusedshots'].find_one(fusedShotID)
x_upper = np.empty(shape=[1,nb_timesteps,input_dim])
x_lower = np.empty(shape=[1,nb_timesteps,input_dim])
raw_x_upper = np.vstack((shot['upperTheta']['uthetaX'],
shot['upperTheta']['uthetaY'], shot['upperTheta']['uthetaZ']))
raw_x_lower = np.vstack((shot['lowerTheta']['lthetaX'],
shot['lowerTheta']['lthetaY'], shot['lowerTheta']['lthetaZ']))
normalizedUpperTheta = raw_x_upper / 180.0
normalizedLowerTheta = raw_x_lower / 180.0
x_upper[0] = sequence.pad_sequences(normalizedUpperTheta, maxlen=nb_timesteps,
dtype='float', padding='post', truncating='post', value=0.).T
x_lower[0] = sequence.pad_sequences(normalizedLowerTheta, maxlen=nb_timesteps,
dtype='float', padding='post', truncating='post', value=0.).T
handedness = np.zeros(shape=[1,2])
handedness[0,isLeftHanded(shot['handedness'])] = 1.0
print("Loading Model")
model = loadCompiledShotTypeModel()
print("Loaded Model Succesfully")
result = model.predict([handedness, x_upper, x_lower], batch_size=1)
print("Result: " + str(result))
resultIndex = result.argmax()
print(resultIndex)
shotTypeResult = shotTypeToString(resultIndex)
print(shotTypeResult)
return shotTypeResult
#NOTE: draft trainer; the merged `speed` input branch is not yet fed by fit()/evaluate()
def trainShotSpeedCompileFit(epoch_count, speed, test_handedness, x_train_upper,
        x_train_lower, y_train, x_test_upper, x_test_lower, y_test):
#Upper hand LSTM input
encoder_a = Sequential()
encoder_a.add(LSTM(output_dim=2*input_dim,
input_shape=(nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
#Lower hand LSTM input
encoder_b = Sequential()
encoder_b.add(LSTM(output_dim=2*input_dim,
input_shape=(nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
#Merge both LSTM units with concatenation
merged = Merge([speed, encoder_a, encoder_b], mode='concat')
decoder = Sequential()
decoder.add(merged)
    #Use a dense layer to reduce to the desired intermediate shape
decoder.add(Dense(output_dim=2*input_dim, activation='relu'))
#sigmoid activation instead of softmax to avoid normalizing to 1.0
#1 output signal for the binary classification likelihood
decoder.add(Dense(1, activation='sigmoid'))
decoder.compile(loss='mae', optimizer='rmsprop', metrics=['accuracy'])
decoder.fit([x_train_upper, x_train_lower], y_train,
        batch_size=4*input_dim, nb_epoch=epoch_count,
validation_data=([x_test_upper, x_test_lower], y_test))
    printSummary(decoder, test_handedness, x_test_upper, x_test_lower, y_test)
return decoder
def saveCompiledShotSpeedModel(decoder):
saved_model = decoder.to_json()
with open('LSTM_silkymitties_ShotSpeed.json', 'w') as outfile:
json.dump(saved_model, outfile)
decoder.save_weights('LSTM_silkymitties_ShotSpeed_weights.h5')
def loadCompiledShotSpeedModel():
with open('LSTM_silkymitties_ShotSpeed.json', 'r') as infile:
architecture = json.load(infile)
model = model_from_json(architecture)
model.load_weights('LSTM_silkymitties_ShotSpeed_weights.h5')
return model
#NOTE: draft trainer; the merged `accuracy` input branch is not yet fed by fit()/evaluate()
def trainShotAccuracyCompileFit(epoch_count, accuracy, test_handedness, x_train_upper,
        x_train_lower, y_train, x_test_upper, x_test_lower, y_test):
#Upper hand LSTM input
encoder_a = Sequential()
encoder_a.add(LSTM(output_dim=2*input_dim,
input_shape=(nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
#Lower hand LSTM input
encoder_b = Sequential()
encoder_b.add(LSTM(output_dim=2*input_dim,
input_shape=(nb_timesteps, input_dim),
activation='sigmoid', inner_activation='hard_sigmoid'))
#Merge both LSTM units with concatenation
merged = Merge([accuracy, encoder_a, encoder_b], mode='concat')
decoder = Sequential()
decoder.add(merged)
    #Use a dense layer to reduce to the desired output size
    decoder.add(Dense(output_dim=2*input_dim, activation='relu'))
    #decoder.add(Dropout(0.5))
    #sigmoid instead of softmax: softmax over a single unit always outputs 1.0
    decoder.add(Dense(1, activation='sigmoid'))
decoder.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
decoder.fit([x_train_upper, x_train_lower], y_train,
        batch_size=4*input_dim, nb_epoch=epoch_count,
validation_data=([x_test_upper, x_test_lower], y_test))
    printSummary(decoder, test_handedness, x_test_upper, x_test_lower, y_test)
return decoder
def saveCompiledShotAccuracyModel(decoder):
saved_model = decoder.to_json()
with open('LSTM_silkymitties_ShotAccuracy.json', 'w') as outfile:
json.dump(saved_model, outfile)
decoder.save_weights('LSTM_silkymitties_ShotAccuracy_weights.h5')
def loadCompiledShotAccuracyModel():
with open('LSTM_silkymitties_ShotAccuracy.json', 'r') as infile:
architecture = json.load(infile)
model = model_from_json(architecture)
model.load_weights('LSTM_silkymitties_ShotAccuracy_weights.h5')
return model
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
def isLeftHanded(handedness):
if str(handedness) == "L":
return 1
else:
return 0
def isSlapShot(shotType):
if str(shotType) == "slap" or str(shotType) == "Slap":
return 1
else:
return 0
def shotTypeToInt(shotType):
stringShot = str(shotType)
#if stringShot == "notashot" or stringShot == "none" or stringShot == "NoShot":
# return 0
if stringShot == "slap" or stringShot == "Slap":
return 0
elif stringShot == "wrist" or stringShot == "Wrist":
return 1
elif stringShot == "snap" or stringShot == "Snap":
return 2
def shotTypeToString(shotType):
#if stringShot == "notashot" or stringShot == "none" or stringShot == "NoShot":
# return 0
if shotType == 0:
return "Slap"
elif shotType == 1:
return "Wrist"
elif shotType == 2:
return "Snap"
def printSummary(decoder, test_handedness, x_test_upper, x_test_lower, y_test):
#print(decoder.summary())
scores = decoder.evaluate([test_handedness, x_test_upper, x_test_lower],
y_test, verbose=0, batch_size=nb_batch_size)
print("Accuracy: %2f%%" % (scores[1]*100))
print(scores)
plot(decoder, to_file='model.png', show_shapes=True)
#plot(decoder, to_file='model.png')
if __name__=='__main__':
#loadFromFile(True)
#fuseLabelledShots()
load_training_data()
|
|
"""
Data Grouper custom gui functions
============================
Created by: Chris Cadonic
For: Utility in Dr. Mandana Modirrousta's Lab
----------------------------
This file contains code for :
Implementing custom GUI modules for specific purposes in the general-use
grouper.py function.
============================
"""
from tkinter import Tk, Label, Listbox, Button, Scrollbar, Frame, \
Radiobutton, IntVar
#Button, Radiobutton
class AskColumns:
def __init__(self, master, all_cols):
self.master = master
self.master.deiconify()
self.frame = Frame(self.master)
self.all_cols = all_cols
self.prompt = 'Please select which columns will be necessary for ' \
'the output excel file.'
        # keep a reference to the label, then pack it (pack() returns None)
        self.label = Label(master, text=self.prompt,
                           width=35,
                           wraplength=150,
                           justify='center')
        self.label.pack()
self.add_listbox()
'''submit_button = Button(self,
text='Submit',
command=self.update_vals())
submit_button.pack()'''
self.frame.pack()
def add_listbox(self):
self.listbox_frame = Frame(self.master)
# create a scroll bar for the list box
scrollbar = Scrollbar(self.listbox_frame)
scrollbar.pack(side='right', fill='y')
# create list box
self.listbox = Listbox(self.listbox_frame,
selectmode='multiple',
exportselection=0)
# add column names to the list box
for col_name in self.all_cols:
self.listbox.insert(self.all_cols.index(col_name), col_name)
self.listbox.pack(side='left', fill='y')
# pack the listbox
self.listbox_frame.pack()
# attach listbox to scrollbar
self.listbox.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=self.listbox.yview)
def update_vals(self):
chosen_lines = self.listbox.curselection()
cols = [self.all_cols[line] for line in chosen_lines]
self.master.quit()
return cols
class AskProcessing:
def __init__(self, master):
self.master = master
        # Tk was imported as the class, not the module, so use the widget
        # classes imported directly from tkinter
        self.frame = Frame(self.master)
        self.quitButton = Button(self.frame, text='Quit', width=25,
                                 command=self.close_windows)
self.quitButton.pack()
self.frame.pack()
def close_windows(self):
self.master.destroy()
def ask_columns(all_cols):
""" GUI implementation of a window that shows the user all of the
columns contained in an excel file, and then asks the user which
columns they would need.
:param all_cols:
input list of all columns to be displayed for the user to select
:return:
"""
# set the display message
prompt = 'Please select which columns will be necessary for the output ' \
'excel file.'
# create the root window and add label to top of window
columns_window = Tk()
columns_window.resizable(0, 0) # prevent resizing of the box
Label(columns_window, text=prompt,
width=35,
wraplength=150,
justify='center').pack()
# create frame for the listbox and scrollbar
listbox_frame = Frame(columns_window)
# create a scroll bar for the list box
scrollbar = Scrollbar(listbox_frame)
scrollbar.pack(side='right', fill='y')
# create list box
listbox = Listbox(listbox_frame,
selectmode='multiple',
exportselection=0)
# add column names to the list box
for col_name in all_cols:
listbox.insert(all_cols.index(col_name), col_name)
listbox.pack(side='left', fill='y')
# pack the listbox
listbox_frame.pack()
# attach listbox to scrollbar
listbox.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=listbox.yview)
    # add a submit button
submit_button = Button(columns_window,
text='Submit',
command=columns_window.quit)
submit_button.pack()
columns_window.mainloop()
# ---------------- End of listbox gui specification
# acquire the chosen columns
chosen_lines = listbox.curselection()
cols = [all_cols[line] for line in chosen_lines]
# destroy the window after acquiring the necessary information
columns_window.destroy()
return cols
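def _ask_columns_example():
    """Minimal sketch (requires a display): pop up the column chooser for a few
    hypothetical column names and print whichever ones the user selects."""
    chosen = ask_columns(['Subject ID', 'Trial', 'Score', 'Date'])
    print(chosen)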
def choose_operations(available_funcs):
""" GUI implementation of a window that displays to the user possible
calculation functions for processing the current set of data.
:param: available_funcs:
a list of tuples of the form (name, function_handle), where name
and function_handle refer to all of the functions defined in
the utils.py file.
:return:
"""
# initialize list of chosen functions
chosen_funcs = []
# set the display message
prompt = 'Please select which operations will be performed on the input ' \
'datasets.'
# create the root window and add label to top of window
operations_window = Tk()
operations_window.resizable(0, 0) # prevent resizing of the box
Label(operations_window, text=prompt,
width=35,
wraplength=150,
justify='center').pack()
    # NOTE: operation selection is not wired up yet; a placeholder radio button
    # is shown and an empty list is returned for now.
    val = IntVar()
Radiobutton(operations_window,
text='test me',
indicatoron=0,
width=20,
padx=20,
variable=val).pack()
return chosen_funcs
if __name__ == '__main__':
pass
|
|
import maya.OpenMaya as om
import pymel.core as pm
from pymel.core import Callback
import logging
MAPTO_IGNORE = 'ignore'
MAPTO_NONE = 'none'
kColor = 999
kEnum = 1000
kMessage=10000
MAP_API_ATTRIDS = {
kEnum : "kEnum",
kColor : "MFnNumericData::k3Float",
kMessage: "kMessage",
om.MFnNumericData.k2Int:"MFnNumericData::k2Int",
om.MFnNumericData.k2Float:"MFnNumericData::k2Float",
om.MFnNumericData.kFloat:"MFnNumericData::kFloat",
om.MFnNumericData.kBoolean:"MFnNumericData::kBoolean",
om.MFnNumericData.kInt:"MFnNumericData::kInt",
om.MFnData.kString:"MFnNumericData::kString",
}
MAP_API_ATTR = {
kEnum : ['at','enum'],
kColor: ['at','spectrum'],
kMessage: ['at','message'],
om.MFnNumericData.k2Int: ['dt','long2'],
om.MFnNumericData.k2Float: ['dt','float2'],
om.MFnNumericData.kFloat:['at','float'],
om.MFnNumericData.kBoolean:['at', 'bool'],
om.MFnNumericData.kInt: ['at', 'int'],
om.MFnData.kString:['dt', 'string']
}
log = logging.getLogger("mantraLogger")
class Attribute(object):
def __init__(self, an="", dn="", tp=None, mapto=MAPTO_NONE, values=None, default=None, affectAtt = None, kat=None):
self.attname = an
self.shortname = self.attname
self.displayname = dn
self.type = tp
self.mapto = mapto
self.values = values
self.default = default
self.affectAtt = affectAtt
self.child = None
self.next = None
self.kategory = kat
self.uiElement = None
self.uiDimFunction = None
def dimConnections(att):
log.debug("dimConnections for att %s" % att.attname)
class AttributeCollection(object):
"""Creates a list of attributes.
The class can return a flattened list of string/attribute assignments,
as well as create ui templates with dim ctrls.
"""
def __init__(self):
self.attList = []
def addAttr(self, an="", dn="", tp=None, mapto=MAPTO_NONE, values=None, default=None, affectAtt = None, kat=None):
"""kategory should be one or more strings separated with a | sign"""
self.attList.append(Attribute(an=an, dn=dn, tp=tp, mapto=mapto, values=values, default=default, affectAtt = affectAtt, kat=kat))
def listAttr(self):
pass
def endLayout(self):
pm.editorTemplate(endLayout=True)
def beginLayout(self, label):
pm.editorTemplate(collapse=False, beginLayout=label)
def addControl(self, attname, displayname):
pm.editorTemplate(attname, addDynamicControl=True, label=displayname)
def beginUISegment(self, segmentName):
log.debug("beginUISegment: %s" % segmentName)
frameName = segmentName + "Frame"
columnName = segmentName + "Column"
pm.frameLayout(frameName, label = segmentName, collapsable = True, collapse = False, width = 414)
pm.columnLayout(columnName, adj = True, columnAttach=('both', 5), columnWidth=250)
    def dimConnections(self, attrib):
        log.debug("dimConnections self for att %s" % attrib.attname)
        # NOTE: the early return below disables the dimming logic that follows
        return
        value = attrib.uiDimFunction(attrib.uiElement, query = True, value1 = True)
for att in self.attList:
if att == attrib:
dimConnections = att.affectAtt
if dimConnections:
log.debug("Dimming elements %s" % str(dimConnections))
dimList = []
for dimatt in self.attList:
if dimatt.attname in dimConnections:
dimList.append(dimatt)
for di in dimList:
di.uiDimFunction(di.uiElement, edit=True, enable=value)
def createUi(self, node = None):
log.debug("createUi for node %s" % str(node))
currentSegment = ""
layoutList = []
for att in self.attList:
if att.kategory!=None and currentSegment != att.kategory:
log.debug("kategory %s != %s" % (currentSegment, att.kategory ))
katlist = att.kategory.split("|")
diff = len(layoutList) - len(katlist)
                # the new list is shorter than the old one, so keep closing layouts until the lengths match
log.debug("check diff %d" % diff)
while diff > 0:
layoutList.pop()
pm.setParent("..")
pm.setParent("..")
diff = len(layoutList) - len(katlist)
log.debug("new diff %d" % diff)
                # the old list is now no longer than the new one;
                # walk through it and close everything that no longer matches
for i in range(len(layoutList)):
kat = katlist[i]
ckat = layoutList[i]
                    # if the values differ, close everything from here on
if kat != ckat:
                        # close every remaining layout, then truncate the list in one go;
                        # popping by index inside the loop skips entries and can raise IndexError
                        for n in range(i, len(layoutList)):
                            pm.setParent("..")
                            pm.setParent("..")
                        del layoutList[i:]
                        break
                # layoutList now only contains entries that match the new
                # kategory path, so we can pick up from there
for i in range(len(layoutList), len(katlist)):
log.debug("opening layout for katlist %s %d %s" % (str(katlist), i, katlist[i]))
self.beginUISegment(katlist[i])
layoutList.append(katlist[i])
currentSegment = att.kategory
if MAP_API_ATTR.has_key(att.type):
log.debug("Adding element %s with displayName %s" % (att.attname,att.displayname))
attype, attypeval = MAP_API_ATTR[att.type]
log.debug("Adding attribute named %s type %s val %s default %s" % (att.attname, attype, attypeval, att.default))
if attypeval == 'bool':
att.uiElement = pm.checkBoxGrp( att.attname, numberOfCheckBoxes = 1, label1 = att.displayname, cc = Callback(self.dimConnections,att))
att.uiDimFunction = pm.checkBoxGrp
if node:
pm.connectControl(att.uiElement, node + "." + att.attname, index = 2 )
if attypeval == 'int':
att.uiElement = pm.intFieldGrp( att.attname, numberOfFields=1, label=att.displayname, value1=att.default, cc = Callback(self.dimConnections,att))
att.uiDimFunction = pm.intFieldGrp
if node:
pm.connectControl(att.uiElement, node + "." + att.attname, index = 2 )
if attypeval == 'long2':
if node:
att.uiDimFunction = pm.attrFieldGrp( attribute='%s' % (node + "." + att.attname), cc = Callback(self.dimConnections,att))
if attypeval == 'float':
att.uiElement = pm.floatFieldGrp( att.attname, numberOfFields=1, label=att.displayname, value1=att.default, cc = Callback(self.dimConnections,att))
att.uiDimFunction = pm.floatFieldGrp
if node:
pm.connectControl(att.uiElement, node + "." + att.attname, index = 2 )
if attypeval == 'float2':
if node:
att.uiDimFunction = pm.attrFieldGrp( attribute='%s' % (node + "." + att.attname), cc = Callback(self.dimConnections,att))
if attypeval == 'string':
att.uiElement = pm.textFieldGrp( att.attname, label=att.displayname, text=att.default, cc = Callback(self.dimConnections,att))
                    if node:
                        pm.connectControl(att.uiElement, node + "." + att.attname, index = 2 )
                    pm.textFieldGrp(att.uiElement, edit=True, text = att.default)
if attypeval == 'enum':
poplist = map(list, enumerate(att.values))
if node:
nodeatt = node + "." + att.attname
att.uiElement = pm.attrEnumOptionMenuGrp(att.attname, label = att.displayname, at=nodeatt, ei = poplist, vcc = Callback(self.dimConnections,att))
att.uiDimFunction = pm.attrEnumOptionMenuGrp
else:
att.uiElement = pm.attrEnumOptionMenuGrp(att.attname, label = att.displayname, ei = poplist, vcc = Callback(self.dimConnections,att))
att.uiDimFunction = pm.attrEnumOptionMenuGrp
if attypeval == 'message':
pass
if len(layoutList) > 0:
for i in range(len(layoutList)):
pm.setParent("..")
pm.setParent("..")
def createUiTemplates(self):
log.debug("crateUiTemplates")
currentSegment = ""
layoutList = []
for att in self.attList:
if att.kategory!=None and currentSegment != att.kategory:
log.debug("kategory %s != %s" % (currentSegment, att.kategory ))
katlist = att.kategory.split("|")
diff = len(layoutList) - len(katlist)
                # the new list is shorter than the old one, so keep closing layouts until the lengths match
log.debug("check diff %d" % diff)
while diff > 0:
layoutList.pop()
self.endLayout()
diff = len(layoutList) - len(katlist)
log.debug("new diff %d" % diff)
                # the old list is now no longer than the new one;
                # walk through it and close everything that no longer matches
for i in range(len(layoutList)):
kat = katlist[i]
ckat = layoutList[i]
                    # if the values differ, close everything from here on
if kat != ckat:
                        # close every remaining layout, then truncate the list in one go;
                        # popping by index inside the loop skips entries and can raise IndexError
                        for n in range(i, len(layoutList)):
                            self.endLayout()
                        del layoutList[i:]
                        break
                # layoutList now only contains entries that match the new
                # kategory path, so we can pick up from there
for i in range(len(layoutList), len(katlist)):
log.debug("opening layout for katlist %s %d %s" % (str(katlist), i, katlist[i]))
self.beginLayout(katlist[i])
layoutList.append(katlist[i])
currentSegment = att.kategory
log.debug("Adding element %s with displayName %s" % (att.attname,att.displayname))
self.addControl(att.attname, att.displayname)
if len(layoutList) > 0:
for i in range(len(layoutList)):
self.endLayout()
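# Illustrative sketch (only meaningful inside a Maya/pymel session; the attribute
# names and categories below are hypothetical): build a small collection and emit
# the corresponding attribute editor template controls.
def _exampleAttributeCollection():
    coll = AttributeCollection()
    coll.addAttr(an="doMotionBlur", dn="Motion Blur", tp=om.MFnNumericData.kBoolean,
                 default=True, kat="Sampling")
    coll.addAttr(an="pixelSamples", dn="Pixel Samples", tp=om.MFnNumericData.kInt,
                 default=4, kat="Sampling")
    coll.addAttr(an="shutterTime", dn="Shutter Time", tp=om.MFnNumericData.kFloat,
                 default=0.5, kat="Sampling|MotionBlur")
    coll.createUiTemplates()
    return coll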
|
|
import copy
from hashlib import md5
nodes = {}
pairs = {}
for line in open("input/dec22").readlines()[2:]:
node, _, used, avail, _ = line.split()
nn = node.split('-')
x = int(nn[1][1:])
y = int(nn[2][1:])
used = int(used[:-1])
avail = int(avail[:-1])
if y not in nodes:
nodes[y] = {}
nodes[y][x] = {'used': used, 'free': avail, 'has_data': False}
original = copy.deepcopy(nodes)
width = x + 1
height = y + 1
original[0][width-1]['has_data'] = True
cnt = 0
# calculate possible moves
for y in range(0, height):
for x in range(0, width):
if not nodes[y][x]['used']:
continue
for y2 in range(0, height):
for x2 in range(0, width):
if x == x2 and y == y2:
continue
if nodes[y][x]['used'] <= nodes[y2][x2]['free']:
# print((x, y), (x2, y2))
cnt += 1
# print(cnt)
import sys
def print_nodes(nodes):
for y in nodes:
for x in nodes[y]:
if nodes[y][x]['has_data']:
sys.stdout.write('G')
elif x == 0 and y == 0:
sys.stdout.write('e')
else:
# sys.stdout.write(str(nodes[y][x]['used']))
if nodes[y][x]['used'] > 200:
sys.stdout.write("#")
else:
sys.stdout.write('_' if not nodes[y][x]['used'] else '.')
x += 1
sys.stdout.write("\n")
sys.stdout.write("\n")
from collections import deque
queue = deque([])
# prod
queue.append((original, 35, 18, 0))
# test
# queue.append((original, 1, 1, 0))
#print(len(original[0]))
#for x in range(0, len(original[0])):
# print(original[0][x]['free'], original[0][x]['used'])
seen = {}
def hash_dict(d):
return md5(bytes(str(d), 'ascii')).hexdigest()
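def _hash_dict_example():
    # Minimal sketch: structurally identical grids hash to the same digest,
    # which is what lets `seen` deduplicate visited states further down.
    a = {0: {0: {'used': 0, 'free': 10, 'has_data': False}}}
    b = copy.deepcopy(a)
    return hash_dict(a) == hash_dict(b)  # True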
def move_content(nodes, from_x, from_y, to_x, to_y, move_data=False):
if move_data:
nodes[to_y][to_x]['has_data'] = nodes[from_y][from_x]['has_data']
nodes[from_y][from_x]['has_data'] = False
nodes[to_y][to_x]['free'] -= nodes[from_y][from_x]['used']
nodes[to_y][to_x]['used'] += nodes[from_y][from_x]['used']
nodes[from_y][from_x]['free'] = nodes[from_y][from_x]['free'] + nodes[from_y][from_x]['used']
nodes[from_y][from_x]['used'] = 0
if nodes[to_y][to_x]['has_data']:
return (to_x, to_y)
return None
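def _move_content_example():
    # Minimal sketch on a made-up 1x2 grid: move the goal data from (1, 0)
    # into the empty node at (0, 0) and report its new position.
    grid = {0: {0: {'used': 0, 'free': 10, 'has_data': False},
                1: {'used': 6, 'free': 4, 'has_data': True}}}
    new_pos = move_content(grid, 1, 0, 0, 0, move_data=True)
    # new_pos == (0, 0); the source node is now empty: grid[0][1]['used'] == 0
    return new_pos, grid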
import os.path
import pickle
import heapq
from math import sqrt
printed = {}
if not os.path.exists('input/22.pickle'):
visited = {}
# move stuff around
while queue:
nodes, x, y, steps = queue.popleft()
if y not in visited:
visited[y] = {}
elif x in visited[y]:
continue
visited[y][x] = True
if steps not in printed:
print(steps)
print_nodes(nodes)
printed[steps] = True
if nodes[y][x]['has_data']:
print("got there", steps)
break
if y and nodes[y][x]['free'] >= nodes[y-1][x]['used']:
n_nodes = copy.deepcopy(nodes)
move_content(n_nodes, x, y-1, x, y)
queue.append((n_nodes, x, y-1, steps + 1))
if y < (height - 1) and nodes[y][x]['free'] >= nodes[y+1][x]['used']:
n_nodes = copy.deepcopy(nodes)
if nodes[y+1][x]['has_data']:
# we got close to the actual data, abort here plz
break
move_content(n_nodes, x, y+1, x, y)
queue.append((n_nodes, x, y+1, steps+1))
if x and nodes[y][x]['free'] >= nodes[y][x-1]['used']:
n_nodes = copy.deepcopy(nodes)
move_content(n_nodes, x-1, y, x, y)
queue.append((n_nodes, x-1, y, steps + 1))
if x < (width - 1) and nodes[y][x]['free'] >= nodes[y][x+1]['used']:
n_nodes = copy.deepcopy(nodes)
if nodes[y][x+1]['has_data']:
# we got close to the actual data, abort here plz
print(steps)
break
move_content(n_nodes, x+1, y, x, y)
queue.append((n_nodes, x+1, y, steps + 1))
print_nodes(nodes)
pickle.dump((nodes, x, y, x+1, y, steps), open('input/22.pickle', 'wb'))
q = (nodes, x, y, x+1, y, steps)
else:
print("skipped it")
q = pickle.load(open('input/22.pickle', 'rb'))
queue = deque([q])
# we can live on the top for this, no need to get down dirty
height = min(2, height)
printed = {}
data_has_been_here = {}
# move data around
while queue:
nodes, x, y, data_x, data_y, steps = queue.popleft()
if nodes[y][x]['has_data']:
if y in data_has_been_here and x in data_has_been_here[y]:
continue
if y not in data_has_been_here:
data_has_been_here[y] = {}
data_has_been_here[y][x] = True
h = hash_dict(nodes)
if h in seen:
continue
seen[h] = True
if steps not in printed:
print(steps)
print_nodes(nodes)
printed[steps] = True
if nodes[0][0]['has_data']:
print("got there again", steps)
break
if y < (height - 1) and nodes[y][x]['free'] >= nodes[y+1][x]['used']:
n_nodes = copy.deepcopy(nodes)
d = move_content(n_nodes, x, y+1, x, y, move_data=True)
d_x = data_x
d_y = data_y
if d:
d_x, d_y = d
if abs(y + 1 - d_y) < 2:
queue.append((n_nodes, x, y+1, d_x, d_y, steps+1))
if y and nodes[y][x]['free'] >= nodes[y-1][x]['used']:
n_nodes = copy.deepcopy(nodes)
d = move_content(n_nodes, x, y-1, x, y, move_data=True)
d_x = data_x
d_y = data_y
if d:
d_x, d_y = d
if abs(y - 1 - d_y) < 2:
queue.append((n_nodes, x, y-1, d_x, d_y, steps + 1))
if x and nodes[y][x]['free'] >= nodes[y][x-1]['used']:
n_nodes = copy.deepcopy(nodes)
d = move_content(n_nodes, x-1, y, x, y, move_data=True)
d_x = data_x
d_y = data_y
if d:
d_x, d_y = d
if abs(x - 1 - d_x) < 2:
queue.append((n_nodes, x-1, y, d_x, d_y, steps + 1))
if x < (width - 1) and nodes[y][x]['free'] >= nodes[y][x+1]['used']:
n_nodes = copy.deepcopy(nodes)
d = move_content(n_nodes, x+1, y, x, y, move_data=True)
d_x = data_x
d_y = data_y
if d:
d_x, d_y = d
if abs(x + 1 - d_x) < 2:
queue.append((n_nodes, x+1, y, d_x, d_y, steps + 1))
|
|
"""Provides functionality to interact with image processing services."""
import asyncio
from datetime import timedelta
import logging
from typing import final
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util.async_ import run_callback_threadsafe
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "image_processing"
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASSES = [
"alpr", # Automatic license plate recognition
"face", # Face
"ocr", # OCR
]
SERVICE_SCAN = "scan"
EVENT_DETECT_FACE = "image_processing.detect_face"
ATTR_AGE = "age"
ATTR_CONFIDENCE = "confidence"
ATTR_FACES = "faces"
ATTR_GENDER = "gender"
ATTR_GLASSES = "glasses"
ATTR_MOTION = "motion"
ATTR_TOTAL_FACES = "total_faces"
CONF_CONFIDENCE = "confidence"
DEFAULT_TIMEOUT = 10
DEFAULT_CONFIDENCE = 80
SOURCE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_domain("camera"),
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SOURCE): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE): vol.All(
vol.Coerce(float), vol.Range(min=0, max=100)
),
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
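# Illustrative configuration.yaml snippet matching the schema above; the
# platform name and camera entity below are hypothetical:
#
# image_processing:
#   - platform: example_face
#     confidence: 85
#     source:
#       - entity_id: camera.front_door
#         name: Front Door Faces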
async def async_setup(hass, config):
"""Set up the image processing."""
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
await component.async_setup(config)
async def async_scan_service(service):
"""Service handler for scan."""
image_entities = await component.async_extract_from_service(service)
update_tasks = []
for entity in image_entities:
entity.async_set_context(service.context)
update_tasks.append(asyncio.create_task(entity.async_update_ha_state(True)))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
DOMAIN, SERVICE_SCAN, async_scan_service, schema=make_entity_service_schema({})
)
return True
class ImageProcessingEntity(Entity):
"""Base entity class for image processing."""
timeout = DEFAULT_TIMEOUT
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return None
@property
def confidence(self):
"""Return minimum confidence for do some things."""
return None
def process_image(self, image):
"""Process image."""
raise NotImplementedError()
async def async_process_image(self, image):
"""Process image."""
return await self.hass.async_add_executor_job(self.process_image, image)
async def async_update(self):
"""Update image and process it.
This method is a coroutine.
"""
camera = self.hass.components.camera
image = None
try:
image = await camera.async_get_image(
self.camera_entity, timeout=self.timeout
)
except HomeAssistantError as err:
_LOGGER.error("Error on receive image from entity: %s", err)
return
# process image data
await self.async_process_image(image.content)
class ImageProcessingFaceEntity(ImageProcessingEntity):
"""Base entity class for face image processing."""
def __init__(self):
"""Initialize base face identify/verify entity."""
self.faces = []
self.total_faces = 0
@property
def state(self):
"""Return the state of the entity."""
confidence = 0
state = None
# No confidence support
if not self.confidence:
return self.total_faces
# Search high confidence
for face in self.faces:
if ATTR_CONFIDENCE not in face:
continue
f_co = face[ATTR_CONFIDENCE]
if f_co > confidence:
confidence = f_co
for attr in (ATTR_NAME, ATTR_MOTION):
if attr in face:
state = face[attr]
break
return state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return "face"
@final
@property
def state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_FACES: self.faces, ATTR_TOTAL_FACES: self.total_faces}
def process_faces(self, faces, total):
"""Send event with detected faces and store data."""
run_callback_threadsafe(
self.hass.loop, self.async_process_faces, faces, total
).result()
@callback
def async_process_faces(self, faces, total):
"""Send event with detected faces and store data.
        faces is a list of dicts of the following form:
[
{
ATTR_CONFIDENCE: 80,
ATTR_NAME: 'Name',
ATTR_AGE: 12.0,
ATTR_GENDER: 'man',
ATTR_MOTION: 'smile',
ATTR_GLASSES: 'sunglasses'
},
]
This method must be run in the event loop.
"""
# Send events
for face in faces:
if (
ATTR_CONFIDENCE in face
and self.confidence
and face[ATTR_CONFIDENCE] < self.confidence
):
continue
face.update({ATTR_ENTITY_ID: self.entity_id})
self.hass.async_add_job(self.hass.bus.async_fire, EVENT_DETECT_FACE, face)
# Update entity store
self.faces = faces
self.total_faces = total
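class _ExampleFaceEntity(ImageProcessingFaceEntity):
    """Illustrative sketch only, not a real platform: shows how a concrete
    integration would feed detections into the face entity base class. The
    camera entity id and the detection payload below are hypothetical.
    """

    @property
    def camera_entity(self):
        """Return the (hypothetical) camera whose pictures are processed."""
        return "camera.front_door"

    def process_image(self, image):
        """Pretend to analyse `image` and report a single detected face."""
        faces = [{ATTR_CONFIDENCE: 95.0, ATTR_NAME: "Visitor"}]
        self.process_faces(faces, len(faces))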
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite."""
import copy
import datetime
from nova import db
from nova import exception
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
if key in self.values:
return self.values[key]
else:
raise NotImplementedError()
def __repr__(self):
return '<FakeModel: %s>' % self.values
def get(self, name):
return self.values[name]
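def _fake_model_example():
    """Minimal sketch: FakeModel exposes the same values through attribute,
    item and get() access (the field values are made up)."""
    model = FakeModel({'id': 7, 'address': '192.168.0.7'})
    return model.id, model['address'], model.get('address')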
def stub_out(stubs, funcs):
"""Set the stubs in mapping in the db api."""
for func in funcs:
func_name = '_'.join(func.__name__.split('_')[1:])
stubs.Set(db, func_name, func)
stubs.Set(db.api, func_name, func)
fixed_ip_fields = {'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance': False,
'instance_uuid': 'eb57d790-fc60-4119-a51a-f2b0913bdc93',
'allocated': False,
'virtual_interface_id': 0,
'virtual_interface': None,
'floating_ips': []}
network_fields = {'id': 0,
'cidr': '192.168.0.0/24',
'netmask': '255.255.255.0',
'cidr_v6': 'dead:beef::/64',
'netmask_v6': '64',
'project_id': 'fake',
'label': 'fake',
'gateway': '192.168.0.1',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'broadcast': '192.168.0.255',
'gateway_v6': 'dead:beef::1',
'dns': '192.168.0.1',
'vlan': None,
'host': None,
'injected': False,
'vpn_public_address': '192.168.0.2'}
flavor_fields = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.1.100',
'fixed_ip_id': None,
'fixed_ip': None,
'project_id': None,
'pool': 'nova',
'auto_assigned': False}
virtual_interface_fields = {'id': 0,
'address': 'DE:AD:BE:EF:00:00',
'network_id': 0,
'instance_id': 0,
'network': FakeModel(network_fields)}
fixed_ips = [fixed_ip_fields]
floating_ips = [floating_ip_fields]
virtual_interfacees = [virtual_interface_fields]
networks = [network_fields]
def fake_floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
ips = filter(lambda i: i['fixed_ip_id'] is None and
i['project_id'] is None and
i['pool'] == pool,
floating_ips)
if not ips:
raise exception.NoMoreFloatingIps()
ips[0]['project_id'] = project_id
ips[0]['auto_assigned'] = auto_assigned
return FakeModel(ips[0])
def fake_floating_ip_deallocate(context, address):
ips = filter(lambda i: i['address'] == address,
floating_ips)
if ips:
ips[0]['project_id'] = None
ips[0]['auto_assigned'] = False
def fake_floating_ip_disassociate(context, address):
ips = filter(lambda i: i['address'] == address,
floating_ips)
if ips:
fixed_ip_address = None
if ips[0]['fixed_ip']:
fixed_ip_address = ips[0]['fixed_ip']['address']
ips[0]['fixed_ip'] = None
ips[0]['host'] = None
return fixed_ip_address
def fake_floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
float = filter(lambda i: i['address'] == floating_address,
floating_ips)
fixed = filter(lambda i: i['address'] == fixed_address,
fixed_ips)
if float and fixed:
float[0]['fixed_ip'] = fixed[0]
float[0]['fixed_ip_id'] = fixed[0]['id']
float[0]['host'] = host
def fake_floating_ip_get_all_by_host(context, host):
# TODO(jkoelker): Once we get the patches that remove host from
# the floating_ip table, we'll need to stub
# this out
pass
def fake_floating_ip_get_by_address(context, address):
if isinstance(address, FakeModel):
# NOTE(tr3buchet): yo dawg, i heard you like addresses
address = address['address']
ips = filter(lambda i: i['address'] == address,
floating_ips)
if not ips:
raise exception.FloatingIpNotFoundForAddress(address=address)
return FakeModel(ips[0])
def fake_fixed_ip_associate(context, address, instance_id):
ips = filter(lambda i: i['address'] == address,
fixed_ips)
if not ips:
raise exception.NoMoreFixedIps(net='fake_net')
ips[0]['instance'] = True
ips[0]['instance_id'] = instance_id
def fake_fixed_ip_associate_pool(context, network_id, instance_id):
ips = filter(lambda i: (i['network_id'] == network_id or
i['network_id'] is None) and not i['instance'],
fixed_ips)
if not ips:
raise exception.NoMoreFixedIps(net=network_id)
ips[0]['instance'] = True
ips[0]['instance_id'] = instance_id
return ips[0]['address']
def fake_fixed_ip_create(context, values):
ip = dict(fixed_ip_fields)
ip['id'] = max([i['id'] for i in fixed_ips] or [-1]) + 1
for key in values:
ip[key] = values[key]
return ip
def fake_fixed_ip_disassociate(context, address):
ips = filter(lambda i: i['address'] == address,
fixed_ips)
if ips:
ips[0]['instance_id'] = None
ips[0]['instance'] = None
ips[0]['virtual_interface'] = None
ips[0]['virtual_interface_id'] = None
def fake_fixed_ip_disassociate_all_by_timeout(context, host, time):
return 0
def fake_fixed_ip_get_all(context):
return [FakeModel(i) for i in fixed_ips]
def fake_fixed_ip_get_by_instance(context, instance_uuid):
ips = filter(lambda i: i['instance_uuid'] == instance_uuid,
fixed_ips)
return [FakeModel(i) for i in ips]
def fake_fixed_ip_get_by_address(context, address):
ips = filter(lambda i: i['address'] == address,
fixed_ips)
if ips:
return FakeModel(ips[0])
def fake_fixed_ip_update(context, address, values):
ips = filter(lambda i: i['address'] == address,
fixed_ips)
fif = copy.deepcopy(fixed_ip_fields)
if ips:
for key in values:
ips[0][key] = values[key]
if key == 'virtual_interface_id':
vif = filter(lambda x: x['id'] == values[key],
virtual_interfacees)
if not vif:
continue
fif['virtual_interface'] = FakeModel(vif[0])
def fake_flavor_get(context, id):
if flavor_fields['id'] == id:
return FakeModel(flavor_fields)
def fake_virtual_interface_create(context, values):
vif = dict(virtual_interface_fields)
vif['id'] = max([m['id'] for m in virtual_interfacees] or [-1]) + 1
for key in values:
vif[key] = values[key]
return FakeModel(vif)
def fake_virtual_interface_delete_by_instance(context, instance_id):
vif = copy.copy(virtual_interfacees)
addresses = [m for m in vif
if m['instance_id'] == instance_id]
try:
for address in addresses:
vif.remove(address)
except ValueError:
pass
def fake_virtual_interface_get_by_instance(context, instance_id):
return [FakeModel(m) for m in virtual_interfacees
if m['instance_id'] == instance_id]
def fake_virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id):
vif = filter(lambda m: m['instance_id'] == instance_id and
m['network_id'] == network_id,
virtual_interfacees)
if not vif:
return None
return FakeModel(vif[0])
def fake_network_create_safe(context, values):
net = dict(network_fields)
net['id'] = max([n['id'] for n in networks] or [-1]) + 1
for key in values:
net[key] = values[key]
return FakeModel(net)
def fake_network_get(context, network_id):
net = filter(lambda n: n['id'] == network_id, networks)
if not net:
return None
return FakeModel(net[0])
def fake_network_get_all(context):
return [FakeModel(n) for n in networks]
def fake_network_get_all_by_host(context, host):
nets = filter(lambda n: n['host'] == host, networks)
return [FakeModel(n) for n in nets]
def fake_network_set_host(context, network_id, host_id):
nets = filter(lambda n: n['id'] == network_id, networks)
for net in nets:
net['host'] = host_id
return host_id
def fake_network_update(context, network_id, values):
nets = filter(lambda n: n['id'] == network_id, networks)
for net in nets:
for key in values:
net[key] = values[key]
def fake_project_get_networks(context, project_id):
return [FakeModel(n) for n in networks
if n['project_id'] == project_id]
def stub_out_db_network_api(stubs):
funcs = [fake_floating_ip_allocate_address,
fake_floating_ip_deallocate,
fake_floating_ip_disassociate,
fake_floating_ip_fixed_ip_associate,
fake_floating_ip_get_all_by_host,
fake_floating_ip_get_by_address,
fake_fixed_ip_associate,
fake_fixed_ip_associate_pool,
fake_fixed_ip_create,
fake_fixed_ip_disassociate,
fake_fixed_ip_disassociate_all_by_timeout,
fake_fixed_ip_get_all,
fake_fixed_ip_get_by_instance,
fake_fixed_ip_get_by_address,
fake_fixed_ip_update,
fake_flavor_get,
fake_virtual_interface_create,
fake_virtual_interface_delete_by_instance,
fake_virtual_interface_get_by_instance,
fake_virtual_interface_get_by_instance_and_network,
fake_network_create_safe,
fake_network_get,
fake_network_get_all,
fake_network_get_all_by_host,
fake_network_set_host,
fake_network_update,
fake_project_get_networks]
stub_out(stubs, funcs)
def stub_out_db_instance_api(stubs, injected=True):
"""Stubs out the db API for creating Instances."""
def _create_instance_type(**updates):
instance_type = {'id': 2,
'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'vcpu_weight': None,
'root_gb': 0,
'ephemeral_gb': 10,
'flavorid': 1,
'rxtx_factor': 1.0,
'swap': 0,
'deleted_at': None,
'created_at': datetime.datetime(2014, 8, 8, 0, 0, 0),
'updated_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'extra_specs': {},
}
if updates:
instance_type.update(updates)
return instance_type
INSTANCE_TYPES = {
'm1.tiny': _create_instance_type(
id=2,
name='m1.tiny',
memory_mb=512,
vcpus=1,
vcpu_weight=None,
root_gb=0,
ephemeral_gb=10,
flavorid=1,
rxtx_factor=1.0,
swap=0),
'm1.small': _create_instance_type(
id=5,
name='m1.small',
memory_mb=2048,
vcpus=1,
vcpu_weight=None,
root_gb=20,
ephemeral_gb=0,
flavorid=2,
rxtx_factor=1.0,
swap=1024),
'm1.medium': _create_instance_type(
id=1,
name='m1.medium',
memory_mb=4096,
vcpus=2,
vcpu_weight=None,
root_gb=40,
ephemeral_gb=40,
flavorid=3,
rxtx_factor=1.0,
swap=0),
'm1.large': _create_instance_type(
id=3,
name='m1.large',
memory_mb=8192,
vcpus=4,
vcpu_weight=10,
root_gb=80,
ephemeral_gb=80,
flavorid=4,
rxtx_factor=1.0,
swap=0),
'm1.xlarge': _create_instance_type(
id=4,
name='m1.xlarge',
memory_mb=16384,
vcpus=8,
vcpu_weight=None,
root_gb=160,
ephemeral_gb=160,
flavorid=5,
rxtx_factor=1.0,
swap=0)}
fixed_ip_fields = {'address': '10.0.0.3',
'address_v6': 'fe80::a00:3',
'network_id': 'fake_flat'}
def fake_flavor_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
def fake_flavor_get_by_name(context, name):
return INSTANCE_TYPES[name]
def fake_flavor_get(context, id):
for name, inst_type in INSTANCE_TYPES.iteritems():
if str(inst_type['id']) == str(id):
return inst_type
return None
def fake_fixed_ip_get_by_instance(context, instance_id):
return [FakeModel(fixed_ip_fields)]
funcs = [fake_flavor_get_all,
fake_flavor_get_by_name,
fake_flavor_get,
fake_fixed_ip_get_by_instance]
stub_out(stubs, funcs)
|
|
from sympy import sin, cos, atan2, log, exp, gamma, conjugate, sqrt, \
factorial, Integral, Piecewise, Add, diff, symbols, S, Float, Dummy, Eq
from sympy import Catalan, EulerGamma, E, GoldenRatio, I, pi
from sympy import Function, Rational, Integer, Lambda
from sympy.core.relational import Relational
from sympy.logic.boolalg import And, Or, Not, Equivalent, Xor
from sympy.printing.fcode import fcode, FCodePrinter
from sympy.tensor import IndexedBase, Idx
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.matrices import Matrix, MatrixSymbol
def test_printmethod():
x = symbols('x')
class nint(Function):
def _fcode(self, printer):
return "nint(%s)" % printer._print(self.args[0])
assert fcode(nint(x)) == " nint(x)"
def test_fcode_Pow():
x, y = symbols('x,y')
n = symbols('n', integer=True)
assert fcode(x**3) == " x**3"
assert fcode(x**(y**3)) == " x**(y**3)"
assert fcode(1/(sin(x)*3.5)**(x - y**x)/(x**2 + y)) == \
" (3.5d0*sin(x))**(-x + y**x)/(x**2 + y)"
assert fcode(sqrt(x)) == ' sqrt(x)'
assert fcode(sqrt(n)) == ' sqrt(dble(n))'
assert fcode(x**0.5) == ' sqrt(x)'
assert fcode(sqrt(x)) == ' sqrt(x)'
assert fcode(sqrt(10)) == ' sqrt(10.0d0)'
assert fcode(x**-1.0) == ' 1.0/x'
assert fcode(x**-2.0, 'y', source_format='free') == 'y = x**(-2.0d0)' # 2823
assert fcode(x**Rational(3, 7)) == ' x**(3.0d0/7.0d0)'
def test_fcode_Rational():
x = symbols('x')
assert fcode(Rational(3, 7)) == " 3.0d0/7.0d0"
assert fcode(Rational(18, 9)) == " 2"
assert fcode(Rational(3, -7)) == " -3.0d0/7.0d0"
assert fcode(Rational(-3, -7)) == " 3.0d0/7.0d0"
assert fcode(x + Rational(3, 7)) == " x + 3.0d0/7.0d0"
assert fcode(Rational(3, 7)*x) == " (3.0d0/7.0d0)*x"
def test_fcode_Integer():
assert fcode(Integer(67)) == " 67"
assert fcode(Integer(-1)) == " -1"
def test_fcode_Float():
assert fcode(Float(42.0)) == " 42.0000000000000d0"
assert fcode(Float(-1e20)) == " -1.00000000000000d+20"
def test_fcode_functions():
x, y = symbols('x,y')
assert fcode(sin(x) ** cos(y)) == " sin(x)**cos(y)"
#issue 6814
def test_fcode_functions_with_integers():
    x = symbols('x')
assert fcode(x * log(10)) == " x*2.30258509299405d0"
assert fcode(x * log(S(10))) == " x*2.30258509299405d0"
assert fcode(log(S(10))) == " 2.30258509299405d0"
assert fcode(exp(10)) == " 22026.4657948067d0"
assert fcode(x * log(log(10))) == " x*0.834032445247956d0"
assert fcode(x * log(log(S(10)))) == " x*0.834032445247956d0"
def test_fcode_NumberSymbol():
p = FCodePrinter()
assert fcode(Catalan) == ' parameter (Catalan = 0.915965594177219d0)\n Catalan'
assert fcode(EulerGamma) == ' parameter (EulerGamma = 0.577215664901533d0)\n EulerGamma'
assert fcode(E) == ' parameter (E = 2.71828182845905d0)\n E'
assert fcode(GoldenRatio) == ' parameter (GoldenRatio = 1.61803398874989d0)\n GoldenRatio'
assert fcode(pi) == ' parameter (pi = 3.14159265358979d0)\n pi'
assert fcode(
pi, precision=5) == ' parameter (pi = 3.1416d0)\n pi'
assert fcode(Catalan, human=False) == (set(
[(Catalan, p._print(Catalan.evalf(15)))]), set([]), ' Catalan')
assert fcode(EulerGamma, human=False) == (set([(EulerGamma, p._print(
EulerGamma.evalf(15)))]), set([]), ' EulerGamma')
assert fcode(E, human=False) == (
set([(E, p._print(E.evalf(15)))]), set([]), ' E')
assert fcode(GoldenRatio, human=False) == (set([(GoldenRatio, p._print(
GoldenRatio.evalf(15)))]), set([]), ' GoldenRatio')
assert fcode(pi, human=False) == (
set([(pi, p._print(pi.evalf(15)))]), set([]), ' pi')
assert fcode(pi, precision=5, human=False) == (
set([(pi, p._print(pi.evalf(5)))]), set([]), ' pi')
def test_fcode_complex():
assert fcode(I) == " cmplx(0,1)"
x = symbols('x')
assert fcode(4*I) == " cmplx(0,4)"
assert fcode(3 + 4*I) == " cmplx(3,4)"
assert fcode(3 + 4*I + x) == " cmplx(3,4) + x"
assert fcode(I*x) == " cmplx(0,1)*x"
assert fcode(3 + 4*I - x) == " cmplx(3,4) - x"
x = symbols('x', imaginary=True)
assert fcode(5*x) == " 5*x"
assert fcode(I*x) == " cmplx(0,1)*x"
assert fcode(3 + x) == " x + 3"
def test_implicit():
x, y = symbols('x,y')
assert fcode(sin(x)) == " sin(x)"
assert fcode(atan2(x, y)) == " atan2(x, y)"
assert fcode(conjugate(x)) == " conjg(x)"
def test_not_fortran():
x = symbols('x')
g = Function('g')
assert fcode(
gamma(x)) == "C Not supported in Fortran:\nC gamma\n gamma(x)"
assert fcode(Integral(sin(x))) == "C Not supported in Fortran:\nC Integral\n Integral(sin(x), x)"
assert fcode(g(x)) == "C Not supported in Fortran:\nC g\n g(x)"
def test_user_functions():
x = symbols('x')
assert fcode(sin(x), user_functions={"sin": "zsin"}) == " zsin(x)"
x = symbols('x')
assert fcode(
gamma(x), user_functions={"gamma": "mygamma"}) == " mygamma(x)"
g = Function('g')
assert fcode(g(x), user_functions={"g": "great"}) == " great(x)"
n = symbols('n', integer=True)
assert fcode(
factorial(n), user_functions={"factorial": "fct"}) == " fct(n)"
def test_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert fcode(g(x)) == " 2*x"
g = implemented_function('g', Lambda(x, 2*pi/x))
assert fcode(g(x)) == (
" parameter (pi = 3.14159265358979d0)\n"
" 2*pi/x"
)
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
assert fcode(g(A[i]), assign_to=A[i]) == (
" do i = 1, n\n"
" A(i) = (A(i) + 1)*(A(i) + 2)*A(i)\n"
" end do"
)
def test_assign_to():
x = symbols('x')
assert fcode(sin(x), assign_to="s") == " s = sin(x)"
def test_line_wrapping():
x, y = symbols('x,y')
assert fcode(((x + y)**10).expand(), assign_to="var") == (
" var = x**10 + 10*x**9*y + 45*x**8*y**2 + 120*x**7*y**3 + 210*x**6*\n"
" @ y**4 + 252*x**5*y**5 + 210*x**4*y**6 + 120*x**3*y**7 + 45*x**2*y\n"
" @ **8 + 10*x*y**9 + y**10"
)
e = [x**i for i in range(11)]
assert fcode(Add(*e)) == (
" x**10 + x**9 + x**8 + x**7 + x**6 + x**5 + x**4 + x**3 + x**2 + x\n"
" @ + 1"
)
def test_fcode_precedence():
x, y = symbols("x y")
assert fcode(And(x < y, y < x + 1), source_format="free") == \
"x < y .and. y < x + 1"
assert fcode(Or(x < y, y < x + 1), source_format="free") == \
"x < y .or. y < x + 1"
assert fcode(Xor(x < y, y < x + 1, evaluate=False),
source_format="free") == "x < y .neqv. y < x + 1"
assert fcode(Equivalent(x < y, y < x + 1), source_format="free") == \
"x < y .eqv. y < x + 1"
def test_fcode_Logical():
x, y, z = symbols("x y z")
# unary Not
assert fcode(Not(x), source_format="free") == ".not. x"
# binary And
assert fcode(And(x, y), source_format="free") == "x .and. y"
assert fcode(And(x, Not(y)), source_format="free") == "x .and. .not. y"
assert fcode(And(Not(x), y), source_format="free") == "y .and. .not. x"
assert fcode(And(Not(x), Not(y)), source_format="free") == \
".not. x .and. .not. y"
assert fcode(Not(And(x, y), evaluate=False), source_format="free") == \
".not. (x .and. y)"
# binary Or
assert fcode(Or(x, y), source_format="free") == "x .or. y"
assert fcode(Or(x, Not(y)), source_format="free") == "x .or. .not. y"
assert fcode(Or(Not(x), y), source_format="free") == "y .or. .not. x"
assert fcode(Or(Not(x), Not(y)), source_format="free") == \
".not. x .or. .not. y"
assert fcode(Not(Or(x, y), evaluate=False), source_format="free") == \
".not. (x .or. y)"
# mixed And/Or
assert fcode(And(Or(y, z), x), source_format="free") == "x .and. (y .or. z)"
assert fcode(And(Or(z, x), y), source_format="free") == "y .and. (x .or. z)"
assert fcode(And(Or(x, y), z), source_format="free") == "z .and. (x .or. y)"
assert fcode(Or(And(y, z), x), source_format="free") == "x .or. y .and. z"
assert fcode(Or(And(z, x), y), source_format="free") == "y .or. x .and. z"
assert fcode(Or(And(x, y), z), source_format="free") == "z .or. x .and. y"
# trinary And
assert fcode(And(x, y, z), source_format="free") == "x .and. y .and. z"
assert fcode(And(x, y, Not(z)), source_format="free") == \
"x .and. y .and. .not. z"
assert fcode(And(x, Not(y), z), source_format="free") == \
"x .and. z .and. .not. y"
assert fcode(And(Not(x), y, z), source_format="free") == \
"y .and. z .and. .not. x"
assert fcode(Not(And(x, y, z), evaluate=False), source_format="free") == \
".not. (x .and. y .and. z)"
# trinary Or
assert fcode(Or(x, y, z), source_format="free") == "x .or. y .or. z"
assert fcode(Or(x, y, Not(z)), source_format="free") == \
"x .or. y .or. .not. z"
assert fcode(Or(x, Not(y), z), source_format="free") == \
"x .or. z .or. .not. y"
assert fcode(Or(Not(x), y, z), source_format="free") == \
"y .or. z .or. .not. x"
assert fcode(Not(Or(x, y, z), evaluate=False), source_format="free") == \
".not. (x .or. y .or. z)"
def test_fcode_Xlogical():
x, y, z = symbols("x y z")
# binary Xor
assert fcode(Xor(x, y, evaluate=False), source_format="free") == \
"x .neqv. y"
assert fcode(Xor(x, Not(y), evaluate=False), source_format="free") == \
"x .neqv. .not. y"
assert fcode(Xor(Not(x), y, evaluate=False), source_format="free") == \
"y .neqv. .not. x"
assert fcode(Xor(Not(x), Not(y), evaluate=False),
source_format="free") == ".not. x .neqv. .not. y"
assert fcode(Not(Xor(x, y, evaluate=False), evaluate=False),
source_format="free") == ".not. (x .neqv. y)"
# binary Equivalent
assert fcode(Equivalent(x, y), source_format="free") == "x .eqv. y"
assert fcode(Equivalent(x, Not(y)), source_format="free") == \
"x .eqv. .not. y"
assert fcode(Equivalent(Not(x), y), source_format="free") == \
"y .eqv. .not. x"
assert fcode(Equivalent(Not(x), Not(y)), source_format="free") == \
".not. x .eqv. .not. y"
assert fcode(Not(Equivalent(x, y), evaluate=False),
source_format="free") == ".not. (x .eqv. y)"
# mixed And/Equivalent
assert fcode(Equivalent(And(y, z), x), source_format="free") == \
"x .eqv. y .and. z"
assert fcode(Equivalent(And(z, x), y), source_format="free") == \
"y .eqv. x .and. z"
assert fcode(Equivalent(And(x, y), z), source_format="free") == \
"z .eqv. x .and. y"
assert fcode(And(Equivalent(y, z), x), source_format="free") == \
"x .and. (y .eqv. z)"
assert fcode(And(Equivalent(z, x), y), source_format="free") == \
"y .and. (x .eqv. z)"
assert fcode(And(Equivalent(x, y), z), source_format="free") == \
"z .and. (x .eqv. y)"
# mixed Or/Equivalent
assert fcode(Equivalent(Or(y, z), x), source_format="free") == \
"x .eqv. y .or. z"
assert fcode(Equivalent(Or(z, x), y), source_format="free") == \
"y .eqv. x .or. z"
assert fcode(Equivalent(Or(x, y), z), source_format="free") == \
"z .eqv. x .or. y"
assert fcode(Or(Equivalent(y, z), x), source_format="free") == \
"x .or. (y .eqv. z)"
assert fcode(Or(Equivalent(z, x), y), source_format="free") == \
"y .or. (x .eqv. z)"
assert fcode(Or(Equivalent(x, y), z), source_format="free") == \
"z .or. (x .eqv. y)"
# mixed Xor/Equivalent
assert fcode(Equivalent(Xor(y, z, evaluate=False), x),
source_format="free") == "x .eqv. (y .neqv. z)"
assert fcode(Equivalent(Xor(z, x, evaluate=False), y),
source_format="free") == "y .eqv. (x .neqv. z)"
assert fcode(Equivalent(Xor(x, y, evaluate=False), z),
source_format="free") == "z .eqv. (x .neqv. y)"
assert fcode(Xor(Equivalent(y, z), x, evaluate=False),
source_format="free") == "x .neqv. (y .eqv. z)"
assert fcode(Xor(Equivalent(z, x), y, evaluate=False),
source_format="free") == "y .neqv. (x .eqv. z)"
assert fcode(Xor(Equivalent(x, y), z, evaluate=False),
source_format="free") == "z .neqv. (x .eqv. y)"
# mixed And/Xor
assert fcode(Xor(And(y, z), x, evaluate=False), source_format="free") == \
"x .neqv. y .and. z"
assert fcode(Xor(And(z, x), y, evaluate=False), source_format="free") == \
"y .neqv. x .and. z"
assert fcode(Xor(And(x, y), z, evaluate=False), source_format="free") == \
"z .neqv. x .and. y"
assert fcode(And(Xor(y, z, evaluate=False), x), source_format="free") == \
"x .and. (y .neqv. z)"
assert fcode(And(Xor(z, x, evaluate=False), y), source_format="free") == \
"y .and. (x .neqv. z)"
assert fcode(And(Xor(x, y, evaluate=False), z), source_format="free") == \
"z .and. (x .neqv. y)"
# mixed Or/Xor
assert fcode(Xor(Or(y, z), x, evaluate=False), source_format="free") == \
"x .neqv. y .or. z"
assert fcode(Xor(Or(z, x), y, evaluate=False), source_format="free") == \
"y .neqv. x .or. z"
assert fcode(Xor(Or(x, y), z, evaluate=False), source_format="free") == \
"z .neqv. x .or. y"
assert fcode(Or(Xor(y, z, evaluate=False), x), source_format="free") == \
"x .or. (y .neqv. z)"
assert fcode(Or(Xor(z, x, evaluate=False), y), source_format="free") == \
"y .or. (x .neqv. z)"
assert fcode(Or(Xor(x, y, evaluate=False), z), source_format="free") == \
"z .or. (x .neqv. y)"
# trinary Xor
assert fcode(Xor(x, y, z, evaluate=False), source_format="free") == \
"x .neqv. y .neqv. z"
assert fcode(Xor(x, y, Not(z), evaluate=False), source_format="free") == \
"x .neqv. y .neqv. .not. z"
assert fcode(Xor(x, Not(y), z, evaluate=False), source_format="free") == \
"x .neqv. z .neqv. .not. y"
assert fcode(Xor(Not(x), y, z, evaluate=False), source_format="free") == \
"y .neqv. z .neqv. .not. x"
def test_fcode_Relational():
x, y = symbols("x y")
assert fcode(Relational(x, y, "=="), source_format="free") == "Eq(x, y)"
assert fcode(Relational(x, y, "!="), source_format="free") == "Ne(x, y)"
assert fcode(Relational(x, y, ">="), source_format="free") == "x >= y"
assert fcode(Relational(x, y, "<="), source_format="free") == "x <= y"
assert fcode(Relational(x, y, ">"), source_format="free") == "x > y"
assert fcode(Relational(x, y, "<"), source_format="free") == "x < y"
def test_fcode_Piecewise():
x = symbols('x')
expr = Piecewise((x, x < 1), (x**2, True))
# Check that inline conditional (merge) fails if standard isn't 95+
raises(NotImplementedError, lambda: fcode(expr))
code = fcode(expr, standard=95)
expected = " merge(x, x**2, x < 1)"
assert code == expected
assert fcode(Piecewise((x, x < 1), (x**2, True)), assign_to="var") == (
" if (x < 1) then\n"
" var = x\n"
" else\n"
" var = x**2\n"
" end if"
)
a = cos(x)/x
b = sin(x)/x
for i in range(10):
a = diff(a, x)
b = diff(b, x)
expected = (
" if (x < 0) then\n"
" weird_name = -cos(x)/x + 10*sin(x)/x**2 + 90*cos(x)/x**3 - 720*\n"
" @ sin(x)/x**4 - 5040*cos(x)/x**5 + 30240*sin(x)/x**6 + 151200*cos(x\n"
" @ )/x**7 - 604800*sin(x)/x**8 - 1814400*cos(x)/x**9 + 3628800*sin(x\n"
" @ )/x**10 + 3628800*cos(x)/x**11\n"
" else\n"
" weird_name = -sin(x)/x - 10*cos(x)/x**2 + 90*sin(x)/x**3 + 720*\n"
" @ cos(x)/x**4 - 5040*sin(x)/x**5 - 30240*cos(x)/x**6 + 151200*sin(x\n"
" @ )/x**7 + 604800*cos(x)/x**8 - 1814400*sin(x)/x**9 - 3628800*cos(x\n"
" @ )/x**10 + 3628800*sin(x)/x**11\n"
" end if"
)
code = fcode(Piecewise((a, x < 0), (b, True)), assign_to="weird_name")
assert code == expected
code = fcode(Piecewise((x, x < 1), (x**2, x > 1), (sin(x), True)), standard=95)
expected = " merge(x, merge(x**2, sin(x), x > 1), x < 1)"
assert code == expected
# Check that a Piecewise without a True (default) condition raises an error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: fcode(expr))
def test_wrap_fortran():
# "########################################################################"
printer = FCodePrinter()
lines = [
"C This is a long comment on a single line that must be wrapped properly to produce nice output",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/must + be + wrapped + properly",
]
wrapped_lines = printer._wrap_fortran(lines)
expected_lines = [
"C This is a long comment on a single line that must be wrapped",
"C properly to produce nice output",
" this = is + a + long + and + nasty + fortran + statement + that *",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that *",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ * must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that*",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ *must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement +",
" @ that*must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that**",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ **must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement + that",
" @ **must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement +",
" @ that**must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)/",
" @ must + be + wrapped + properly",
" this = is + a + long + and + nasty + fortran + statement(that)",
" @ /must + be + wrapped + properly",
]
for line in wrapped_lines:
assert len(line) <= 72
for w, e in zip(wrapped_lines, expected_lines):
assert w == e
assert len(wrapped_lines) == len(expected_lines)
def test_wrap_fortran_keep_d0():
printer = FCodePrinter()
lines = [
' this_variable_is_very_long_because_we_try_to_test_line_break=1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break = 10.0d0'
]
expected = [
' this_variable_is_very_long_because_we_try_to_test_line_break=1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 1.0d0',
' this_variable_is_very_long_because_we_try_to_test_line_break =',
' @ 10.0d0'
]
assert printer._wrap_fortran(lines) == expected
def test_settings():
raises(TypeError, lambda: fcode(S(4), method="garbage"))
def test_free_form_code_line():
x, y = symbols('x,y')
assert fcode(cos(x) + sin(y), source_format='free') == "sin(y) + cos(x)"
def test_free_form_continuation_line():
x, y = symbols('x,y')
result = fcode(((cos(x) + sin(y))**(7)).expand(), source_format='free')
expected = (
'sin(y)**7 + 7*sin(y)**6*cos(x) + 21*sin(y)**5*cos(x)**2 + 35*sin(y)**4* &\n'
' cos(x)**3 + 35*sin(y)**3*cos(x)**4 + 21*sin(y)**2*cos(x)**5 + 7* &\n'
' sin(y)*cos(x)**6 + cos(x)**7'
)
assert result == expected
def test_free_form_comment_line():
printer = FCodePrinter({'source_format': 'free'})
lines = [ "! This is a long comment on a single line that must be wrapped properly to produce nice output"]
expected = [
'! This is a long comment on a single line that must be wrapped properly',
'! to produce nice output']
assert printer._wrap_fortran(lines) == expected
def test_loops():
n, m = symbols('n,m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
expected = (
'do i = 1, m\n'
' y(i) = 0\n'
'end do\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = %(rhs)s\n'
' end do\n'
'end do'
)
code = fcode(A[i, j]*x[j], assign_to=y[i], source_format='free')
assert (code == expected % {'rhs': 'y(i) + A(i, j)*x(j)'} or
code == expected % {'rhs': 'y(i) + x(j)*A(i, j)'} or
code == expected % {'rhs': 'x(j)*A(i, j) + y(i)'} or
code == expected % {'rhs': 'A(i, j)*x(j) + y(i)'})
def test_dummy_loops():
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'do i_%(icount)i = 1, m_%(mcount)i\n'
' y(i_%(icount)i) = x(i_%(icount)i)\n'
'end do'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = fcode(x[i], assign_to=y[i], source_format='free')
assert code == expected
def test_fcode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = fcode(e.rhs, assign_to=e.lhs, contract=False)
assert code0.endswith('Dy(i) = (y(i + 1) - y(i))/(x(i + 1) - x(i))')
def test_derived_classes():
class MyFancyFCodePrinter(FCodePrinter):
_default_settings = FCodePrinter._default_settings.copy()
printer = MyFancyFCodePrinter()
x = symbols('x')
assert printer.doprint(sin(x), "bork") == " bork = sin(x)"
def test_indent():
codelines = (
'subroutine test(a)\n'
'integer :: a, i, j\n'
'\n'
'do\n'
'do \n'
'do j = 1, 5\n'
'if (a>b) then\n'
'if(b>0) then\n'
'a = 3\n'
'donot_indent_me = 2\n'
'do_not_indent_me_either = 2\n'
'ifIam_indented_something_went_wrong = 2\n'
'if_I_am_indented_something_went_wrong = 2\n'
'end should not be unindented here\n'
'end if\n'
'endif\n'
'end do\n'
'end do\n'
'enddo\n'
'end subroutine\n'
'\n'
'subroutine test2(a)\n'
'integer :: a\n'
'do\n'
'a = a + 1\n'
'end do \n'
'end subroutine\n'
)
expected = (
'subroutine test(a)\n'
'integer :: a, i, j\n'
'\n'
'do\n'
' do \n'
' do j = 1, 5\n'
' if (a>b) then\n'
' if(b>0) then\n'
' a = 3\n'
' donot_indent_me = 2\n'
' do_not_indent_me_either = 2\n'
' ifIam_indented_something_went_wrong = 2\n'
' if_I_am_indented_something_went_wrong = 2\n'
' end should not be unindented here\n'
' end if\n'
' endif\n'
' end do\n'
' end do\n'
'enddo\n'
'end subroutine\n'
'\n'
'subroutine test2(a)\n'
'integer :: a\n'
'do\n'
' a = a + 1\n'
'end do \n'
'end subroutine\n'
)
p = FCodePrinter({'source_format': 'free'})
result = p.indent_code(codelines)
assert result == expected
def test_Matrix_printing():
x, y, z = symbols('x,y,z')
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
assert fcode(mat, A) == (
" A(1, 1) = x*y\n"
" if (y > 0) then\n"
" A(2, 1) = x + 2\n"
" else\n"
" A(2, 1) = y\n"
" end if\n"
" A(3, 1) = sin(z)")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
assert fcode(expr, standard=95) == (
" merge(2*A(3, 1), A(3, 1), x > 0) + sin(A(2, 1)) + A(1, 1)")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert fcode(m, M) == (
" M(1, 1) = sin(q(2, 1))\n"
" M(2, 1) = q(2, 1) + q(3, 1)\n"
" M(3, 1) = 2*q(5, 1)*1.0/q(2, 1)\n"
" M(1, 2) = 0\n"
" M(2, 2) = q(4, 1)\n"
" M(3, 2) = 4 + sqrt(q(1, 1))\n"
" M(1, 3) = cos(q(3, 1))\n"
" M(2, 3) = 5\n"
" M(3, 3) = 0")
|
|
"""Polynomial division algorithms for use with class Polynomial"""
from sympy.polynomials.base import *
def div(f, g, var=None, order=None, coeff=None):
"""Division with remainder.
Usage:
======
The input consists of a polynomial f and either one or a list
of polynomials g. When these are just SymPy expressions, you
can additionally specify the variables and monomial orders
with 'var' and 'order', respectively. Only f is checked for
the input types, the rest is assumed to match.
If 'coeff' is set to 'int', only term divisions with proper
coefficient divisions are allowed. That is, 3*x divides 6*x*y,
but not 2*x**2.
The output's type is Polynomial, but there is a wrapper, see
L{wrapper.div}. When g consists of a single element, the
resulting list of quotients is unpacked as well. The second
output is the remainder.
Notes:
======
The algorithm iterates over the terms of f, checking whether
any element of g (or g itself, if it is the sole divisor) has
a leading term that divides the current leading term of f. If
so, the proper multiple of that element is subtracted from f,
so that the term is eliminated; otherwise the term is moved to
the remainder. This is repeated until f is 0.
This way the algorithm does not stop when the leading terms of
g fail to divide the leading term of f, but keeps reducing the
remaining terms of f. The well-known univariate division with
remainder by a single polynomial is a special case of this
function.
The result can depend on the order of the elements in g. For
uniqueness, they need to form a Groebner base of the ideal
they generate, see L{groebner_.groebner}. (A sketch verifying
the division invariant f == q*g + r follows this function.)
Examples:
=========
>>> x, y = symbols('xy')
>>> q, r = div(x**2 + 6*x + 1, 3*x - 1)
>>> print q
19/9 + (1/3)*x
>>> print r
28/9
>>> q, r = div(x**2 + 6*x + 1, 3*x - 1, coeff='int')
>>> print q
2
>>> print r
3 + x**2
>>> q, r = div(2*x**3*y**2 - x*y + y**3, [x-y, y**2], [x,y], 'lex')
>>> print q[0]
-y + 2*y**4 + 2*x*y**3 + 2*x**2*y**2
>>> print q[1]
(-1) + y + 2*y**3
>>> print r
0
References:
===========
Cox, Little, O'Shea: Ideals, Varieties and Algorithms,
Springer, 2nd edition, p. 62
"""
if not isinstance(g, list):
g = [g]
# Only f is checked, the rest is assumed to match.
if not isinstance(f, Polynomial):
f = sympify(f)
g = map(lambda x: sympify(x), g)
if isinstance(var, Symbol):
var = [var]
if var is None:
var = merge_var(f.atoms(type=Symbol),
*[g_i.atoms(type=Symbol) for g_i in g])
f = Polynomial(f, var=var, order=order)
g = map(lambda x: Polynomial(x, var=var, order=order), g)
# Begin computation.
r = Polynomial(S.Zero, var=f.var, order=f.order)
q = []
for i in range(0,len(g)):
q.append(r)
while f.sympy_expr is not S.Zero:
for g_i in g:
if g_i.sympy_expr is S.Zero: # Avoid division by 0.
continue
# Check if leading term of f is divisible by that of g_i.
# When coeff equals 'int', check the coefficient's
# divisibility, too.
td = term_div(f.coeffs[0], g_i.coeffs[0])
if (coeff != 'int' or isinstance(td[0], Integer)) \
and all([e.is_nonnegative for e in td[1:]]):
quot = Polynomial(coeffs=(td,), var=f.var, order=f.order)
q[g.index(g_i)] += quot
f -= quot*g_i
break
else: # No division occurred, add the leading term to the remainder.
lt = f.leading_term()
r += lt
f -= lt
if len(q) == 1:
return q[0], r
else:
return q, r
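# ---------------------------------------------------------------------------
# A minimal illustrative sketch, assuming only the div() function above and
# the Symbol class re-exported from sympy.polynomials.base: the quotient(s)
# and remainder always satisfy f == q*g + r (or f == q[0]*g[0] + ... + r for
# a list of divisors). The helper is hypothetical, has no side effects, and
# can be called manually to verify the invariant.
def _div_invariant_sketch():
    x = Symbol('x')
    f, g = x**2 + 6*x + 1, 3*x - 1
    q, r = div(f, g)
    # Reassemble f from quotient and remainder; the difference must vanish.
    assert (q.sympy_expr*g + r.sympy_expr - f).expand() == 0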
def gcd(f, g, var=None, order=None, coeff=None):
"""Greatest common divisor.
Usage:
======
Input are two polynomials, either as SymPy expressions or as
instances of Polynomial. In the first case, the variables and
monomial order can be specified with 'var' and 'order',
respectively.
If 'coeff' is set to 'int', the content of each polynomial is
checked and their gcd multiplied to the result. Otherwise it
is monic, that is, of leading coefficient 1.
The output's type is Polynomial, but there is a wrapper, see
L{wrapper.gcd}.
Notes:
======
With only one variable, Euclid's algorithm is used directly,
which is reasonably fast. But in the multivariate case, the
gcd has to be computed via the least common multiple, which
relies on Groebner bases. This is based on the identity
f*g = gcd(f, g)*lcm(f, g)
(a sketch checking this identity follows lcm() below).
Examples:
=========
>>> x, y = symbols('xy')
>>> print gcd(4*x**2*y, 6*x*y**2)
x*y
>>> print gcd(4*x**2*y, 6*x*y**2, coeff='int')
2*x*y
References:
===========
Cox, Little, O'Shea: Ideals, Varieties and Algorithms,
Springer, 2nd edition, p. 41 & p. 187
See also L{div}, L{lcm}.
"""
# Check if f is a Polynomial already, g is assumed to match.
if not isinstance(f, Polynomial):
f = sympify(f)
g = sympify(g)
if var is None:
var = merge_var(f.atoms(type=Symbol), g.atoms(type=Symbol))
f = Polynomial(f, var=var, order=order)
g = Polynomial(g, var=var, order=order)
# Check if we need to keep an integer factor.
if coeff == 'int':
cf, f = f.as_primitive()
cg, g = g.as_primitive()
c = Integer(numbers.gcd(int(cf), int(cg)))
else:
c = S.One
if len(f.var) == 0: # Constant result.
return Polynomial(c, var=f.var, order=f.order)
elif len(f.var) == 1: # Use euclidean algorithm.
while g.sympy_expr is not S.Zero:
# Remove leading coefficient, to simplify computations.
lc, g = g.as_monic()
f, g = g, div(f, g)[-1]
else: # Use lcm and product to get multivariate gcd.
l = lcm(f, g)
q, r = div(f*g, l)
assert r.sympy_expr is S.Zero
lc, f = q.as_monic()
return Polynomial(coeffs=tuple([(c*t[0],) + t[1:] for t in f.coeffs]),
var=f.var, order=f.order)
def lcm(f, g, var=None, order=None, coeff=None):
"""Least common multiple.
Usage:
======
Input are two polynomials, either as SymPy expressions or as
instances of Polynomial. In the first case, the variables and
monomial order can be specified with 'var' and 'order',
respectively.
If 'coeff' is set to 'int', the content of each polynomial is
checked and their lcm multiplied to the result. Otherwise it
is monic, that is, of leading coefficient 1.
The output's type is Polynomial, but there is a wrapper, see
L{wrapper.lcm}.
Notes:
======
With only one variable, the gcd is used to get the lcm from
the product, via f*g = gcd(f, g)*lcm(f, g).
In the multivariate case, we compute the unique generator of
the intersection of the two ideals generated by f and g. This
is done by computing a lexicographic Groebner base of
[t*f, (t-1)*g], with a dummy variable t in the first place,
then keeping only the base elements that do not contain t.
Examples:
=========
>>> x, y = symbols('xy')
>>> print lcm(4*x**2*y, 6*x*y**2)
x**2*y**2
>>> print lcm(4*x**2*y, 6*x*y**2, coeff='int')
12*x**2*y**2
References:
===========
Cox, Little, O'Shea: Ideals, Varieties and Algorithms,
Springer, 2nd edition, p. 187
See also L{div}, L{gcd}.
"""
# Check if f is a Polynomial already, g is assumed to match.
if not isinstance(f, Polynomial):
f = sympify(f)
g = sympify(g)
if var is None:
var = merge_var(f.atoms(type=Symbol), g.atoms(type=Symbol))
f = Polynomial(f, var=var, order=order)
g = Polynomial(g, var=var, order=order)
# Check if we need to keep an integer factor.
if coeff == 'int':
cf, f = f.as_primitive()
cg, g = g.as_primitive()
cf, cg = int(cf), int(cg)
c = Integer(cf*cg/numbers.gcd(cf, cg))
else:
c = S.One
if len(f.var) == 0: # Constant result.
return Polynomial(c, var=f.var, order=f.order)
elif len(f.var) == 1: # Use gcd to get univariate lcm.
gcd_temp = gcd(f, g)
q, r = div(f*g, gcd_temp)
assert r.sympy_expr is S.Zero
lc, f = q.as_monic()
else:
# Compute a lexicographic Groebner base of the ideal generated
# by t*f and (t-1)*g, with unrelated t.
from sympy.polynomials import groebner_
t = Symbol('t', dummy=True)
var = [t] + f.var
G = groebner_.groebner([Polynomial(t*f.sympy_expr,
var=var, order='1-el'),
Polynomial((t-1)*g.sympy_expr,
var=var, order='1-el')],
reduced=True)
# Now intersect this result with the polynomial ring in the
# original variables, that is, eliminate t.
I = filter(lambda p: t not in p.sympy_expr.atoms(type=Symbol), G)
# The intersection should be a principal ideal, that is generated
# by a single polynomial.
if not len(I) == 1:
raise PolynomialException("No single generator for intersection.")
f = Polynomial(I[0].sympy_expr, var=f.var, order=f.order)
return Polynomial(coeffs=tuple([(c*t[0],) + t[1:] for t in f.coeffs]),
var=f.var, order=f.order)
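# ---------------------------------------------------------------------------
# A minimal illustrative sketch, assuming only the gcd() and lcm() functions
# above and the Symbol class re-exported from sympy.polynomials.base: with
# coeff='int' the two are tied together by f*g == gcd(f, g)*lcm(f, g), the
# identity the multivariate gcd computation relies on. The helper is
# hypothetical, has no side effects, and can be called manually.
def _gcd_lcm_identity_sketch():
    x, y = Symbol('x'), Symbol('y')
    f, g = 4*x**2*y, 6*x*y**2
    # gcd is 2*x*y and lcm is 12*x**2*y**2 (see the docstrings above), so
    # their product equals f*g == 24*x**3*y**3.
    product = gcd(f, g, coeff='int').sympy_expr * lcm(f, g, coeff='int').sympy_expr
    assert (product - f*g).expand() == 0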
|
|
#!/usr/bin/python
# coding=utf-8
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import operator
import pymongo
import re
import subprocess
import sys
import time
sys.path.insert(0, '../')
from mongo_orchestration import set_releases, cleanup_storage
from mongo_orchestration.sharded_clusters import ShardedCluster, ShardedClusters
from mongo_orchestration.replica_sets import ReplicaSets
from mongo_orchestration.servers import Servers
from mongo_orchestration.process import PortPool, HOSTNAME
from nose.plugins.attrib import attr
from tests import unittest, SkipTest
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
MONGODB_VERSION = re.compile(r"db version v(\d+)\.(\d+)\.(\d+)")
@attr('shards')
@attr('test')
class ShardsTestCase(unittest.TestCase):
def setUp(self):
self.sh = ShardedClusters()
set_releases({"default-release": os.environ.get('MONGOBIN', '')},
'default-release')
PortPool().change_range()
def tearDown(self):
# self.sh.cleanup()
cleanup_storage()
def test_singleton(self):
self.assertEqual(id(self.sh), id(ShardedClusters()))
def test_set_settings(self):
default_release = 'old-release'
releases = {default_release: os.path.join(os.getcwd(), 'bin')}
self.sh.set_settings(releases, default_release)
self.assertEqual(releases, self.sh.releases)
self.assertEqual(default_release, self.sh.default_release)
def test_bool(self):
self.assertEqual(False, bool(self.sh))
self.sh.create({'id': 'sh01'})
self.assertEqual(True, bool(self.sh))
def test_operations(self):
config = {'shards': [{}, {}, {}]}
cluster = ShardedCluster(config)
self.assertEqual(len(self.sh), 0)
operator.setitem(self.sh, 1, cluster)
self.assertEqual(len(self.sh), 1)
self.assertEqual(operator.getitem(self.sh, 1)['id'], cluster.id)
operator.delitem(self.sh, 1)
self.assertEqual(len(self.sh), 0)
self.assertRaises(KeyError, operator.getitem, self.sh, 1)
cluster.cleanup()
def test_operations2(self):
self.assertTrue(len(self.sh) == 0)
config1 = {'id': 'sh01'}
config2 = {'id': 'sh02'}
self.sh.create(config1)
self.sh.create(config2)
self.assertTrue(len(self.sh) == 2)
for key in self.sh:
self.assertTrue(key in ('sh01', 'sh02'))
for key in ('sh01', 'sh02'):
self.assertTrue(key in self.sh)
def test_cleanup(self):
config1 = {'id': 'sh01'}
config2 = {'id': 'sh02'}
self.assertTrue(len(self.sh) == 0)
self.sh.create(config1)
self.sh.create(config2)
self.assertTrue(len(self.sh) == 2)
self.sh.cleanup()
self.assertTrue(len(self.sh) == 0)
def test_sh_new(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [{'id': 'sh01'}, {'id': 'sh02'},
{'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}},
]
}
cluster_id = self.sh.create(config)
self.assertEqual(cluster_id, 'shard_cluster_1')
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
result = c.admin.command("listShards")
for shard in result['shards']:
self.assertTrue(shard['_id'] in ('sh01', 'sh02', 'sh-rs-01'))
c.close()
@attr('auth')
def test_sh_new_with_auth(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'auth_key': 'secret',
'login': 'admin',
'password': 'adminpass',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [{'id': 'sh01'}, {'id': 'sh02'}]
}
self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
self.assertRaises(pymongo.errors.OperationFailure, c.admin.command, "listShards")
c.admin.authenticate('admin', 'adminpass')
self.assertTrue(isinstance(c.admin.command("listShards"), dict))
c.close()
def test_sh_del(self):
sh1_id = self.sh.create({})
sh2_id = self.sh.create({})
self.assertEqual(len(self.sh), 2)
self.sh.remove(sh1_id)
self.assertEqual(len(self.sh), 1)
self.sh.remove(sh2_id)
self.assertEqual(len(self.sh), 0)
def test_info(self):
config = {
'configsvrs': [{}, {}, {}],
'routers': [{}, {}, {}],
'shards': [{}, {}]
}
sh_id = self.sh.create(config)
info = self.sh.info(sh_id)
self.assertTrue(isinstance(info, dict))
for item in ("shards", "configsvrs", "routers", "uri", "mongodb_uri", "orchestration"):
self.assertTrue(item in info)
self.assertEqual(len(info['shards']), 2)
self.assertEqual(len(info['configsvrs']), 3)
self.assertEqual(len(info['routers']), 3)
self.assertTrue(',' in info['uri'])
self.assertTrue(info['uri'] in info['mongodb_uri'])
self.assertTrue(info['mongodb_uri'].find('mongodb://') == 0)
self.assertEqual(info['orchestration'], 'sharded_clusters')
def test_configsvrs(self):
config = {}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.configsvrs(sh_id)), 1)
self.sh.cleanup()
config = {'configsvrs': [{}, {}, {}]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.configsvrs(sh_id)), 3)
def test_routers(self):
config = {}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.routers(sh_id)), 1)
self.sh.cleanup()
config = {'routers': [{}, {}, {}]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.routers(sh_id)), 3)
def test_router_add(self):
config = {}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.routers(sh_id)), 1)
self.sh.router_add(sh_id, {})
self.assertEqual(len(self.sh.routers(sh_id)), 2)
self.sh.router_add(sh_id, {})
self.assertEqual(len(self.sh.routers(sh_id)), 3)
self.sh.cleanup()
def test_members(self):
port = PortPool().port(check=True)
config = {'routers': [{'port': port}]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.members(sh_id)), 0)
self.sh.cleanup()
config = {'routers': [{'port': port}], 'shards': [{}, {}, {}]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.members(sh_id)), 3)
def test_member_info(self):
config = {'shards': [{'id': 'member1'}, {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
sh_id = self.sh.create(config)
info = self.sh.member_info(sh_id, 'member1')
self.assertEqual(info['id'], 'member1')
self.assertTrue(info['isServer'])
self.assertTrue('_id' in info)
info = self.sh.member_info(sh_id, 'sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
@attr('auth')
def test_member_info_with_auth(self):
config = {'auth_key': 'secret', 'login': 'admin', 'password': 'admin', 'shards': [{'id': 'member1'}, {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
sh_id = self.sh.create(config)
info = self.sh.member_info(sh_id, 'member1')
self.assertEqual(info['id'], 'member1')
self.assertTrue(info['isServer'])
self.assertTrue('_id' in info)
info = self.sh.member_info(sh_id, 'sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
def test_member_del(self):
port = PortPool().port(check=True)
config = {'routers': [{'port': port}], 'shards': [{'id': 'member1'}, {'id': 'member2'}, {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
sh_id = self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
result = c.admin.command("listShards")
self.assertEqual(len(result['shards']), 3)
# remove member-host
result = self.sh.member_del(sh_id, 'member1')
self.assertEqual(len(c.admin.command("listShards")['shards']), 3)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'member1')
time.sleep(5)
result = self.sh.member_del(sh_id, 'member1')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(c.admin.command("listShards")['shards']), 2)
self.assertEqual(result['shard'], 'member1')
# remove member-replicaset
result = self.sh.member_del(sh_id, 'sh-rs-01')
self.assertEqual(len(c.admin.command("listShards")['shards']), 2)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'sh-rs-01')
time.sleep(7)
result = self.sh.member_del(sh_id, 'sh-rs-01')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(c.admin.command("listShards")['shards']), 1)
self.assertEqual(result['shard'], 'sh-rs-01')
def test_member_add(self):
port = PortPool().port(check=True)
config = {'routers': [{'port': port}]}
sh_id = self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
self.assertEqual(len(c.admin.command("listShards")['shards']), 0)
result = self.sh.member_add(sh_id, {'id': 'test1', 'shardParams': {}})
self.assertTrue(result.get('isServer', False))
self.assertEqual(result['id'], 'test1')
self.assertEqual(len(c.admin.command("listShards")['shards']), 1)
result = self.sh.member_add(sh_id, {'id': 'test2', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}})
self.assertFalse(result.get('isServer', False))
self.assertTrue(result.get('isReplicaSet', False))
self.assertEqual(result['id'], 'test2')
self.assertEqual(len(c.admin.command("listShards")['shards']), 2)
@attr('shards')
@attr('test')
class ShardTestCase(unittest.TestCase):
def mongod_version(self):
proc = subprocess.Popen(
[os.path.join(self.bin_path, 'mongod'), '--version'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
version_raw = str(proc.stdout.read())
m = MONGODB_VERSION.match(version_raw)
if m:
return m.groups()
def setUp(self):
self.bin_path = os.environ.get('MONGOBIN', '')
set_releases({'default-release': self.bin_path},
'default-release')
PortPool().change_range()
def tearDown(self):
if hasattr(self, 'sh') and self.sh is not None:
self.sh.cleanup()
def test_len(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh), 0)
self.sh.member_add('test01', {})
self.assertEqual(len(self.sh), 1)
self.sh.member_add('test02', {})
self.assertEqual(len(self.sh), 2)
while self.sh.member_remove('test01')['state'] != 'completed':
time.sleep(1)
self.assertEqual(len(self.sh), 1)
def test_sh_new(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [{'id': 'sh01'}, {'id': 'sh02'}]
}
self.sh = ShardedCluster(config)
c = pymongo.MongoClient(self.sh.router['hostname'])
for item in c.admin.command("listShards")['shards']:
self.assertTrue(item['_id'] in ('sh01', 'sh02'))
@attr('auth')
def test_sh_new_with_auth(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'auth_key': 'secret',
'login': 'admin',
'password': 'adminpass',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [{'id': 'sh01'}, {'id': 'sh02'}]
}
self.sh = ShardedCluster(config)
c = pymongo.MongoClient(self.sh.router['hostname'])
self.assertRaises(pymongo.errors.OperationFailure, c.admin.command, "listShards")
c.admin.authenticate('admin', 'adminpass')
self.assertTrue(isinstance(c.admin.command("listShards"), dict))
for item in c.admin.command("listShards")['shards']:
self.assertTrue(item['_id'] in ('sh01', 'sh02'))
def test_cleanup(self):
config = {
'id': 'shard_cluster_1',
'configsvrs': [{}],
'routers': [{}],
'shards': [{'id': 'sh01'}, {'id': 'sh02'},
{'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}},
]
}
self.sh = ShardedCluster(config)
self.assertTrue(len(self.sh) == len(config['shards']))
self.sh.cleanup()
self.assertTrue(len(self.sh) == 0)
def test_configsvrs(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.configsvrs), 1)
self.sh.cleanup()
config = {'configsvrs': [{}, {}, {}]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.configsvrs), 3)
self.sh.cleanup()
def test_routers(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.routers), 1)
self.sh.cleanup()
config = {'routers': [{}, {}, {}]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.routers), 3)
self.sh.cleanup()
def test_members(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 0)
self.sh.cleanup()
config = {'shards': [{}, {}, {}]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 3)
self.sh.cleanup()
def test_router(self):
config = {}
self.sh = ShardedCluster(config)
self.assertTrue(Servers().info(self.sh.router['id'])['statuses']['mongos'])
self.sh.cleanup()
config = {'routers': [{}, {}, {}]}
self.sh = ShardedCluster(config)
routers = self.sh.routers
hostname = routers[1]['hostname']
_id = routers[1]['id']
# stop routers 0 and 2
Servers().command(routers[0]['id'], 'stop')
Servers().command(routers[2]['id'], 'stop')
router = self.sh.router
self.assertEqual(router['id'], _id)
self.assertEqual(router['hostname'], hostname)
self.sh.cleanup()
def test_router_add(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.routers), 1)
self.sh.router_add({})
self.assertEqual(len(self.sh.routers), 2)
self.sh.router_add({})
self.assertEqual(len(self.sh.routers), 3)
self.sh.cleanup()
def test_router_command(self):
config = {'shards': [{}, {}]}
self.sh = ShardedCluster(config)
result = self.sh.router_command('listShards', is_eval=False)
self.assertEqual(result['ok'], 1)
self.sh.cleanup()
def test_member_add(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 0)
result = self.sh.member_add('test1', {})
self.assertTrue(result.get('isServer', False))
self.assertEqual(result['id'], 'test1')
self.assertEqual(len(self.sh.members), 1)
result = self.sh.member_add('test2', {'id': 'rs1', 'members': [{}, {}]})
self.assertFalse(result.get('isServer', False))
self.assertTrue(result.get('isReplicaSet', False))
self.assertEqual(result['id'], 'test2')
self.assertEqual(len(self.sh.members), 2)
self.sh.cleanup()
def test_member_info(self):
config = {'shards': [{'id': 'member1'}, {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
self.sh = ShardedCluster(config)
info = self.sh.member_info('member1')
self.assertEqual(info['id'], 'member1')
self.assertTrue(info['isServer'])
self.assertTrue('_id' in info)
info = self.sh.member_info('sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
self.sh.cleanup()
@attr('auth')
def test_member_info_with_auth(self):
config = {'auth_key': 'secret', 'login': 'admin', 'password': 'adminpass', 'shards': [{'id': 'member1'}, {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
self.sh = ShardedCluster(config)
info = self.sh.member_info('member1')
self.assertEqual(info['id'], 'member1')
self.assertTrue(info['isServer'])
self.assertTrue('_id' in info)
info = self.sh.member_info('sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
self.sh.cleanup()
def test_member_remove(self):
config = {'shards': [{'id': 'member1'}, {'id': 'member2'}, {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 3)
# remove member-host
result = self.sh.member_remove('member1')
self.assertEqual(len(self.sh.members), 3)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'member1')
time.sleep(5)
result = self.sh.member_remove('member1')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(self.sh.members), 2)
self.assertEqual(result['shard'], 'member1')
# remove member-replicaset
result = self.sh.member_remove('sh-rs-01')
self.assertEqual(len(self.sh.members), 2)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'sh-rs-01')
time.sleep(7)
result = self.sh.member_remove('sh-rs-01')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(self.sh.members), 1)
self.assertEqual(result['shard'], 'sh-rs-01')
self.sh.cleanup()
def test_info(self):
config = {
'configsvrs': [{}, {}, {}],
'routers': [{}, {}, {}],
'shards': [{}, {}]
}
self.sh = ShardedCluster(config)
info = self.sh.info()
self.assertTrue('shards' in info)
self.assertTrue('configsvrs' in info)
self.assertTrue('routers' in info)
self.assertEqual(len(info['shards']), 2)
self.assertEqual(len(info['configsvrs']), 3)
self.assertEqual(len(info['routers']), 3)
self.sh.cleanup()
def test_tagging(self):
version = self.mongod_version()
if version and tuple(map(int, version)) < (2, 2, 0):
raise SkipTest("mongodb v{version} doesn't support shard tagging".format(version='.'.join(version)))
tags = ['tag1', 'tag2']
tags_repl = ['replTag']
config = {
'configsvrs': [{}], 'routers': [{}],
'shards': [{'id': 'sh01', 'shardParams': {'tags': tags}},
{'id': 'sh02'},
{'id': 'sh03', 'shardParams': {'tags': tags_repl, 'members': [{}, {}]}}
]
}
self.sh = ShardedCluster(config)
self.assertEqual(tags, self.sh.member_info('sh01')['tags'])
self.assertEqual([], self.sh.member_info('sh02')['tags'])
self.assertEqual(tags_repl, self.sh.member_info('sh03')['tags'])
self.sh.cleanup()
def test_reset(self):
all_hosts = []
# Start a ShardedCluster with 1 router and 1 config server.
self.sh = ShardedCluster({})
# Add 1 Server shard and 1 ReplicaSet shard.
server_id = self.sh.member_add(params={})['_id']
all_hosts.append(Servers().hostname(server_id))
repl_id = self.sh.member_add(params={'members': [{}, {}, {}]})['_id']
# Shut down the standalone.
Servers().command(server_id, 'stop')
# Shut down each member of the replica set.
server_ids = [m['server_id'] for m in ReplicaSets().members(repl_id)]
for s_id in server_ids:
Servers().command(s_id, 'stop')
all_hosts.append(Servers().hostname(s_id))
# Shut down config server and router.
config_id = self.sh.configsvrs[0]['id']
print("config_id=%r" % config_id)
all_hosts.append(Servers().hostname(config_id))
router_id = self.sh.routers[0]['id']
print("router_id=%r" % router_id)
all_hosts.append(Servers().hostname(router_id))
Servers().command(config_id, 'stop')
Servers().command(router_id, 'stop')
# Reset the ShardedCluster.
self.sh.reset()
# Everything is up.
for host in all_hosts:
# No ConnectionFailure/AutoReconnect.
pymongo.MongoClient(host)
if __name__ == '__main__':
unittest.main(verbosity=3)
# suite = unittest.TestSuite()
# suite.addTest(ShardTestCase('test_sh_new'))
# suite.addTest(ShardTestCase('test_sh_new_with_auth'))
# suite.addTest(ShardsTestCase('test_operations'))
# unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# -*- coding: utf-8 -*-
""" S3 Charting Toolkit
@copyright: 2011-15 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{NumPy}} <http://www.numpy.org>}
@requires: U{B{I{MatPlotLib}} <http://matplotlib.sourceforge.net>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3Chart"]
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import current
from gluon.storage import Storage
from gluon.html import IMG
# =============================================================================
class S3Chart(object):
"""
Module for graphing
Currently a simple wrapper to matplotlib
"""
# This folder needs to be writable by the web2py process
CACHE_PATH = "/%s/static/cache/chart" % current.request.application
# -------------------------------------------------------------------------
def __init__(self, path, width=9, height=6):
"""
Create the base Figure object
@param width: the figure width (x100px)
@param height: the figure height (x100px)
"""
try:
# Causes deadlocking issues
# http://sjohannes.wordpress.com/2010/06/11/using-matplotlib-in-a-web-application/
#import matplotlib
#matplotlib.use("Agg")
#import matplotlib.pyplot as plt
#from pylab import savefig
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
MATPLOTLIB = True
except ImportError:
import sys
print >> sys.stderr, "WARNING: S3Chart unresolved dependency: matplotlib required for charting"
MATPLOTLIB = False
self.filename = path
self.width = width
self.height = height
self.asInt = False
if MATPLOTLIB:
self.fig = Figure(figsize=(width, height))
else:
self.fig = None
# -------------------------------------------------------------------------
@staticmethod
def getCachedPath(filename):
import os
path = "applications"
chartFile = "%s/%s.png" % (S3Chart.CACHE_PATH, filename)
fullPath = "%s%s" % (path, chartFile)
if os.path.exists(fullPath):
return chartFile
else:
return None
# -------------------------------------------------------------------------
@staticmethod
def getCachedFile(filename):
"""
Return the opened cached file, if the file can't be found then
return None
"""
chartFile = S3Chart.getCachedPath(filename)
if chartFile:
try:
f = open(chartFile)
return f.read()
except:
# for some reason we were unable to read the cached version
pass
return None
# -------------------------------------------------------------------------
@staticmethod
def storeCachedFile(filename, image):
"""
Save the file in the cache area, and return the path to this file
"""
path = "applications"
chartFile = "%s/%s.png" % (S3Chart.CACHE_PATH, filename)
fullPath = "%s%s" % (path, chartFile)
try:
f = open(fullPath, "w+")
print >> f, image
except:
return None
return chartFile
# -------------------------------------------------------------------------
@staticmethod
def purgeCache(prefix=None):
"""
Delete the files in the cache that match the file name prefix,
if the prefix is None then all files will be deleted
"""
import os
folder = "applications%s/" % S3Chart.CACHE_PATH
if os.path.exists(folder):
filelist = os.listdir(folder)
for file in filelist:
if prefix is None or file.startswith(prefix):
os.remove("%s%s" % (folder, file))
# -------------------------------------------------------------------------
def draw(self, output="xml"):
"""
Output the chart as a PNG embedded in an IMG tag
- used by the Delphi module
"""
fig = self.fig
if not fig:
return "Matplotlib not installed"
# For interactive shell tests
#plt.show()
# For web response
#savefig(response.body)
chart = Storage()
chart.body = StringIO()
chart.headers = Storage()
chart.headers["Content-Type"] = "image/png"
canvas = self.FigureCanvas(fig)
canvas.print_figure(chart.body)
#return response.body.getvalue()
image = chart.body.getvalue()
# IE 8 and earlier have a 32K limit on URIs, which can quickly be
# exceeded if the image is too large. So the image is stored on
# the server and a URI pointing to it is used in the src attribute.
cachePath = self.storeCachedFile(self.filename, image)
if output == "xml":
if cachePath != None:
image = IMG(_src = cachePath)
else:
import base64
base64Img = base64.b64encode(image)
image = IMG(_src="data:image/png;base64,%s" % base64Img)
else:
current.response.headers["Content-Type"] = "image/png"
return image
# -------------------------------------------------------------------------
def survey_hist(self, title,
data, bins, min, max, xlabel=None, ylabel=None):
"""
Draw a Histogram
- used by the Survey module
"""
fig = self.fig
if not fig:
return "Matplotlib not installed"
from numpy import arange
# Draw a histogram
ax = fig.add_subplot(111)
ax.hist(data, bins=bins, range=(min, max))
left = arange(0, bins + 1)
if self.asInt:
label = left * int(max / bins)
else:
label = left * max / bins
ax.set_xticks(label)
ax.set_xticklabels(label, rotation=30)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# -------------------------------------------------------------------------
def survey_pie(self, title, data, label):
"""
Draw a Pie Chart
- used by the Survey module
"""
fig = self.fig
if not fig:
return "Matplotlib not installed"
# Draw a pie chart
ax = fig.add_subplot(111)
ax.pie(data, labels=label)
ax.legend()
ax.set_title(title)
# -------------------------------------------------------------------------
def survey_bar(self, title, data, labels, legendLabels):
"""
Draw a Bar Chart
- used by the Survey module
"""
barColourList = ["#F2D7A0", "#7B77A8", "#69889A", "#9D7B34"]
barColourListExt = [(242, 215, 160),
(123, 118, 168),
(105, 136, 154),
(157, 123, 52)
]
fig = self.fig
if not fig:
return "Matplotlib not installed"
from numpy import arange
# Draw a bar chart
if not isinstance(data[0],list):
dataList = [data]
else:
dataList = data
legendColCnt = 3
cnt = len(labels)
dcnt = len(dataList)
lcnt = 0
if legendLabels != None:
lcnt = (len(legendLabels) + legendColCnt - 1) / legendColCnt
width = 0.9 / dcnt
offset = 0
gap = 0.1 / dcnt
bcnt = 0
bars = []
height = max(0.2, 0.85 - (0.04 * lcnt))
rect = [0.08, 0.08, 0.9, height]
ax = fig.add_axes(rect)
for data in dataList:
left = arange(offset, cnt + offset) # the x locations for the bars
if bcnt < 3:
colour = barColourList[bcnt]
else:
colour = []
colourpart = barColourListExt[bcnt%4]
divisor = 256.0 - (32 * bcnt/4)
if divisor < 0.0:
divisor = divisor * -1
for part in colourpart:
calc = part/divisor
while calc > 1.0:
calc -= 1
colour.append(calc)
plot = ax.bar(left, data, width=width, color=colour)
bars.append(plot[0])
bcnt += 1
offset += width + gap
left = arange(cnt)
lblAdjust = (1.0 - gap) * 0.5
if cnt <= 3:
angle = 0
elif cnt <= 10:
angle = -10
elif cnt <= 20:
angle = -30
else:
angle = -45
ax.set_xticks(left + lblAdjust)
try: # tick_params is only available in matplotlib 1.1 and later
ax.set_xticklabels(labels, rotation=angle)
ax.tick_params(labelsize=self.width)
except AttributeError:
newlabels = []
for label in labels:
if len(label) > 12:
label = label[0:10] + "..."
newlabels.append(label)
ax.set_xticklabels(newlabels)
ax.set_title(title)
if legendLabels != None:
fig.legend(bars,
legendLabels,
"upper left",
mode="expand",
ncol = legendColCnt,
prop={"size":10},
)
# END =========================================================================
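# =============================================================================
# A minimal usage sketch, assuming a web2py request context (S3Chart reads
# current.request at class-definition time), an installed matplotlib and a
# writable static/cache/chart folder. The data values and the cache file name
# "survey_damage_hist" are arbitrary examples; the helper is hypothetical and
# only exercises the S3Chart class defined above.
def _s3chart_usage_sketch():
    chart = S3Chart(path="survey_damage_hist", width=7, height=4)
    chart.asInt = True  # label the x-axis with integer tick values
    chart.survey_hist("Households per damage level",
                      data=[0, 1, 1, 2, 3, 3, 3, 4],
                      bins=5, min=0, max=5,
                      xlabel="Damage level",
                      ylabel="Households")
    # Returns an IMG helper; the rendered PNG is cached under static/cache/chart
    return chart.draw(output="xml")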
|
|
import datetime
import json
from google.appengine.ext import ndb
from backend.common.consts.event_type import CMP_EVENT_TYPES, EventType
from backend.common.sitevars.cmp_registration_hacks import ChampsRegistrationHacks
from backend.tasks_io.datafeeds.parsers.fms_api.fms_api_event_list_parser import (
FMSAPIEventListParser,
)
def test_parse_event_list(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2015_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2015).parse(data)
assert isinstance(events, list)
assert isinstance(districts, list)
# File has 6 events, but we ignore CMP divisions (only subdivisions), so only 5 are expected back
assert len(events) == 5
assert len(districts) == 1
def test_parse_regional_event(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2015_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, _ = FMSAPIEventListParser(2015).parse(data)
event = events[0]
assert event.key_name == "2015nyny"
assert event.name == "New York City Regional"
assert event.short_name == "New York City"
assert event.event_short == "nyny"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2015, month=3, day=12, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2015, month=3, day=15, hour=23, minute=59, second=59
)
assert event.venue == "Jacob K. Javits Convention Center"
assert event.city == "New York"
assert event.state_prov == "NY"
assert event.country == "USA"
assert event.year == 2015
assert event.event_type_enum == EventType.REGIONAL
assert event.district_key is None
def test_parse_regional_event_code_override(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2015_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, _ = FMSAPIEventListParser(2015, short="nyc").parse(data)
event = events[0]
assert event.key_name == "2015nyc"
assert event.name == "New York City Regional"
assert event.short_name == "New York City"
assert event.event_short == "nyc"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2015, month=3, day=12, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2015, month=3, day=15, hour=23, minute=59, second=59
)
assert event.venue == "Jacob K. Javits Convention Center"
assert event.city == "New York"
assert event.state_prov == "NY"
assert event.country == "USA"
assert event.year == 2015
assert event.event_type_enum == EventType.REGIONAL
assert event.district_key is None
def test_parse_district_event(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2015_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2015).parse(data)
event = events[1]
district = districts[0]
assert event.key_name == "2015cthar"
assert event.name == "NE District - Hartford Event"
assert event.short_name == "Hartford"
assert event.event_short == "cthar"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2015, month=3, day=27, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2015, month=3, day=29, hour=23, minute=59, second=59
)
assert event.venue == "Hartford Public High School"
assert event.city == "Hartford"
assert event.state_prov == "CT"
assert event.country == "USA"
assert event.year == 2015
assert event.event_type_enum == EventType.DISTRICT
assert event.district_key == district.key
assert district.key_name == "2015ne"
assert district.abbreviation == "ne"
assert district.year == 2015
def test_parse_district_cmp(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2015_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2015).parse(data)
event = events[2]
district = districts[0]
assert event.key_name == "2015necmp"
assert (
event.name == "NE FIRST District Championship presented by United Technologies"
)
assert event.short_name == "NE FIRST"
assert event.event_short == "necmp"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2015, month=4, day=8, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2015, month=4, day=11, hour=23, minute=59, second=59
)
assert event.venue == "Sports and Recreation Center, WPI"
assert event.city == "Worcester"
assert event.state_prov == "MA"
assert event.country == "USA"
assert event.year == 2015
assert event.event_type_enum == EventType.DISTRICT_CMP
assert event.district_key == district.key
def test_parse_cmp_subdivision(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2015_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, _ = FMSAPIEventListParser(2015).parse(data)
event = events[3]
assert event.key_name == "2015tes"
assert event.name == "Tesla Division"
assert event.short_name == "Tesla"
assert event.event_short == "tes"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2015, month=4, day=22, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2015, month=4, day=25, hour=23, minute=59, second=59
)
assert event.venue == "Edward Jones Dome"
assert event.city == "St. Louis"
assert event.state_prov == "MO"
assert event.country == "USA"
assert event.year == 2015
assert event.event_type_enum == EventType.CMP_DIVISION
assert event.district_key is None
def test_parse_offseason(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2015_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, _ = FMSAPIEventListParser(2015).parse(data)
event = events[4]
assert event.key_name == "2015iri"
assert event.name == "Indiana Robotics Invitational"
assert event.short_name == "Indiana Robotics Invitational"
assert event.event_short == "iri"
assert event.official is False
assert event.start_date == datetime.datetime(
year=2015, month=7, day=17, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2015, month=7, day=18, hour=23, minute=59, second=59
)
assert event.venue == "Lawrence North HS"
assert event.city == "Indianapolis"
assert event.state_prov == "IN"
assert event.country == "USA"
assert event.year == 2015
assert event.event_type_enum == EventType.OFFSEASON
assert event.district_key is None
def test_parse_2017_event(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2017_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2017).parse(data)
assert len(events) == 165
assert len(districts) == 10
event = events[16]
assert event.key_name == "2017casj"
assert event.name == "Silicon Valley Regional"
assert event.short_name == "Silicon Valley"
assert event.event_short == "casj"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2017, month=3, day=29, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2017, month=4, day=1, hour=23, minute=59, second=59
)
assert event.venue == "San Jose State University - The Event Center"
assert event.city == "San Jose"
assert event.state_prov == "CA"
assert event.country == "USA"
assert event.year == 2017
assert event.event_type_enum == EventType.REGIONAL
assert event.district_key is None
# New in 2017
assert event.website == "http://www.firstsv.org"
def test_parse_2017_events_with_cmp_hacks(test_data_importer):
hack_sitevar = {
"event_name_override": [
{
"event": "2017cmpmo",
"name": "FIRST Championship Event",
"short_name": "Championship",
},
{
"event": "2017cmptx",
"name": "FIRST Championship Event",
"short_name": "Championship",
},
],
"set_start_to_last_day": ["2017cmptx", "2017cmpmo"],
"divisions_to_skip": ["2017arc", "2017cars", "2017cur", "2017dal", "2017dar"],
}
ChampsRegistrationHacks.put(hack_sitevar)
path = test_data_importer._get_path(__file__, "data/2017_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2017).parse(data)
assert len(events) == 160
assert len(districts) == 10
    non_einstein_types = set(CMP_EVENT_TYPES)  # copy so the shared module-level constant is not mutated
    non_einstein_types.remove(EventType.CMP_FINALS)
for key in hack_sitevar["divisions_to_skip"]:
assert not list(filter(lambda e: e.key_name == key, events))
einstein_stl = next(e for e in events if e.key_name == "2017cmpmo")
assert einstein_stl is not None
assert einstein_stl.name == "FIRST Championship Event (St. Louis)"
assert einstein_stl.short_name == "Championship (St. Louis)"
assert einstein_stl.start_date == datetime.datetime(
year=2017, month=4, day=29, hour=0, minute=0, second=0
)
assert einstein_stl.end_date == datetime.datetime(
year=2017, month=4, day=29, hour=23, minute=59, second=59
)
einstein_hou = next(e for e in events if e.key_name == "2017cmptx")
assert einstein_hou is not None
assert einstein_hou.name == "FIRST Championship Event (Houston)"
assert einstein_hou.short_name == "Championship (Houston)"
assert einstein_hou.start_date == datetime.datetime(
year=2017, month=4, day=22, hour=0, minute=0, second=0
)
assert einstein_hou.end_date == datetime.datetime(
year=2017, month=4, day=22, hour=23, minute=59, second=59
)
def test_parse_2017_official_offseason(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2017_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2017).parse(data)
assert len(events) == 165
assert len(districts) == 10
event = next(e for e in events if e.key_name == "2017iri")
assert event.key_name == "2017iri"
assert event.name == "Indiana Robotics Invitational"
assert event.short_name == "Indiana Robotics Invitational"
assert event.event_short == "iri"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2017, month=7, day=14, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2017, month=7, day=15, hour=23, minute=59, second=59
)
assert event.venue == "Lawrence North High School"
assert event.city == "Indianapolis"
assert event.state_prov == "IN"
assert event.country == "USA"
assert event.year == 2017
assert event.event_type_enum == EventType.OFFSEASON
assert event.district_key is None
assert event.website == "http://indianaroboticsinvitational.org/"
assert event.webcast == []
def test_parse_2018_event(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2018_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2018).parse(data)
assert len(events) == 178
assert len(districts) == 10
event = events[18]
assert event.key_name == "2018casj"
assert event.name == "Silicon Valley Regional"
assert event.short_name == "Silicon Valley"
assert event.event_short == "casj"
assert event.official is True
assert event.start_date == datetime.datetime(
year=2018, month=3, day=28, hour=0, minute=0, second=0
)
assert event.end_date == datetime.datetime(
year=2018, month=3, day=31, hour=23, minute=59, second=59
)
assert event.venue == "San Jose State University - The Event Center"
assert event.city == "San Jose"
assert event.state_prov == "CA"
assert event.country == "USA"
assert event.year == 2018
assert event.event_type_enum == EventType.REGIONAL
assert event.district_key is None
assert event.website == "http://www.firstsv.org"
# New in 2018
assert event.webcast == [
{"type": "twitch", "channel": "firstinspires9"},
{"type": "twitch", "channel": "firstinspires10"},
]
def test_parse_division_parent_2017(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2017_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2017).parse(data)
assert len(events) == 165
assert len(districts) == 10
# Test division <-> parent associations
for event in events:
event_key = event.key.id()
if event_key == "2017micmp":
assert event.parent_event is None
assert event.divisions == [
ndb.Key("Event", "2017micmp1"),
ndb.Key("Event", "2017micmp2"),
ndb.Key("Event", "2017micmp3"),
ndb.Key("Event", "2017micmp4"),
]
elif event_key in {"2017micmp1", "2017micmp2", "2017micmp3", "2017micmp4"}:
assert event.parent_event == ndb.Key("Event", "2017micmp")
assert event.divisions == []
elif event_key == "2017cmptx":
assert event.parent_event is None
assert event.divisions == [
ndb.Key("Event", "2017carv"),
ndb.Key("Event", "2017gal"),
ndb.Key("Event", "2017hop"),
ndb.Key("Event", "2017new"),
ndb.Key("Event", "2017roe"),
ndb.Key("Event", "2017tur"),
]
elif event_key in {
"2017carv",
"2017gal",
"2017hop",
"2017new",
"2017roe",
"2017tur",
}:
assert event.parent_event == ndb.Key("Event", "2017cmptx")
assert event.divisions == []
elif event_key == "2017cmpmo":
assert event.parent_event is None
assert event.divisions == [
ndb.Key("Event", "2017arc"),
ndb.Key("Event", "2017cars"),
ndb.Key("Event", "2017cur"),
ndb.Key("Event", "2017dal"),
ndb.Key("Event", "2017dar"),
ndb.Key("Event", "2017tes"),
]
elif event_key in {
"2017arc",
"2017cars",
"2017cur",
"2017dal",
"2017dar",
"2017tes",
}:
assert event.parent_event == ndb.Key("Event", "2017cmpmo")
assert event.divisions == []
else:
assert event.parent_event is None
assert event.divisions == []
def test_parse_division_parent_2018(test_data_importer):
path = test_data_importer._get_path(__file__, "data/2018_event_list.json")
with open(path, "r") as f:
data = json.load(f)
events, districts = FMSAPIEventListParser(2018).parse(data)
assert len(events) == 178
assert len(districts) == 10
# Test division <-> parent associations
for event in events:
event_key = event.key.id()
if event_key == "2018oncmp":
assert event.parent_event is None
assert event.divisions == [
ndb.Key("Event", "2018oncmp1"),
ndb.Key("Event", "2018oncmp2"),
]
elif event_key in {"2018oncmp1", "2018oncmp2"}:
assert event.parent_event == ndb.Key("Event", "2018oncmp")
assert event.divisions == []
elif event_key == "2018micmp":
assert event.parent_event is None
assert event.divisions == [
ndb.Key("Event", "2018micmp1"),
ndb.Key("Event", "2018micmp2"),
ndb.Key("Event", "2018micmp3"),
ndb.Key("Event", "2018micmp4"),
]
elif event_key in {"2018micmp1", "2018micmp2", "2018micmp3", "2018micmp4"}:
assert event.parent_event == ndb.Key("Event", "2018micmp")
assert event.divisions == []
elif event_key == "2018cmptx":
assert event.parent_event is None
assert event.divisions == [
ndb.Key("Event", "2018carv"),
ndb.Key("Event", "2018gal"),
ndb.Key("Event", "2018hop"),
ndb.Key("Event", "2018new"),
ndb.Key("Event", "2018roe"),
ndb.Key("Event", "2018tur"),
]
elif event_key in {
"2018carv",
"2018gal",
"2018hop",
"2018new",
"2018roe",
"2018tur",
}:
assert event.parent_event == ndb.Key("Event", "2018cmptx")
assert event.divisions == []
elif event_key == "2018cmpmi":
assert event.parent_event is None
assert event.divisions == [
ndb.Key("Event", "2018arc"),
ndb.Key("Event", "2018cars"),
ndb.Key("Event", "2018cur"),
ndb.Key("Event", "2018dal"),
ndb.Key("Event", "2018dar"),
ndb.Key("Event", "2018tes"),
]
elif event_key in {
"2018arc",
"2018cars",
"2018cur",
"2018dal",
"2018dar",
"2018tes",
}:
assert event.parent_event == ndb.Key("Event", "2018cmpmi")
assert event.divisions == []
else:
assert event.parent_event is None
assert event.divisions == []
|
|
#!/usr/bin/env python
import re,logging,random
from automata import NFA
class dotini(list):
'''the worst .ini file "parser" ever'''
# ripping
section = re.compile('^\[(.*)\]')
item = re.compile('\s*(\w+)\s*=\s*(.*)$')
# cleaning up
comment = re.compile(';.*$')
def __init__(self):
self.state = {'name':''}
def read(self, filename):
input = file(filename,'rt')
for r in input:
if self.parse(r.strip()):
continue
            logging.warning('ini : %s : unable to parse : %s', self.state.get('name', ''), repr(r))
return
def parse(self, line):
if line.startswith('#'):
return True
line = dotini.comment.sub('', line).strip()
if len(line) == 0:
return True
_ = dotini.section.match(line)
if _:
self.parse_section(_)
return True
_ = dotini.item.match(line)
if _:
self.parse_item(_)
return True
return False
def parse_section(self, m):
sectionname = m.group(1)
self.state = {}
logging.debug('ini : %s : adding section', sectionname)
self.append((sectionname, self.state))
def parse_item(self, m):
itemname,itemvalue = m.group(1),m.group(2)
self.state[itemname] = itemvalue
class db(list):
def __init__(self, *filenames):
for f in filenames:
self.parse(f)
return
def search(self, string):
return [p for p in self if p.name == string]
def parse(self, filename):
input = dotini()
input.read(filename)
# validate all items are something we know about
validitems = set(('ep_only','signature', 'section_start_only'))
items = set()
[ items.update(res.keys()) for signame,res in input ]
if items.difference(validitems):
# XXX: this condition will probably be untested, indefinitely
raise NotImplementedError('db : %s : not sure how to process signature fields : %s'%( filename, items.difference(validitems) ))
# map the strings from an ini file to something native to python
for signame,res in input:
try:
p = signature(signame, res)
self.append(p)
except Exception as e:
logging.warn('db : %s : unable to load signature for %s : %s', filename, repr(signame), type(e))
continue
return
class signature(object):
@staticmethod
def boolean(string):
if string.lower() == 'true':
return True
elif string.lower() == 'false':
return False
raise TypeError(string)
@staticmethod
def hextoken(ch):
if ch == '??' or ch in ('v3','v4') or ':' in ch: # XXX: hax
return None
if ch[0] == '?':
y = int(ch[1],16)
return set((x*0x10+y) for x in range(0x10))
if len(ch) == 1:
y = int(ch[0],16)
return set((y*0x10+x) for x in range(0x10))
if ch[1] == '?':
y = int(ch[0],16)
return set((y*0x10+x) for x in range(0x10))
return set((int(ch,16),))
def __init__(self, name, res):
string = res['signature'].lower()
self.name,self.string = name,string
self.sig = self.parse_signature(string)
# enforce mutual exclusivity
self.ep_only = signature.boolean( res.get('ep_only','false') )
self.section_start_only = signature.boolean( res.get('section_start_only','false') )
if (self.ep_only or self.section_start_only) and self.ep_only == self.section_start_only:
raise ValueError('%s ep:%s ss:%s'% (repr(name), self.ep_only, self.section_start_only))
return
def __eq__(self, other):
return (self.name, self.string) == (other.name, other.string)
def __hash__(self):
return (self.name, self.string).__hash__()
signature = re.compile('^[a-f0-9v: ?]+$')
def parse_signature(self, string):
if not signature.signature.match(string):
raise ValueError('%s %s'% (repr(self.name),repr(string)))
sig = self.sig = [signature.hextoken(x.lower()) for x in string.split(' ')]
return sig
def match(self, string):
zig = self.sig
for i,(a,b) in enumerate(zip(zig, string)):
if a is None:
continue
if ord(b) not in a:
return False
continue
return i+1 == len(zig)
def generate(self):
result = []
for x in self.sig:
if x is None:
x = random.randint(0,0xff)
else:
x, = random.sample(x, 1)
result.append(x)
return ''.join(map(chr,result))
def __repr__(self):
if self.ep_only:
type = "entrypoint"
elif self.section_start_only:
type = "sectionstart"
else:
type = "anywhere"
        return ' '.join((repr(self.string), type, repr(self.name)))
def compile(db):
state = {}
res = NFA(-1)
res.add_transition(-1, NFA.ANY, -1)
for row in db:
res.add_transition(-1, NFA.EPSILON, (0,row))
for i,v in enumerate(row.sig):
v = (NFA.ANY,) if v is None else (chr(x) for x in v)
[res.add_transition((i,row), x, (i+1,row)) for x in v]
res.add_transition((i+1,row), NFA.ANY, (i+1,row))
res.add_final_state((i+1,row))
state.setdefault((i+1,row), []).append(row)
return state,res
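# Hedged sketch (an addition, not part of the original module): a naive scan
# that skips the NFA and just slides signature.match() over every offset of
# `data`.  It assumes `database` is a peid.db instance and `data` is a byte
# string; only the first hit per signature is recorded.  It is far slower than
# compile(), but shows what the automaton above computes: state -1 loops on
# every byte and forks via epsilon transitions into one chain per signature.
def naive_scan(database, data):
    hits = []
    for sig in database:
        width = len(sig.sig)
        for offset in range(len(data) - width + 1):
            if sig.match(data[offset:offset + width]):
                hits.append((offset, sig))
                break
    return hits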
if __name__ == '__main__' and False:
import peid
reload(peid)
db = peid.db('userdb.txt')
while True:
a, = random.sample(db,1)
if '??' in a.string:
continue
break
print a
s = a.generate()
state,res = peid.compile(db)
#print 'matching %s' % (s)
# print res.to_bytecode()
print 'executing nfa'
res.bytecode(nondeterministic=False)
print res.execute(s,debug=True)
#print res.bytecode()
elif __name__ == '__main__':
import peid
# db = peid.db('data/test.txt')
db = peid.db('userdb.txt')
_ = '00 58 35 4F 21 50 25 40 41 50 5B 34 5C 50 5A 58 35 34 28 50 5E 29 37 43 43 29 37 7D 24 45 49 43 41 52 2D 53 54 41 4E 44 41 52 44 2D 41 4E 54 49 56 49 52 55 53 2D 54 45 53 54 2D 46 49 4C 45 21 24 48 2B 48 2A'
s = ''.join(chr(int(x,16)) for x in _.split(' '))
print 'records',len(db)
states,res = peid.compile(db)
print 'compiled', len(states)
print 'transitions',len(res._transitions)
print res,'versus',s
print 'compiling to bytecode'
print 'made bytecode'
#g = res.to_graph()
#g.render()
for i,x in enumerate(res.execute(s)):
print i,x
#res.bytecode().dump()
|
|
# coding: utf-8
import ui, console, time, motion
import threading, queue
from contextlib import closing
from datetime import datetime
import re, urllib.request, socket
import urllib
import os
from flask import Flask, request, Response, abort
from flask import send_from_directory
import mimetypes
import wkwebview
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'web/static')
from objc_util import *
import requests
from threading import Timer
import httplib2
from urllib.parse import urlparse
import math
import json, os
theHttpPort = 8080
theThread = None
theApp = Flask(__name__)
MB = 1 << 20
BUFF_SIZE = 10 * MB
# setting routes
@theApp.route("/", methods=['GET'])
def serve_home():
return send_from_directory(static_file_dir, 'index.html')
@theApp.route('/stream/<path:path>', methods=['GET'])
def stream_file_in_dir(path):
fullpath = os.path.join(static_file_dir, "stream",path)
if not os.path.isfile(fullpath): abort(404)
start, end = get_range(request)
return partial_response(fullpath, start, end)
@theApp.route('/<path:path>', methods=['GET'])
def serve_file_in_dir(path):
if not os.path.isfile(os.path.join(static_file_dir, path)): abort(404)
return send_from_directory(static_file_dir, path)
LAST_REQUEST_MS = 0
@theApp.before_request
def update_last_request_ms():
global LAST_REQUEST_MS
LAST_REQUEST_MS = time.time() * 1000
@theApp.route('/seriouslykill', methods=['POST'])
def seriouslykill():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return "Shutting down..."
@theApp.route('/kill', methods=['POST'])
def kill():
last_ms = LAST_REQUEST_MS
def shutdown():
if LAST_REQUEST_MS <= last_ms: # subsequent requests abort shutdown
requests.post('http://localhost:%d/seriouslykill' % theHttpPort)
else:
pass
Timer(1.0, shutdown).start() # wait 1 second
return "Shutting down..."
# streaming implementation...
def partial_response(path, start, end=None):
file_size = os.path.getsize(path)
# Determine (end, length)
if end is None:
end = start + BUFF_SIZE - 1
end = min(end, file_size - 1)
end = min(end, start + BUFF_SIZE - 1)
length = end - start + 1
# Read file
with open(path, 'rb') as fd:
fd.seek(start)
bytes = fd.read(length)
assert(len(bytes) == length)
response = Response(
bytes,
206,
mimetype=mimetypes.guess_type(path)[0],
direct_passthrough=True,
)
response.headers.add(
'Content-Range', 'bytes {0}-{1}/{2}'.format(
start, end, file_size,
),
)
response.headers.add(
'Accept-Ranges', 'bytes'
)
response.headers.add(
'Access-Control-Allow-Origin', '*'
)
response.headers.add(
'Vary', 'Accept-Encoding'
)
return response
def get_range(request):
    range = request.headers.get('Range', '')
    m = re.match(r'bytes=(?P<start>\d+)-(?P<end>\d+)?', range)
if m:
start = m.group('start')
end = m.group('end')
start = int(start)
if end is not None:
end = int(end)
return start, end
else:
return 0, None
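# Hedged illustration (an addition, not part of the app): how a client could
# exercise the partial_response()/get_range() pair above.  It assumes the
# Flask server is already running on theHttpPort and that <name> exists under
# web/static/stream; the server answers 206 Partial Content with a
# Content-Range header such as "bytes 0-1023/<file size>".
def example_range_request(name, start=0, end=1023):
    url = 'http://localhost:%d/stream/%s' % (theHttpPort, name)
    resp = requests.get(url, headers={'Range': 'bytes=%d-%d' % (start, end)})
    return resp.status_code, resp.headers.get('Content-Range'), len(resp.content)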
# thread worker
class workerThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
def run(self):
NSNetService = ObjCClass('NSNetService') # Bonjour publication
service = NSNetService.alloc().initWithDomain_type_name_port_('', '_http._tcp', 'iOS webVR Viewer', theHttpPort)
try:
service.publish()
theApp.run(host='0.0.0.0', port=theHttpPort)
finally:
service.stop()
service.release()
def stop(self):
requests.post('http://localhost:%d/kill' % theHttpPort)
# webview delegate ...
class MyWebViewDelegate (object):
def __init__(self, webview):
self.wv = webview
def webview_should_start_load(self, webview, url, nav_type):
if url.startswith('ios-log'):
txt = urllib.parse.unquote(url)
# hiding some messages
if 'Invalid timestamps detected.' in txt:
pass
else:
print(txt)
return True
def webview_did_start_load(self, webview):
pass
def webview_did_finish_load(self, webview):
print("webview_did_finish_load")
def webview_did_fail_load(self, webview, error_code, error_msg):
pass
# the main class
class MyWebVRView(ui.View):
def __init__(self, url):
self.finished = False
self.start_workerThread()
self.width, self.height = ui.get_window_size()
self.background_color= 'black'
# the webview
self.wv = wkwebview.WKWebView(frame=self.bounds, flex='WH')
self.wv.delegate = MyWebViewDelegate(self.wv)
self.wv.background_color= 'black'
self.add_subview(self.wv)
bi_back = ui.ButtonItem(image=ui.Image.named('iob:ios7_arrow_back_32'), action=self.goBack)
bi_forward = ui.ButtonItem(image=ui.Image.named('iob:ios7_arrow_forward_32'), action=self.goForward)
self.right_button_items = [bi_forward, bi_back]
self.clearCache()
self.loadURL(url)
# launch the layout
self.present("full_screen", hide_title_bar=False)
def goBack(self, bi):
self.wv.go_back()
def goForward(self, bi):
self.wv.go_forward()
# here we observe the exit
def will_close(self):
self.finished = True
# some thread management used to
# react to the remote change
# of the current url
def start_workerThread(self):
global theThread
theThread = workerThread()
theThread.start()
def stop_workerThread(self):
if theThread is None:
return
theThread.stop()
# main loop...
def run(self):
while not self.finished:
time.sleep(1.0/60)
self.stop_workerThread()
def clearCache(self):
js_code = """window.location.reload(true);"""
res=self.wv.eval_js(js_code)
def loadURL(self, url):
self.wv.load_url(url, no_cache=True)
if __name__ == '__main__':
# disable the ios screensaver
console.set_idle_timer_disabled(True)
#access to localhost
url = "http://localhost:%d/" % theHttpPort
# fasten your seatbelts, start the engine and let's get doing!...
MyWebVRView(url).run()
# restore the ios screensaver
console.set_idle_timer_disabled(False)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Interfaces to deal with the various types of fieldmap sources
.. testsetup::
>>> tmpdir = getfixture('tmpdir')
>>> tmp = tmpdir.chdir() # changing to a temporary directory
>>> nb.Nifti1Image(np.zeros((90, 90, 60)), None, None).to_filename(
... tmpdir.join('epi.nii.gz').strpath)
"""
import numpy as np
import nibabel as nb
from nipype import logging
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (
BaseInterfaceInputSpec, TraitedSpec, File, isdefined, traits,
SimpleInterface)
LOGGER = logging.getLogger('nipype.interface')
class FieldEnhanceInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='input fieldmap')
in_mask = File(exists=True, desc='brain mask')
in_magnitude = File(exists=True, desc='input magnitude')
unwrap = traits.Bool(False, usedefault=True, desc='run phase unwrap')
despike = traits.Bool(True, usedefault=True, desc='run despike filter')
bspline_smooth = traits.Bool(True, usedefault=True, desc='run 3D bspline smoother')
mask_erode = traits.Int(1, usedefault=True, desc='mask erosion iterations')
    despike_threshold = traits.Float(0.2, usedefault=True, desc='despike filter threshold')
num_threads = traits.Int(1, usedefault=True, nohash=True, desc='number of jobs')
class FieldEnhanceOutputSpec(TraitedSpec):
out_file = File(desc='the output fieldmap')
out_unwrapped = File(desc='unwrapped fieldmap')
class FieldEnhance(SimpleInterface):
"""
The FieldEnhance interface wraps a workflow to massage the input fieldmap
and return it masked, despiked, etc.
"""
input_spec = FieldEnhanceInputSpec
output_spec = FieldEnhanceOutputSpec
def _run_interface(self, runtime):
from scipy import ndimage as sim
fmap_nii = nb.load(self.inputs.in_file)
data = np.squeeze(fmap_nii.get_data().astype(np.float32))
# Despike / denoise (no-mask)
if self.inputs.despike:
data = _despike2d(data, self.inputs.despike_threshold)
mask = None
if isdefined(self.inputs.in_mask):
masknii = nb.load(self.inputs.in_mask)
mask = masknii.get_data().astype(np.uint8)
            # Erode mask
if self.inputs.mask_erode > 0:
struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 1)
mask = sim.binary_erosion(
mask, struc,
iterations=self.inputs.mask_erode
).astype(np.uint8) # pylint: disable=no-member
self._results['out_file'] = fname_presuffix(
self.inputs.in_file, suffix='_enh', newpath=runtime.cwd)
datanii = nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header)
if self.inputs.unwrap:
data = _unwrap(data, self.inputs.in_magnitude, mask)
self._results['out_unwrapped'] = fname_presuffix(
self.inputs.in_file, suffix='_unwrap', newpath=runtime.cwd)
nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header).to_filename(
self._results['out_unwrapped'])
if not self.inputs.bspline_smooth:
datanii.to_filename(self._results['out_file'])
return runtime
else:
from ..utils import bspline as fbsp
from statsmodels.robust.scale import mad
# Fit BSplines (coarse)
bspobj = fbsp.BSplineFieldmap(datanii, weights=mask,
njobs=self.inputs.num_threads)
bspobj.fit()
smoothed1 = bspobj.get_smoothed()
# Manipulate the difference map
diffmap = data - smoothed1.get_data()
sderror = mad(diffmap[mask > 0])
LOGGER.info('SD of error after B-Spline fitting is %f', sderror)
errormask = np.zeros_like(diffmap)
errormask[np.abs(diffmap) > (10 * sderror)] = 1
errormask *= mask
nslices = 0
try:
errorslice = np.squeeze(np.argwhere(errormask.sum(0).sum(0) > 0))
nslices = errorslice[-1] - errorslice[0]
except IndexError: # mask is empty, do not refine
pass
if nslices > 1:
diffmapmsk = mask[..., errorslice[0]:errorslice[-1]]
diffmapnii = nb.Nifti1Image(
diffmap[..., errorslice[0]:errorslice[-1]] * diffmapmsk,
datanii.affine, datanii.header)
bspobj2 = fbsp.BSplineFieldmap(diffmapnii, knots_zooms=[24., 24., 4.],
njobs=self.inputs.num_threads)
bspobj2.fit()
smoothed2 = bspobj2.get_smoothed().get_data()
final = smoothed1.get_data().copy()
final[..., errorslice[0]:errorslice[-1]] += smoothed2
else:
final = smoothed1.get_data()
nb.Nifti1Image(final, datanii.affine, datanii.header).to_filename(
self._results['out_file'])
return runtime
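# Hedged usage sketch (an addition, not part of this module): FieldEnhance is a
# regular nipype SimpleInterface, so it can also be run standalone given a
# fieldmap and a brain mask on disk; the file-name arguments are placeholders.
def _example_field_enhance(fmap_file, mask_file):
    enh = FieldEnhance(in_file=fmap_file, in_mask=mask_file, bspline_smooth=False)
    res = enh.run()  # writes the despiked, masked map as *_enh.nii.gz in the cwd
    return res.outputs.out_file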
class FieldToRadSInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='input fieldmap')
fmap_range = traits.Float(desc='range of input field map')
class FieldToRadSOutputSpec(TraitedSpec):
out_file = File(desc='the output fieldmap')
fmap_range = traits.Float(desc='range of input field map')
class FieldToRadS(SimpleInterface):
"""
The FieldToRadS converts from arbitrary units to rad/s
"""
input_spec = FieldToRadSInputSpec
output_spec = FieldToRadSOutputSpec
def _run_interface(self, runtime):
fmap_range = None
if isdefined(self.inputs.fmap_range):
fmap_range = self.inputs.fmap_range
self._results['out_file'], self._results['fmap_range'] = _torads(
self.inputs.in_file, fmap_range, newpath=runtime.cwd)
return runtime
class FieldToHzInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='input fieldmap')
range_hz = traits.Float(mandatory=True, desc='range of input field map')
class FieldToHzOutputSpec(TraitedSpec):
out_file = File(desc='the output fieldmap')
class FieldToHz(SimpleInterface):
"""
The FieldToHz converts from arbitrary units to Hz
"""
input_spec = FieldToHzInputSpec
output_spec = FieldToHzOutputSpec
def _run_interface(self, runtime):
self._results['out_file'] = _tohz(
self.inputs.in_file, self.inputs.range_hz, newpath=runtime.cwd)
return runtime
class Phasediff2FieldmapInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='input fieldmap')
metadata = traits.Dict(mandatory=True, desc='BIDS metadata dictionary')
class Phasediff2FieldmapOutputSpec(TraitedSpec):
out_file = File(desc='the output fieldmap')
class Phasediff2Fieldmap(SimpleInterface):
"""
Convert a phase difference map into a fieldmap in Hz
"""
input_spec = Phasediff2FieldmapInputSpec
output_spec = Phasediff2FieldmapOutputSpec
def _run_interface(self, runtime):
self._results['out_file'] = phdiff2fmap(
self.inputs.in_file,
_delta_te(self.inputs.metadata),
newpath=runtime.cwd)
return runtime
def _despike2d(data, thres, neigh=None):
"""
despiking as done in FSL fugue
"""
if neigh is None:
neigh = [-1, 0, 1]
nslices = data.shape[-1]
for k in range(nslices):
data2d = data[..., k]
for i in range(data2d.shape[0]):
for j in range(data2d.shape[1]):
vals = []
thisval = data2d[i, j]
for ii in neigh:
for jj in neigh:
try:
vals.append(data2d[i + ii, j + jj])
except IndexError:
pass
vals = np.array(vals)
patch_range = vals.max() - vals.min()
patch_med = np.median(vals)
if (patch_range > 1e-6 and
(abs(thisval - patch_med) / patch_range) > thres):
data[i, j, k] = patch_med
return data
def _unwrap(fmap_data, mag_file, mask=None):
from math import pi
from nipype.interfaces.fsl import PRELUDE
magnii = nb.load(mag_file)
if mask is None:
mask = np.ones_like(fmap_data, dtype=np.uint8)
fmapmax = max(abs(fmap_data[mask > 0].min()), fmap_data[mask > 0].max())
fmap_data *= pi / fmapmax
nb.Nifti1Image(fmap_data, magnii.affine).to_filename('fmap_rad.nii.gz')
nb.Nifti1Image(mask, magnii.affine).to_filename('fmap_mask.nii.gz')
nb.Nifti1Image(magnii.get_data(), magnii.affine).to_filename('fmap_mag.nii.gz')
# Run prelude
res = PRELUDE(phase_file='fmap_rad.nii.gz',
magnitude_file='fmap_mag.nii.gz',
mask_file='fmap_mask.nii.gz').run()
unwrapped = nb.load(res.outputs.unwrapped_phase_file).get_data() * (fmapmax / pi)
return unwrapped
def get_ees(in_meta, in_file=None):
"""
Calculate the *effective echo spacing* :math:`t_\\text{ees}`
for an input :abbr:`EPI (echo-planar imaging)` scan.
There are several procedures to calculate the effective
echo spacing. The basic one is that an ``EffectiveEchoSpacing``
field is set in the JSON sidecar. The following examples
use an ``'epi.nii.gz'`` file-stub which has 90 pixels in the
j-axis encoding direction.
>>> meta = {'EffectiveEchoSpacing': 0.00059,
... 'PhaseEncodingDirection': 'j-'}
>>> get_ees(meta)
0.00059
If the *total readout time* :math:`T_\\text{ro}` (``TotalReadoutTime``
BIDS field) is provided, then the effective echo spacing can be
calculated reading the number of voxels :math:`N_\\text{PE}` along the
readout direction and the parallel acceleration
factor of the EPI
    .. math ::
        t_\\text{ees} = T_\\text{ro} \\, (N_\\text{PE} / f_\\text{acc} - 1)^{-1}
    where :math:`N_\\text{PE}` is the number of pixels along the phase-encoding
    direction, and :math:`f_\\text{acc}` is the parallel imaging acceleration factor
(:abbr:`GRAPPA (GeneRalized Autocalibrating Partial Parallel Acquisition)`,
:abbr:`ARC (Autocalibrating Reconstruction for Cartesian imaging)`, etc.).
>>> meta = {'TotalReadoutTime': 0.02596,
... 'PhaseEncodingDirection': 'j-',
... 'ParallelReductionFactorInPlane': 2}
>>> get_ees(meta, in_file='epi.nii.gz')
0.00059
Some vendors, like Philips, store different parameter names
(see http://dbic.dartmouth.edu/pipermail/mrusers/attachments/\
20141112/eb1d20e6/attachment.pdf):
>>> meta = {'WaterFatShift': 8.129,
... 'MagneticFieldStrength': 3,
... 'PhaseEncodingDirection': 'j-',
... 'ParallelReductionFactorInPlane': 2}
>>> get_ees(meta, in_file='epi.nii.gz')
0.00041602630141921826
"""
import nibabel as nb
from fmriprep.interfaces.fmap import _get_pe_index
# Use case 1: EES is defined
ees = in_meta.get('EffectiveEchoSpacing', None)
if ees is not None:
return ees
# All other cases require the parallel acc and npe (N vox in PE dir)
acc = float(in_meta.get('ParallelReductionFactorInPlane', 1.0))
npe = nb.load(in_file).shape[_get_pe_index(in_meta)]
etl = npe // acc
# Use case 2: TRT is defined
trt = in_meta.get('TotalReadoutTime', None)
if trt is not None:
return trt / (etl - 1)
# Use case 3 (philips scans)
wfs = in_meta.get('WaterFatShift', None)
if wfs is not None:
fstrength = in_meta['MagneticFieldStrength']
wfd_ppm = 3.4 # water-fat diff in ppm
g_ratio_mhz_t = 42.57 # gyromagnetic ratio for proton (1H) in MHz/T
wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t
return wfs / (wfs_hz * etl)
raise ValueError('Unknown effective echo-spacing specification')
def get_trt(in_meta, in_file=None):
"""
Calculate the *total readout time* for an input
:abbr:`EPI (echo-planar imaging)` scan.
There are several procedures to calculate the total
readout time. The basic one is that a ``TotalReadoutTime``
field is set in the JSON sidecar. The following examples
use an ``'epi.nii.gz'`` file-stub which has 90 pixels in the
j-axis encoding direction.
>>> meta = {'TotalReadoutTime': 0.02596}
>>> get_trt(meta)
0.02596
If the *effective echo spacing* :math:`t_\\text{ees}`
(``EffectiveEchoSpacing`` BIDS field) is provided, then the
    total readout time can be calculated reading the number of voxels
    :math:`N_\\text{PE}` along the phase-encoding direction
    and the parallel acceleration factor of the EPI :math:`f_\\text{acc}`.
.. math ::
T_\\text{ro} = t_\\text{ees} \\, (N_\\text{PE} / f_\\text{acc} - 1)
>>> meta = {'EffectiveEchoSpacing': 0.00059,
... 'PhaseEncodingDirection': 'j-',
... 'ParallelReductionFactorInPlane': 2}
>>> get_trt(meta, in_file='epi.nii.gz')
0.02596
Some vendors, like Philips, store different parameter names:
>>> meta = {'WaterFatShift': 8.129,
... 'MagneticFieldStrength': 3,
... 'PhaseEncodingDirection': 'j-',
... 'ParallelReductionFactorInPlane': 2}
>>> get_trt(meta, in_file='epi.nii.gz')
0.018721183563864822
"""
# Use case 1: TRT is defined
trt = in_meta.get('TotalReadoutTime', None)
if trt is not None:
return trt
# All other cases require the parallel acc and npe (N vox in PE dir)
acc = float(in_meta.get('ParallelReductionFactorInPlane', 1.0))
npe = nb.load(in_file).shape[_get_pe_index(in_meta)]
etl = npe // acc
# Use case 2: TRT is defined
ees = in_meta.get('EffectiveEchoSpacing', None)
if ees is not None:
return ees * (etl - 1)
# Use case 3 (philips scans)
wfs = in_meta.get('WaterFatShift', None)
if wfs is not None:
fstrength = in_meta['MagneticFieldStrength']
wfd_ppm = 3.4 # water-fat diff in ppm
g_ratio_mhz_t = 42.57 # gyromagnetic ratio for proton (1H) in MHz/T
wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t
return wfs / wfs_hz
raise ValueError('Unknown total-readout time specification')
def _get_pe_index(meta):
pe = meta['PhaseEncodingDirection']
try:
return {'i': 0, 'j': 1, 'k': 2}[pe[0]]
except KeyError:
raise RuntimeError('"%s" is an invalid PE string' % pe)
def _torads(in_file, fmap_range=None, newpath=None):
"""
Convert a field map to rad/s units
If fmap_range is None, the range of the fieldmap
will be automatically calculated.
Use fmap_range=0.5 to convert from Hz to rad/s
"""
from math import pi
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
out_file = fname_presuffix(in_file, suffix='_rad', newpath=newpath)
fmapnii = nb.load(in_file)
fmapdata = fmapnii.get_data()
if fmap_range is None:
fmap_range = max(abs(fmapdata.min()), fmapdata.max())
fmapdata = fmapdata * (pi / fmap_range)
out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header)
out_img.set_data_dtype('float32')
out_img.to_filename(out_file)
return out_file, fmap_range
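# Hedged round-trip sketch (an addition): with fmap_range=None, _torads scales
# the map so its extreme value lands at +/-pi; feeding the returned range back
# into _tohz undoes that scaling and recovers the original units.
def _example_rads_roundtrip(in_file, newpath=None):
    rad_file, fmap_range = _torads(in_file, newpath=newpath)
    # _tohz multiplies by range_hz / pi, the inverse of the pi / fmap_range factor.
    return _tohz(rad_file, fmap_range, newpath=newpath)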
def _tohz(in_file, range_hz, newpath=None):
"""Convert a field map to Hz units"""
from math import pi
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
out_file = fname_presuffix(in_file, suffix='_hz', newpath=newpath)
fmapnii = nb.load(in_file)
fmapdata = fmapnii.get_data()
fmapdata = fmapdata * (range_hz / pi)
out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header)
out_img.set_data_dtype('float32')
out_img.to_filename(out_file)
return out_file
def phdiff2fmap(in_file, delta_te, newpath=None):
r"""
Converts the input phase-difference map into a fieldmap in Hz,
using the eq. (1) of [Hutton2002]_:
.. math::
\Delta B_0 (\text{T}^{-1}) = \frac{\Delta \Theta}{2\pi\gamma \Delta\text{TE}}
In this case, we do not take into account the gyromagnetic ratio of the
proton (:math:`\gamma`), since it will be applied inside TOPUP:
.. math::
\Delta B_0 (\text{Hz}) = \frac{\Delta \Theta}{2\pi \Delta\text{TE}}
"""
import math
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
# GYROMAG_RATIO_H_PROTON_MHZ = 42.576
out_file = fname_presuffix(in_file, suffix='_fmap', newpath=newpath)
image = nb.load(in_file)
data = (image.get_data().astype(np.float32) / (2. * math.pi * delta_te))
nii = nb.Nifti1Image(data, image.affine, image.header)
nii.set_data_dtype(np.float32)
nii.to_filename(out_file)
return out_file
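# Worked example (added note): with delta_te = 2.46 ms, a phase difference of
# 2*pi radians maps to 1 / 0.00246 ~= 406.5 Hz, i.e. one full phase wrap per
# echo-time difference corresponds to that off-resonance frequency.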
def _delta_te(in_values, te1=None, te2=None):
"""Read :math:`\Delta_\text{TE}` from BIDS metadata dict"""
if isinstance(in_values, float):
te2 = in_values
te1 = 0.
if isinstance(in_values, dict):
te1 = in_values.get('EchoTime1')
te2 = in_values.get('EchoTime2')
if not all((te1, te2)):
te2 = in_values.get('EchoTimeDifference')
te1 = 0
if isinstance(in_values, list):
te2, te1 = in_values
if isinstance(te1, list):
te1 = te1[1]
if isinstance(te2, list):
te2 = te2[1]
    # For convenience if both are missing we should give one error about them
if te1 is None and te2 is None:
raise RuntimeError('EchoTime1 and EchoTime2 metadata fields not found. '
'Please consult the BIDS specification.')
if te1 is None:
raise RuntimeError(
'EchoTime1 metadata field not found. Please consult the BIDS specification.')
if te2 is None:
raise RuntimeError(
'EchoTime2 metadata field not found. Please consult the BIDS specification.')
return abs(float(te2) - float(te1))
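# Hedged usage notes (an addition): _delta_te accepts the BIDS metadata dict,
# a bare float, or a two-element list.  Echo times are in seconds, e.g.
#   _delta_te({'EchoTime1': 0.00492, 'EchoTime2': 0.00738})  ->  ~0.00246
#   _delta_te(0.00246)   # a bare float is taken as the difference itself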
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""CGI server interface to Python runtime.
CGI-compliant interface between the Python runtime and user-provided Python
code.
"""
from __future__ import with_statement
import cStringIO
from email import feedparser
import imp
import logging
import marshal
import os
import sys
import traceback
import types
def HandleRequest(unused_environ, handler_name, unused_url, post_data,
unused_error, application_root, python_lib,
import_hook=None):
"""Handle a single CGI request.
Handles a request for handler_name in the form 'path/to/handler.py' with the
environment contained in environ.
Args:
handler_name: A str containing the user-specified handler file to use for
this request as specified in the script field of a handler in app.yaml.
post_data: A stream containing the post data for this request.
application_root: A str containing the root path of the application.
python_lib: A str containing the root the Python App Engine library.
import_hook: Optional import hook (PEP 302 style loader).
Returns:
A dict containing zero or more of the following:
error: App Engine error code. 0 for OK, 1 for error. Defaults to OK if not
set. If set, then the other fields may be missing.
response_code: HTTP response code.
headers: A list of tuples (key, value) of HTTP headers.
body: A str of the body of the response.
"""
body = cStringIO.StringIO()
module_name = _FileToModuleName(handler_name)
parent_module, _, submodule_name = module_name.rpartition('.')
parent_module = _GetModuleOrNone(parent_module)
main = None
if module_name in sys.modules:
module = sys.modules[module_name]
main = _GetValidMain(module)
if not main:
module = imp.new_module('__main__')
if import_hook is not None:
module.__loader__ = import_hook
saved_streams = sys.stdin, sys.stdout
try:
sys.modules['__main__'] = module
module.__dict__['__name__'] = '__main__'
sys.stdin = post_data
sys.stdout = body
if main:
os.environ['PATH_TRANSLATED'] = module.__file__
main()
else:
filename = _AbsolutePath(handler_name, application_root, python_lib)
if filename.endswith(os.sep + '__init__.py'):
module.__path__ = [os.path.dirname(filename)]
if import_hook is None:
code, filename = _LoadModuleCode(filename)
else:
code = import_hook.get_code(module_name)
if not code:
return {'error': 2}
os.environ['PATH_TRANSLATED'] = filename
module.__file__ = filename
try:
sys.modules[module_name] = module
eval(code, module.__dict__)
except:
del sys.modules[module_name]
if parent_module and submodule_name in parent_module.__dict__:
del parent_module.__dict__[submodule_name]
raise
else:
if parent_module:
parent_module.__dict__[submodule_name] = module
return _ParseResponse(body.getvalue())
except:
exception = sys.exc_info()
message = ''.join(traceback.format_exception(exception[0], exception[1],
exception[2].tb_next))
logging.error(message)
return {'error': 1}
finally:
sys.stdin, sys.stdout = saved_streams
module.__name__ = module_name
if '__main__' in sys.modules:
del sys.modules['__main__']
def _ParseResponse(response):
"""Parses an HTTP response into a dict.
Args:
response: A str containing the HTTP response.
Returns:
A dict with fields:
body: A str containing the body.
headers: A list containing tuples (key, value) of key and value pairs.
response_code: An int containing the HTTP response code.
"""
parser = feedparser.FeedParser()
parser._set_headersonly()
parser.feed(response)
parsed_response = parser.close()
if 'Status' in parsed_response:
status = int(parsed_response['Status'].split(' ', 1)[0])
del parsed_response['Status']
else:
status = 200
return {'body': parsed_response.get_payload(),
'headers': parsed_response.items(),
'response_code': status}
def _ParseHeader(header):
"""Parses a str header into a (key, value) pair."""
key, _, value = header.partition(':')
return key.strip(), value.strip()
def _GetValidMain(module):
"""Returns a main function in module if it exists and is valid or None.
A main function is valid if it can be called with no arguments, i.e. calling
module.main() would be valid.
Args:
module: The module in which to search for a main function.
Returns:
A function that takes no arguments if found or None otherwise.
"""
if not hasattr(module, 'main'):
return None
main = module.main
if not hasattr(main, '__call__'):
return None
defaults = main.__defaults__
if defaults:
default_argcount = len(defaults)
else:
default_argcount = 0
if (main.__code__.co_argcount - default_argcount) == 0:
return main
else:
return None
def _FileToModuleName(filename):
"""Returns the module name corresponding to a filename."""
_, lib, suffix = filename.partition('$PYTHON_LIB/')
if lib:
module = suffix
else:
module = filename
module = os.path.normpath(module)
if '.py' in module:
module = module.rpartition('.py')[0]
module = module.replace(os.sep, '.')
module = module.strip('.')
if module.endswith('.__init__'):
module = module.rpartition('.__init__')[0]
return module
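# Illustrative mappings (added note, assuming a POSIX os.sep):
#   'handlers/main.py'          -> 'handlers.main'
#   'handlers/__init__.py'      -> 'handlers'
#   '$PYTHON_LIB/google/foo.py' -> 'google.foo'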
def _AbsolutePath(filename, application_root, python_lib):
"""Returns the absolute path of a Python script file.
Args:
filename: A str containing the handler script path.
application_root: The absolute path of the root of the application.
python_lib: The absolute path of the Python library.
Returns:
The absolute path of the handler script.
"""
_, lib, suffix = filename.partition('$PYTHON_LIB/')
if lib:
filename = os.path.join(python_lib, suffix)
else:
filename = os.path.join(application_root, filename)
if filename.endswith(os.sep) or os.path.isdir(filename):
filename = os.path.join(filename, '__init__.py')
return filename
def _LoadModuleCode(filename):
"""Loads the code of a module, using compiled bytecode if available.
Args:
filename: The Python script filename.
Returns:
A 2-tuple (code, filename) where:
code: A code object contained in the file or None if it does not exist.
filename: The name of the file loaded, either the same as the arg
filename, or the corresponding .pyc file.
"""
compiled_filename = filename + 'c'
if os.path.exists(compiled_filename):
with open(compiled_filename, 'r') as f:
magic_numbers = f.read(8)
if len(magic_numbers) == 8 and magic_numbers[:4] == imp.get_magic():
try:
return _FixCodeFilename(marshal.load(f), filename), compiled_filename
except (EOFError, ValueError):
pass
if os.path.exists(filename):
with open(filename, 'r') as f:
code = compile(f.read(), filename, 'exec', 0, True)
return code, filename
else:
return None, filename
def _FixCodeFilename(code, filename):
"""Creates a CodeType with co_filename replaced with filename.
Also affects nested code objects in co_consts.
Args:
code: The code object to be replaced.
filename: The replacement filename.
Returns:
A new code object with its co_filename set to the provided filename.
"""
if isinstance(code, types.CodeType):
code = types.CodeType(
code.co_argcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
tuple([_FixCodeFilename(c, filename) for c in code.co_consts]),
code.co_names,
code.co_varnames,
filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars)
return code
def _GetModuleOrNone(module_name):
"""Returns a module if it exists or None."""
module = None
if module_name:
try:
module = __import__(module_name)
except ImportError:
pass
else:
for name in module_name.split('.')[1:]:
module = getattr(module, name)
return module
|
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010,2011,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq update model`."""
from sqlalchemy.orm import object_session, contains_eager
from aquilon.exceptions_ import ArgumentError, UnimplementedError
from aquilon.aqdb.types import CpuType, NicType
from aquilon.aqdb.model import (Vendor, Model, MachineSpecs, Machine, Disk,
HardwareEntity, Interface)
from aquilon.worker.broker import BrokerCommand
class CommandUpdateModel(BrokerCommand):
requires_plenaries = True
required_parameters = ["model", "vendor"]
# Quick hash of the arguments this method takes to the corresponding
# aqdb label.
argument_lookup = {'cpuname': 'name', 'cpuvendor': 'vendor',
'cpunum': 'cpu_quantity',
'memory': 'memory', 'disktype': 'disk_type',
'diskcontroller': 'controller_type',
'disksize': 'disk_capacity',
'nicmodel': 'name', 'nicvendor': 'vendor'}
def render(self, session, plenaries, model, vendor, newmodel, newvendor,
comments, update_existing_machines, **arguments):
for (arg, value) in arguments.items():
# Cleaning the strings isn't strictly necessary but allows
# for simple equality checks below and removes the need to
# call refresh().
if arg in ['newmodel', 'newvendor',
'cpuname', 'cpuvendor', 'disktype', 'diskcontroller',
'nicmodel', 'nicvendor']:
if value is not None:
arguments[arg] = value.lower().strip()
dbmodel = Model.get_unique(session, name=model, vendor=vendor,
compel=True)
if not update_existing_machines and (newmodel or newvendor):
raise ArgumentError("Cannot update model name or vendor without "
"updating any existing machines.")
dbmachines = set()
# The sub-branching here is a little difficult to read...
# Basically, there are three different checks to handle
# setting a new vendor, a new name, or both.
if newvendor:
dbnewvendor = Vendor.get_unique(session, newvendor, compel=True)
if newmodel:
Model.get_unique(session, name=newmodel, vendor=dbnewvendor,
preclude=True)
else:
Model.get_unique(session, name=dbmodel.name,
vendor=dbnewvendor, preclude=True)
dbmodel.vendor = dbnewvendor
if newmodel:
if not newvendor:
Model.get_unique(session, name=newmodel, vendor=dbmodel.vendor,
preclude=True)
dbmodel.name = newmodel
if newvendor or newmodel:
q = session.query(Machine).filter_by(model=dbmodel)
dbmachines.update(q)
# For now, can't update model_type. There are too many spots
# that special case things like aurora_node or virtual_machine to
        # know that the transition is safe. If there is enough need we
# can always add those transitions later.
if arguments['machine_type'] is not None:
raise UnimplementedError("Cannot (yet) change a model's "
"machine type.")
if comments is not None:
dbmodel.comments = comments
# The comments also do not affect the templates.
cpu_args = ['cpuname', 'cpuvendor']
cpu_info = {self.argument_lookup[arg]: arguments[arg]
for arg in cpu_args}
cpu_values = [v for v in cpu_info.values() if v is not None]
nic_args = ['nicmodel', 'nicvendor']
nic_info = {self.argument_lookup[arg]: arguments[arg]
for arg in nic_args}
nic_values = [v for v in nic_info.values() if v is not None]
spec_args = ['cpunum', 'memory', 'disktype', 'diskcontroller',
'disksize']
specs = {self.argument_lookup[arg]: arguments[arg]
for arg in spec_args}
spec_values = [v for v in specs.values() if v is not None]
if not dbmodel.machine_specs:
if cpu_values or nic_values or spec_values:
# You can't add a non-machine model with machine_specs
# thus we only need to check here if you try and update
if not dbmodel.model_type.isMachineType():
raise ArgumentError("Machine specfications are only valid"
" for machine types")
if not cpu_values or len(spec_values) < len(spec_args):
raise ArgumentError("Missing required parameters to store "
"machine specs for the model. Please "
"give all CPU, disk, RAM, and NIC "
"count information.")
dbcpu = Model.get_unique(session, compel=True,
model_type=CpuType.Cpu, **cpu_info)
if nic_values:
dbnic = Model.get_unique(session, compel=True,
model_type=NicType.Nic, **nic_info)
else:
dbnic = Model.default_nic_model(session)
dbmachine_specs = MachineSpecs(model=dbmodel, cpu_model=dbcpu,
nic_model=dbnic, **specs)
session.add(dbmachine_specs)
# Anything below that updates specs should have been verified above.
if cpu_values:
dbcpu = Model.get_unique(session, compel=True,
model_type=CpuType.Cpu, **cpu_info)
self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
attr='cpu_model', value=dbcpu,
fix_existing=update_existing_machines)
for arg in ['memory', 'cpunum']:
if arguments[arg] is not None:
self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
attr=self.argument_lookup[arg],
value=arguments[arg],
fix_existing=update_existing_machines)
if arguments['disktype']:
if update_existing_machines:
raise ArgumentError("Please do not specify "
"--update_existing_machines to change "
"the model disktype. This cannot "
"be converted automatically.")
dbmodel.machine_specs.disk_type = arguments['disktype']
for arg in ['diskcontroller', 'disksize']:
if arguments[arg] is not None:
self.update_disk_specs(model=dbmodel, dbmachines=dbmachines,
attr=self.argument_lookup[arg],
value=arguments[arg],
fix_existing=update_existing_machines)
if nic_values:
dbnic = Model.get_unique(session, compel=True, **nic_info)
self.update_interface_specs(model=dbmodel, dbmachines=dbmachines,
value=dbnic,
fix_existing=update_existing_machines)
session.flush()
plenaries.add(dbmachines)
plenaries.write()
return
def update_machine_specs(self, model, dbmachines,
attr=None, value=None, fix_existing=False):
session = object_session(model)
if fix_existing:
oldattr = getattr(model.machine_specs, attr)
filters = {'model': model, attr: oldattr}
q = session.query(Machine).filter_by(**filters)
for dbmachine in q:
setattr(dbmachine, attr, value)
dbmachines.add(dbmachine)
setattr(model.machine_specs, attr, value)
def update_disk_specs(self, model, dbmachines,
attr=None, value=None, fix_existing=False):
session = object_session(model)
if fix_existing:
oldattr = getattr(model.machine_specs, attr)
# disk_capacity => capacity
disk_attr = attr.replace('disk_', '')
filters = {disk_attr: oldattr}
q = session.query(Disk)
q = q.filter_by(**filters)
q = q.join(Machine)
q = q.filter_by(model=model)
for dbdisk in q:
setattr(dbdisk, disk_attr, value)
dbmachines.add(dbdisk.machine)
setattr(model.machine_specs, attr, value)
def update_interface_specs(self, model, dbmachines, value=None,
fix_existing=False):
session = object_session(model)
if fix_existing:
old_nic_model = model.machine_specs.nic_model
q = session.query(Interface)
            # Skip interfaces where the model was set explicitly to something
# other than the default
q = q.filter(Interface.model == old_nic_model)
q = q.join(HardwareEntity)
q = q.filter(HardwareEntity.model == model)
q = q.options(contains_eager('hardware_entity'))
for dbiface in q:
dbiface.model = value
dbmachines.add(dbiface.hardware_entity)
model.machine_specs.nic_model = value
|
|
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for Ceph backup service."""
import hashlib
import os
import tempfile
import uuid
import mock
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
import six
from six.moves import range
from jacket.storage.backup import driver
from jacket.storage.backup.drivers import ceph
from jacket import context
from jacket import db
from jacket.storage import exception
from jacket.storage.i18n import _
from jacket.objects import storage
from jacket.storage import test
from jacket.storage.volume.drivers import rbd as rbddriver
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
def __init__(self, *args, **kwargs):
RAISED_EXCEPTIONS.append(self.__class__)
class MockImageNotFoundException(MockException):
"""Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
"""Used as mock for rbd.ImageBusy."""
class MockObjectNotFoundException(MockException):
"""Used as mock for rados.MockObjectNotFoundException."""
def common_mocks(f):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
# NOTE(dosaboy): mock Popen to, by default, raise Exception in order to
# ensure that any test ending up in a subprocess fails
# if not properly mocked.
@mock.patch('subprocess.Popen', spec=True)
# NOTE(dosaboy): mock out eventlet.sleep() so that it does nothing.
@mock.patch('eventlet.sleep', spec=True)
@mock.patch('time.time', spec=True)
# NOTE(dosaboy): set spec to empty object so that hasattr calls return
# False by default.
@mock.patch('storage.backup.drivers.ceph.rbd')
@mock.patch('storage.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd, mock_time, mock_sleep,
mock_popen):
mock_time.side_effect = inst.time_inc
mock_popen.side_effect = Exception
inst.mock_rados = mock_rados
inst.mock_rbd = mock_rbd
inst.mock_rbd.ImageBusy = MockImageBusyException
inst.mock_rbd.ImageNotFound = MockImageNotFoundException
inst.service.rbd = inst.mock_rbd
inst.service.rados = inst.mock_rados
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
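# Note (added): _common_inner_inner2 is invoked with no arguments because the
# stacked @mock.patch decorators supply them: the innermost decorator maps to
# the first parameter, so mock_rados, mock_rbd, mock_time, mock_sleep and
# mock_popen are injected bottom-up, giving each decorated test fresh mocks.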
class BackupCephTestCase(test.TestCase):
"""Test case for ceph backup driver."""
def _create_volume_db_entry(self, id, size):
vol = {'id': id, 'size': size, 'status': 'available'}
return storage.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size,
userid=str(uuid.uuid4()),
projectid=str(uuid.uuid4())):
backup = {'id': backupid, 'size': size, 'volume_id': volid,
'user_id': userid, 'project_id': projectid}
return storage.backup_create(self.ctxt, backup)['id']
def time_inc(self):
self.counter += 1
return self.counter
def _get_wrapped_rbd_io(self, rbd_image):
rbd_meta = rbddriver.RBDImageMetadata(rbd_image, 'pool_foo',
'user_foo', 'conf_foo')
return rbddriver.RBDImageIOWrapper(rbd_meta)
def _setup_mock_popen(self, mock_popen, retval=None, p1hook=None,
p2hook=None):
class MockPopen(object):
hooks = [p2hook, p1hook]
def __init__(mock_inst, cmd, *args, **kwargs):
self.callstack.append('popen_init')
mock_inst.stdout = mock.Mock()
mock_inst.stdout.close = mock.Mock()
mock_inst.stdout.close.side_effect = \
lambda *args: self.callstack.append('stdout_close')
mock_inst.returncode = 0
hook = mock_inst.__class__.hooks.pop()
if hook is not None:
hook()
def communicate(mock_inst):
self.callstack.append('communicate')
return retval
mock_popen.side_effect = MockPopen
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(BackupCephTestCase, self).setUp()
self.ctxt = context.get_admin_context()
# Create volume.
self.volume_size = 1
self.volume_id = str(uuid.uuid4())
self._create_volume_db_entry(self.volume_id, self.volume_size)
self.volume = storage.volume_get(self.ctxt, self.volume_id)
# Create backup of volume.
self.backup_id = str(uuid.uuid4())
self._create_backup_db_entry(self.backup_id, self.volume_id,
self.volume_size)
self.backup = storage.Backup.get_by_id(self.ctxt, self.backup_id)
# Create alternate volume.
self.alt_volume_id = str(uuid.uuid4())
self._create_volume_db_entry(self.alt_volume_id, self.volume_size)
self.alt_volume = storage.volume_get(self.ctxt, self.alt_volume_id)
self.chunk_size = 1024
self.num_chunks = 128
self.data_length = self.num_chunks * self.chunk_size
self.checksum = hashlib.sha256()
# Create a file with some data in it.
self.volume_file = tempfile.NamedTemporaryFile()
self.addCleanup(self.volume_file.close)
for _i in range(0, self.num_chunks):
data = os.urandom(self.chunk_size)
self.checksum.update(data)
self.volume_file.write(data)
self.volume_file.seek(0)
        # Always trigger an exception if a command is executed since it should
        # always be dealt with gracefully. At the time of writing only rbd
        # export/import-diff is executed and, if they fail, we expect to fall
        # back to an alternative means of backing up.
mock_exec = mock.Mock()
mock_exec.side_effect = processutils.ProcessExecutionError
self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec)
# Ensure that time.time() always returns more than the last time it was
# called to avoid div by zero errors.
self.counter = float(0)
self.callstack = []
@common_mocks
def test_get_rbd_support(self):
del self.service.rbd.RBD_FEATURE_LAYERING
del self.service.rbd.RBD_FEATURE_STRIPINGV2
self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_LAYERING'))
self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_STRIPINGV2'))
oldformat, features = self.service._get_rbd_support()
self.assertTrue(oldformat)
self.assertEqual(0, features)
self.service.rbd.RBD_FEATURE_LAYERING = 1
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1, features)
self.service.rbd.RBD_FEATURE_STRIPINGV2 = 2
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1 | 2, features)
@common_mocks
def test_get_most_recent_snap(self):
last = 'backup.%s.snap.9824923.1212' % (uuid.uuid4())
image = self.mock_rbd.Image.return_value
image.list_snaps.return_value = \
[{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': last},
{'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]
snap = self.service._get_most_recent_snap(image)
self.assertEqual(last, snap)
@common_mocks
def test_get_backup_snap_name(self):
snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
def get_backup_snaps(inst, *args):
return [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4()),
'backup_id': str(uuid.uuid4())},
{'name': snap_name,
'backup_id': self.backup_id}]
with mock.patch.object(self.service, 'get_backup_snaps'):
name = self.service._get_backup_snap_name(self.service.rbd.Image(),
'base_foo',
self.backup_id)
self.assertIsNone(name)
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.side_effect = get_backup_snaps
name = self.service._get_backup_snap_name(self.service.rbd.Image(),
'base_foo',
self.backup_id)
self.assertEqual(snap_name, name)
self.assertTrue(mock_get_backup_snaps.called)
@common_mocks
def test_get_backup_snaps(self):
image = self.mock_rbd.Image.return_value
image.list_snaps.return_value = [
{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.wambam.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': 'bbbackup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]
snaps = self.service.get_backup_snaps(image)
self.assertEqual(3, len(snaps))
@common_mocks
def test_transfer_data_from_rbd_to_file(self):
def fake_read(offset, length):
self.volume_file.seek(offset)
return self.volume_file.read(length)
self.mock_rbd.Image.return_value.read.side_effect = fake_read
self.mock_rbd.Image.return_value.size.return_value = self.data_length
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
self.service._transfer_data(rbd_io, 'src_foo', test_file,
'dest_foo', self.data_length)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_rbd_to_rbd(self):
def fake_read(offset, length):
self.volume_file.seek(offset)
return self.volume_file.read(length)
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
rbd1 = mock.Mock()
rbd1.read.side_effect = fake_read
rbd1.size.return_value = os.fstat(self.volume_file.fileno()).st_size
rbd2 = mock.Mock()
rbd2.write.side_effect = mock_write_data
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
src_rbd_io = self._get_wrapped_rbd_io(rbd1)
dest_rbd_io = self._get_wrapped_rbd_io(rbd2)
self.service._transfer_data(src_rbd_io, 'src_foo', dest_rbd_io,
'dest_foo', self.data_length)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_file_to_rbd(self):
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
self.mock_rbd.Image.return_value.write.side_effect = mock_write_data
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
self.service._transfer_data(self.volume_file, 'src_foo',
rbd_io, 'dest_foo', self.data_length)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_file_to_file(self):
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
self.service._transfer_data(self.volume_file, 'src_foo', test_file,
'dest_foo', self.data_length)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_backup_volume_from_file(self):
checksum = hashlib.sha256()
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
self.service.rbd.Image.return_value.write.side_effect = mock_write_data
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, '_discard_bytes'):
with tempfile.NamedTemporaryFile() as test_file:
self.service.backup(self.backup, self.volume_file)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
self.assertTrue(self.service.rbd.Image.return_value.write.called)
@common_mocks
def test_get_backup_base_name(self):
name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.assertEqual("volume-%s.backup.base" % (self.volume_id), name)
self.assertRaises(exception.InvalidParameterValue,
self.service._get_backup_base_name,
self.volume_id)
name = self.service._get_backup_base_name(self.volume_id, '1234')
self.assertEqual("volume-%s.backup.%s" % (self.volume_id, '1234'),
name)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
with mock.patch.object(self.service, '_full_backup') as \
mock_full_backup:
with mock.patch.object(self.service,
'_try_delete_base_image'):
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = rbddriver.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = rbddriver.RBDImageIOWrapper(meta)
self.service.backup(self.backup, rbdio)
self.assertEqual(['popen_init',
'read',
'popen_init',
'write',
'stdout_close',
'communicate'], self.callstack)
self.assertFalse(mock_full_backup.called)
self.assertTrue(mock_get_backup_snaps.called)
# Ensure the files are equal
self.assertEqual(checksum.digest(),
self.checksum.digest())
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In _backup_rbd(), after an exception.BackupRBDOperationFailed
occurs in self._rbd_diff_transfer(), we want to check the
process when the second exception occurs in
self._try_delete_base_image().
"""
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps'), \
mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
def mock_rbd_diff_transfer_side_effect(src_name, src_pool,
dest_name, dest_pool,
src_user, src_conf,
dest_user, dest_conf,
src_snap, from_snap):
raise exception.BackupRBDOperationFailed(_('mock'))
# Raise a pseudo exception.BackupRBDOperationFailed.
mock_rbd_diff_transfer.side_effect \
= mock_rbd_diff_transfer_side_effect
with mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service,
'_try_delete_base_image') as \
mock_try_delete_base_image:
def mock_try_delete_base_image_side_effect(backup_id,
volume_id,
base_name):
raise self.service.rbd.ImageNotFound(_('mock'))
                # Raise a pseudo exception rbd.ImageNotFound.
mock_try_delete_base_image.side_effect \
= mock_try_delete_base_image_side_effect
with mock.patch.object(self.service, '_backup_metadata'):
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = rbddriver.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = rbddriver.RBDImageIOWrapper(meta)
# We expect that the second exception is
# notified.
self.assertRaises(
self.service.rbd.ImageNotFound,
self.service.backup,
self.backup, rbdio)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd_fail2(self, mock_popen, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception.BackupOperationError occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete().
"""
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps'), \
mock.patch.object(self.service, '_rbd_diff_transfer'), \
mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service, '_backup_metadata') as \
mock_backup_metadata:
def mock_backup_metadata_side_effect(backup):
raise exception.BackupOperationError(_('mock'))
# Raise a pseudo exception.BackupOperationError.
mock_backup_metadata.side_effect = mock_backup_metadata_side_effect
with mock.patch.object(self.service, 'delete') as mock_delete:
def mock_delete_side_effect(backup):
raise self.service.rbd.ImageBusy()
# Raise a pseudo exception rbd.ImageBusy.
mock_delete.side_effect = mock_delete_side_effect
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = rbddriver.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = rbddriver.RBDImageIOWrapper(meta)
# We expect that the second exception is
# notified.
self.assertRaises(
self.service.rbd.ImageBusy,
self.service.backup,
self.backup, rbdio)
@common_mocks
def test_backup_vol_length_0(self):
volume_id = str(uuid.uuid4())
self._create_volume_db_entry(volume_id, 0)
backup_id = str(uuid.uuid4())
self._create_backup_db_entry(backup_id, volume_id, 1)
backup = storage.Backup.get_by_id(self.ctxt, backup_id)
self.assertRaises(exception.InvalidParameterValue, self.service.backup,
backup, self.volume_file)
@common_mocks
def test_restore(self):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
def mock_read_data(offset, length):
return self.volume_file.read(self.data_length)
self.mock_rbd.Image.return_value.read.side_effect = mock_read_data
self.mock_rbd.Image.return_value.size.return_value = \
self.chunk_size * self.num_chunks
with mock.patch.object(self.service, '_restore_metadata') as \
mock_restore_metadata:
with mock.patch.object(self.service, '_discard_bytes') as \
mock_discard_bytes:
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
self.service.restore(self.backup, self.volume_id,
test_file)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
self.assertTrue(mock_restore_metadata.called)
                self.assertTrue(mock_discard_bytes.called)
self.assertTrue(self.service.rbd.Image.return_value.read.called)
@common_mocks
def test_discard_bytes(self):
        # Lower the chunk size to a memory-manageable number.
self.service.chunk_size = 1024
image = self.mock_rbd.Image.return_value
wrapped_rbd = self._get_wrapped_rbd_io(image)
self.service._discard_bytes(wrapped_rbd, 0, 0)
self.assertEqual(0, image.discard.call_count)
self.service._discard_bytes(wrapped_rbd, 0, 1234)
self.assertEqual(1, image.discard.call_count)
image.reset_mock()
# Test discard with no remainder
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
self.service._discard_bytes(wrapped_rbd, 0,
self.service.chunk_size * 2)
self.assertEqual(2, image.write.call_count)
self.assertEqual(2, image.flush.call_count)
self.assertFalse(image.discard.called)
image.reset_mock()
# Now test with a remainder.
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
self.service._discard_bytes(wrapped_rbd, 0,
(self.service.chunk_size * 2) + 1)
self.assertEqual(3, image.write.call_count)
self.assertEqual(3, image.flush.call_count)
self.assertFalse(image.discard.called)
@common_mocks
def test_delete_backup_snapshot(self):
snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
base_name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.mock_rbd.RBD.remove_snap = mock.Mock()
with mock.patch.object(self.service, '_get_backup_snap_name') as \
mock_get_backup_snap_name:
mock_get_backup_snap_name.return_value = snap_name
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = None
rem = self.service._delete_backup_snapshot(self.mock_rados,
base_name,
self.backup_id)
self.assertTrue(mock_get_backup_snap_name.called)
self.assertTrue(mock_get_backup_snaps.called)
self.assertEqual((snap_name, 0), rem)
@common_mocks
    @mock.patch('jacket.storage.backup.drivers.ceph.VolumeMetadataBackup',
                spec=True)
def test_try_delete_base_image_diff_format(self, mock_meta_backup):
backup_name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
with mock.patch.object(self.service, '_delete_backup_snapshot') as \
mock_del_backup_snap:
snap_name = self.service._get_new_snap_name(self.backup_id)
mock_del_backup_snap.return_value = (snap_name, 0)
self.service.delete(self.backup)
self.assertTrue(mock_del_backup_snap.called)
self.assertTrue(self.mock_rbd.RBD.return_value.list.called)
self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
@common_mocks
    @mock.patch('jacket.storage.backup.drivers.ceph.VolumeMetadataBackup',
                spec=True)
def test_try_delete_base_image(self, mock_meta_backup):
backup_name = self.service._get_backup_base_name(self.volume_id,
self.backup_id)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps'):
self.service.delete(self.backup)
self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
@common_mocks
def test_try_delete_base_image_busy(self):
"""This should induce retries then raise rbd.ImageBusy."""
backup_name = self.service._get_backup_base_name(self.volume_id,
self.backup_id)
rbd = self.mock_rbd.RBD.return_value
rbd.list.return_value = [backup_name]
rbd.remove.side_effect = self.mock_rbd.ImageBusy
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
self.assertRaises(self.mock_rbd.ImageBusy,
self.service._try_delete_base_image,
self.backup['id'], self.backup['volume_id'])
self.assertTrue(mock_get_backup_snaps.called)
self.assertTrue(rbd.list.called)
self.assertTrue(rbd.remove.called)
self.assertTrue(MockImageBusyException in RAISED_EXCEPTIONS)
@common_mocks
    @mock.patch('jacket.storage.backup.drivers.ceph.VolumeMetadataBackup',
                spec=True)
def test_delete(self, mock_meta_backup):
with mock.patch.object(self.service, '_try_delete_base_image'):
self.service.delete(self.backup)
self.assertEqual([], RAISED_EXCEPTIONS)
@common_mocks
    @mock.patch('jacket.storage.backup.drivers.ceph.VolumeMetadataBackup',
                spec=True)
def test_delete_image_not_found(self, mock_meta_backup):
with mock.patch.object(self.service, '_try_delete_base_image') as \
mock_del_base:
mock_del_base.side_effect = self.mock_rbd.ImageNotFound
            # ImageNotFound is caught so that the storage DB entry can be
            # cleared.
self.service.delete(self.backup)
self.assertEqual([MockImageNotFoundException], RAISED_EXCEPTIONS)
@common_mocks
def test_diff_restore_allowed_with_image_not_exists(self):
"""Test diff restore not allowed when backup not diff-format."""
not_allowed = (False, None)
backup_base = 'backup.base'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (False, backup_base)
resp = self.service._diff_restore_allowed(*args_vols_different)
self.assertEqual(not_allowed, resp)
mock_rbd_image_exists.assert_called_once_with(
backup_base,
self.backup['volume_id'],
self.mock_rados)
@common_mocks
def test_diff_restore_allowed_with_no_restore_point(self):
"""Test diff restore not allowed when no restore point found.
Detail conditions:
1. backup base is diff-format
2. restore point does not exist
"""
not_allowed = (False, None)
backup_base = 'backup.base'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = None
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual(not_allowed, resp)
self.assertTrue(mock_rbd_image_exists.called)
mock_get_restore_point.assert_called_once_with(
backup_base,
self.backup['id'])
@common_mocks
def test_diff_restore_allowed_with_not_rbd(self):
"""Test diff restore not allowed when destination volume is not rbd.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is not an rbd.
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
mock_file_is_rbd.assert_called_once_with(
rbd_io)
@common_mocks
def test_diff_restore_allowed_with_same_volume(self):
"""Test diff restore not allowed when volumes are same.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are the same
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_same = [backup_base, self.backup, self.volume, rbd_io,
self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
resp = self.service._diff_restore_allowed(*args_vols_same)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
@common_mocks
def test_diff_restore_allowed_with_has_extents(self):
"""Test diff restore not allowed when destination volume has data.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are different
5. destination volume has data on it - full copy is mandated
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
with mock.patch.object(self.service, '_rbd_has_extents') \
as mock_rbd_has_extents:
mock_rbd_has_extents.return_value = True
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
mock_rbd_has_extents.assert_called_once_with(
rbd_io.rbd_image)
@common_mocks
def test_diff_restore_allowed_with_no_extents(self):
"""Test diff restore allowed when no data in destination volume.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are different
5. destination volume no data on it
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
with mock.patch.object(self.service, '_rbd_has_extents') \
as mock_rbd_has_extents:
mock_rbd_has_extents.return_value = False
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((True, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
self.assertTrue(mock_rbd_has_extents.called)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_piped_execute(self, mock_popen, mock_fcntl):
mock_fcntl.return_value = 0
self._setup_mock_popen(mock_popen, ['out', 'err'])
self.service._piped_execute(['foo'], ['bar'])
self.assertEqual(['popen_init', 'popen_init',
'stdout_close', 'communicate'], self.callstack)
@common_mocks
    def test_restore_metadata(self):
version = 2
def mock_read(*args):
base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
glance_tag: {'image_name': 'image.glance'},
'version': version})
self.mock_rados.Object.return_value.read.side_effect = mock_read
self.service._restore_metadata(self.backup, self.volume_id)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.assertTrue(self.mock_rados.Object.return_value.read.called)
version = 3
try:
self.service._restore_metadata(self.backup, self.volume_id)
except exception.BackupOperationError as exc:
msg = _("Metadata restore failed due to incompatible version")
self.assertEqual(msg, six.text_type(exc))
else:
# Force a test failure
self.assertFalse(True)
@common_mocks
    @mock.patch('jacket.storage.backup.drivers.ceph.VolumeMetadataBackup',
                spec=True)
def test_backup_metadata_already_exists(self, mock_meta_backup):
def mock_set(json_meta):
msg = (_("Metadata backup object '%s' already exists") %
("backup.%s.meta" % (self.backup_id)))
raise exception.VolumeMetadataBackupExists(msg)
mock_meta_backup.return_value.set = mock.Mock()
mock_meta_backup.return_value.set.side_effect = mock_set
with mock.patch.object(self.service, 'get_metadata') as \
mock_get_metadata:
mock_get_metadata.return_value = "some.json.metadata"
try:
self.service._backup_metadata(self.backup)
except exception.BackupOperationError as e:
msg = (_("Failed to backup volume metadata - Metadata backup "
"object 'backup.%s.meta' already exists") %
(self.backup_id))
self.assertEqual(msg, six.text_type(e))
else:
# Make the test fail
self.assertFalse(True)
self.assertFalse(mock_meta_backup.set.called)
@common_mocks
    def test_backup_metadata_error(self):
"""Ensure that delete() is called if the metadata backup fails.
Also ensure that the exception is propagated to the caller.
"""
with mock.patch.object(self.service, '_backup_metadata') as \
mock_backup_metadata:
mock_backup_metadata.side_effect = exception.BackupOperationError
with mock.patch.object(self.service, '_get_volume_size_gb'):
with mock.patch.object(self.service, '_file_is_rbd',
return_value=False):
with mock.patch.object(self.service, '_full_backup'):
with mock.patch.object(self.service, 'delete') as \
mock_delete:
self.assertRaises(exception.BackupOperationError,
self.service.backup, self.backup,
mock.Mock(),
backup_metadata=True)
self.assertTrue(mock_delete.called)
@common_mocks
def test_restore_invalid_metadata_version(self):
def mock_read(*args):
base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
glance_tag: {'image_name': 'image.glance'},
'version': 3})
self.mock_rados.Object.return_value.read.side_effect = mock_read
with mock.patch.object(ceph.VolumeMetadataBackup, '_exists') as \
mock_exists:
mock_exists.return_value = True
self.assertRaises(exception.BackupOperationError,
self.service._restore_metadata,
self.backup, self.volume_id)
self.assertTrue(mock_exists.called)
self.assertTrue(self.mock_rados.Object.return_value.read.called)
def common_meta_backup_mocks(f):
"""Decorator to set mocks common to all metadata backup tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
        @mock.patch('jacket.storage.backup.drivers.ceph.rbd')
        @mock.patch('jacket.storage.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd):
inst.mock_rados = mock_rados
inst.mock_rbd = mock_rbd
inst.mock_rados.ObjectNotFound = MockObjectNotFoundException
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
class VolumeMetadataBackupTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(VolumeMetadataBackupTestCase, self).setUp()
self.backup_id = str(uuid.uuid4())
self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id)
@common_meta_backup_mocks
def test_name(self):
self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name)
@common_meta_backup_mocks
def test_exists(self):
# True
self.assertTrue(self.mb.exists)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.mock_rados.Object.return_value.reset_mock()
# False
self.mock_rados.Object.return_value.stat.side_effect = (
self.mock_rados.ObjectNotFound)
self.assertFalse(self.mb.exists)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
@common_meta_backup_mocks
def test_set(self):
obj_data = []
called = []
def mock_read(*args):
called.append('read')
self.assertTrue(len(obj_data) == 1)
return obj_data[0]
def _mock_write(data):
obj_data.append(data)
called.append('write')
self.mb.get = mock.Mock()
self.mb.get.side_effect = mock_read
with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as mock_write:
mock_write.side_effect = _mock_write
self.mb.set({'foo': 'bar'})
self.assertEqual({'foo': 'bar'}, self.mb.get())
self.assertTrue(self.mb.get.called)
self.mb._exists = mock.Mock()
self.mb._exists.return_value = True
# use the unmocked set() method.
self.assertRaises(exception.VolumeMetadataBackupExists, self.mb.set,
{'doo': 'dah'})
# check the meta obj state has not changed.
self.assertEqual({'foo': 'bar'}, self.mb.get())
self.assertEqual(['write', 'read', 'read'], called)
@common_meta_backup_mocks
def test_get(self):
self.mock_rados.Object.return_value.stat.side_effect = (
self.mock_rados.ObjectNotFound)
self.mock_rados.Object.return_value.read.return_value = 'meta'
self.assertIsNone(self.mb.get())
self.mock_rados.Object.return_value.stat.side_effect = None
self.assertEqual('meta', self.mb.get())
@common_meta_backup_mocks
    def test_remove_if_exists(self):
with mock.patch.object(self.mock_rados.Object, 'remove') as \
mock_remove:
mock_remove.side_effect = self.mock_rados.ObjectNotFound
self.mb.remove_if_exists()
self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
self.mock_rados.Object.remove.side_effect = None
self.mb.remove_if_exists()
self.assertEqual([], RAISED_EXCEPTIONS)
|
|
from bamboo_boy.utils import with_canopy
import random
from unittest.case import expectedFailure
from django.contrib.auth.decorators import login_required
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.decorators import method_decorator
from rest_framework.test import APIRequestFactory, APITestCase
from comments.models import DocumentNode, DocumentComment, NodeSnapshot
from comments.views import add_node, get_metadata, update_node
from privacy.backend import AdminNotAuthorized
from projects.views.private import project_comments_moderation
from rtd_tests.factories.comments_factories import DocumentNodeFactory, \
DocumentCommentFactory, ProjectsWithComments
from rtd_tests.factories.general_factories import UserFactory
from rtd_tests.factories.projects_factories import ProjectFactory
@with_canopy(ProjectsWithComments)
class ModerationTests(TestCase):
def test_approved_comments(self):
c = self.canopy.first_unmoderated_comment
# This comment has never been approved...
self.assertFalse(c.has_been_approved_since_most_recent_node_change())
# ...until now!
c.moderate(user=self.canopy.owner, decision=1)
self.assertTrue(c.has_been_approved_since_most_recent_node_change())
def test_new_node_snapshot_causes_comment_to_show_as_not_approved_since_change(self):
c = self.canopy.first_unmoderated_comment
c.moderate(user=self.canopy.owner, decision=1)
self.assertTrue(c.has_been_approved_since_most_recent_node_change())
c.node.snapshots.create(hash=random.getrandbits(128))
self.assertFalse(c.has_been_approved_since_most_recent_node_change())
def test_unmoderated_project_shows_all_comments(self):
visible_comments = self.canopy.unmoderated_node.visible_comments()
self.assertIn(self.canopy.first_unmoderated_comment, visible_comments)
self.assertIn(self.canopy.second_unmoderated_comment, visible_comments)
def test_unapproved_comment_is_not_visible_on_moderated_project(self):
# We take a look at the visible comments and find that neither comment is among them.
visible_comments = self.canopy.moderated_node.visible_comments()
self.assertNotIn(self.canopy.first_moderated_comment, visible_comments)
self.assertNotIn(self.canopy.second_moderated_comment, visible_comments)
def test_moderated_project_with_unchanged_nodes_shows_only_approved_comment(self):
# Approve the first comment...
self.canopy.first_moderated_comment.moderate(user=self.canopy.owner, decision=1)
# ...and find that the first comment, but not the second one, is visible.
visible_comments = self.canopy.moderated_node.visible_comments()
self.assertIn(self.canopy.first_moderated_comment, visible_comments)
self.assertNotIn(self.canopy.second_moderated_comment, visible_comments)
def test_moderated_project_with_changed_nodes_dont_show_comments_that_havent_been_approved_since(self):
# Approve the first comment...
self.canopy.first_moderated_comment.moderate(user=self.canopy.owner, decision=1)
# ...but this time, change the node.
self.canopy.first_moderated_comment.node.snapshots.create(hash=random.getrandbits(128))
# Now it does not show as visible.
visible_comments = self.canopy.moderated_node.visible_comments()
self.assertNotIn(self.canopy.first_moderated_comment, visible_comments)
def test_unapproved_comments_appear_in_moderation_queue(self):
queue = self.canopy.moderated_project.moderation_queue()
self.assertIn(self.canopy.first_moderated_comment, queue)
self.assertIn(self.canopy.second_moderated_comment, queue)
def test_approved_comments_do_not_appear_in_moderation_queue(self):
self.canopy.first_moderated_comment.moderate(user=self.canopy.owner, decision=1)
queue = self.canopy.moderated_project.moderation_queue()
self.assertNotIn(self.canopy.first_moderated_comment, queue)
self.assertIn(self.canopy.second_moderated_comment, queue)
class NodeAndSnapshotTests(TestCase):
def test_update_with_same_hash_does_not_create_new_snapshot(self):
node = DocumentNodeFactory()
hash = "SOMEHASH"
commit = "SOMEGITCOMMIT"
# We initially have just one snapshot.
self.assertEqual(node.snapshots.count(), 1)
# ...but when we update the hash, we have two.
node.update_hash(hash, commit)
self.assertEqual(node.snapshots.count(), 2)
# If we update with the same exact hash and commit, it doesn't create a new snapshot.
node.update_hash(hash, commit)
self.assertEqual(node.snapshots.count(), 2)
def test_node_cannot_be_created_without_commit_and_hash(self):
project = ProjectFactory()
some_version = project.versions.all()[0]
self.assertRaises(TypeError,
DocumentNode.objects.create,
project=project,
version=some_version,
hash=random.getrandbits(128)
)
self.assertRaises(TypeError,
DocumentNode.objects.create,
project=project,
version=some_version,
commit=random.getrandbits(128)
)
    def test_node_can_be_sought_from_new_hash(self):
first_hash = "THEoriginalHASH"
second_hash = 'ANEWCRAZYHASH'
node = DocumentNodeFactory(hash=first_hash)
        comment = DocumentCommentFactory()
node.update_hash(second_hash, 'ANEWCRAZYCOMMIT')
node_from_orm = DocumentNode.objects.from_hash(node.version.slug,
node.page,
node.latest_hash(),
project_slug=node.project.slug)
self.assertEqual(node, node_from_orm)
node.update_hash(first_hash, 'AthirdCommit')
node_from_orm2 = DocumentNode.objects.from_hash(node.version.slug, node.page, first_hash, node.project.slug)
self.assertEqual(node, node_from_orm2)
@expectedFailure
def test_nodes_with_same_hash_oddness(self):
node_hash = "AcommonHASH"
page = "somepage"
commit = "somecommit"
project = ProjectFactory()
project.add_node(node_hash=node_hash,
page=page,
version=project.versions.all()[0].slug,
commit=commit,
)
# A new commit with a second instance of the exact same content.
project.add_node(node_hash=node_hash,
page=page,
version=project.versions.all()[0].slug,
commit="ANEWCOMMIT",
)
try:
project.nodes.from_hash(project.versions.all()[0].slug, page, node_hash, project.slug)
except NotImplementedError:
self.fail("We don't have indexing yet.")
@with_canopy(ProjectsWithComments)
class CommentModerationViewsTests(TestCase):
def test_unmoderated_comments_are_listed_in_view(self):
request = RequestFactory()
request.user = self.canopy.owner
request.META = {}
response = project_comments_moderation(request, self.canopy.moderated_project.slug)
self.assertIn(self.canopy.first_moderated_comment.text, response.content)
@with_canopy(ProjectsWithComments)
class CommentAPIViewsTests(APITestCase):
request_factory = APIRequestFactory()
def test_get_comments_view(self):
number_of_comments = DocumentComment.objects.count() # (from the canopy)
response = self.client.get('/api/v2/comments/')
self.assertEqual(number_of_comments, response.data['count'])
# moooore comments.
DocumentCommentFactory.create_batch(50)
response = self.client.get('/api/v2/comments/')
self.assertEqual(number_of_comments + 50, response.data['count'])
def test_get_metadata_view(self):
node = DocumentNodeFactory()
get_data = {
'project': node.project.slug,
'version': node.version.slug,
'page': node.page
}
request = self.request_factory.get('/_get_metadata/', get_data)
response = get_metadata(request)
response.render()
number_of_comments = response.data[node.latest_hash()]
# There haven't been any comments yet.
self.assertEqual(number_of_comments, 0)
# Now we'll make one.
comment = DocumentCommentFactory(node=node, text="Our first comment!")
second_request = self.request_factory.get('/_get_metadata/', get_data)
        second_response = get_metadata(second_request)
second_response.render()
number_of_comments = second_response.data[node.latest_hash()]
# And sure enough - one comment.
self.assertEqual(number_of_comments, 1)
def test_add_node_view(self):
node = self.canopy.moderated_project.nodes.all()[0]
post_data = {
'document': node.page,
'id': node.latest_hash(),
'project': node.project.slug,
'version': node.version.slug,
'commit': node.latest_commit(),
}
# Now let's delete the node....
DocumentNode.objects.all().delete()
# ...we have no nodes.
self.assertEqual(DocumentNode.objects.count(), 0)
# Hit the API again.
request = self.request_factory.post('/_add_node/', post_data)
response = add_node(request)
# We do now have exactly one Node.
self.assertEqual(DocumentNode.objects.count(), 1)
def test_update_node_view(self):
node = DocumentNodeFactory()
# Our node has one snapshot.
self.assertEqual(node.snapshots.count(), 1)
new_hash = "CRAZYnewHASHtoUPDATEnode"
commit = "COOLNEWGITCOMMITHASH"
post_data = {
'old_hash': node.latest_hash(),
'new_hash': new_hash,
'commit': commit,
'project': node.project.slug,
'version': node.version.slug,
'page': node.page
}
request = self.request_factory.post('/_update_node/', post_data)
response = update_node(request)
response.render()
self.assertEqual(response.data['current_hash'], new_hash)
# We now have two snapshots.
self.assertEqual(node.snapshots.count(), 2)
# And the latest hash is the one we just set.
self.assertEqual(node.latest_hash(), new_hash)
def test_add_comment_view_without_existing_hash(self):
comment_text = "Here's a comment added to a new hash."
node = DocumentNodeFactory()
UserFactory(username="test", password="test")
number_of_nodes = DocumentNode.objects.count()
post_data = {
'node': random.getrandbits(128),
'commit': random.getrandbits(128),
'project': node.project.slug,
'version': node.version.slug,
'document_page': node.page,
'text': comment_text
}
self.client.login(username="test", password="test")
response = self.client.post('/api/v2/comments/', post_data)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['text'], comment_text)
self.assertEqual(DocumentNode.objects.count(), number_of_nodes + 1) # We created a new node.
def test_add_comment_view_with_existing_hash(self):
node = DocumentNodeFactory()
user = UserFactory(username="test", password="test")
comment_text = "Here's a comment added through the comment view."
post_data = {
'node': node.latest_hash(),
'commit': node.latest_hash(),
'project': node.project.slug,
'version': node.version.slug,
'document_page': node.page,
'text': comment_text
}
self.client.login(username="test", password="test")
response = self.client.post('/api/v2/comments/', post_data)
comment_from_orm = node.comments.filter(text=comment_text)
self.assertTrue(comment_from_orm.exists())
self.assertEqual(comment_from_orm[0].node, node,
"The comment exists, but lives in a different node! Not supposed to happen.")
def test_add_comment_view_with_changed_hash(self):
first_hash = "THEoriginalHASH"
second_hash = 'ANEWCRAZYHASH'
comment_text = "This comment will follow its node despite hash changing."
# Create a comment on a node whose latest hash is the first one.
node = DocumentNodeFactory(hash=first_hash)
        comment = DocumentCommentFactory(node=node, text=comment_text)
# Now change the node's hash.
node.update_hash(second_hash, 'ANEWCRAZYCOMMIT')
node_from_orm = DocumentNode.objects.from_hash(version_slug=node.version.slug,
page=node.page,
node_hash=node.latest_hash(),
project_slug=node.project.slug)
# It's the same node.
self.assertEqual(node, node_from_orm)
# Get all the comments with the second hash.
query_params = {'node': second_hash,
'document_page': node.page,
'project': node.project.slug,
'version': node.version.slug,
}
response = self.client.get('/api/v2/comments/', query_params)
self.assertEqual(response.data['results'][0]['text'], comment_text)
def test_retrieve_comment_on_old_hash(self):
pass
def test_post_comment_on_old_hash(self):
pass
def test_moderate_comment_by_approving(self):
user = UserFactory(username="test", password="test")
project = ProjectFactory()
project.users.add(user)
node = DocumentNodeFactory(project=project)
comment = DocumentCommentFactory(node=node)
post_data = {
'decision': 1,
}
self.assertFalse(comment.has_been_approved_since_most_recent_node_change())
self.client.login(username="test", password="test")
response = self.client.put('/api/v2/comments/%s/moderate/' % comment.id, post_data)
self.assertEqual(response.data['decision'], 1)
self.assertTrue(comment.has_been_approved_since_most_recent_node_change())
def test_stranger_cannot_moderate_comments(self):
node = DocumentNodeFactory()
user = UserFactory()
comment = DocumentCommentFactory(node=node)
post_data = {
'decision': 1,
}
response = self.client.put('/api/v2/comments/%s/moderate/' % comment.id,
post_data
)
self.assertEqual(response.status_code, 403)
|
|
# -*- coding: latin-1 -*-
"""Defines the HTML colour names along with their values"""
class InvalidColorException(Exception):
pass
class HTMLColors(object):
htmlcolors = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkgrey': '#A9A9A9',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkslategrey': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'grey': '#808080',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgray': '#D3D3D3',
'lightgreen': '#90EE90',
'lightgrey': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370D8',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#D87093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#F4A460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'
}
@classmethod
def get_color_name(cls, color_value):
"""Returns the color name matching this value. Returns an empty string
if no suitable match is found"""
if isinstance(color_value, basestring):
value = color_value.upper()
for col, val in HTMLColors.htmlcolors.iteritems():
if val == value:
return col
return ""
@classmethod
def get_color_value(cls, color_name):
"""Returns the color value mathing this name. Returns an empty string
if no suitable match is found"""
if isinstance(color_name, basestring):
color = color_name.lower()
if color in HTMLColors.htmlcolors:
return HTMLColors.htmlcolors[color]
return ""
@classmethod
def get_color_shortest(cls, color_name_or_value):
"""Attempts to retrieve by name and value and returns the shortest
string. Returns the input string if nothing is found"""
name = cls.get_color_name(color_name_or_value) or color_name_or_value
value = cls.get_color_value(color_name_or_value) or color_name_or_value
if value:
if len(value) == 7:
condensed = [ value[v] for v in range(1, 7, 2) if value[v] == value[v + 1] ]
if len(condensed) == 3:
value = "#" + "".join(condensed)
l_name = len(name)
l_value = len(value)
if l_name == l_value:
return color_name_or_value
if (l_name and not l_value) or (name and l_value and l_name < l_value):
return name
if (l_value and not l_name) or (value and l_name and l_value < l_name):
return value
return color_name_or_value
@classmethod
def is_valid_color_value(cls, color_value):
"""Validates the characters, not the format"""
c_value = color_value.strip().lower()
if c_value and c_value.startswith("#"):
c_value = c_value[1:]
if not c_value:
return False
chars = "abcdef0123456789"
invalid = [ c for c in c_value if chars.find(c) == -1 ]
return not invalid
@classmethod
def get_rgb(cls, color):
if not color.startswith("#"):
            # Are we being passed a color name? If this doesn't work, we
            # can't do anything!
c_value = cls.get_color_value(color)
if c_value:
color = c_value
if cls.is_valid_color_value(color):
if len(color) in (4, 7):
c_value = color[1:]
if len(c_value) == 3:
r, g, b = [ int(hex, 16) for hex in [ c_value[cnt:cnt + 1] * 2 for cnt in range(3) ] ]
else:
r, g, b = [ int(hex, 16) for hex in [ c_value[cnt * 2:cnt * 2 + 2] for cnt in range(3) ] ]
return r, g, b
raise InvalidColorException("%s is not a valid color" % ( color, ))
@classmethod
def get_color_from_rgb(cls, r, g, b):
"""Return an html color from rgb"""
value = "#" + "".join([ ("0" + hex(i)[2:])[-2:] for i in (r, g, b) ])
return cls.get_color_shortest(value)
@classmethod
def lighten(cls, color, percentage=25):
"""Lightens a HTML color by the percentage specified"""
r, g, b = cls.get_rgb(color)
if percentage:
r, g, b = [ int(i + (percentage * (255 - i + 1) / 100.0)) for i in (r, g, b) ]
return cls.get_color_from_rgb(r, g, b)
@classmethod
def darken(cls, color, percentage=25):
"""Darkens a HTML colour by the percentage specified"""
r, g, b = cls.get_rgb(color)
if percentage:
r, g, b = [ int(i - (percentage * i / 100.0)) for i in (r, g, b) ]
return cls.get_color_from_rgb(r, g, b)
@classmethod
def get_rgb_from_hsl(cls, h, s, l):
if s == 0:
r = g = b = int(l)
else:
if l < 0.5:
temp2 = l * (1.0 + s)
else:
temp2 = l + s - l * s
temp1 = 2.0 * l - temp2
hp = h / 360.0
def _get_color(temp1, temp2, temp3):
if temp3 < 0:
temp3 = temp3 + 1.0
if temp3 > 1:
temp3 = temp3 - 1.0
if 6.0 * temp3 < 1:
return temp1 + (temp2 - temp1) * 6.0 * temp3
elif 2.0 * temp3 < 1:
return temp2
elif 3.0 * temp3 < 2:
return temp1 + (temp2 - temp1) * ((2.0/3.0) - temp3) * 6.0
return temp1
r = int(_get_color(temp1, temp2, hp + 1.0/3.0) * 255)
g = int(_get_color(temp1, temp2, hp) * 255)
b = int(_get_color(temp1, temp2, hp - 1.0/3.0) * 255)
return (r, g, b)
@classmethod
def get_color_from_hsl(cls, h, s, l):
rgb = cls.get_rgb_from_hsl(h, s, l)
return cls.get_color_from_rgb(*rgb)
@classmethod
def get_hsl_from_rgb(cls, r, g, b):
"""Returns tuple (hue, saturation, lightness) given an rgb"""
r, g, b = [ v / 255.0 for v in (r, g, b) ]
rgb = [r, g, b]
mincolor = min(rgb)
maxcolor = max(rgb)
l = (mincolor + maxcolor) / 2
if maxcolor == mincolor:
s = 0
h = 0
return (h, s, l)
if l < 0.5:
s = (maxcolor - mincolor) / (maxcolor + mincolor)
else:
s = (maxcolor - mincolor) / (2.0 - maxcolor - mincolor)
if r == maxcolor:
h = (g - b) / (maxcolor - mincolor)
if g == maxcolor:
h = 2.0 + (b - r) / (maxcolor - mincolor)
if b == maxcolor:
h = 4.0 + (r - g) / (maxcolor - mincolor)
s, l = [ int(v * 100.0) for v in [s, l] ]
h = int(h * 60.0)
if h < 0:
h = h + 360
return (h, s, l)
@classmethod
def get_hue_from_color(cls, color):
return cls.get_hue_from_rgb(*cls.get_rgb(color))
@classmethod
def get_hue_from_rgb(cls, r, g, b):
return cls.get_hsl_from_rgb(r, g, b)[0]
@classmethod
def get_saturation_from_color(cls, color):
return cls.get_saturation_from_rgb(*cls.get_rgb(color))
@classmethod
def get_saturation_from_rgb(cls, r, g, b):
return cls.get_hsl_from_rgb(r, g, b)[1]
@classmethod
def get_lightness_from_color(cls, color):
return cls.get_lightness_from_rgb(*cls.get_rgb(color))
@classmethod
def get_lightness_from_rgb(cls, r, g, b):
return cls.get_hsl_from_rgb(r, g, b)[2]
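# A minimal, illustrative usage sketch of the class above. It runs only when
# this module is executed directly and relies on nothing outside this file;
# the outputs noted in the comments follow from the colour table and the
# conversions defined above.
if __name__ == "__main__":
    # Look up a named colour and shorten a hex value to its canonical name.
    print(HTMLColors.get_color_value('navy'))        # '#000080'
    print(HTMLColors.get_color_shortest('#FF0000'))  # 'red'
    # Convert pure blue to (hue, saturation, lightness).
    print(HTMLColors.get_hsl_from_rgb(0, 0, 255))    # (240, 100, 50)
    # lighten()/darken() return a new HTML colour string.
    print(HTMLColors.lighten('navy', 50))            # a lighter navy
    print(HTMLColors.darken('#FFFFFF', 25))          # 25% darker than white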
|
|
"""
DeCliff filter contributed by Minecraft Forums user "DrRomz"
Originally posted here:
http://www.minecraftforum.net/topic/13807-mcedit-minecraft-world-editor-compatible-with-mc-beta-18/page__st__3940__p__7648793#entry7648793
"""
from numpy import zeros, array
import itertools
from pymclevel import alphaMaterials
am = alphaMaterials
# Consider below materials when determining terrain height
blocks = [
am.Stone,
am.Grass,
am.Dirt,
am.Bedrock,
am.Sand,
am.Sandstone,
am.Clay,
am.Gravel,
am.GoldOre,
am.IronOre,
am.CoalOre,
am.LapisLazuliOre,
am.DiamondOre,
am.RedstoneOre,
am.RedstoneOreGlowing,
am.Netherrack,
am.SoulSand,
am.Glowstone
]
terrainBlocktypes = [b.ID for b in blocks]
terrainBlockmask = zeros((256,), dtype='bool')
# Truth table used to calculate terrain height
# trees, leaves, etc. sit on top of terrain
terrainBlockmask[terrainBlocktypes] = True
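# Small illustrative check of the mask above; it runs only when this file is
# executed directly, never when it is loaded as an MCEdit filter. Indexing the
# boolean mask with an array of block IDs yields a same-shaped boolean array,
# which is how the filter later marks terrain blocks. Block ID 0 (air) is used
# as an example of a non-terrain block.
if __name__ == "__main__":
    sample_ids = array([am.Stone.ID, 0, am.Dirt.ID])
    print(terrainBlockmask[sample_ids])  # -> [ True False  True]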
inputs = (
# Option to limit change to raise_cliff_floor / lower_cliff_top
# Default is to adjust both and meet somewhere in the middle
("Raise/Lower", ("Both", "Lower Only", "Raise Only")),
)
#
# Calculate the maximum adjustment that can be made from
# cliff_pos in direction dir (-1/1) keeping terrain at most
# maxstep blocks away from previous column
def maxadj(heightmap, slice_no, cliff_pos, dir, pushup, maxstep, slice_width):
ret = 0
if dir < 0:
if cliff_pos < 2:
return 0
end = 0
else:
if cliff_pos > slice_width - 2:
return 0
end = slice_width - 1
for cur_pos in range(cliff_pos, end, dir):
if pushup:
ret = ret + \
max([0, maxstep - dir * heightmap[slice_no, cur_pos] +
dir * heightmap[slice_no, cur_pos + dir]])
else:
ret = ret + \
min([0, -maxstep + dir * heightmap[slice_no, cur_pos] -
dir * heightmap[slice_no, cur_pos + dir]])
return ret
#
# Raise/lower column at cliff face by adj and decrement change as we move away
# from the face. Each level will be at most maxstep blocks from those beside it.
#
# This function doesn't actually change anything, but just sets array 'new'
# with the desired height.
def adjheight(orig, new, slice_no, cliff_pos, dir, adj, can_adj, maxstep, slice_width):
cur_adj = adj
prev = 0
done_adj = 0
if dir < 0:
end = 1
else:
end = slice_width - 1
if adj == 0 or can_adj == 0:
for cur_pos in range(cliff_pos, end, dir):
new[slice_no, cur_pos] = orig[slice_no, cur_pos]
else:
for cur_pos in range(cliff_pos, end, dir):
if adj > 0:
done_adj = done_adj + \
max([0, maxstep - orig[slice_no, cur_pos] +
orig[slice_no, cur_pos + dir]])
if orig[slice_no, cur_pos] - \
orig[slice_no, cur_pos + dir] > 0:
cur_adj = max([0, cur_adj - orig[slice_no, cur_pos] +
orig[slice_no, cur_pos + dir]])
prev = adj - cur_adj
else:
done_adj = done_adj + \
min([0, -maxstep +
orig[slice_no, cur_pos] -
orig[slice_no, cur_pos + dir]])
if orig[slice_no, cur_pos] - \
orig[slice_no, cur_pos + dir] > 0:
cur_adj = min([0, cur_adj + orig[slice_no, cur_pos] - orig[slice_no, cur_pos + dir]])
prev = adj - cur_adj
new[slice_no, cur_pos] = max([0, orig[slice_no, cur_pos] + cur_adj])
if cur_adj != 0 and \
abs(prev) < abs(int(adj * done_adj / can_adj)):
cur_adj += prev - int(adj * done_adj / can_adj)
prev = int(adj * done_adj / can_adj)
new[slice_no, end] = orig[slice_no, end]
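# Illustrative sketch, not part of the original filter: a tiny, made-up heightmap showing how
# maxadj() and adjheight() cooperate on the right-hand side (dir=1) of an 8-block cliff between
# columns 2 and 3. The helper below is hypothetical and is never called by MCEdit; it only
# demonstrates that the adjustment is ramped off so adjacent columns stay within maxstep blocks.
def _example_adjheight_ramp():
    demo_orig = zeros((1, 6), dtype='float32')
    demo_orig[0] = [60, 60, 60, 68, 68, 68]
    demo_new = zeros((1, 6), dtype='uint16')
    # Lowering budget available to the right of the cliff, with steps of at most 2 blocks
    can_right = maxadj(demo_orig, 0, 3, 1, False, 2, 6)  # evaluates to -4 for these values
    # Lower columns 3.. by 4 blocks in total, ramping back toward the untouched edge
    adjheight(demo_orig, demo_new, 0, 3, 1, -4, can_right, 2, 6)
    return demo_new  # columns 3..5 become 64, 66, 68 for these inputs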
def perform(level, box, options):
if box.volume > 16000000:
raise ValueError("Volume too big for this filter method!")
RLOption = options["Raise/Lower"]
schema = level.extractSchematic(box)
schema.removeEntitiesInBox(schema.bounds)
schema.removeTileEntitiesInBox(schema.bounds)
terrainBlocks = terrainBlockmask[schema.Blocks]
coords = terrainBlocks.nonzero()
# Swap values around so long edge of selected rectangle is first
# - the long edge is assumed to run parallel to the cliff face
# and we want to process slices perpendicular to the face
# heightmap will have x,z (or z,x) index with highest ground level
if schema.Width > schema.Length:
heightmap = zeros((schema.Width, schema.Length), dtype='float32')
heightmap[coords[0], coords[1]] = coords[2]
newHeightmap = zeros((schema.Width, schema.Length), dtype='uint16')
slice_count = schema.Width
slice_width = schema.Length
else:
heightmap = zeros((schema.Length, schema.Width), dtype='float32')
heightmap[coords[1], coords[0]] = coords[2]
newHeightmap = zeros((schema.Length, schema.Width), dtype='uint16')
slice_count = schema.Length
slice_width = schema.Width
nonTerrainBlocks = ~terrainBlocks
nonTerrainBlocks &= schema.Blocks != 0
for slice_no in range(0, slice_count):
cliff_height = 0
# determine pos and height of cliff in this slice
for cur_pos in range(0, slice_width - 1):
if abs(heightmap[slice_no, cur_pos] -
heightmap[slice_no, cur_pos + 1]) > abs(cliff_height):
cliff_height = \
heightmap[slice_no, cur_pos] - \
heightmap[slice_no, cur_pos + 1]
cliff_pos = cur_pos
if abs(cliff_height) < 2:
            # nothing to adjust - just copy heightmap to newHeightmap
adjheight(heightmap, newHeightmap, slice_no, 0, 1, 0, 1, 1, slice_width)
continue
        # Try to keep adjusted columns within 1 column of their neighbours,
        # but allow steps of up to 3 blocks up/down per column when needed
for max_step in range(1, 4):
can_left = maxadj(heightmap, slice_no, cliff_pos, -1, cliff_height < 0, max_step, slice_width)
can_right = maxadj(heightmap, slice_no, cliff_pos + 1, 1, cliff_height > 0, max_step, slice_width)
if can_right < 0 and RLOption == "Raise Only":
can_right = 0
if can_right > 0 and RLOption == "Lower Only":
can_right = 0
if can_left < 0 and RLOption == "Raise Only":
can_left = 0
if can_left > 0 and RLOption == "Lower Only":
can_left = 0
if 0 > cliff_height > can_right - can_left:
if abs(can_left) > abs(can_right):
adj_left = -1 * (cliff_height - max([int(cliff_height / 2), can_right]))
adj_right = cliff_height + adj_left
else:
adj_right = cliff_height - max([int(cliff_height / 2), -can_left])
adj_left = -1 * (cliff_height - adj_right + 1)
else:
if 0 < cliff_height < can_right - can_left:
if abs(can_left) > abs(can_right):
adj_left = -1 * (cliff_height - min([int(cliff_height / 2), can_right]))
adj_right = cliff_height + adj_left
else:
adj_right = cliff_height - min([int(cliff_height / 2), -can_left]) - 1
adj_left = -1 * (cliff_height - adj_right)
else:
adj_right = 0
adj_left = 0
continue
break
adjheight(heightmap, newHeightmap, slice_no, cliff_pos, -1, adj_left, can_left, max_step, slice_width)
adjheight(heightmap, newHeightmap, slice_no, cliff_pos + 1, 1, adj_right, can_right, max_step, slice_width)
# OK, newHeightMap has new height for each column
# so it's just a matter of moving everything up/down
for x, z in itertools.product(xrange(1, schema.Width - 1), xrange(1, schema.Length - 1)):
if schema.Width > schema.Length:
oh = heightmap[x, z]
nh = newHeightmap[x, z]
else:
oh = heightmap[z, x]
nh = newHeightmap[z, x]
delta = nh - oh
column = array(schema.Blocks[x, z])
        # Keep the bottom 5 blocks, so we don't lose bedrock
keep = min([5, nh])
Waterdepth = 0
# Detect Water on top
if column[oh + 1:oh + 2] == am.Water.ID or \
column[oh + 1:oh + 2] == am.Ice.ID:
for cur_pos in range(oh + 1, schema.Height):
if column[cur_pos:cur_pos + 1] != am.Water.ID and \
column[cur_pos:cur_pos + 1] != am.Ice.ID:
break
Waterdepth += 1
if delta == 0:
column[oh:] = schema.Blocks[x, z, oh:]
if delta < 0:
# Moving column down
column[keep:delta] = schema.Blocks[x, z, keep - delta:]
column[delta:] = am.Air.ID
if Waterdepth > 0:
                # Avoid leaving small lakes, etc. stranded on the cliff top;
                # replace them with dirt 'n grass
column[nh:nh + 1] = am.Grass.ID
column[nh + 1:nh + 1 + delta] = am.Air.ID
if delta > 0:
# Moving column up
column[keep + delta:] = schema.Blocks[x, z, keep:-delta]
# Put stone in gap at the bottom
column[keep:keep + delta] = am.Stone.ID
if Waterdepth > 0:
if Waterdepth > delta:
# Retain Ice
if column[nh + Waterdepth:nh + Waterdepth + 1] == am.Ice.ID:
column[nh + Waterdepth - delta:nh + 1 + Waterdepth - delta] = \
am.Ice.ID
column[nh + 1 + Waterdepth - delta:nh + 1 + Waterdepth] = am.Air.ID
else:
if Waterdepth < delta - 2:
column[nh:nh + 1] = am.Grass.ID
column[nh + 1:nh + 1 + Waterdepth] = am.Air.ID
else:
# Beach at the edge
column[nh - 4:nh - 2] = am.Sandstone.ID
column[nh - 2:nh + 1] = am.Sand.ID
column[nh + 1:nh + 1 + Waterdepth] = am.Air.ID
schema.Blocks[x, z] = column
level.copyBlocksFrom(schema, schema.bounds, box.origin)
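# Illustrative sketch, not part of the original filter: MCEdit calls perform(level, box, options)
# with the current selection box and an options dict keyed by the labels declared in `inputs`
# above. The wrapper below is hypothetical and never invoked here; it only shows the call shape.
def _example_run_decliff(level, box):
    perform(level, box, {"Raise/Lower": "Both"})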
|
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
from mock import MagicMock
from mock import patch
from mock import PropertyMock
import msgpack
from nanomsg import Socket
from oslo_config import cfg
from oslotest.base import BaseTestCase
from six.moves.queue import Queue
from watcher_metering.publisher.publisher import Publisher
from watcher_metering.publisher.worker import Worker
from watcher_metering.store.loader import StoreClientLoader
from watcher_metering.tests.publisher.publisher_fixtures import ConfFixture
class TestPublisher(BaseTestCase):
# patches to be applied for each test in this test suite
patches = []
def setUp(self):
super(TestPublisher, self).setUp()
self.conf = cfg.ConfigOpts()
self.useFixture(ConfFixture(self.conf))
# Patches the publisher socket class
self.m_publisher_socket_cls = MagicMock(spec=Socket)
# Patches the publisher socket instance
self.m_publisher_socket = MagicMock(spec=Socket, name="nn_socket")
self.m_publisher_socket_cls.return_value = self.m_publisher_socket
self.patches.extend([
# Deactivates the nanomsg socket
patch(
"watcher_metering.publisher.base.nanomsg.Socket",
new=self.m_publisher_socket_cls,
),
patch.object(
StoreClientLoader, "load", new=MagicMock(),
),
])
# Applies all of our patches before each test
for _patch in self.patches:
_patch.start()
self.publisher = Publisher(
use_nanoconfig_service=False,
publisher_endpoint="fake://fake_endpoint",
nanoconfig_service_endpoint="",
nanoconfig_update_endpoint="",
nanoconfig_profile="nanoconfig://test_profile",
metrics_store="riemann",
max_queue_size=5,
max_worker=5,
min_worker=1,
)
def tearDown(self):
super(TestPublisher, self).tearDown()
for _patch in self.patches:
_patch.stop()
@patch.object(Worker, "start", MagicMock())
@patch.object(Queue, "put")
@patch.object(Publisher, "terminated", new_callable=PropertyMock)
def test_on_receive(self, m_terminated, m_put):
# mock the termination condition to finish after 1 iteration
# Last value to mock out the stop() call
m_terminated.side_effect = [False, True, True]
# mock the recv
m_recv = self.m_publisher_socket.recv
fake_metric = OrderedDict(
name="compute.node.cpu.percent",
timestamp="2015-08-04T15:15:45.703542",
unit="%",
type="gauge",
value=97.9,
resource_id="test_node",
host="test_node",
resource_metadata=OrderedDict(
host="test_node",
title="compute.node.cpu.percent",
)
)
m_recv.return_value = msgpack.dumps(fake_metric)
# start publisher
self.publisher.run()
self.assertEqual(self.m_publisher_socket.bind.call_count, 1)
m_put.assert_called_once_with({
'value': 97.9,
'name': 'compute.node.cpu.percent',
'host': 'test_node',
'resource_id': 'test_node',
'timestamp': '2015-08-04T15:15:45.703542',
'resource_metadata': {
'title': 'compute.node.cpu.percent',
'host': 'test_node'
},
'unit': '%',
'type': 'gauge'
})
@patch.object(Publisher, "start_worker")
def test_adjust_pool_size_expand_pool(self, m_start_worker):
self.publisher.max_queue_size = 5
self.publisher.max_worker = 5
self.publisher.min_worker = 1
def _fake_start_worker():
fake_worker = MagicMock(spec=Worker)
self.publisher.workers.append(fake_worker)
m_start_worker.side_effect = _fake_start_worker
self.publisher.start_worker() # Add a fake worker
self.publisher.msg_queue.put("Dummy1") # Add a fake job in the queue
self.publisher.msg_queue.put("Dummy2")
self.publisher.msg_queue.put("Dummy3")
self.publisher.adjust_pool_size()
self.assertEqual(self.publisher.num_workers, 4)
@patch.object(Publisher, "start_worker")
def test_adjust_pool_size_shrink_pool(self, m_start_worker):
self.publisher.max_queue_size = 5
self.publisher.max_worker = 5
self.publisher.min_worker = 1
def _fake_start_worker():
fake_worker = MagicMock(spec=Worker)
self.publisher.workers.append(fake_worker)
m_start_worker.side_effect = _fake_start_worker
self.publisher.start_worker() # Add a fake worker
self.publisher.start_worker()
self.publisher.start_worker()
self.publisher.start_worker()
self.publisher.start_worker()
self.publisher.msg_queue.put("Dummy1") # Add a fake job in the queue
self.publisher.adjust_pool_size()
self.assertEqual(self.publisher.num_workers, 2)
@patch.object(Publisher, "start_worker")
def test_adjust_pool_size_keep_same_size(self, m_start_worker):
self.publisher.max_queue_size = 5
self.publisher.max_worker = 5
self.publisher.min_worker = 1
def _fake_start_worker():
fake_worker = MagicMock(spec=Worker)
self.publisher.workers.append(fake_worker)
m_start_worker.side_effect = _fake_start_worker
self.publisher.start_worker() # Add a fake worker
self.publisher.msg_queue.put("Dummy1") # Add a fake job in the queue
self.publisher.adjust_pool_size()
self.assertEqual(self.publisher.num_workers, 1)
@patch.object(Publisher, "start_worker")
def test_check_workers_alive(self, m_start_worker):
self.publisher.max_worker = 1
self.publisher.min_worker = 1
fake_worker_dead = MagicMock(spec=Worker, is_alive=lambda: False)
fake_worker_alive = MagicMock(spec=Worker, is_alive=lambda: True)
def _fake_start_worker():
self.publisher.workers.append(fake_worker_dead)
yield
self.publisher.workers.append(fake_worker_alive)
yield
m_start_worker.side_effect = _fake_start_worker()
self.publisher.start_worker()
self.publisher.check_workers_alive()
self.assertEqual(self.publisher.num_workers, 1)
self.assertEqual(self.publisher.workers[0].is_alive(), True)
def test_start_worker(self):
self.publisher.start_worker()
self.assertEqual(len(self.publisher.workers), 1)
self.assertEqual(self.publisher.num_workers, 1)
def test_stop_worker(self):
self.publisher.start_worker()
self.publisher.start_worker()
self.publisher.stop_worker()
self.assertEqual(len(self.publisher.workers), 1)
self.assertEqual(self.publisher.num_workers, 1)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.publisher.base.os.environ.get")
def test_setup_nanoconfig_valid_using_default(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.publisher.use_nanoconfig_service = True
self.publisher.nanoconfig_service_endpoint = ""
self.publisher.nanoconfig_update_endpoint = ""
self.publisher.setup_socket()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE") # First call
m_env_getter.assert_called_with("NN_CONFIG_UPDATES") # Last call
self.assertEqual(m_env_setter.call_count, 0)
self.assertEqual(self.publisher.nanoconfig_service_endpoint,
"FAKE_NN_CONFIG_SERVICE")
self.assertEqual(self.publisher.nanoconfig_update_endpoint,
"FAKE_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.publisher.base.os.environ.get")
def test_setup_nanoconfig_valid_custom_values(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE",
"FAKE_NN_CONFIG_UPDATES"]
self.publisher.use_nanoconfig_service = True
self.publisher.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.publisher.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.publisher.setup_socket()
self.assertEqual(m_env_getter.call_count, 2)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_any_call("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
m_env_setter.assert_called_with("NN_CONFIG_UPDATES",
"CUSTOM_NN_CONFIG_UPDATES")
self.assertEqual(self.publisher.nanoconfig_service_endpoint,
"CUSTOM_NN_CONFIG_SERVICE")
self.assertEqual(self.publisher.nanoconfig_update_endpoint,
"CUSTOM_NN_CONFIG_UPDATES")
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.publisher.base.os.environ.get")
def test_setup_nanoconfig_invalid_service(self, m_env_getter,
m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.publisher.use_nanoconfig_service = True
self.publisher.nanoconfig_service_endpoint = ""
self.publisher.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES"
self.assertRaises(ValueError, self.publisher.setup_socket)
m_env_getter.assert_called_once_with("NN_CONFIG_SERVICE")
self.assertEqual(m_env_setter.call_count, 0)
@patch.object(os._Environ, "__setitem__")
@patch("watcher_metering.publisher.base.os.environ.get")
def test_setup_nanoconfig_invalid_update(self, m_env_getter, m_env_setter):
# Override default where it is set to False
m_env_getter.return_value = "" # Emulates empty ENV vars
self.publisher.use_nanoconfig_service = True
self.publisher.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE"
self.publisher.nanoconfig_update_endpoint = ""
self.assertRaises(ValueError, self.publisher.setup_socket)
m_env_getter.assert_any_call("NN_CONFIG_SERVICE")
m_env_getter.assert_called_with("NN_CONFIG_UPDATES")
m_env_setter.assert_called_once_with("NN_CONFIG_SERVICE",
"CUSTOM_NN_CONFIG_SERVICE")
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains a Google Cloud Storage hook."""
import functools
import gzip as gz
import os
import shutil
import time
import warnings
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from io import BytesIO
from os import path
from tempfile import NamedTemporaryFile
from typing import Callable, List, Optional, Sequence, Set, Tuple, TypeVar, Union, cast, overload
from urllib.parse import urlparse
from google.api_core.exceptions import NotFound
# not sure why but mypy complains on missing `storage` but it is clearly there and is importable
from google.cloud import storage # type: ignore[attr-defined]
from google.cloud.exceptions import GoogleCloudError
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.utils.helpers import normalize_directory_path
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils import timezone
from airflow.version import version
RT = TypeVar('RT')
T = TypeVar("T", bound=Callable)
# Use default timeout from google-cloud-storage
DEFAULT_TIMEOUT = 60
def _fallback_object_url_to_object_name_and_bucket_name(
object_url_keyword_arg_name='object_url',
bucket_name_keyword_arg_name='bucket_name',
object_name_keyword_arg_name='object_name',
) -> Callable[[T], T]:
"""
    Decorator factory that converts an object URL parameter into object name and bucket name parameters.
:param object_url_keyword_arg_name: Name of the object URL parameter
:param bucket_name_keyword_arg_name: Name of the bucket name parameter
:param object_name_keyword_arg_name: Name of the object name parameter
:return: Decorator
"""
def _wrapper(func: T):
@functools.wraps(func)
def _inner_wrapper(self: "GCSHook", *args, **kwargs) -> RT:
if args:
raise AirflowException(
"You must use keyword arguments in this methods rather than positional"
)
object_url = kwargs.get(object_url_keyword_arg_name)
bucket_name = kwargs.get(bucket_name_keyword_arg_name)
object_name = kwargs.get(object_name_keyword_arg_name)
if object_url and bucket_name and object_name:
raise AirflowException(
"The mutually exclusive parameters. `object_url`, `bucket_name` together "
"with `object_name` parameters are present. "
"Please provide `object_url` or `bucket_name` and `object_name`."
)
if object_url:
bucket_name, object_name = _parse_gcs_url(object_url)
kwargs[bucket_name_keyword_arg_name] = bucket_name
kwargs[object_name_keyword_arg_name] = object_name
del kwargs[object_url_keyword_arg_name]
if not object_name or not bucket_name:
raise TypeError(
f"{func.__name__}() missing 2 required positional arguments: "
f"'{bucket_name_keyword_arg_name}' and '{object_name_keyword_arg_name}' "
f"or {object_url_keyword_arg_name}"
)
if not object_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{object_name_keyword_arg_name}'"
)
if not bucket_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{bucket_name_keyword_arg_name}'"
)
return func(self, *args, **kwargs)
return cast(T, _inner_wrapper)
return _wrapper
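# Illustrative sketch, not part of the original module: methods decorated with
# _fallback_object_url_to_object_name_and_bucket_name(), such as GCSHook.provide_file below,
# accept either bucket_name/object_name or a single object_url keyword argument; the URL form is
# parsed with _parse_gcs_url() and rewritten into the two keyword arguments. The helper below is
# hypothetical, uses a made-up bucket, and is never invoked here (it would need real credentials).
def _example_object_url_fallback(hook: "GCSHook") -> None:
    with hook.provide_file(bucket_name="example-bucket", object_name="data/report.csv") as f:
        print(f.name)
    # Equivalent call expressed with the single object_url keyword argument:
    with hook.provide_file(object_url="gs://example-bucket/data/report.csv") as f:
        print(f.name)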
# A fake bucket to use in functions decorated by _fallback_object_url_to_object_name_and_bucket_name.
# This allows the 'bucket' argument to be of type str instead of Optional[str],
# making it easier to type hint the function body without dealing with the None
# case that can never happen at runtime.
PROVIDE_BUCKET: str = cast(str, None)
class GCSHook(GoogleBaseHook):
"""
Interact with Google Cloud Storage. This hook uses the Google Cloud
connection.
"""
_conn = None # type: Optional[storage.Client]
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
google_cloud_storage_conn_id: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=2,
)
gcp_conn_id = google_cloud_storage_conn_id
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
def get_conn(self) -> storage.Client:
"""Returns a Google Cloud Storage service object."""
if not self._conn:
self._conn = storage.Client(
credentials=self._get_credentials(), client_info=CLIENT_INFO, project=self.project_id
)
return self._conn
def copy(
self,
source_bucket: str,
source_object: str,
destination_bucket: Optional[str] = None,
destination_object: Optional[str] = None,
) -> None:
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:param source_object: The object to copy.
        :param destination_bucket: The destination bucket to copy the object to.
Can be omitted; then the same bucket is used.
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
f'Either source/destination bucket or source/destination object must be different, '
f'not both the same: bucket={source_bucket}, object={source_object}'
)
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
destination_object = source_bucket.copy_blob( # type: ignore[attr-defined]
blob=source_object, destination_bucket=destination_bucket, new_name=destination_object
)
self.log.info(
'Object %s in bucket %s copied to object %s in bucket %s',
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object.name, # type: ignore[union-attr]
destination_bucket.name, # type: ignore[union-attr]
)
def rewrite(
self,
source_bucket: str,
source_object: str,
destination_bucket: str,
destination_object: Optional[str] = None,
) -> None:
"""
        Has the same functionality as copy, except that it will work on files
over 5 TB, as well as when copying between locations and/or storage
classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:param source_object: The object to copy.
        :param destination_bucket: The destination bucket to copy the object to.
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
"""
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
f'Either source/destination bucket or source/destination object must be different, '
f'not both the same: bucket={source_bucket}, object={source_object}'
)
if not source_bucket or not source_object:
raise ValueError('source_bucket and source_object cannot be empty.')
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(blob_name=source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object)
self.log.info('Total Bytes: %s | Bytes Written: %s', total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object, token=token)
self.log.info('Total Bytes: %s | Bytes Written: %s', total_bytes, bytes_rewritten)
self.log.info(
'Object %s in bucket %s rewritten to object %s in bucket %s',
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object,
destination_bucket.name, # type: ignore[attr-defined]
)
@overload
def download(
self,
bucket_name: str,
object_name: str,
filename: None = None,
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: Optional[int] = 1,
) -> bytes:
...
@overload
def download(
self,
bucket_name: str,
object_name: str,
filename: str,
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: Optional[int] = 1,
) -> str:
...
def download(
self,
bucket_name: str,
object_name: str,
filename: Optional[str] = None,
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: Optional[int] = 1,
) -> Union[str, bytes]:
"""
Downloads a file from Google Cloud Storage.
When no filename is supplied, the operator loads the file into memory and returns its
content. When a filename is supplied, it writes the file to the specified location and
returns the location. For file sizes that exceed the available memory it is recommended
to write to a file.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
:param filename: If set, a local file path where the file should be written to.
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to download the file.
"""
# TODO: future improvement check file size before downloading,
# to check for local space availability
num_file_attempts = 0
while True:
try:
num_file_attempts += 1
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename:
blob.download_to_filename(filename, timeout=timeout)
self.log.info('File downloaded to %s', filename)
return filename
else:
return blob.download_as_bytes()
except GoogleCloudError:
if num_file_attempts == num_max_attempts:
self.log.error(
'Download attempt of object: %s from %s has failed. Attempt: %s, max %s.',
object_name,
                        bucket_name,
num_file_attempts,
num_max_attempts,
)
raise
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
def download_as_byte_array(
self,
bucket_name: str,
object_name: str,
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: Optional[int] = 1,
) -> bytes:
"""
        Downloads a file from Google Cloud Storage and returns its content as bytes.
        The whole object is loaded into memory, so for file sizes that exceed the
        available memory it is recommended to use ``download`` with a filename instead.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to download the file.
"""
# We do not pass filename, so will never receive string as response
return self.download(
bucket_name=bucket_name,
object_name=object_name,
chunk_size=chunk_size,
timeout=timeout,
num_max_attempts=num_max_attempts,
)
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file(
self,
bucket_name: str = PROVIDE_BUCKET,
object_name: Optional[str] = None,
object_url: Optional[str] = None,
dir: Optional[str] = None,
):
"""
        Downloads the file to a temporary directory and returns a file handle.
        You can use this method by passing the bucket_name and object_name parameters
        or just the object_url parameter.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
        :param object_url: File reference url. Must start with "gs://"
:param dir: The tmp sub directory to download the file to. (passed to NamedTemporaryFile)
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name, dir=dir) as tmp_file:
self.download(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
tmp_file.flush()
yield tmp_file
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file_and_upload(
self,
bucket_name: str = PROVIDE_BUCKET,
object_name: Optional[str] = None,
object_url: Optional[str] = None,
):
"""
        Creates a temporary file, returns a file handle and uploads the file's content
        on close.
        You can use this method by passing the bucket_name and object_name parameters
        or just the object_url parameter.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
        :param object_url: File reference url. Must start with "gs://"
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name) as tmp_file:
yield tmp_file
tmp_file.flush()
self.upload(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
def upload(
self,
bucket_name: str,
object_name: str,
filename: Optional[str] = None,
data: Optional[Union[str, bytes]] = None,
mime_type: Optional[str] = None,
gzip: bool = False,
encoding: str = 'utf-8',
chunk_size: Optional[int] = None,
timeout: Optional[int] = DEFAULT_TIMEOUT,
num_max_attempts: int = 1,
) -> None:
"""
Uploads a local file or file data as string or bytes to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:param object_name: The object name to set when uploading the file.
:param filename: The local file path to the file to be uploaded.
:param data: The file's data as a string or bytes to be uploaded.
:param mime_type: The file's mime type set when uploading the file.
:param gzip: Option to compress local file or file data for upload
:param encoding: bytes encoding for file data if provided as string
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to try to upload the file.
"""
def _call_with_retry(f: Callable[[], None]) -> None:
"""Helper functions to upload a file or a string with a retry mechanism and exponential back-off.
:param f: Callable that should be retried.
"""
num_file_attempts = 0
while num_file_attempts < num_max_attempts:
try:
num_file_attempts += 1
f()
except GoogleCloudError as e:
if num_file_attempts == num_max_attempts:
self.log.error(
                            'Upload attempt of object: %s to bucket %s has failed. Attempt: %s, max %s.',
                            object_name,
                            bucket_name,
num_file_attempts,
num_max_attempts,
)
raise e
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename and data:
raise ValueError(
"'filename' and 'data' parameter provided. Please "
"specify a single parameter, either 'filename' for "
"local file uploads or 'data' for file content uploads."
)
elif filename:
if not mime_type:
mime_type = 'application/octet-stream'
if gzip:
filename_gz = filename + '.gz'
with open(filename, 'rb') as f_in:
with gz.open(filename_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
_call_with_retry(
partial(blob.upload_from_filename, filename=filename, content_type=mime_type, timeout=timeout)
)
if gzip:
os.remove(filename)
self.log.info('File %s uploaded to %s in %s bucket', filename, object_name, bucket_name)
elif data:
if not mime_type:
mime_type = 'text/plain'
if gzip:
if isinstance(data, str):
data = bytes(data, encoding)
out = BytesIO()
with gz.GzipFile(fileobj=out, mode="w") as f:
f.write(data)
data = out.getvalue()
_call_with_retry(partial(blob.upload_from_string, data, content_type=mime_type, timeout=timeout))
self.log.info('Data stream uploaded to %s in %s bucket', object_name, bucket_name)
else:
raise ValueError("'filename' and 'data' parameter missing. One is required to upload to gcs.")
def exists(self, bucket_name: str, object_name: str) -> bool:
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
        :param object_name: The name of the blob to check in the Google Cloud
            Storage bucket.
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists()
def get_blob_update_time(self, bucket_name: str, object_name: str):
"""
Get the update time of a file in Google Cloud Storage
:param bucket_name: The Google Cloud Storage bucket where the object is.
        :param object_name: The name of the blob to get the update time for in the
            Google Cloud Storage bucket.
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
if blob is None:
raise ValueError(f"Object ({object_name}) not found in Bucket ({bucket_name})")
return blob.updated
def is_updated_after(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
        Checks if a blob was updated in Google Cloud Storage after the given time.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param ts: The timestamp to check against.
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False
def is_updated_between(
self, bucket_name: str, object_name: str, min_ts: datetime, max_ts: datetime
) -> bool:
"""
        Checks if a blob was updated in Google Cloud Storage within the given time interval.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param min_ts: The minimum timestamp to check against.
:param max_ts: The maximum timestamp to check against.
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not min_ts.tzinfo:
min_ts = min_ts.replace(tzinfo=timezone.utc)
if not max_ts.tzinfo:
max_ts = max_ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s is between %s and %s", blob_update_time, min_ts, max_ts)
if min_ts <= blob_update_time < max_ts:
return True
return False
def is_updated_before(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
        Checks if a blob was updated before the given time in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param ts: The timestamp to check against.
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s < %s", blob_update_time, ts)
if blob_update_time < ts:
return True
return False
def is_older_than(self, bucket_name: str, object_name: str, seconds: int) -> bool:
"""
        Check if the object is older than the given time.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param seconds: The time in seconds to check against
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
from datetime import timedelta
current_time = timezone.utcnow()
given_time = current_time - timedelta(seconds=seconds)
self.log.info("Verify object date: %s is older than %s", blob_update_time, given_time)
if blob_update_time < given_time:
return True
return False
def delete(self, bucket_name: str, object_name: str) -> None:
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:param object_name: name of the object to delete
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info('Blob %s deleted.', object_name)
def delete_bucket(self, bucket_name: str, force: bool = False) -> None:
"""
Delete a bucket object from the Google Cloud Storage.
:param bucket_name: name of the bucket which will be deleted
        :param force: if False (the default), deleting a non-empty bucket is not
            allowed; set force=True to allow deleting a non-empty bucket
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
self.log.info("Deleting %s bucket", bucket_name)
try:
bucket.delete(force=force)
self.log.info("Bucket %s has been deleted", bucket_name)
except NotFound:
self.log.info("Bucket %s not exists", bucket_name)
def list(self, bucket_name, versions=None, max_results=None, prefix=None, delimiter=None) -> list:
"""
        List all objects from the bucket with the given string prefix in name
:param bucket_name: bucket name
:param versions: if true, list all versions of the objects
:param max_results: max count of items to return in a single page of responses
:param prefix: prefix string which filters objects whose name begin with
this prefix
        :param delimiter: filters objects based on the delimiter (e.g. '.csv')
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
def list_by_timespan(
self,
bucket_name: str,
timespan_start: datetime,
timespan_end: datetime,
versions: Optional[bool] = None,
max_results: Optional[int] = None,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
) -> List[str]:
"""
        List all objects from the bucket with the given string prefix in name that were
updated in the time between ``timespan_start`` and ``timespan_end``.
:param bucket_name: bucket name
:param timespan_start: will return objects that were updated at or after this datetime (UTC)
:param timespan_end: will return objects that were updated before this datetime (UTC)
:param versions: if true, list all versions of the objects
:param max_results: max count of items to return in a single page of responses
:param prefix: prefix string which filters objects whose name begin with
this prefix
        :param delimiter: filters objects based on the delimiter (e.g. '.csv')
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
if timespan_start <= blob.updated.replace(tzinfo=timezone.utc) < timespan_end:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
def get_size(self, bucket_name: str, object_name: str) -> int:
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
"""
self.log.info('Checking the file size of object: %s in bucket_name: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_size = blob.size
self.log.info('The file size of %s is %s bytes.', object_name, blob_size)
return blob_size
def get_crc32c(self, bucket_name: str, object_name: str):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
"""
self.log.info(
'Retrieving the crc32c checksum of object_name: %s in bucket_name: %s',
object_name,
bucket_name,
)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_crc32c = blob.crc32c
self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
return blob_crc32c
def get_md5hash(self, bucket_name: str, object_name: str) -> str:
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
"""
self.log.info('Retrieving the MD5 hash of object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_md5hash = blob.md5_hash
self.log.info('The md5Hash of %s is %s', object_name, blob_md5hash)
return blob_md5hash
@GoogleBaseHook.fallback_to_default_project_id
def create_bucket(
self,
bucket_name: str,
resource: Optional[dict] = None,
storage_class: str = 'MULTI_REGIONAL',
location: str = 'US',
project_id: Optional[str] = None,
labels: Optional[dict] = None,
) -> str:
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace, so
you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:param project_id: The ID of the Google Cloud Project.
:param labels: User-provided labels, in key/value pairs.
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info(
'Creating Bucket: %s; Location: %s; Storage Class: %s', bucket_name, location, storage_class
)
# Add airflow-version label to the bucket
labels = labels or {}
labels['airflow-version'] = 'v' + version.replace('.', '-').replace('+', '-')
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item]) # type: ignore[index]
bucket.storage_class = storage_class
bucket.labels = labels
bucket.create(project=project_id, location=location)
return bucket.id
def insert_bucket_acl(
self, bucket_name: str, entity: str, role: str, user_project: Optional[str] = None
) -> None:
"""
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
:param bucket_name: Name of a bucket_name.
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
"""
self.log.info('Creating a new ACL entry in bucket: %s', bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket.acl.reload()
bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
bucket.acl.user_project = user_project
bucket.acl.save()
self.log.info('A new ACL entry created in bucket: %s', bucket_name)
def insert_object_acl(
self,
bucket_name: str,
object_name: str,
entity: str,
role: str,
generation: Optional[int] = None,
user_project: Optional[str] = None,
) -> None:
"""
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
:param bucket_name: Name of a bucket_name.
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:param generation: Optional. If present, selects a specific revision of this object.
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
"""
self.log.info('Creating a new ACL entry for object: %s in bucket: %s', object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name, generation=generation)
# Reload fetches the current ACL from Cloud Storage.
blob.acl.reload()
blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
blob.acl.user_project = user_project
blob.acl.save()
self.log.info('A new ACL entry created for object: %s in bucket: %s', object_name, bucket_name)
def compose(self, bucket_name: str, source_objects: List, destination_object: str) -> None:
"""
        Composes a list of existing objects into a new object in the same storage bucket_name.
Currently it only supports up to 32 objects that can be concatenated
in a single operation
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:param source_objects: The list of source objects that will be composed
into a single object.
:param destination_object: The path of the object if given.
"""
if not source_objects:
raise ValueError('source_objects cannot be empty.')
if not bucket_name or not destination_object:
raise ValueError('bucket_name and destination_object cannot be empty.')
self.log.info("Composing %s to %s in the bucket %s", source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[bucket.blob(blob_name=source_object) for source_object in source_objects]
)
self.log.info("Completed successfully.")
def sync(
self,
source_bucket: str,
destination_bucket: str,
source_object: Optional[str] = None,
destination_object: Optional[str] = None,
recursive: bool = True,
allow_overwrite: bool = False,
delete_extra_files: bool = False,
) -> None:
"""
Synchronizes the contents of the buckets.
Parameters ``source_object`` and ``destination_object`` describe the root sync directories. If they
are not passed, the entire bucket will be synchronized. If they are passed, they should point
to directories.
.. note::
The synchronization of individual files is not supported. Only entire directories can be
synchronized.
:param source_bucket: The name of the bucket containing the source objects.
:param destination_bucket: The name of the bucket containing the destination objects.
:param source_object: The root sync directory in the source bucket.
:param destination_object: The root sync directory in the destination bucket.
:param recursive: If True, subdirectories will be considered
:param allow_overwrite: if True, the files will be overwritten if a mismatched file is found.
By default, overwriting files is not allowed
        :param delete_extra_files: if True, deletes files from the destination that are not found in
            the source. By default extra files are not deleted.
.. note::
This option can delete data quickly if you specify the wrong source/destination combination.
:return: none
"""
client = self.get_conn()
# Create bucket object
source_bucket_obj = client.bucket(source_bucket)
destination_bucket_obj = client.bucket(destination_bucket)
# Normalize parameters when they are passed
source_object = normalize_directory_path(source_object)
destination_object = normalize_directory_path(destination_object)
        # Calculate the number of prefix characters to strip from blob names, because they carry
        # information about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
# Prepare synchronization plan
to_copy_blobs, to_delete_blobs, to_rewrite_blobs = self._prepare_sync_plan(
source_bucket=source_bucket_obj,
destination_bucket=destination_bucket_obj,
source_object=source_object,
destination_object=destination_object,
recursive=recursive,
)
self.log.info(
"Planned synchronization. To delete blobs count: %s, to upload blobs count: %s, "
"to rewrite blobs count: %s",
len(to_delete_blobs),
len(to_copy_blobs),
len(to_rewrite_blobs),
)
# Copy missing object to new bucket
if not to_copy_blobs:
self.log.info("Skipped blobs copying.")
else:
for blob in to_copy_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.copy(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs copied.")
# Delete redundant files
if not to_delete_blobs:
self.log.info("Skipped blobs deleting.")
elif delete_extra_files:
# TODO: Add batch. I tried to do it, but the Google library is not stable at the moment.
for blob in to_delete_blobs:
self.delete(blob.bucket.name, blob.name)
self.log.info("Blobs deleted.")
# Overwrite files that are different
if not to_rewrite_blobs:
self.log.info("Skipped blobs overwriting.")
elif allow_overwrite:
for blob in to_rewrite_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.rewrite(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs rewritten.")
self.log.info("Synchronization finished.")
def _calculate_sync_destination_path(
self, blob: storage.Blob, destination_object: Optional[str], source_object_prefix_len: int
) -> str:
return (
path.join(destination_object, blob.name[source_object_prefix_len:])
if destination_object
else blob.name[source_object_prefix_len:]
)
@staticmethod
def _prepare_sync_plan(
source_bucket: storage.Bucket,
destination_bucket: storage.Bucket,
source_object: Optional[str],
destination_object: Optional[str],
recursive: bool,
) -> Tuple[Set[storage.Blob], Set[storage.Blob], Set[storage.Blob]]:
        # Calculate the number of prefix characters to strip from blob names, because they carry
        # information about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
destination_object_prefix_len = len(destination_object) if destination_object else 0
delimiter = "/" if not recursive else None
# Fetch blobs list
source_blobs = list(source_bucket.list_blobs(prefix=source_object, delimiter=delimiter))
destination_blobs = list(
destination_bucket.list_blobs(prefix=destination_object, delimiter=delimiter)
)
# Create indexes that allow you to identify blobs based on their name
source_names_index = {a.name[source_object_prefix_len:]: a for a in source_blobs}
destination_names_index = {a.name[destination_object_prefix_len:]: a for a in destination_blobs}
# Create sets with names without parent object name
source_names = set(source_names_index.keys())
destination_names = set(destination_names_index.keys())
# Determine objects to copy and delete
to_copy = source_names - destination_names
to_delete = destination_names - source_names
to_copy_blobs = {source_names_index[a] for a in to_copy} # type: Set[storage.Blob]
to_delete_blobs = {destination_names_index[a] for a in to_delete} # type: Set[storage.Blob]
# Find names that are in both buckets
names_to_check = source_names.intersection(destination_names)
to_rewrite_blobs = set() # type: Set[storage.Blob]
        # Compare objects based on their crc32c checksum
for current_name in names_to_check:
source_blob = source_names_index[current_name]
destination_blob = destination_names_index[current_name]
# if the objects are different, save it
if source_blob.crc32c != destination_blob.crc32c:
to_rewrite_blobs.add(source_blob)
return to_copy_blobs, to_delete_blobs, to_rewrite_blobs
def gcs_object_is_directory(bucket: str) -> bool:
"""
Return True if given Google Cloud Storage URL (gs://<bucket>/<blob>)
is a directory or an empty bucket. Otherwise return False.
"""
_, blob = _parse_gcs_url(bucket)
return len(blob) == 0 or blob.endswith('/')
def _parse_gcs_url(gsurl: str) -> Tuple[str, str]:
"""
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
"""
parsed_url = urlparse(gsurl)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket name')
if parsed_url.scheme.lower() != "gs":
raise AirflowException(f"Schema must be to 'gs://': Current schema: '{parsed_url.scheme}://'")
bucket = parsed_url.netloc
# Remove leading '/' but NOT trailing one
blob = parsed_url.path.lstrip('/')
return bucket, blob
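# Illustrative sketch, not part of the original module: how the two URL helpers above behave.
# Bucket and object names are made up; the checks only run when this file is executed directly.
if __name__ == "__main__":
    assert _parse_gcs_url("gs://example-bucket/dir/file.csv") == ("example-bucket", "dir/file.csv")
    assert gcs_object_is_directory("gs://example-bucket/dir/")  # trailing slash -> directory
    assert gcs_object_is_directory("gs://example-bucket")  # empty blob -> empty bucket/directory
    assert not gcs_object_is_directory("gs://example-bucket/dir/file.csv")  # regular object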
|
|
#
# Copyright (C) 2016 Intel Corp. Isaku Yamahata <isaku.yamahata@gmail com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import testscenarios
from networking_odl.common import constants as const
from networking_odl.db import db
from networking_odl.journal import dependency_validations
from networking_odl.tests.unit import test_base_db
load_tests = testscenarios.load_tests_apply_scenarios
_NET_ID = 'NET_ID'
_NET_DATA = {'id': _NET_ID}
_SUBNET_ID = 'SUBNET_ID'
_SUBNET_DATA = {'network_id': _NET_ID}
_PORT_ID = 'PORT_ID'
_PORT_DATA = {'network_id': _NET_ID,
'fixed_ips': [{'subnet_id': _SUBNET_ID}]}
_PORT_DATA_DUPLICATE_SUBNET = {
'network_id': _NET_ID,
'fixed_ips': [{'subnet_id': _SUBNET_ID},
{'subnet_id': _SUBNET_ID}]
}
_ROUTER_ID = 'ROUTER_ID'
_ROUTER_DATA = {'id': 'ROUTER_ID',
'gw_port_id': 'GW_PORT_ID'}
_L2GW_ID = 'l2gw_id'
_L2GW_DATA = {'id': _L2GW_ID}
_L2GWCONN_ID = 'l2gwconn_id'
_L2GWCONN_DATA = {'id': _L2GWCONN_ID,
'network_id': _NET_ID,
'gateway_id': _L2GW_ID}
_TRUNK_ID = 'TRUNK_ID'
_SUBPORT_ID = 'CPORT_ID'
_TRUNK_DATA = {'trunk_id': _TRUNK_ID,
'port_id': _PORT_ID,
'sub_ports': [{'port_id': _SUBPORT_ID}]}
_BGPVPN_ID = 'BGPVPN_ID'
_SG_ID = 'SG_ID'
_SG_DATA = {'id': _SG_ID}
_SG_RULE_ID = 'SG_RULE_ID'
_SG_RULE_DATA = {'id': _SG_RULE_ID,
'security_group_id': _SG_ID}
def get_data(res_type, operation):
if res_type == const.ODL_NETWORK:
return [_NET_DATA]
elif res_type == const.ODL_SUBNET:
if operation == const.ODL_DELETE:
return [[_NET_ID]]
return [_SUBNET_DATA]
elif res_type == const.ODL_PORT:
# TODO(yamahata): test case of (ODL_port, ODL_DELETE) is missing
if operation == const.ODL_DELETE:
return [[_NET_ID, _SUBNET_ID]]
return [_PORT_DATA, _PORT_DATA_DUPLICATE_SUBNET]
elif res_type == const.ODL_ROUTER:
return [_ROUTER_DATA]
elif res_type == const.ODL_L2GATEWAY:
return [_L2GW_DATA]
elif res_type == const.ODL_L2GATEWAY_CONNECTION:
return [_L2GWCONN_DATA]
elif res_type == const.ODL_TRUNK:
if operation == const.ODL_DELETE:
return [[_PORT_ID, _SUBPORT_ID]]
return [_TRUNK_DATA]
elif res_type == const.ODL_BGPVPN:
if operation == const.ODL_DELETE:
return [[_NET_ID, _ROUTER_ID]]
else:
routers = []
networks = []
if operation == const.ODL_UPDATE:
routers = [_ROUTER_ID]
networks = [_NET_ID]
return [{'id': _BGPVPN_ID, 'networks': networks,
'routers': routers,
'route_distinguishers': ['100:1']}]
elif res_type == const.ODL_SG:
return [_SG_DATA]
elif res_type == const.ODL_SG_RULE:
if operation == const.ODL_DELETE:
return [[_SG_RULE_ID]]
return [_SG_RULE_DATA]
return [[]]
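# Illustrative sketch, not part of the original tests: get_data() returns the journal row payloads
# used to seed the dependency scenarios, e.g. a subnet create carries the full subnet dict while a
# subnet delete only carries its parent network id. The helper below is hypothetical and never called.
def _example_get_data_shapes():
    return (get_data(const.ODL_SUBNET, const.ODL_CREATE),  # [_SUBNET_DATA] -> [{'network_id': ...}]
            get_data(const.ODL_SUBNET, const.ODL_DELETE))  # [[_NET_ID]]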
def subnet_fail_network_dep(net_op, subnet_op):
return {'expected': 1,
'first_type': const.ODL_NETWORK,
'first_operation': net_op,
'first_id': _NET_ID,
'second_type': const.ODL_SUBNET,
'second_operation': subnet_op,
'second_id': _SUBNET_ID}
def subnet_succeed_network_dep(net_op, subnet_op):
return {'expected': 0,
'first_type': const.ODL_SUBNET,
'first_operation': subnet_op,
'first_id': _SUBNET_ID,
'second_type': const.ODL_NETWORK,
'second_operation': net_op,
'second_id': _NET_ID}
# TODO(vthapar) add tests for l2gw dependency validations
class BaseDependencyValidationsTestCase(object):
def test_dependency(self):
db.create_pending_row(
self.db_context, self.first_type, self.first_id,
self.first_operation,
get_data(self.first_type, self.first_operation))
for data in get_data(self.second_type, self.second_operation):
deps = dependency_validations.calculate(
self.db_context, self.second_operation, self.second_type,
self.second_id, data)
self.assertEqual(self.expected, len(deps))
class SubnetDependencyValidationsTestCase(
test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
scenarios = (
("subnet_create_depends_on_older_network_create",
subnet_fail_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
("subnet_create_depends_on_older_network_update",
subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
("subnet_create_depends_on_older_network_delete",
subnet_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
("subnet_create_doesnt_depend_on_newer_network_create",
subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
("subnet_create_doesnt_depend_on_newer_network_update",
subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
("subnet_create_doesnt_depend_on_newer_network_delete",
subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
("subnet_update_depends_on_older_network_create",
subnet_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
("subnet_update_depends_on_older_network_update",
subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
("subnet_update_depends_on_older_network_delete",
subnet_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
("subnet_update_doesnt_depend_on_newer_network_create",
subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
("subnet_update_doesnt_depend_on_newer_network_update",
subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
("subnet_update_doesnt_depend_on_newer_network_delete",
subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
("subnet_delete_doesnt_depend_on_older_network_create",
subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)),
("subnet_delete_doesnt_depend_on_older_network_update",
subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)),
("subnet_delete_doesnt_depend_on_newer_network_create",
subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)),
("subnet_delete_doesnt_depend_on_newer_network_update",
subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)),
)
def security_rule_fail_security_group_dep(sg_op, sgr_op):
return {'expected': 1,
'first_type': const.ODL_SG,
'first_operation': sg_op,
'first_id': _SG_ID,
'second_type': const.ODL_SG_RULE,
'second_operation': sgr_op,
'second_id': _SG_RULE_ID}
def security_rule_succeed_security_group_dep(sg_op, sgr_op):
return {'expected': 0,
'first_type': const.ODL_SG_RULE,
'first_operation': sgr_op,
'first_id': _SG_RULE_ID,
'second_type': const.ODL_SG,
'second_operation': sg_op,
'second_id': _SG_ID}
class SecurityRuleDependencyValidationsTestCase(
test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
scenarios = (
("security_rule_create_depends_on_older_security_group_create",
security_rule_fail_security_group_dep(const.ODL_CREATE,
const.ODL_CREATE)),
("security_rule_create_depends_on_older_security_group_update",
security_rule_fail_security_group_dep(const.ODL_UPDATE,
const.ODL_CREATE)),
("security_rule_create_depends_on_older_security_group_delete",
security_rule_fail_security_group_dep(const.ODL_DELETE,
const.ODL_CREATE)),
("security_rule_create_doesnt_depend_on_newer_security_group_create",
security_rule_succeed_security_group_dep(const.ODL_CREATE,
const.ODL_CREATE)),
("security_rule_create_doesnt_depend_on_newer_security_group_update",
security_rule_succeed_security_group_dep(const.ODL_UPDATE,
const.ODL_CREATE)),
("security_rule_create_doesnt_depend_on_newer_security_group_delete",
security_rule_succeed_security_group_dep(const.ODL_DELETE,
const.ODL_CREATE)),
("security_rule_update_depends_on_older_security_group_create",
security_rule_fail_security_group_dep(const.ODL_CREATE,
const.ODL_UPDATE)),
("security_rule_update_depends_on_older_security_group_update",
security_rule_fail_security_group_dep(const.ODL_UPDATE,
const.ODL_UPDATE)),
("security_rule_update_depends_on_older_security_group_delete",
security_rule_fail_security_group_dep(const.ODL_DELETE,
const.ODL_UPDATE)),
("security_rule_update_doesnt_depend_on_newer_security_group_create",
security_rule_succeed_security_group_dep(const.ODL_CREATE,
const.ODL_UPDATE)),
("security_rule_update_doesnt_depend_on_newer_security_group_update",
security_rule_succeed_security_group_dep(const.ODL_UPDATE,
const.ODL_UPDATE)),
("security_rule_update_doesnt_depend_on_newer_security_group_delete",
security_rule_succeed_security_group_dep(const.ODL_DELETE,
const.ODL_UPDATE)),
("security_rule_delete_doesnt_depend_on_older_security_group_create",
security_rule_succeed_security_group_dep(const.ODL_CREATE,
const.ODL_DELETE)),
("security_rule_delete_doesnt_depend_on_older_security_group_update",
security_rule_succeed_security_group_dep(const.ODL_UPDATE,
const.ODL_DELETE)),
("security_rule_delete_doesnt_depend_on_newer_security_group_create",
security_rule_succeed_security_group_dep(const.ODL_CREATE,
const.ODL_DELETE)),
("security_rule_delete_doesnt_depend_on_newer_security_group_update",
security_rule_succeed_security_group_dep(const.ODL_UPDATE,
const.ODL_DELETE)),
)
def port_fail_network_dep(net_op, port_op):
return {'expected': 1,
'first_type': const.ODL_NETWORK,
'first_operation': net_op,
'first_id': _NET_ID,
'second_type': const.ODL_PORT,
'second_operation': port_op,
'second_id': _PORT_ID}
def port_succeed_network_dep(net_op, port_op):
return {'expected': 0,
'first_type': const.ODL_PORT,
'first_operation': port_op,
'first_id': _PORT_ID,
'second_type': const.ODL_NETWORK,
'second_operation': net_op,
'second_id': _NET_ID}
def port_fail_subnet_dep(subnet_op, port_op):
return {'expected': 1,
'first_type': const.ODL_SUBNET,
'first_operation': subnet_op,
'first_id': _SUBNET_ID,
'second_type': const.ODL_PORT,
'second_operation': port_op,
'second_id': _PORT_ID}
def port_succeed_subnet_dep(subnet_op, port_op):
return {'expected': 0,
'first_type': const.ODL_PORT,
'first_operation': port_op,
'first_id': _PORT_ID,
'second_type': const.ODL_SUBNET,
'second_operation': subnet_op,
'second_id': _SUBNET_ID}
class PortDependencyValidationsTestCase(
test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
scenarios = (
("port_create_depends_on_older_network_create",
port_fail_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
("port_create_depends_on_older_network_update",
port_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
("port_create_depends_on_older_network_delete",
port_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
("port_create_doesnt_depend_on_newer_network_create",
port_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
("port_create_doesnt_depend_on_newer_network_update",
port_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
("port_create_doesnt_depend_on_newer_network_delete",
port_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
("port_update_depends_on_older_network_create",
port_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
("port_update_depends_on_older_network_update",
port_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
("port_update_depends_on_older_network_delete",
port_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
("port_update_doesnt_depend_on_newer_network_create",
port_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
("port_update_doesnt_depend_on_newer_network_update",
port_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
("port_update_doesnt_depend_on_newer_network_delete",
port_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
("port_create_depends_on_older_subnet_create",
port_fail_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)),
("port_create_depends_on_older_subnet_update",
port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)),
("port_create_depends_on_older_subnet_delete",
port_fail_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)),
("port_create_doesnt_depend_on_newer_subnet_create",
port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)),
("port_create_doesnt_depend_on_newer_subnet_update",
port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)),
("port_create_doesnt_depend_on_newer_subnet_delete",
port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)),
("port_update_depends_on_older_subnet_create",
port_fail_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)),
("port_update_depends_on_older_subnet_update",
port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
("port_update_depends_on_older_subnet_delete",
port_fail_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)),
("port_update_doesnt_depend_on_newer_subnet_create",
port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)),
("port_update_doesnt_depend_on_newer_subnet_update",
port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
("port_update_doesnt_depend_on_newer_subnet_delete",
port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)),
)
def trunk_dep(first_type, second_type, first_op, second_op, result,
sub_port=False):
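    # When sub_port is True, the ODL_PORT side is identified by the subport
    # ID instead of the parent port ID; the subport-related trunk
    # update/delete scenarios below rely on this.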
expected = {'fail': 1, 'pass': 0}
port_id = _SUBPORT_ID if sub_port else _PORT_ID
type_id = {const.ODL_PORT: port_id,
const.ODL_TRUNK: _TRUNK_ID}
return {'expected': expected[result],
'first_type': first_type,
'first_operation': first_op,
'first_id': type_id[first_type],
'second_type': second_type,
'second_operation': second_op,
'second_id': type_id[second_type]}
class TrunkDependencyValidationsTestCase(
test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
scenarios = (
("trunk_create_depends_on_older_port_create",
trunk_dep(const.ODL_PORT, const.ODL_TRUNK,
const.ODL_CREATE, const.ODL_CREATE, 'fail')),
("trunk_create_doesnt_depend_on_newer_port_create",
trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
const.ODL_CREATE, const.ODL_CREATE, 'pass')),
("trunk_create_doesnt_depend_on_port_update",
trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
const.ODL_CREATE, const.ODL_UPDATE, 'pass')),
("trunk_create_doesnt_depend_on_newer_port_delete",
trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
const.ODL_CREATE, const.ODL_DELETE, 'pass')),
# TODO(vthapar): add more/better validations for subport
# trunk update means subport add/delete
("trunk_update_depends_on_older_trunk_create",
trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK,
const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)),
("trunk_update_depends_on_older_port_create",
trunk_dep(const.ODL_PORT, const.ODL_TRUNK,
const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)),
("trunk_update_doesnt_depend_on_newer_port_create",
trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
const.ODL_UPDATE, const.ODL_CREATE, 'pass', True)),
("trunk_update_doesnt_depend_on_port_update",
trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
const.ODL_UPDATE, const.ODL_UPDATE, 'pass', True)),
("trunk_update_doesnt_depend_on_newer_port_delete",
trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
const.ODL_UPDATE, const.ODL_DELETE, 'pass', True)),
# trunk delete cases
("trunk_delete_depends_on_older_trunk_create",
trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK,
const.ODL_CREATE, const.ODL_DELETE, 'fail', True)),
("trunk_delete_depends_on_older_trunk_update",
trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK,
const.ODL_UPDATE, const.ODL_DELETE, 'fail', True)),
("trunk_delete_doesnt_depend_on_older_port_create",
trunk_dep(const.ODL_PORT, const.ODL_TRUNK,
const.ODL_CREATE, const.ODL_DELETE, 'pass')),
)
def l2gw_dep(first_type, second_type, first_op, second_op, result):
expected = {'fail': 1, 'pass': 0}
type_id = {const.ODL_NETWORK: _NET_ID,
const.ODL_L2GATEWAY: _L2GW_ID,
const.ODL_L2GATEWAY_CONNECTION: _L2GWCONN_ID}
return {'expected': expected[result],
'first_type': first_type,
'first_operation': first_op,
'first_id': type_id[first_type],
'second_type': second_type,
'second_operation': second_op,
'second_id': type_id[second_type]}
class L2GWDependencyValidationsTestCase(
test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
scenarios = (
("L2GWConn_create_depends_on_older_network_create",
l2gw_dep(const.ODL_NETWORK, const.ODL_L2GATEWAY_CONNECTION,
const.ODL_CREATE, const.ODL_CREATE, 'fail')),
("L2GWConn_create_depends_on_older_L2GW_create",
l2gw_dep(const.ODL_L2GATEWAY, const.ODL_L2GATEWAY_CONNECTION,
const.ODL_CREATE, const.ODL_CREATE, 'fail')),
("L2GWConn_create_doesnt_depend_on_newer_network_create",
l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_NETWORK,
const.ODL_CREATE, const.ODL_CREATE, 'pass')),
("L2GWConn_create_doesnt_depend_on_newer_L2GW_create",
l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_L2GATEWAY,
const.ODL_CREATE, const.ODL_CREATE, 'pass')),
)
# TODO(vthapar): Refactor *_dep into a common method
def bgpvpn_dep(first_type, second_type, first_op, second_op, result):
expected = {'fail': 1, 'pass': 0}
type_id = {const.ODL_NETWORK: _NET_ID,
const.ODL_ROUTER: _ROUTER_ID,
const.ODL_BGPVPN: _BGPVPN_ID}
return {'expected': expected[result],
'first_type': first_type,
'first_operation': first_op,
'first_id': type_id[first_type],
'second_type': second_type,
'second_operation': second_op,
'second_id': type_id[second_type]}
class BGPVPNDependencyValidationsTestCase(
test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
scenarios = (
("bgpvpn_create_doesnt_depend_on_older_network_create",
bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_CREATE, 'pass')),
("bgpvpn_create_doesnt_depend_on_newer_network_create",
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK,
const.ODL_CREATE, const.ODL_CREATE, 'pass')),
("bgpvpn_create_doesnt_depend_on_older_router_create",
bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_CREATE, 'pass')),
("bgpvpn_create_doesnt_depend_on_newer_router_create",
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER,
const.ODL_CREATE, const.ODL_CREATE, 'pass')),
("bgpvpn_update_depends_on_older_bgpvpn_create",
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_UPDATE, 'fail')),
("bgpvpn_update_depends_on_older_network_create",
bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_UPDATE, 'fail')),
("bgpvpn_update_doesnt_depend_on_newer_network_create",
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK,
const.ODL_UPDATE, const.ODL_CREATE, 'pass')),
("bgpvpn_update_depends_on_older_router_create",
bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_UPDATE, 'fail')),
("bgpvpn_update_doesnt_depend_on_newer_router_create",
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER,
const.ODL_UPDATE, const.ODL_CREATE, 'pass')),
# bgpvpn delete cases
("bgpvpn_delete_depends_on_older_bgpvpn_create",
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_DELETE, 'fail')),
("bgpvpn_delete_depends_on_older_bgpvpn_update",
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN,
const.ODL_UPDATE, const.ODL_DELETE, 'fail')),
("bgpvpn_delete_doesnt_depend_on_older_network_create",
bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_DELETE, 'pass')),
("bgpvpn_delete_doesnt_depend_on_older_router_create",
bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN,
const.ODL_CREATE, const.ODL_DELETE, 'pass')),
)
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from mox3 import mox
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import instance
from nova.objects import instance_info_cache
from nova.objects import pci_device
from nova.objects import security_group
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_instance_fault
from nova.tests.unit.objects import test_instance_info_cache
from nova.tests.unit.objects import test_instance_numa_topology
from nova.tests.unit.objects import test_instance_pci_requests
from nova.tests.unit.objects import test_migration_context as test_mig_ctxt
from nova.tests.unit.objects import test_objects
from nova.tests.unit.objects import test_security_group
from nova.tests.unit.objects import test_vcpu_model
from nova.tests import uuidsentinel as uuids
from nova import utils
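# Mixin holding the Instance object tests; the concrete test classes that
# supply self.context, self.mox, self.flags, etc. are defined elsewhere in
# the original module and are not part of this excerpt.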
class _TestInstanceObject(object):
@property
def fake_instance(self):
db_inst = fake_instance.fake_db_instance(id=2,
access_ip_v4='1.2.3.4',
access_ip_v6='::1')
db_inst['uuid'] = uuids.db_instance
db_inst['cell_name'] = 'api!child'
db_inst['terminated_at'] = None
db_inst['deleted_at'] = None
db_inst['created_at'] = None
db_inst['updated_at'] = None
db_inst['launched_at'] = datetime.datetime(1955, 11, 12,
22, 4, 0)
db_inst['deleted'] = False
db_inst['security_groups'] = []
db_inst['pci_devices'] = []
db_inst['user_id'] = self.context.user_id
db_inst['project_id'] = self.context.project_id
db_inst['tags'] = []
db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
instance_uuid=db_inst['uuid'])
db_inst['system_metadata'] = {
'image_name': 'os2-warp',
'image_min_ram': 100,
'image_hw_disk_bus': 'ide',
'image_hw_vif_model': 'ne2k_pci',
}
return db_inst
def test_datetime_deserialization(self):
red_letter_date = timeutils.parse_isotime(
utils.isotime(datetime.datetime(1955, 11, 5)))
inst = objects.Instance(uuid=uuids.instance,
launched_at=red_letter_date)
primitive = inst.obj_to_primitive()
expected = {'nova_object.name': 'Instance',
'nova_object.namespace': 'nova',
'nova_object.version': inst.VERSION,
'nova_object.data':
{'uuid': uuids.instance,
'launched_at': '1955-11-05T00:00:00Z'},
'nova_object.changes': ['launched_at', 'uuid']}
self.assertJsonEqual(primitive, expected)
inst2 = objects.Instance.obj_from_primitive(primitive)
self.assertIsInstance(inst2.launched_at, datetime.datetime)
self.assertEqual(red_letter_date, inst2.launched_at)
def test_ip_deserialization(self):
inst = objects.Instance(uuid=uuids.instance, access_ip_v4='1.2.3.4',
access_ip_v6='::1')
primitive = inst.obj_to_primitive()
expected = {'nova_object.name': 'Instance',
'nova_object.namespace': 'nova',
'nova_object.version': inst.VERSION,
'nova_object.data':
{'uuid': uuids.instance,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': '::1'},
'nova_object.changes': ['uuid', 'access_ip_v6',
'access_ip_v4']}
self.assertJsonEqual(primitive, expected)
inst2 = objects.Instance.obj_from_primitive(primitive)
self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress)
self.assertIsInstance(inst2.access_ip_v6, netaddr.IPAddress)
self.assertEqual(netaddr.IPAddress('1.2.3.4'), inst2.access_ip_v4)
self.assertEqual(netaddr.IPAddress('::1'), inst2.access_ip_v6)
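    # The mox-based tests below follow mox's record/replay pattern: expected
    # db calls are recorded on the stubbed functions, ReplayAll() switches to
    # replay mode, and the recorded expectations are verified when the test
    # completes.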
def test_get_without_expected(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, 'uuid',
columns_to_join=[]
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, 'uuid',
expected_attrs=[])
for attr in instance.INSTANCE_OPTIONAL_ATTRS:
self.assertFalse(inst.obj_attr_is_set(attr))
def test_get_with_expected(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
self.mox.StubOutWithMock(
db, 'instance_extra_get_by_instance_uuid')
exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
exp_cols.remove('fault')
exp_cols.remove('numa_topology')
exp_cols.remove('pci_requests')
exp_cols.remove('vcpu_model')
exp_cols.remove('ec2_ids')
exp_cols.remove('migration_context')
exp_cols = list(filter(lambda x: 'flavor' not in x, exp_cols))
exp_cols.extend(['extra', 'extra.numa_topology', 'extra.pci_requests',
'extra.flavor', 'extra.vcpu_model',
'extra.migration_context'])
fake_topology = (test_instance_numa_topology.
fake_db_topology['numa_topology'])
fake_requests = jsonutils.dumps(test_instance_pci_requests.
fake_pci_requests)
fake_flavor = jsonutils.dumps(
{'cur': objects.Flavor().obj_to_primitive(),
'old': None, 'new': None})
fake_vcpu_model = jsonutils.dumps(
test_vcpu_model.fake_vcpumodel.obj_to_primitive())
fake_mig_context = jsonutils.dumps(
test_mig_ctxt.fake_migration_context_obj.obj_to_primitive())
fake_service = {'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': False, 'id': 123,
'host': 'fake-host', 'binary': 'nova-fake',
'topic': 'fake-service-topic', 'report_count': 1,
'forced_down': False, 'disabled': False,
'disabled_reason': None, 'last_seen_up': None,
'version': 1,
}
fake_instance = dict(self.fake_instance,
services=[fake_service],
extra={
'numa_topology': fake_topology,
'pci_requests': fake_requests,
'flavor': fake_flavor,
'vcpu_model': fake_vcpu_model,
'migration_context': fake_mig_context,
})
db.instance_get_by_uuid(
self.context, 'uuid',
columns_to_join=exp_cols).AndReturn(fake_instance)
fake_faults = test_instance_fault.fake_faults
db.instance_fault_get_by_instance_uuids(
self.context, [fake_instance['uuid']]
).AndReturn(fake_faults)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(
self.context, 'uuid',
expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
for attr in instance.INSTANCE_OPTIONAL_ATTRS:
self.assertTrue(inst.obj_attr_is_set(attr))
self.assertEqual(123, inst.services[0].id)
def test_lazy_load_services_on_deleted_instance(self):
# We should avoid trying to hit the database to reload the instance
# and just set the services attribute to an empty list.
instance = objects.Instance(self.context, uuid=uuids.instance,
deleted=True)
self.assertEqual(0, len(instance.services))
def test_get_by_id(self):
self.mox.StubOutWithMock(db, 'instance_get')
db.instance_get(self.context, 'instid',
columns_to_join=['info_cache',
'security_groups']
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = objects.Instance.get_by_id(self.context, 'instid')
self.assertEqual(self.fake_instance['uuid'], inst.uuid)
def test_load(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fake_uuid = self.fake_instance['uuid']
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(self.fake_instance)
fake_inst2 = dict(self.fake_instance,
metadata=[{'key': 'foo', 'value': 'bar'}])
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['metadata']
).AndReturn(fake_inst2)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
self.assertFalse(hasattr(inst, '_obj_metadata'))
meta = inst.metadata
self.assertEqual({'foo': 'bar'}, meta)
self.assertTrue(hasattr(inst, '_obj_metadata'))
# Make sure we don't run load again
meta2 = inst.metadata
self.assertEqual({'foo': 'bar'}, meta2)
def test_load_invalid(self):
inst = objects.Instance(context=self.context, uuid=uuids.instance)
self.assertRaises(exception.ObjectActionError,
inst.obj_load_attr, 'foo')
def test_get_remote(self):
# isotime doesn't have microseconds and is always UTC
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fake_instance = self.fake_instance
db.instance_get_by_uuid(self.context, uuids.instance,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_instance)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, uuids.instance)
self.assertEqual(fake_instance['id'], inst.id)
self.assertEqual(fake_instance['launched_at'],
inst.launched_at.replace(tzinfo=None))
self.assertEqual(fake_instance['access_ip_v4'],
str(inst.access_ip_v4))
self.assertEqual(fake_instance['access_ip_v6'],
str(inst.access_ip_v6))
def test_refresh(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fake_uuid = self.fake_instance['uuid']
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(dict(self.fake_instance,
host='orig-host'))
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(dict(self.fake_instance,
host='new-host'))
self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
'refresh')
instance_info_cache.InstanceInfoCache.refresh()
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
self.assertEqual('orig-host', inst.host)
inst.refresh()
self.assertEqual('new-host', inst.host)
self.assertEqual(set([]), inst.obj_what_changed())
def test_refresh_does_not_recurse(self):
inst = objects.Instance(context=self.context, uuid=uuids.instance,
metadata={})
inst_copy = objects.Instance()
inst_copy.uuid = inst.uuid
self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid')
objects.Instance.get_by_uuid(self.context, uuid=inst.uuid,
expected_attrs=['metadata'],
use_slave=False
).AndReturn(inst_copy)
self.mox.ReplayAll()
self.assertRaises(exception.OrphanedObjectError, inst.refresh)
def _save_test_helper(self, cell_type, save_kwargs):
"""Common code for testing save() for cells/non-cells."""
if cell_type:
self.flags(enable=True, cell_type=cell_type, group='cells')
else:
self.flags(enable=False, group='cells')
old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
vm_state='old', task_state='old')
fake_uuid = old_ref['uuid']
expected_updates = dict(vm_state='meow', task_state='wuff',
user_data='new')
new_ref = dict(old_ref, host='newhost', **expected_updates)
exp_vm_state = save_kwargs.get('expected_vm_state')
exp_task_state = save_kwargs.get('expected_task_state')
admin_reset = save_kwargs.get('admin_state_reset', False)
if exp_vm_state:
expected_updates['expected_vm_state'] = exp_vm_state
if exp_task_state:
if (exp_task_state == 'image_snapshot' and
'instance_version' in save_kwargs and
save_kwargs['instance_version'] == '1.9'):
expected_updates['expected_task_state'] = [
'image_snapshot', 'image_snapshot_pending']
else:
expected_updates['expected_task_state'] = exp_task_state
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
self.mox.StubOutWithMock(cells_api_mock,
'instance_update_at_top')
self.mox.StubOutWithMock(cells_api_mock,
'instance_update_from_api')
self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
use_mock_anything=True)
self.mox.StubOutWithMock(notifications, 'send_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates,
columns_to_join=['info_cache', 'security_groups',
'system_metadata', 'extra', 'extra.flavor']
).AndReturn((old_ref, new_ref))
if cell_type == 'api':
cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
cells_api_mock.instance_update_from_api(
self.context, mox.IsA(objects.Instance),
exp_vm_state, exp_task_state, admin_reset)
elif cell_type == 'compute':
cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
cells_api_mock.instance_update_at_top(self.context,
mox.IsA(objects.Instance))
notifications.send_update(self.context, mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'])
if 'instance_version' in save_kwargs:
inst.VERSION = save_kwargs.pop('instance_version')
self.assertEqual('old', inst.task_state)
self.assertEqual('old', inst.vm_state)
self.assertEqual('old', inst.user_data)
inst.vm_state = 'meow'
inst.task_state = 'wuff'
inst.user_data = 'new'
save_kwargs.pop('context', None)
inst.save(**save_kwargs)
self.assertEqual('newhost', inst.host)
self.assertEqual('meow', inst.vm_state)
self.assertEqual('wuff', inst.task_state)
self.assertEqual('new', inst.user_data)
# NOTE(danms): Ignore flavor migrations for the moment
self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor']))
def test_save(self):
self._save_test_helper(None, {})
def test_save_in_api_cell(self):
self._save_test_helper('api', {})
def test_save_in_compute_cell(self):
self._save_test_helper('compute', {})
def test_save_exp_vm_state(self):
self._save_test_helper(None, {'expected_vm_state': ['meow']})
def test_save_exp_task_state(self):
self._save_test_helper(None, {'expected_task_state': ['meow']})
def test_save_exp_vm_state_api_cell(self):
self._save_test_helper('api', {'expected_vm_state': ['meow']})
def test_save_exp_task_state_api_cell(self):
self._save_test_helper('api', {'expected_task_state': ['meow']})
def test_save_exp_task_state_api_cell_admin_reset(self):
self._save_test_helper('api', {'admin_state_reset': True})
def test_save_rename_sends_notification(self):
# Tests that simply changing the 'display_name' on the instance
# will send a notification.
self.flags(enable=False, group='cells')
old_ref = dict(self.fake_instance, display_name='hello')
fake_uuid = old_ref['uuid']
expected_updates = dict(display_name='goodbye')
new_ref = dict(old_ref, **expected_updates)
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(notifications, 'send_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates,
columns_to_join=['info_cache', 'security_groups',
'system_metadata', 'extra', 'extra.flavor']
).AndReturn((old_ref, new_ref))
notifications.send_update(self.context, mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'],
use_slave=False)
self.assertEqual('hello', inst.display_name)
inst.display_name = 'goodbye'
inst.save()
self.assertEqual('goodbye', inst.display_name)
# NOTE(danms): Ignore flavor migrations for the moment
self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor']))
def test_save_related_object_if_none(self):
with mock.patch.object(objects.Instance, '_save_pci_requests'
) as save_mock:
inst = objects.Instance()
inst = objects.Instance._from_db_object(self.context, inst,
self.fake_instance)
inst.pci_requests = None
inst.save()
self.assertTrue(save_mock.called)
@mock.patch('nova.db.instance_update_and_get_original')
@mock.patch.object(instance.Instance, '_from_db_object')
def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
# NOTE(danms): This tests that we don't update the pci_devices
# field from the contents of the database. This is not because we
# don't necessarily want to, but because the way pci_devices is
# currently implemented it causes versioning issues. When that is
# resolved, this test should go away.
mock_update.return_value = None, None
inst = objects.Instance(context=self.context, id=123)
inst.uuid = uuids.test_instance_not_refresh
inst.pci_devices = pci_device.PciDeviceList()
inst.save()
self.assertNotIn('pci_devices',
mock_fdo.call_args_list[0][1]['expected_attrs'])
@mock.patch('nova.db.instance_extra_update_by_uuid')
@mock.patch('nova.db.instance_update_and_get_original')
@mock.patch.object(instance.Instance, '_from_db_object')
def test_save_updates_numa_topology(self, mock_fdo, mock_update,
mock_extra_update):
fake_obj_numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([1]), memory=128)])
fake_obj_numa_topology.instance_uuid = uuids.instance
jsonified = fake_obj_numa_topology._to_json()
mock_update.return_value = None, None
inst = objects.Instance(
context=self.context, id=123, uuid=uuids.instance)
inst.numa_topology = fake_obj_numa_topology
inst.save()
        # NOTE(sdague): the json representation of the nova NUMA object
        # isn't stable from a string-comparison perspective.
        # There are sets which get converted to lists,
# and based on platform differences may show up in different
# orders. So we can't have mock do the comparison. Instead
# manually compare the final parameter using our json equality
# operator which does the right thing here.
mock_extra_update.assert_called_once_with(
self.context, inst.uuid, mock.ANY)
called_arg = mock_extra_update.call_args_list[0][0][2]['numa_topology']
self.assertJsonEqual(called_arg, jsonified)
mock_extra_update.reset_mock()
inst.numa_topology = None
inst.save()
mock_extra_update.assert_called_once_with(
self.context, inst.uuid, {'numa_topology': None})
@mock.patch('nova.db.instance_extra_update_by_uuid')
def test_save_vcpu_model(self, mock_update):
inst = fake_instance.fake_instance_obj(self.context)
inst.vcpu_model = test_vcpu_model.fake_vcpumodel
inst.save()
self.assertTrue(mock_update.called)
self.assertEqual(1, mock_update.call_count)
actual_args = mock_update.call_args
self.assertEqual(self.context, actual_args[0][0])
self.assertEqual(inst.uuid, actual_args[0][1])
self.assertEqual(['vcpu_model'], list(actual_args[0][2].keys()))
self.assertJsonEqual(jsonutils.dumps(
test_vcpu_model.fake_vcpumodel.obj_to_primitive()),
actual_args[0][2]['vcpu_model'])
mock_update.reset_mock()
inst.vcpu_model = None
inst.save()
mock_update.assert_called_once_with(
self.context, inst.uuid, {'vcpu_model': None})
@mock.patch('nova.db.instance_extra_update_by_uuid')
def test_save_migration_context_model(self, mock_update):
inst = fake_instance.fake_instance_obj(self.context)
inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
self.context)
inst.save()
self.assertTrue(mock_update.called)
self.assertEqual(1, mock_update.call_count)
actual_args = mock_update.call_args
self.assertEqual(self.context, actual_args[0][0])
self.assertEqual(inst.uuid, actual_args[0][1])
self.assertEqual(['migration_context'], list(actual_args[0][2].keys()))
self.assertIsInstance(
objects.MigrationContext.obj_from_db_obj(
actual_args[0][2]['migration_context']),
objects.MigrationContext)
mock_update.reset_mock()
inst.migration_context = None
inst.save()
mock_update.assert_called_once_with(
self.context, inst.uuid, {'migration_context': None})
def test_save_flavor_skips_unchanged_flavors(self):
inst = objects.Instance(context=self.context,
flavor=objects.Flavor())
inst.obj_reset_changes()
with mock.patch('nova.db.instance_extra_update_by_uuid') as mock_upd:
inst.save()
self.assertFalse(mock_upd.called)
@mock.patch.object(notifications, 'send_update')
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_from_api')
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_at_top')
@mock.patch.object(db, 'instance_update_and_get_original')
def _test_skip_cells_sync_helper(self, mock_db_update, mock_update_at_top,
mock_update_from_api, mock_notif_update, cell_type):
self.flags(enable=True, cell_type=cell_type, group='cells')
inst = fake_instance.fake_instance_obj(self.context, cell_name='fake')
inst.vm_state = 'foo'
inst.task_state = 'bar'
inst.cell_name = 'foo!bar@baz'
old_ref = dict(base.obj_to_primitive(inst), vm_state='old',
task_state='old')
new_ref = dict(old_ref, vm_state='foo', task_state='bar')
newer_ref = dict(new_ref, vm_state='bar', task_state='foo')
mock_db_update.side_effect = [(old_ref, new_ref), (new_ref, newer_ref)]
with inst.skip_cells_sync():
inst.save()
mock_update_at_top.assert_has_calls([])
mock_update_from_api.assert_has_calls([])
self.assertFalse(mock_notif_update.called)
inst.vm_state = 'bar'
inst.task_state = 'foo'
def fake_update_from_api(context, instance, expected_vm_state,
expected_task_state, admin_state_reset):
self.assertEqual('foo!bar@baz', instance.cell_name)
            # This is re-mocked so that cell_name can be checked above. Since
            # instance objects have no equality testing,
            # assert_called_once_with doesn't work.
with mock.patch.object(cells_rpcapi.CellsAPI,
'instance_update_from_api',
side_effect=fake_update_from_api) as fake_update_from_api:
inst.save()
self.assertEqual('foo!bar@baz', inst.cell_name)
self.assertTrue(mock_notif_update.called)
if cell_type == 'compute':
mock_update_at_top.assert_called_once_with(self.context, mock.ANY)
# Compare primitives since we can't check instance object equality
expected_inst_p = base.obj_to_primitive(inst)
actual_inst = mock_update_at_top.call_args[0][1]
actual_inst_p = base.obj_to_primitive(actual_inst)
self.assertEqual(expected_inst_p, actual_inst_p)
self.assertFalse(fake_update_from_api.called)
elif cell_type == 'api':
self.assertFalse(mock_update_at_top.called)
fake_update_from_api.assert_called_once_with(self.context,
mock.ANY, None, None, False)
expected_calls = [
mock.call(self.context, inst.uuid,
{'vm_state': 'foo', 'task_state': 'bar',
'cell_name': 'foo!bar@baz'},
columns_to_join=['system_metadata', 'extra',
'extra.flavor']),
mock.call(self.context, inst.uuid,
{'vm_state': 'bar', 'task_state': 'foo'},
columns_to_join=['system_metadata'])]
mock_db_update.assert_has_calls(expected_calls)
def test_skip_cells_api(self):
self._test_skip_cells_sync_helper(cell_type='api')
def test_skip_cells_compute(self):
self._test_skip_cells_sync_helper(cell_type='compute')
def test_get_deleted(self):
fake_inst = dict(self.fake_instance, id=123, deleted=123)
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
# NOTE(danms): Make sure it's actually a bool
self.assertTrue(inst.deleted)
def test_get_not_cleaned(self):
fake_inst = dict(self.fake_instance, id=123, cleaned=None)
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
# NOTE(mikal): Make sure it's actually a bool
self.assertFalse(inst.cleaned)
def test_get_cleaned(self):
fake_inst = dict(self.fake_instance, id=123, cleaned=1)
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
# NOTE(mikal): Make sure it's actually a bool
self.assertTrue(inst.cleaned)
def test_with_info_cache(self):
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
nwinfo1_json = nwinfo1.json()
nwinfo2_json = nwinfo2.json()
fake_info_cache = test_instance_info_cache.fake_info_cache
fake_inst['info_cache'] = dict(
fake_info_cache,
network_info=nwinfo1_json,
instance_uuid=fake_uuid)
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst)
db.instance_info_cache_update(self.context, fake_uuid,
{'network_info': nwinfo2_json}).AndReturn(fake_info_cache)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
self.assertEqual(nwinfo1, inst.info_cache.network_info)
self.assertEqual(fake_uuid, inst.info_cache.instance_uuid)
inst.info_cache.network_info = nwinfo2
inst.save()
def test_with_info_cache_none(self):
fake_inst = dict(self.fake_instance, info_cache=None)
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
['info_cache'])
self.assertIsNone(inst.info_cache)
def test_with_security_groups(self):
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
fake_inst['security_groups'] = [
{'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
'user_id': 'fake-user', 'project_id': 'fake_project',
'created_at': None, 'updated_at': None, 'deleted_at': None,
'deleted': False},
{'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
'user_id': 'fake-user', 'project_id': 'fake_project',
'created_at': None, 'updated_at': None, 'deleted_at': None,
'deleted': False},
]
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(db, 'security_group_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst)
db.security_group_update(self.context, 1, {'description': 'changed'}
).AndReturn(fake_inst['security_groups'][0])
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
self.assertEqual(2, len(inst.security_groups))
for index, group in enumerate(fake_inst['security_groups']):
for key in group:
self.assertEqual(group[key],
inst.security_groups[index][key])
self.assertIsInstance(inst.security_groups[index],
security_group.SecurityGroup)
self.assertEqual(set(), inst.security_groups.obj_what_changed())
inst.security_groups[0].description = 'changed'
inst.save()
self.assertEqual(set(), inst.security_groups.obj_what_changed())
def test_with_empty_security_groups(self):
fake_inst = dict(self.fake_instance, security_groups=[])
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
self.assertEqual(0, len(inst.security_groups))
def test_with_empty_pci_devices(self):
fake_inst = dict(self.fake_instance, pci_devices=[])
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['pci_devices']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
['pci_devices'])
self.assertEqual(0, len(inst.pci_devices))
def test_with_pci_devices(self):
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
fake_inst['pci_devices'] = [
{'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': 'a1',
'vendor_id': 'v1',
'numa_node': 0,
'product_id': 'p1',
'dev_type': fields.PciDeviceType.STANDARD,
'status': fields.PciDeviceStatus.ALLOCATED,
'dev_id': 'i',
'label': 'l',
'instance_uuid': fake_uuid,
'request_id': None,
'parent_addr': None,
'extra_info': '{}'},
{
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': 'a',
'vendor_id': 'v',
'numa_node': 1,
'product_id': 'p',
'dev_type': fields.PciDeviceType.STANDARD,
'status': fields.PciDeviceStatus.ALLOCATED,
'dev_id': 'i',
'label': 'l',
'instance_uuid': fake_uuid,
'request_id': None,
'parent_addr': 'a1',
'extra_info': '{}'},
]
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['pci_devices']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
['pci_devices'])
self.assertEqual(2, len(inst.pci_devices))
self.assertEqual(fake_uuid, inst.pci_devices[0].instance_uuid)
self.assertEqual(fake_uuid, inst.pci_devices[1].instance_uuid)
def test_with_fault(self):
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
fake_faults = [dict(x, instance_uuid=fake_uuid)
for x in test_instance_fault.fake_faults['fake-uuid']]
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=[]
).AndReturn(self.fake_instance)
db.instance_fault_get_by_instance_uuids(
self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
expected_attrs=['fault'])
self.assertEqual(fake_faults[0], dict(inst.fault.items()))
@mock.patch('nova.objects.EC2Ids.get_by_instance')
@mock.patch('nova.db.instance_get_by_uuid')
def test_with_ec2_ids(self, mock_get, mock_ec2):
fake_inst = dict(self.fake_instance)
fake_uuid = fake_inst['uuid']
mock_get.return_value = fake_inst
fake_ec2_ids = objects.EC2Ids(instance_id='fake-inst',
ami_id='fake-ami')
mock_ec2.return_value = fake_ec2_ids
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
expected_attrs=['ec2_ids'])
mock_ec2.assert_called_once_with(self.context, mock.ANY)
self.assertEqual(fake_ec2_ids.instance_id, inst.ec2_ids.instance_id)
@mock.patch('nova.db.instance_get_by_uuid')
def test_with_image_meta(self, mock_get):
fake_inst = dict(self.fake_instance)
mock_get.return_value = fake_inst
inst = instance.Instance.get_by_uuid(self.context,
fake_inst['uuid'],
expected_attrs=['image_meta'])
image_meta = inst.image_meta
self.assertIsInstance(image_meta, objects.ImageMeta)
self.assertEqual(100, image_meta.min_ram)
self.assertEqual('ide', image_meta.properties.hw_disk_bus)
self.assertEqual('ne2k_pci', image_meta.properties.hw_vif_model)
def test_iteritems_with_extra_attrs(self):
self.stubs.Set(objects.Instance, 'name', 'foo')
inst = objects.Instance(uuid=uuids.instance)
self.assertEqual(sorted({'uuid': uuids.instance,
'name': 'foo',
}.items()), sorted(inst.items()))
def _test_metadata_change_tracking(self, which):
inst = objects.Instance(uuid=uuids.instance)
setattr(inst, which, {})
inst.obj_reset_changes()
getattr(inst, which)['foo'] = 'bar'
self.assertEqual(set([which]), inst.obj_what_changed())
inst.obj_reset_changes()
self.assertEqual(set(), inst.obj_what_changed())
def test_create_skip_scheduled_at(self):
self.mox.StubOutWithMock(db, 'instance_create')
vals = {'host': 'foo-host',
'memory_mb': 128,
'system_metadata': {'foo': 'bar'},
'extra': {
'vcpu_model': None,
'numa_topology': None,
'pci_requests': None,
}}
fake_inst = fake_instance.fake_db_instance(**vals)
db.instance_create(self.context, vals).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance(context=self.context,
host='foo-host', memory_mb=128,
scheduled_at=None,
system_metadata={'foo': 'bar'})
inst.create()
self.assertEqual('foo-host', inst.host)
def test_metadata_change_tracking(self):
self._test_metadata_change_tracking('metadata')
def test_system_metadata_change_tracking(self):
self._test_metadata_change_tracking('system_metadata')
def test_create_stubbed(self):
self.mox.StubOutWithMock(db, 'instance_create')
vals = {'host': 'foo-host',
'memory_mb': 128,
'system_metadata': {'foo': 'bar'},
'extra': {
'vcpu_model': None,
'numa_topology': None,
'pci_requests': None,
}}
fake_inst = fake_instance.fake_db_instance(**vals)
db.instance_create(self.context, vals).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance(context=self.context,
host='foo-host', memory_mb=128,
system_metadata={'foo': 'bar'})
inst.create()
def test_create(self):
self.mox.StubOutWithMock(db, 'instance_create')
extras = {'vcpu_model': None,
'numa_topology': None,
'pci_requests': None}
db.instance_create(self.context, {'extra': extras}).AndReturn(
self.fake_instance)
self.mox.ReplayAll()
inst = objects.Instance(context=self.context)
inst.create()
self.assertEqual(self.fake_instance['id'], inst.id)
self.assertIsNotNone(inst.ec2_ids)
def test_create_with_values(self):
inst1 = objects.Instance(context=self.context,
user_id=self.context.user_id,
project_id=self.context.project_id,
host='foo-host')
inst1.create()
self.assertEqual('foo-host', inst1.host)
inst2 = objects.Instance.get_by_uuid(self.context, inst1.uuid)
self.assertEqual('foo-host', inst2.host)
def test_create_with_extras(self):
inst = objects.Instance(context=self.context,
uuid=self.fake_instance['uuid'],
numa_topology=test_instance_numa_topology.fake_obj_numa_topology,
pci_requests=objects.InstancePCIRequests(
requests=[
objects.InstancePCIRequest(count=123,
spec=[])]),
vcpu_model=test_vcpu_model.fake_vcpumodel,
)
inst.create()
self.assertIsNotNone(inst.numa_topology)
self.assertIsNotNone(inst.pci_requests)
self.assertEqual(1, len(inst.pci_requests.requests))
self.assertIsNotNone(inst.vcpu_model)
got_numa_topo = objects.InstanceNUMATopology.get_by_instance_uuid(
self.context, inst.uuid)
self.assertEqual(inst.numa_topology.instance_uuid,
got_numa_topo.instance_uuid)
got_pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
self.context, inst.uuid)
self.assertEqual(123, got_pci_requests.requests[0].count)
vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
self.context, inst.uuid)
self.assertEqual('fake-model', vcpu_model.model)
def test_recreate_fails(self):
inst = objects.Instance(context=self.context,
user_id=self.context.user_id,
project_id=self.context.project_id,
host='foo-host')
inst.create()
self.assertRaises(exception.ObjectActionError, inst.create)
def test_create_with_special_things(self):
self.mox.StubOutWithMock(db, 'instance_create')
fake_inst = fake_instance.fake_db_instance()
db.instance_create(self.context,
{'host': 'foo-host',
'security_groups': ['foo', 'bar'],
'info_cache': {'network_info': '[]'},
'extra': {
'vcpu_model': None,
'numa_topology': None,
'pci_requests': None,
},
}
).AndReturn(fake_inst)
self.mox.ReplayAll()
secgroups = security_group.SecurityGroupList()
secgroups.objects = []
for name in ('foo', 'bar'):
secgroup = security_group.SecurityGroup()
secgroup.name = name
secgroups.objects.append(secgroup)
info_cache = instance_info_cache.InstanceInfoCache()
info_cache.network_info = network_model.NetworkInfo()
inst = objects.Instance(context=self.context,
host='foo-host', security_groups=secgroups,
info_cache=info_cache)
inst.create()
def test_destroy_stubbed(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
deleted_at = datetime.datetime(1955, 11, 6)
fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
deleted=True)
db.instance_destroy(self.context, uuids.instance,
constraint=None).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance(context=self.context, id=1,
uuid=uuids.instance, host='foo')
inst.destroy()
self.assertEqual(timeutils.normalize_time(deleted_at),
timeutils.normalize_time(inst.deleted_at))
self.assertTrue(inst.deleted)
def test_destroy(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id}
db_inst = db.instance_create(self.context, values)
inst = objects.Instance(context=self.context, id=db_inst['id'],
uuid=db_inst['uuid'])
inst.destroy()
self.assertRaises(exception.InstanceNotFound,
db.instance_get_by_uuid, self.context,
db_inst['uuid'])
def test_destroy_host_constraint(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo'}
db_inst = db.instance_create(self.context, values)
inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
inst.host = None
self.assertRaises(exception.ObjectActionError,
inst.destroy)
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
@mock.patch.object(db, 'instance_destroy')
def test_destroy_cell_sync_to_top(self, mock_destroy, mock_destroy_at_top):
self.flags(enable=True, cell_type='compute', group='cells')
fake_inst = fake_instance.fake_db_instance(deleted=True)
mock_destroy.return_value = fake_inst
inst = objects.Instance(context=self.context, id=1,
uuid=uuids.instance)
inst.destroy()
mock_destroy_at_top.assert_called_once_with(self.context, mock.ANY)
actual_inst = mock_destroy_at_top.call_args[0][1]
self.assertIsInstance(actual_inst, instance.Instance)
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
@mock.patch.object(db, 'instance_destroy')
def test_destroy_no_cell_sync_to_top(self, mock_destroy,
mock_destroy_at_top):
fake_inst = fake_instance.fake_db_instance(deleted=True)
mock_destroy.return_value = fake_inst
inst = objects.Instance(context=self.context, id=1,
uuid=uuids.instance)
inst.destroy()
self.assertFalse(mock_destroy_at_top.called)
def test_name_does_not_trigger_lazy_loads(self):
values = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'host': 'foo'}
db_inst = db.instance_create(self.context, values)
inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
self.assertFalse(inst.obj_attr_is_set('fault'))
self.flags(instance_name_template='foo-%(uuid)s')
self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
self.assertFalse(inst.obj_attr_is_set('fault'))
def test_from_db_object_not_overwrite_info_cache(self):
info_cache = instance_info_cache.InstanceInfoCache()
inst = objects.Instance(context=self.context,
info_cache=info_cache)
db_inst = fake_instance.fake_db_instance()
db_inst['info_cache'] = dict(
test_instance_info_cache.fake_info_cache)
inst._from_db_object(self.context, inst, db_inst,
expected_attrs=['info_cache'])
self.assertIs(info_cache, inst.info_cache)
def test_from_db_object_info_cache_not_set(self):
inst = instance.Instance(context=self.context,
info_cache=None)
db_inst = fake_instance.fake_db_instance()
db_inst.pop('info_cache')
inst._from_db_object(self.context, inst, db_inst,
expected_attrs=['info_cache'])
self.assertIsNone(inst.info_cache)
    def test_from_db_object_security_groups_not_set(self):
inst = instance.Instance(context=self.context,
info_cache=None)
db_inst = fake_instance.fake_db_instance()
db_inst.pop('security_groups')
inst._from_db_object(self.context, inst, db_inst,
expected_attrs=['security_groups'])
self.assertEqual([], inst.security_groups.objects)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
def test_get_with_pci_requests(self, mock_get):
mock_get.return_value = objects.InstancePCIRequests()
db_instance = db.instance_create(self.context, {
'user_id': self.context.user_id,
'project_id': self.context.project_id})
instance = objects.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['pci_requests'])
self.assertTrue(instance.obj_attr_is_set('pci_requests'))
self.assertIsNotNone(instance.pci_requests)
def test_get_flavor(self):
db_flavor = flavors.get_default_flavor()
inst = objects.Instance(flavor=db_flavor)
self.assertEqual(db_flavor['flavorid'],
inst.get_flavor().flavorid)
def test_get_flavor_namespace(self):
db_flavor = flavors.get_default_flavor()
inst = objects.Instance(old_flavor=db_flavor)
self.assertEqual(db_flavor['flavorid'],
inst.get_flavor('old').flavorid)
@mock.patch.object(db, 'instance_metadata_delete')
def test_delete_metadata_key(self, db_delete):
inst = objects.Instance(context=self.context,
id=1, uuid=uuids.instance)
inst.metadata = {'foo': '1', 'bar': '2'}
inst.obj_reset_changes()
inst.delete_metadata_key('foo')
self.assertEqual({'bar': '2'}, inst.metadata)
self.assertEqual({}, inst.obj_get_changes())
db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')
def test_reset_changes(self):
inst = objects.Instance()
inst.metadata = {'1985': 'present'}
inst.system_metadata = {'1955': 'past'}
self.assertEqual({}, inst._orig_metadata)
inst.obj_reset_changes(['metadata'])
self.assertEqual({'1985': 'present'}, inst._orig_metadata)
self.assertEqual({}, inst._orig_system_metadata)
def test_load_generic_calls_handler(self):
inst = objects.Instance(context=self.context, uuid=uuids.instance)
with mock.patch.object(inst, '_load_generic') as mock_load:
def fake_load(name):
inst.system_metadata = {}
mock_load.side_effect = fake_load
inst.system_metadata
mock_load.assert_called_once_with('system_metadata')
def test_load_fault_calls_handler(self):
inst = objects.Instance(context=self.context, uuid=uuids.instance)
with mock.patch.object(inst, '_load_fault') as mock_load:
def fake_load():
inst.fault = None
mock_load.side_effect = fake_load
inst.fault
mock_load.assert_called_once_with()
def test_load_ec2_ids_calls_handler(self):
inst = objects.Instance(context=self.context, uuid=uuids.instance)
with mock.patch.object(inst, '_load_ec2_ids') as mock_load:
def fake_load():
inst.ec2_ids = objects.EC2Ids(instance_id='fake-inst',
ami_id='fake-ami')
mock_load.side_effect = fake_load
inst.ec2_ids
mock_load.assert_called_once_with()
def test_load_migration_context(self):
inst = instance.Instance(context=self.context, uuid=uuids.instance)
with mock.patch.object(
objects.MigrationContext, 'get_by_instance_uuid',
return_value=test_mig_ctxt.fake_migration_context_obj
) as mock_get:
inst.migration_context
mock_get.assert_called_once_with(self.context, inst.uuid)
def test_load_migration_context_no_context(self):
inst = instance.Instance(context=self.context, uuid=uuids.instance)
with mock.patch.object(
objects.MigrationContext, 'get_by_instance_uuid',
side_effect=exception.MigrationContextNotFound(
instance_uuid=inst.uuid)
) as mock_get:
mig_ctxt = inst.migration_context
mock_get.assert_called_once_with(self.context, inst.uuid)
self.assertIsNone(mig_ctxt)
def test_load_migration_context_no_data(self):
inst = instance.Instance(context=self.context, uuid=uuids.instance)
with mock.patch.object(
objects.MigrationContext, 'get_by_instance_uuid') as mock_get:
loaded_ctxt = inst._load_migration_context(db_context=None)
self.assertFalse(mock_get.called)
self.assertIsNone(loaded_ctxt)
def test_apply_revert_migration_context(self):
inst = instance.Instance(context=self.context, uuid=uuids.instance,
numa_topology=None)
inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
self.context)
inst.apply_migration_context()
self.assertIsInstance(inst.numa_topology, objects.InstanceNUMATopology)
inst.revert_migration_context()
self.assertIsNone(inst.numa_topology)
def test_drop_migration_context(self):
inst = instance.Instance(context=self.context, uuid=uuids.instance)
inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
self.context)
inst.migration_context.instance_uuid = inst.uuid
inst.migration_context.id = 7
with mock.patch(
'nova.db.instance_extra_update_by_uuid') as update_extra:
inst.drop_migration_context()
self.assertIsNone(inst.migration_context)
update_extra.assert_called_once_with(self.context, inst.uuid,
{"migration_context": None})
def test_mutated_migration_context(self):
numa_topology = (test_instance_numa_topology.
fake_obj_numa_topology.obj_clone())
numa_topology.cells[0].memory = 1024
numa_topology.cells[1].memory = 1024
inst = instance.Instance(context=self.context, uuid=uuids.instance,
numa_topology=numa_topology)
inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
self.context)
with inst.mutated_migration_context():
self.assertIs(inst.numa_topology,
inst.migration_context.new_numa_topology)
self.assertIs(numa_topology, inst.numa_topology)
def test_clear_numa_topology(self):
numa_topology = (test_instance_numa_topology.
fake_obj_numa_topology.obj_clone())
numa_topology.cells[0].id = 42
numa_topology.cells[1].id = 43
inst = instance.Instance(context=self.context, uuid=uuids.instance,
numa_topology=numa_topology)
inst.obj_reset_changes()
inst.clear_numa_topology()
self.assertIn('numa_topology', inst.obj_what_changed())
self.assertEqual(-1, numa_topology.cells[0].id)
self.assertEqual(-1, numa_topology.cells[1].id)
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_load_generic(self, mock_get):
inst2 = instance.Instance(metadata={'foo': 'bar'})
mock_get.return_value = inst2
inst = instance.Instance(context=self.context, uuid=uuids.instance)
self.assertEqual({'foo': 'bar'}, inst.metadata)
@mock.patch('nova.db.instance_fault_get_by_instance_uuids')
def test_load_fault(self, mock_get):
fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
mock_get.return_value = {uuids.load_fault_instance: [fake_fault]}
inst = objects.Instance(context=self.context,
uuid=uuids.load_fault_instance)
fault = inst.fault
mock_get.assert_called_once_with(self.context,
[uuids.load_fault_instance])
self.assertEqual(fake_fault['id'], fault.id)
self.assertNotIn('metadata', inst.obj_what_changed())
@mock.patch('nova.objects.EC2Ids.get_by_instance')
def test_load_ec2_ids(self, mock_get):
fake_ec2_ids = objects.EC2Ids(instance_id='fake-inst',
ami_id='fake-ami')
mock_get.return_value = fake_ec2_ids
inst = objects.Instance(context=self.context, uuid=uuids.instance)
ec2_ids = inst.ec2_ids
mock_get.assert_called_once_with(self.context, inst)
self.assertEqual(fake_ec2_ids, ec2_ids)
@mock.patch('nova.objects.SecurityGroupList.get_by_instance')
def test_load_security_groups(self, mock_get):
secgroups = []
for name in ('foo', 'bar'):
secgroup = security_group.SecurityGroup()
secgroup.name = name
secgroups.append(secgroup)
fake_secgroups = security_group.SecurityGroupList(objects=secgroups)
mock_get.return_value = fake_secgroups
inst = objects.Instance(context=self.context, uuid='fake')
secgroups = inst.security_groups
mock_get.assert_called_once_with(self.context, inst)
self.assertEqual(fake_secgroups, secgroups)
@mock.patch('nova.objects.PciDeviceList.get_by_instance_uuid')
def test_load_pci_devices(self, mock_get):
fake_pci_devices = pci_device.PciDeviceList()
mock_get.return_value = fake_pci_devices
inst = objects.Instance(context=self.context, uuid=uuids.pci_devices)
pci_devices = inst.pci_devices
mock_get.assert_called_once_with(self.context, uuids.pci_devices)
self.assertEqual(fake_pci_devices, pci_devices)
def test_get_with_extras(self):
pci_requests = objects.InstancePCIRequests(requests=[
objects.InstancePCIRequest(count=123, spec=[])])
inst = objects.Instance(context=self.context,
user_id=self.context.user_id,
project_id=self.context.project_id,
pci_requests=pci_requests)
inst.create()
uuid = inst.uuid
inst = objects.Instance.get_by_uuid(self.context, uuid)
self.assertFalse(inst.obj_attr_is_set('pci_requests'))
inst = objects.Instance.get_by_uuid(
self.context, uuid, expected_attrs=['pci_requests'])
self.assertTrue(inst.obj_attr_is_set('pci_requests'))
class TestInstanceObject(test_objects._LocalTest,
_TestInstanceObject):
def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
expected_exception):
# NOTE(danms): Do this here and not in the remote test because
# we're mocking out obj_attr_is_set() without the thing actually
# being set, which confuses the heck out of the serialization
# stuff.
error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
'key_table')
# Prevent lazy-loading any fields, results in InstanceNotFound
attrs = objects.instance.INSTANCE_OPTIONAL_ATTRS
instance = fake_instance.fake_instance_obj(self.context,
expected_attrs=attrs)
fields_with_save_methods = [field for field in instance.fields
if hasattr(instance, '_save_%s' % field)]
for field in fields_with_save_methods:
@mock.patch.object(instance, '_save_%s' % field)
@mock.patch.object(instance, 'obj_attr_is_set')
def _test(mock_is_set, mock_save_field):
mock_is_set.return_value = True
mock_save_field.side_effect = error
instance.obj_reset_changes(fields=[field])
instance._changed_fields.add(field)
self.assertRaises(expected_exception, instance.save)
instance.obj_reset_changes(fields=[field])
_test()
def test_save_objectfield_missing_instance_row(self):
self._test_save_objectfield_fk_constraint_fails(
'instance_uuid', exception.InstanceNotFound)
def test_save_objectfield_reraises_if_not_instance_related(self):
self._test_save_objectfield_fk_constraint_fails(
'other_foreign_key', db_exc.DBReferenceError)
class TestRemoteInstanceObject(test_objects._RemoteTest,
_TestInstanceObject):
pass
class _TestInstanceListObject(object):
def fake_instance(self, id, updates=None):
db_inst = fake_instance.fake_db_instance(id=id,
access_ip_v4='1.2.3.4',
access_ip_v6='::1')
db_inst['terminated_at'] = None
db_inst['deleted_at'] = None
db_inst['created_at'] = None
db_inst['updated_at'] = None
db_inst['launched_at'] = datetime.datetime(1955, 11, 12,
22, 4, 0)
db_inst['security_groups'] = []
db_inst['deleted'] = 0
db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
instance_uuid=db_inst['uuid'])
if updates:
db_inst.update(updates)
return db_inst
def test_get_all_by_filters(self):
fakes = [self.fake_instance(1), self.fake_instance(2)]
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
'asc', limit=None, marker=None,
columns_to_join=['metadata']
).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_filters(
self.context, {'foo': 'bar'}, 'uuid', 'asc',
expected_attrs=['metadata'], use_slave=False)
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
def test_get_all_by_filters_sorted(self):
fakes = [self.fake_instance(1), self.fake_instance(2)]
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters_sort')
db.instance_get_all_by_filters_sort(self.context, {'foo': 'bar'},
limit=None, marker=None,
columns_to_join=['metadata'],
sort_keys=['uuid'],
sort_dirs=['asc']).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_filters(
self.context, {'foo': 'bar'}, expected_attrs=['metadata'],
use_slave=False, sort_keys=['uuid'], sort_dirs=['asc'])
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
@mock.patch.object(db, 'instance_get_all_by_filters_sort')
@mock.patch.object(db, 'instance_get_all_by_filters')
def test_get_all_by_filters_calls_non_sort(self,
mock_get_by_filters,
mock_get_by_filters_sort):
'''Verifies InstanceList.get_by_filters calls correct DB function.'''
# Single sort key/direction is set, call non-sorted DB function
objects.InstanceList.get_by_filters(
self.context, {'foo': 'bar'}, sort_key='key', sort_dir='dir',
limit=100, marker='uuid', use_slave=True)
mock_get_by_filters.assert_called_once_with(
self.context, {'foo': 'bar'}, 'key', 'dir', limit=100,
marker='uuid', columns_to_join=None)
self.assertEqual(0, mock_get_by_filters_sort.call_count)
@mock.patch.object(db, 'instance_get_all_by_filters_sort')
@mock.patch.object(db, 'instance_get_all_by_filters')
def test_get_all_by_filters_calls_sort(self,
mock_get_by_filters,
mock_get_by_filters_sort):
'''Verifies InstanceList.get_by_filters calls correct DB function.'''
# Multiple sort keys/directions are set, call sorted DB function
objects.InstanceList.get_by_filters(
self.context, {'foo': 'bar'}, limit=100, marker='uuid',
use_slave=True, sort_keys=['key1', 'key2'],
sort_dirs=['dir1', 'dir2'])
mock_get_by_filters_sort.assert_called_once_with(
self.context, {'foo': 'bar'}, limit=100,
marker='uuid', columns_to_join=None,
sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2'])
self.assertEqual(0, mock_get_by_filters.call_count)
def test_get_all_by_filters_works_for_cleaned(self):
fakes = [self.fake_instance(1),
self.fake_instance(2, updates={'deleted': 2,
'cleaned': None})]
self.context.read_deleted = 'yes'
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context,
{'deleted': True, 'cleaned': False},
'uuid', 'asc', limit=None, marker=None,
columns_to_join=['metadata']).AndReturn(
[fakes[1]])
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_filters(
self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
expected_attrs=['metadata'], use_slave=False)
self.assertEqual(1, len(inst_list))
self.assertIsInstance(inst_list.objects[0], instance.Instance)
self.assertEqual(fakes[1]['uuid'], inst_list.objects[0].uuid)
def test_get_by_host(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.instance_get_all_by_host(self.context, 'foo',
columns_to_join=None).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_host(self.context, 'foo')
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
self.assertEqual(self.context, inst_list.objects[i]._context)
self.assertEqual(set(), inst_list.obj_what_changed())
def test_get_by_host_and_node(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar',
columns_to_join=None).AndReturn(
fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_host_and_node(self.context,
'foo', 'bar')
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
def test_get_by_host_and_not_type(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
db.instance_get_all_by_host_and_not_type(self.context, 'foo',
type_id='bar').AndReturn(
fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_host_and_not_type(
self.context, 'foo', 'bar')
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
@mock.patch('nova.objects.instance._expected_cols')
@mock.patch('nova.db.instance_get_all')
def test_get_all(self, mock_get_all, mock_exp):
fakes = [self.fake_instance(1), self.fake_instance(2)]
mock_get_all.return_value = fakes
mock_exp.return_value = mock.sentinel.exp_att
inst_list = objects.InstanceList.get_all(
self.context, expected_attrs='fake')
mock_get_all.assert_called_once_with(
self.context, columns_to_join=mock.sentinel.exp_att)
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
def test_get_hung_in_rebooting(self):
fakes = [self.fake_instance(1),
self.fake_instance(2)]
dt = utils.isotime()
self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn(
fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_hung_in_rebooting(self.context,
dt)
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
def test_get_active_by_window_joined(self):
fakes = [self.fake_instance(1), self.fake_instance(2)]
# NOTE(mriedem): Send in a timezone-naive datetime since the
# InstanceList.get_active_by_window_joined method should convert it
# to tz-aware for the DB API call, which we'll assert with our stub.
dt = timeutils.utcnow()
def fake_instance_get_active_by_window_joined(context, begin, end,
project_id, host,
columns_to_join):
# make sure begin is tz-aware
self.assertIsNotNone(begin.utcoffset())
self.assertIsNone(end)
self.assertEqual(['metadata'], columns_to_join)
return fakes
with mock.patch.object(db, 'instance_get_active_by_window_joined',
fake_instance_get_active_by_window_joined):
inst_list = objects.InstanceList.get_active_by_window_joined(
self.context, dt, expected_attrs=['metadata'])
for fake, obj in zip(fakes, inst_list.objects):
self.assertIsInstance(obj, instance.Instance)
self.assertEqual(fake['uuid'], obj.uuid)
def test_with_fault(self):
fake_insts = [
fake_instance.fake_db_instance(uuid=uuids.faults_instance,
host='host'),
fake_instance.fake_db_instance(uuid=uuids.faults_instance_nonexist,
host='host'),
]
fake_faults = test_instance_fault.fake_faults
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_all_by_host(self.context, 'host',
columns_to_join=[]).AndReturn(fake_insts)
db.instance_fault_get_by_instance_uuids(
self.context, [x['uuid'] for x in fake_insts]
).AndReturn(fake_faults)
self.mox.ReplayAll()
instances = objects.InstanceList.get_by_host(self.context, 'host',
expected_attrs=['fault'],
use_slave=False)
self.assertEqual(2, len(instances))
self.assertEqual(fake_faults['fake-uuid'][0],
dict(instances[0].fault))
self.assertIsNone(instances[1].fault)
def test_fill_faults(self):
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
inst1 = objects.Instance(uuid=uuids.db_fault_1)
inst2 = objects.Instance(uuid=uuids.db_fault_2)
insts = [inst1, inst2]
for inst in insts:
inst.obj_reset_changes()
db_faults = {
'uuid1': [{'id': 123,
'instance_uuid': uuids.db_fault_1,
'code': 456,
'message': 'Fake message',
'details': 'No details',
'host': 'foo',
'deleted': False,
'deleted_at': None,
'updated_at': None,
'created_at': None,
}
]}
db.instance_fault_get_by_instance_uuids(self.context,
[x.uuid for x in insts],
).AndReturn(db_faults)
self.mox.ReplayAll()
inst_list = objects.InstanceList()
inst_list._context = self.context
inst_list.objects = insts
faulty = inst_list.fill_faults()
self.assertEqual([uuids.db_fault_1], list(faulty))
self.assertEqual(db_faults['uuid1'][0]['message'],
inst_list[0].fault.message)
self.assertIsNone(inst_list[1].fault)
for inst in inst_list:
self.assertEqual(set(), inst.obj_what_changed())
@mock.patch('nova.objects.instance.Instance.obj_make_compatible')
def test_get_by_security_group(self, mock_compat):
fake_secgroup = dict(test_security_group.fake_secgroup)
fake_secgroup['instances'] = [
fake_instance.fake_db_instance(id=1,
system_metadata={'foo': 'bar'}),
fake_instance.fake_db_instance(id=2),
]
with mock.patch.object(db, 'security_group_get') as sgg:
sgg.return_value = fake_secgroup
secgroup = security_group.SecurityGroup()
secgroup.id = fake_secgroup['id']
instances = instance.InstanceList.get_by_security_group(
self.context, secgroup)
self.assertEqual(2, len(instances))
self.assertEqual([1, 2], [x.id for x in instances])
self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)
def test_get_by_grantee_security_group_ids(self):
fake_instances = [
fake_instance.fake_db_instance(id=1),
fake_instance.fake_db_instance(id=2)
]
with mock.patch.object(
db, 'instance_get_all_by_grantee_security_groups') as igabgsg:
igabgsg.return_value = fake_instances
secgroup_ids = [1]
instances = objects.InstanceList.get_by_grantee_security_group_ids(
self.context, secgroup_ids)
igabgsg.assert_called_once_with(self.context, secgroup_ids)
self.assertEqual(2, len(instances))
self.assertEqual([1, 2], [x.id for x in instances])
class TestInstanceListObject(test_objects._LocalTest,
_TestInstanceListObject):
pass
class TestRemoteInstanceListObject(test_objects._RemoteTest,
_TestInstanceListObject):
pass
class TestInstanceObjectMisc(test.TestCase):
def test_expected_cols(self):
self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
self.assertIsNone(instance._expected_cols(None))
def test_expected_cols_extra(self):
self.assertEqual(['metadata', 'extra', 'extra.numa_topology'],
instance._expected_cols(['metadata',
'numa_topology']))
|
|
from uuid import uuid4
import warnings
from cqlengine.query import QueryException, ModelQuerySet, DMLQuery
from cqlengine.tests.base import BaseCassEngTestCase
from cqlengine.exceptions import ModelException, CQLEngineException
from cqlengine.models import Model, ModelDefinitionException, ColumnQueryEvaluator, UndefinedKeyspaceWarning
from cqlengine import columns
import cqlengine
class TestModelClassFunction(BaseCassEngTestCase):
"""
Tests verifying the behavior of the Model metaclass
"""
def test_column_attributes_handled_correctly(self):
"""
Tests that column attributes are moved to a _columns dict
and replaced with simple value attributes
"""
class TestModel(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
text = columns.Text()
#check class attributes
self.assertHasAttr(TestModel, '_columns')
self.assertHasAttr(TestModel, 'id')
self.assertHasAttr(TestModel, 'text')
#check instance attributes
inst = TestModel()
self.assertHasAttr(inst, 'id')
self.assertHasAttr(inst, 'text')
self.assertIsNone(inst.id)
self.assertIsNone(inst.text)
def test_db_map(self):
"""
Tests that the db_map is properly defined:
the db_map maps each db_field name back to its model attribute name
"""
class WildDBNames(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
content = columns.Text(db_field='words_and_whatnot')
numbers = columns.Integer(db_field='integers_etc')
db_map = WildDBNames._db_map
self.assertEquals(db_map['words_and_whatnot'], 'content')
self.assertEquals(db_map['integers_etc'], 'numbers')
def test_attempting_to_make_duplicate_column_names_fails(self):
"""
Tests that trying to create conflicting db column names will fail
"""
with self.assertRaises(ModelException):
class BadNames(Model):
words = columns.Text()
content = columns.Text(db_field='words')
def test_column_ordering_is_preserved(self):
"""
Tests that the _columns dict retains the ordering of the class definition
"""
class Stuff(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
words = columns.Text()
content = columns.Text()
numbers = columns.Integer()
self.assertEquals([x for x in Stuff._columns.keys()], ['id', 'words', 'content', 'numbers'])
def test_exception_raised_when_creating_class_without_pk(self):
with self.assertRaises(ModelDefinitionException):
class TestModel(Model):
count = columns.Integer()
text = columns.Text(required=False)
def test_value_managers_are_keeping_model_instances_isolated(self):
"""
Tests that instance value managers are isolated from other instances
"""
class Stuff(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
num = columns.Integer()
inst1 = Stuff(num=5)
inst2 = Stuff(num=7)
self.assertNotEquals(inst1.num, inst2.num)
self.assertEquals(inst1.num, 5)
self.assertEquals(inst2.num, 7)
def test_superclass_fields_are_inherited(self):
"""
Tests that fields defined on the super class are inherited properly
"""
class TestModel(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
text = columns.Text()
class InheritedModel(TestModel):
numbers = columns.Integer()
assert 'text' in InheritedModel._columns
assert 'numbers' in InheritedModel._columns
def test_column_family_name_generation(self):
""" Tests that auto column family name generation works as expected """
class TestModel(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
text = columns.Text()
assert TestModel.column_family_name(include_keyspace=False) == 'test_model'
def test_normal_fields_can_be_defined_between_primary_keys(self):
"""
Tests that non-primary key fields can be defined between primary key fields
"""
def test_at_least_one_non_primary_key_column_is_required(self):
"""
Tests that an error is raised if a model doesn't contain at least one primary key field
"""
def test_model_keyspace_attribute_must_be_a_string(self):
"""
Tests that users can't set the keyspace to None, or something else
"""
def test_indexes_arent_allowed_on_models_with_multiple_primary_keys(self):
"""
Tests that attempting to define an index on a model with multiple primary keys fails
"""
def test_meta_data_is_not_inherited(self):
"""
Test that metadata defined in one class, is not inherited by subclasses
"""
def test_partition_keys(self):
"""
Test compound partition key definition
"""
class ModelWithPartitionKeys(cqlengine.Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
c1 = cqlengine.Text(primary_key=True)
p1 = cqlengine.Text(partition_key=True)
p2 = cqlengine.Text(partition_key=True)
cols = ModelWithPartitionKeys._columns
self.assertTrue(cols['c1'].primary_key)
self.assertFalse(cols['c1'].partition_key)
self.assertTrue(cols['p1'].primary_key)
self.assertTrue(cols['p1'].partition_key)
self.assertTrue(cols['p2'].primary_key)
self.assertTrue(cols['p2'].partition_key)
obj = ModelWithPartitionKeys(p1='a', p2='b')
self.assertEquals(obj.pk, ('a', 'b'))
def test_del_attribute_is_assigned_properly(self):
""" Tests that columns that can be deleted have the del attribute """
class DelModel(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
key = columns.Integer(primary_key=True)
data = columns.Integer(required=False)
model = DelModel(key=4, data=5)
del model.data
with self.assertRaises(AttributeError):
del model.key
def test_does_not_exist_exceptions_are_not_shared_between_model(self):
""" Tests that DoesNotExist exceptions are not the same exception between models """
class Model1(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
class Model2(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
try:
raise Model1.DoesNotExist
except Model2.DoesNotExist:
assert False, "Model1 exception should not be caught by Model2"
except Model1.DoesNotExist:
#expected
pass
def test_does_not_exist_inherits_from_superclass(self):
""" Tests that a DoesNotExist exception can be caught by it's parent class DoesNotExist """
class Model1(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
class Model2(Model1):
pass
try:
raise Model2.DoesNotExist
except Model1.DoesNotExist:
#expected
pass
except Exception:
assert False, "Model2 exception should not be caught by Model1"
def test_abstract_model_keyspace_warning_is_skipped(self):
with warnings.catch_warnings(record=True) as warn:
class NoKeyspace(Model):
__abstract__ = True
key = columns.UUID(primary_key=True)
self.assertEqual(len(warn), 0)
class TestManualTableNaming(BaseCassEngTestCase):
class RenamedTest(cqlengine.Model):
__keyspace__ = 'whatever'
__table_name__ = 'manual_name'
id = cqlengine.UUID(primary_key=True)
data = cqlengine.Text()
def test_proper_table_naming(self):
assert self.RenamedTest.column_family_name(include_keyspace=False) == 'manual_name'
assert self.RenamedTest.column_family_name(include_keyspace=True) == 'whatever.manual_name'
class AbstractModel(Model):
__abstract__ = True
class ConcreteModel(AbstractModel):
pkey = columns.Integer(primary_key=True)
data = columns.Integer()
class AbstractModelWithCol(Model):
__abstract__ = True
pkey = columns.Integer(primary_key=True)
class ConcreteModelWithCol(AbstractModelWithCol):
data = columns.Integer()
class AbstractModelWithFullCols(Model):
__abstract__ = True
pkey = columns.Integer(primary_key=True)
data = columns.Integer()
class TestAbstractModelClasses(BaseCassEngTestCase):
def test_id_field_is_not_created(self):
""" Tests that an id field is not automatically generated on abstract classes """
assert not hasattr(AbstractModel, 'id')
assert not hasattr(AbstractModelWithCol, 'id')
def test_id_field_is_not_created_on_subclass(self):
assert not hasattr(ConcreteModel, 'id')
def test_abstract_attribute_is_not_inherited(self):
""" Tests that __abstract__ attribute is not inherited """
assert not ConcreteModel.__abstract__
assert not ConcreteModelWithCol.__abstract__
def test_attempting_to_save_abstract_model_fails(self):
""" Attempting to save a model from an abstract model should fail """
with self.assertRaises(CQLEngineException):
AbstractModelWithFullCols.create(pkey=1, data=2)
def test_attempting_to_create_abstract_table_fails(self):
""" Attempting to create a table from an abstract model should fail """
from cqlengine.management import sync_table
with self.assertRaises(CQLEngineException):
sync_table(AbstractModelWithFullCols)
def test_attempting_query_on_abstract_model_fails(self):
""" Tests attempting to execute query with an abstract model fails """
with self.assertRaises(CQLEngineException):
iter(AbstractModelWithFullCols.objects(pkey=5)).next()
def test_abstract_columns_are_inherited(self):
""" Tests that columns defined in the abstract class are inherited into the concrete class """
assert hasattr(ConcreteModelWithCol, 'pkey')
assert isinstance(ConcreteModelWithCol.pkey, ColumnQueryEvaluator)
assert isinstance(ConcreteModelWithCol._columns['pkey'], columns.Column)
def test_concrete_class_table_creation_cycle(self):
""" Tests that models with inherited abstract classes can be created, and have io performed """
from cqlengine.management import sync_table, drop_table
sync_table(ConcreteModelWithCol)
w1 = ConcreteModelWithCol.create(pkey=5, data=6)
w2 = ConcreteModelWithCol.create(pkey=6, data=7)
r1 = ConcreteModelWithCol.get(pkey=5)
r2 = ConcreteModelWithCol.get(pkey=6)
assert w1.pkey == r1.pkey
assert w1.data == r1.data
assert w2.pkey == r2.pkey
assert w2.data == r2.data
drop_table(ConcreteModelWithCol)
class TestCustomQuerySet(BaseCassEngTestCase):
""" Tests overriding the default queryset class """
class TestException(Exception): pass
def test_overriding_queryset(self):
class QSet(ModelQuerySet):
def create(iself, **kwargs):
raise self.TestException
class CQModel(Model):
__queryset__ = QSet
part = columns.UUID(primary_key=True)
data = columns.Text()
with self.assertRaises(self.TestException):
CQModel.create(part=uuid4(), data='s')
def test_overriding_dmlqueryset(self):
class DMLQ(DMLQuery):
def save(iself):
raise self.TestException
class CDQModel(Model):
__dmlquery__ = DMLQ
part = columns.UUID(primary_key=True)
data = columns.Text()
with self.assertRaises(self.TestException):
CDQModel().save()
class TestCachedLengthIsNotCarriedToSubclasses(BaseCassEngTestCase):
def test_subclassing(self):
length = len(ConcreteModelWithCol())
class AlreadyLoadedTest(ConcreteModelWithCol):
new_field = columns.Integer()
self.assertGreater(len(AlreadyLoadedTest()), length)
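# The tests above exercise model definition, table sync and CRUD through
# cqlengine. A minimal end-to-end sketch of that flow is outlined below; it
# assumes a reachable Cassandra node and an existing keyspace, the model name
# and host are illustrative, and the exact connection.setup() signature varies
# between cqlengine releases.
#
#   from uuid import uuid4
#   from cqlengine import columns, connection
#   from cqlengine.management import sync_table
#   from cqlengine.models import Model
#
#   class Note(Model):
#       __keyspace__ = 'examples'
#       id = columns.UUID(primary_key=True, default=uuid4)
#       body = columns.Text(db_field='note_body')
#
#   connection.setup(['127.0.0.1'], default_keyspace='examples')
#   sync_table(Note)                  # creates the column family
#   Note.create(body='hello')         # insert a row
#   print Note.objects.count()        # query through the default queryset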
|
|
# -*- coding: utf-8 -*-
import uuid
import datetime
from sqlalchemy import Column, ForeignKey, func
from sqlalchemy.types import Unicode, Boolean, DateTime, Integer, Float, Binary
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from shapely import wkb
from shapely.geometry import asShape
from shapely.geometry.multipoint import asMultiPoint
from shapely.geometry.multilinestring import asMultiLineString
from shapely.geometry.multipolygon import MultiPolygonAdapter
from geoalchemy2 import Geometry
from geoalchemy2.shape import from_shape
import geojson
Base = declarative_base()
class Map(Base):
__tablename__ = 'map'
uuid = Column(Unicode, primary_key=True,
default=lambda: str(uuid.uuid4().hex))
user_login = Column(Unicode(50))
title = Column(Unicode(50))
description = Column(Unicode)
public = Column(Boolean)
create_date = Column(DateTime, default=datetime.datetime.now)
update_date = Column(DateTime, onupdate=datetime.datetime.now)
zoom = Column(Integer)
x = Column(Float)
y = Column(Float)
theme = Column(Unicode)
bg_layer = Column(Unicode)
bg_opacity = Column(Float, default=100)
layers = Column(Unicode)
layers_indices = Column(Unicode)
layers_opacity = Column(Unicode)
layers_visibility = Column(Unicode)
selected_node = Column(Unicode)
rating = Column(Float, default=0)
rating_count = Column(Integer, default=0)
category_id = Column(Integer, ForeignKey('category.id'), default=999)
label = Column(Unicode)
category = relationship('Category', backref='maps')
features = relationship('Feature', backref='map')
def get_title(self):
if self.title is not None:
return self.title.replace("'", "_")
return None
def todict(self):
def convert_datetime(value):
if value is not None:
return value.strftime("%Y-%m-%d %H:%M:%S")
else:
return None
for c in self.__table__.columns:
if isinstance(c.type, DateTime):
value = convert_datetime(getattr(self, c.name))
else:
value = getattr(self, c.name)
yield(c.name, value)
def __iter__(self):
"""Returns an iterable that supports .next()
so we can do dict(sa_instance)
"""
return self.todict()
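# Usage sketch: because __iter__ delegates to todict(), a Map row can be
# turned into a plain dictionary directly (the uuid and session below are
# assumed to exist):
#
#   m = Map.get('some-uuid', session)
#   as_dict = dict(m)   # DateTime columns come back as formatted strings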
@staticmethod
def get(id, session):
""" Get map by its id. """
return session.query(Map).get(id)
@staticmethod
def belonging_to(user, session):
""" Get maps that belong to user. """
maps = session.query(Map).filter(
func.lower(Map.user_login) == func.lower(user)) \
.order_by("category_id asc,title asc").all()
return [{'title': map.title,
'uuid': map.uuid,
'public': map.public,
'create_date': map.create_date,
'update_date': map.update_date,
'category': map.category.name
if map.category_id is not None else None,
'owner': map.user_login.lower()} for map in maps]
class Feature(Base):
__tablename__ = 'feature'
__template__ = 'tooltips/category.mako'
id = Column(Integer, primary_key=True)
name = Column(Unicode(50))
description = Column(Unicode)
image = Column(Unicode(255))
thumbnail = Column(Unicode(255))
color = Column(Unicode(255))
stroke = Column(Integer, default=2)
is_label = Column(Boolean, default=False)
is_circle = Column(Boolean, default=False)
linestyle = Column(Integer, default=0)
show_orientation = Column(Boolean, default=False)
geometry = Column(Geometry(srid=2169))
map_id = Column(Unicode, ForeignKey('map.uuid'))
symbol_id = Column(Integer)
size = Column(Float, default=10)
angle = Column(Float, default=0)
font_size = Column(Integer, default=15)
opacity = Column(Float, default=0.5)
shape = Column(Unicode(255))
last_modified_by = Column(Unicode(50))
display_order = Column(Integer, default=0)
update_date = Column(DateTime, default=datetime.datetime.now,
onupdate=datetime.datetime.now)
def __init__(self, feature=None):
if feature:
self.__update__(feature)
def __update__(self, feature):
try:
order = feature.properties.get('display_order')
self.display_order = order
except Exception:
self.display_order = 0
self.name = feature.properties.get('name')
self.description = feature.properties.get('description')
self.thumbnail = feature.properties.get('thumbnail')
self.image = feature.properties.get('image')
self.color = feature.properties.get('color')
self.stroke = feature.properties.get('stroke')
self.is_label = feature.properties.get('isLabel')
self.is_circle = feature.properties.get('isCircle')
self.show_orientation = feature.properties.get('showOrientation')
linestyle = feature.properties.get('linestyle')
self.linestyle = {'plain': 0, 'dashed': 1, 'dotted': 2}.get(linestyle, 0)
self.shape = feature.properties.get('shape')
size = feature.properties.get('size')
self.size = size if size is not None and str(size).isnumeric()\
else 10
angle = feature.properties.get('angle')
try:
self.angle = float(angle)
except TypeError:
self.angle = 0
font_size = feature.properties.get('fontSize')
self.font_size = font_size if font_size is not None and\
str(font_size).isnumeric() else 15
symbol_id = feature.properties.get('symbolId')
self.symbol_id = None if symbol_id is not None and\
len(str(symbol_id)) == 0\
else symbol_id
opacity = feature.properties.get('opacity')
self.opacity = opacity if opacity is not None and\
str(opacity).isnumeric() else 0.5
if hasattr(feature.geometry, "__geo_interface__"):
ob = feature.geometry.__geo_interface__
else:
ob = feature.geometry
geom_type = ob.get("type").lower()
if geom_type != 'geometrycollection':
# The OpenLayers GPX writer creates 4-dimensional geometries and
# shapely does not allow that for linestrings.
if geom_type == 'linestring':
feature.geometry.coordinates = \
[coordinate[0:2] for coordinate
in feature.geometry.coordinates]
elif geom_type == 'multilinestring':
multilinestring = feature.geometry.coordinates
feature.geometry.coordinates = \
[[coord[0:2] for coord in multilinestring[i]]
for i in range(len(multilinestring))]
shape = asShape(feature.geometry)
else:
geoms = []
is_transformable = True
types = None
for geom in feature.geometry.geometries:
if hasattr(geom, "__geo_interface__"):
ob = geom.__geo_interface__
else:
ob = geom
geom_type = ob.get("type").lower()
if types is None:
types = geom_type
else:
is_transformable = types == geom_type
if not is_transformable:
break
geoms.append(asShape(geom))
if is_transformable:
if types == "point":
shape = asMultiPoint(geoms)
elif types == "linestring":
shape = asMultiLineString(geoms)
elif types == "polygon":
shape = MultiPolygonAdapter(geoms, context_type='geojson')
else:
shape = None
# ST_FORCE2D is used because the db only allows geometry with
# 2 dimensions.
self.geometry = func.ST_Force2D(from_shape(shape, srid=2169))\
if shape is not None else None
@property
def __geo_interface__(self):
geometry = wkb.loads(str(self.geometry), True)
properties = dict(name=self.name,
description=self.description,
thumbnail=self.thumbnail,
image=self.image,
color=self.color,
stroke=self.stroke,
isLabel=self.is_label,
isCircle=self.is_circle,
showOrientation=self.show_orientation,
linestyle={0: 'plain', 1: 'dashed', 2: 'dotted'}.get(self.linestyle, 'dotted'),
fid=self.id,
symbolId=self.symbol_id,
angle=self.angle if self.angle is not None else 0,
size=self.size if self.size is not None else 10,
fontSize=self.font_size
if self.font_size is not None else 15,
opacity=self.opacity
if self.opacity is not None else 0.5,
shape=self.shape,
display_order=self.display_order
if self.display_order is not None else 0,
)
return geojson.Feature(id=self.id,
geometry=geometry,
properties=properties
)
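# Serialization sketch: since the property above returns a geojson.Feature,
# a stored row can be dumped to GeoJSON text directly (the session and id
# below are assumed):
#
#   feature_row = session.query(Feature).get(some_id)
#   geojson.dumps(feature_row.__geo_interface__)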
@property
def geom(self):
if hasattr(self.geometry, "geom_wkb"):
return wkb.loads(str(self.geometry.geom_wkb))
else:
if hasattr(self, "_shape"):
return self._shape
else:
return None
class RoleCategories(Base):
__tablename__ = 'role_categories'
role_id = Column('role_id', Integer, ForeignKey('role.id'),
primary_key=True)
category_id = Column('category_id', Integer, ForeignKey('category.id'),
primary_key=True)
class Category(Base):
__tablename__ = 'category'
id = Column(Integer, primary_key=True)
name = Column(Unicode(255))
allow_labeling = Column(Boolean)
list = Column(Boolean)
def __init__(self, name):
self.name = name
def todict(self):
return {'id': self.id,
'name': self.name,
'allow_labeling': self.allow_labeling}
@staticmethod
def belonging_to(user, session):
user_role = session.query(Role).get(
getattr(user, 'mymaps_role', user.settings_role.id))
try:
categories = user_role.categories\
if user_role.categories is not None else []
except Exception:
categories = []
return [category.todict() for category in categories]
@staticmethod
def all(session):
categories = session.query(Category).all()
return [category.todict() for category in categories]
class Role(Base):
__tablename__ = 'role'
id = Column(Integer, primary_key=True)
name = Column(Unicode(255))
categories = relationship(Category, secondary="role_categories")
def __init__(self, name):
self.name = name
class Symbols(Base):
__tablename__ = 'symbols'
id = Column(Integer, primary_key=True)
symbol_name = Column(Unicode(255))
login_owner = Column(Unicode(255))
symbol = Column(Binary)
is_public = Column(Boolean)
synchronized = Column(Boolean, default=False)
class Images(Base):
__tablename__ = 'images'
id = Column(Integer, primary_key=True)
image = Column(Binary)
name = Column(Unicode(255))
login_owner = Column(Unicode(255))
class Item(Base):
__tablename__ = 'item'
__table_args__ = ({'schema': 'themes_prod', 'autoload': False})
id = Column(Unicode(255), primary_key=True)
isgroup = Column(Boolean)
label = Column(Unicode(255))
open_expanded = Column(Boolean)
icon_type = Column(Unicode(10))
image_format = Column(Unicode(20))
metadataid = Column(Unicode(50))
legendname = Column(Unicode(255))
queryable = Column(Boolean)
exclusion = Column(Unicode(1000))
opacity = Column(Float)
service_url = Column(Unicode(255))
category_id = Column(Integer)
server_resolutions = Column(Unicode(255))
use_client_zoom = Column(Boolean)
is_wms = Column(Boolean)
wms_url = Column(Unicode(1000))
wms_layers = Column(Unicode(2500))
wms_format = Column(Unicode(20))
wms_profiles_guichet = Column(Unicode(255))
is_poi = Column(Boolean)
id_collection = Column(Integer)
class MapUser(Base):
__tablename__ = 'shared_map_user'
map_uuid = Column(Unicode, ForeignKey('map.uuid'), primary_key=True)
user_login = Column(Unicode(50), primary_key=True)
read_only = Column(Boolean)
class CategoryUser(Base):
__tablename__ = 'shared_category_user'
category_id = Column(Integer, ForeignKey('category.id'), primary_key=True)
user_login = Column(Unicode(50), primary_key=True)
read_only = Column(Boolean)
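# Minimal wiring sketch for the models above (the connection URL and
# credentials are placeholders; the Geometry columns require a PostGIS-enabled
# database):
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine('postgresql://user:password@localhost/mymaps')
#   Base.metadata.create_all(engine)
#   session = sessionmaker(bind=engine)()
#   maps = Map.belonging_to('some_login', session)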
|
|
from __future__ import absolute_import
from sentry.api.bases.project import ProjectPermission
from sentry.models import ApiKey
from sentry.testutils import TestCase
class ProjectPermissionBase(TestCase):
def setUp(self):
self.org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.project = self.create_project(organization=self.org)
super(ProjectPermissionBase, self).setUp()
def has_object_perm(self, method, obj, auth=None, user=None, is_superuser=None):
perm = ProjectPermission()
request = self.make_request(user=user, auth=auth, method=method)
if is_superuser:
request.superuser.set_logged_in(request.user)
return perm.has_permission(request, None) and perm.has_object_permission(request, None, obj)
class ProjectPermissionTest(ProjectPermissionBase):
def test_regular_user(self):
user = self.create_user(is_superuser=False)
assert not self.has_object_perm("GET", self.project, user=user)
def test_superuser(self):
user = self.create_user(is_superuser=True)
assert self.has_object_perm("GET", self.project, user=user, is_superuser=True)
def test_member_for_project_read(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert self.has_object_perm("GET", self.project, user=user)
def test_member_for_project_write(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert not self.has_object_perm("POST", self.project, user=user)
def test_member_for_project_delete(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_member_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert self.has_object_perm("GET", self.project, user=user)
def test_api_key_with_org_access(self):
key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
assert self.has_object_perm("GET", self.project, auth=key)
def test_api_key_without_org_access(self):
key = ApiKey.objects.create(
organization=self.create_organization(), scope_list=["project:read"]
)
assert not self.has_object_perm("GET", self.project, auth=key)
def test_api_key_without_access(self):
key = ApiKey.objects.create(organization=self.org)
assert not self.has_object_perm("GET", self.project, auth=key)
def test_api_key_with_wrong_access(self):
key = ApiKey.objects.create(organization=self.org, scope_list=["team:read"])
assert not self.has_object_perm("GET", self.project, auth=key)
def test_api_key_with_wrong_access_for_method(self):
key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
assert not self.has_object_perm("PUT", self.project, auth=key)
def test_admin_without_team_access(self):
team = self.create_team(organization=self.org)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="admin", teams=[team])
# if `allow_joinleave` is True, admins can act on teams
# they don't have access to
assert self.has_object_perm("POST", self.project, user=user)
def test_admin_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="admin", teams=[self.team])
assert self.has_object_perm("POST", self.project, user=user)
def test_manager_without_team_access(self):
team = self.create_team(organization=self.org)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="manager", teams=[team])
# managers should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("POST", self.project, user=user)
def test_manager_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="manager", teams=[self.team])
assert self.has_object_perm("POST", self.project, user=user)
def test_owner_without_team_access(self):
team = self.create_team(organization=self.org)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="owner", teams=[team])
# owners should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("POST", self.project, user=user)
def test_owner_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="owner", teams=[self.team])
assert self.has_object_perm("POST", self.project, user=user)
def test_project_no_team_sentry_app_installed(self):
project = self.create_project(teams=[self.team])
self.team.delete()
other_org = self.create_organization()
sentry_app = self.create_sentry_app(
name="my_app",
organization=other_org,
scopes=("project:write",),
webhook_url="http://example.com",
)
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=self.org, user=self.user
)
assert self.has_object_perm("POST", project, user=sentry_app.proxy_user)
def test_project_no_team_sentry_app_not_installed(self):
project = self.create_project(teams=[self.team])
self.team.delete()
other_org = self.create_organization()
sentry_app = self.create_sentry_app(
name="my_app",
organization=other_org,
scopes=("project:write",),
webhook_url="http://example.com",
)
# install on other org
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=other_org, user=self.user
)
assert not self.has_object_perm("POST", project, user=sentry_app.proxy_user)
class ProjectPermissionNoJoinLeaveTest(ProjectPermissionBase):
def setUp(self):
super(ProjectPermissionNoJoinLeaveTest, self).setUp()
self.org = self.create_organization()
self.org.flags.allow_joinleave = False
self.org.save()
self.team = self.create_team(organization=self.org)
self.project = self.create_project(organization=self.org)
def test_regular_user(self):
user = self.create_user(is_superuser=False)
assert not self.has_object_perm("GET", self.project, user=user)
def test_superuser(self):
user = self.create_user(is_superuser=True)
assert self.has_object_perm("GET", self.project, user=user, is_superuser=True)
def test_member_for_project_read(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert self.has_object_perm("GET", self.project, user=user)
def test_member_for_project_write(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert not self.has_object_perm("POST", self.project, user=user)
def test_member_for_project_delete(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_member_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert self.has_object_perm("GET", self.project, user=user)
def test_api_key_with_org_access(self):
key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
assert self.has_object_perm("GET", self.project, auth=key)
def test_api_key_without_org_access(self):
key = ApiKey.objects.create(
organization=self.create_organization(), scope_list=["project:read"]
)
assert not self.has_object_perm("GET", self.project, auth=key)
def test_api_key_without_access(self):
key = ApiKey.objects.create(organization=self.org)
assert not self.has_object_perm("GET", self.project, auth=key)
def test_api_key_with_wrong_access(self):
key = ApiKey.objects.create(organization=self.org, scope_list=["team:read"])
assert not self.has_object_perm("GET", self.project, auth=key)
def test_api_key_with_wrong_access_for_method(self):
key = ApiKey.objects.create(organization=self.org, scope_list=["project:read"])
assert not self.has_object_perm("PUT", self.project, auth=key)
def test_admin_without_team_access(self):
team = self.create_team(organization=self.org)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="admin", teams=[team])
# if `allow_joinleave` is False, admins can't act on teams
# they don't have access to
assert not self.has_object_perm("POST", self.project, user=user)
def test_admin_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="admin", teams=[self.team])
assert self.has_object_perm("POST", self.project, user=user)
def test_manager_without_team_access(self):
team = self.create_team(organization=self.org)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="manager", teams=[team])
# managers should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("POST", self.project, user=user)
def test_manager_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="manager", teams=[self.team])
assert self.has_object_perm("POST", self.project, user=user)
def test_owner_without_team_access(self):
team = self.create_team(organization=self.org)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="owner", teams=[team])
# owners should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("POST", self.project, user=user)
def test_owner_with_team_access(self):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="owner", teams=[self.team])
assert self.has_object_perm("POST", self.project, user=user)
def test_manager_when_project_has_no_teams(self):
project = self.create_project(organization=self.org, teams=[])
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="manager")
# managers should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("POST", project, user=user)
def test_owner_when_project_has_no_teams(self):
project = self.create_project(organization=self.org, teams=[])
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.org, role="owner")
# owners should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("POST", project, user=user)
|
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Traverses the source tree, parses all found DEPS files, and constructs
a dependency rule table to be used by subclasses.
The format of the deps file:
First you have the normal module-level deps. These are the ones used by
gclient. An example would be:
deps = {
"base":"http://foo.bar/trunk/base"
}
DEPS files not in the top-level of a module won't need this. Then you
have any additional include rules. You can add (using "+") or subtract
(using "-") from the previously specified rules (including
module-level deps). You can also specify a path that is allowed for
now but that we intend to remove, using "!"; this is treated the same
as "+" when check_deps is run by our bots, but a presubmit step will
show a warning if you add a new include of a file that is only allowed
by "!".
Note that for .java files, there is currently no difference between
"+" and "!", even in the presubmit step.
include_rules = [
# Code should be able to use base (it's specified in the module-level
# deps above), but nothing in "base/evil" because it's evil.
"-base/evil",
# But this one subdirectory of evil is OK.
"+base/evil/not",
# And it can include files from this other directory even though there is
# no deps rule for it.
"+tools/crime_fighter",
# This dependency is allowed for now but work is ongoing to remove it,
# so you shouldn't add further dependencies on it.
"!base/evil/ok_for_now.h",
]
If you have certain include rules that should only be applied for some
files within this directory and subdirectories, you can write a
section named specific_include_rules that is a hash map of regular
expressions to the list of rules that should apply to files matching
them. Note that such rules will always be applied before the rules
from 'include_rules' have been applied, but the order in which rules
associated with different regular expressions is applied is arbitrary.
specific_include_rules = {
".*_(unit|browser|api)test\.cc": [
"+libraries/testsupport",
],
}
DEPS files may be placed anywhere in the tree. Each one applies to all
subdirectories, where there may be more DEPS files that provide additions or
subtractions for their own sub-trees.
There is an implicit rule for the current directory (where the DEPS file lives)
and all of its subdirectories. This prevents you from having to explicitly
allow the current directory everywhere. This implicit rule is applied first,
so you can modify or remove it using the normal include rules.
The rules are processed in order. This means you can explicitly allow a higher
directory and then take away permissions from sub-parts, or the reverse.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and use
only lowercase.
"""
import copy
import os.path
import posixpath
import subprocess
from rules import Rule, Rules
# Variable name used in the DEPS file to add or subtract include files from
# the module-level deps.
INCLUDE_RULES_VAR_NAME = 'include_rules'
# Variable name used in the DEPS file to add or subtract include files
# from module-level deps specific to files whose basename (last
# component of path) matches a given regular expression.
SPECIFIC_INCLUDE_RULES_VAR_NAME = 'specific_include_rules'
# Optionally present in the DEPS file to list subdirectories which should not
# be checked. This allows us to skip third party code, for example.
SKIP_SUBDIRS_VAR_NAME = 'skip_child_includes'
def NormalizePath(path):
"""Returns a path normalized to how we write DEPS rules and compare paths."""
return os.path.normcase(path).replace(os.path.sep, posixpath.sep)
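# For example, on Windows NormalizePath(r'C:\chr\src\Foo') yields
# 'c:/chr/src/foo' (normcase lowercases and flips the separators there);
# on POSIX systems the path comes back unchanged.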
def _GitSourceDirectories(base_directory):
"""Returns set of normalized paths to subdirectories containing sources
managed by git."""
if not os.path.exists(os.path.join(base_directory, '.git')):
return set()
base_dir_norm = NormalizePath(base_directory)
git_source_directories = set([base_dir_norm])
git_cmd = 'git.bat' if os.name == 'nt' else 'git'
git_ls_files_cmd = [git_cmd, 'ls-files']
# FIXME: Use a context manager in Python 3.2+
popen = subprocess.Popen(git_ls_files_cmd,
stdout=subprocess.PIPE,
bufsize=1, # line buffering, since read by line
cwd=base_directory)
try:
try:
for line in popen.stdout:
dir_path = os.path.join(base_directory, os.path.dirname(line))
dir_path_norm = NormalizePath(dir_path)
# Add the directory as well as all the parent directories,
# stopping once we reach an already-listed directory.
while dir_path_norm not in git_source_directories:
git_source_directories.add(dir_path_norm)
dir_path_norm = posixpath.dirname(dir_path_norm)
finally:
popen.stdout.close()
finally:
popen.wait()
return git_source_directories
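# A sketch of the context-manager form referenced by the FIXME above, for
# Python 3.2+ where Popen supports the "with" statement (behavior unchanged):
#
#   with subprocess.Popen(git_ls_files_cmd, stdout=subprocess.PIPE,
#                         bufsize=1, cwd=base_directory) as popen:
#       for line in popen.stdout:
#           ...  # same per-line handling as above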
class DepsBuilder(object):
"""Parses include_rules from DEPS files."""
def __init__(self,
base_directory=None,
verbose=False,
being_tested=False,
ignore_temp_rules=False,
ignore_specific_rules=False):
"""Creates a new DepsBuilder.
Args:
base_directory: local path to root of checkout, e.g. C:\chr\src.
verbose: Set to True for debug output.
being_tested: Set to True to ignore the DEPS file at tools/checkdeps/DEPS.
ignore_temp_rules: Ignore rules that start with Rule.TEMP_ALLOW ("!").
ignore_specific_rules: Ignore rules from 'specific_include_rules' sections.
"""
base_directory = (base_directory or
os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir))
self.base_directory = os.path.abspath(base_directory) # Local absolute path
self.verbose = verbose
self._under_test = being_tested
self._ignore_temp_rules = ignore_temp_rules
self._ignore_specific_rules = ignore_specific_rules
# Set of normalized paths
self.git_source_directories = _GitSourceDirectories(self.base_directory)
# Map of normalized directory paths to rules to use for those
# directories, or None for directories that should be skipped.
# Normalized is: absolute, lowercase, / for separator.
self.directory_rules = {}
self._ApplyDirectoryRulesAndSkipSubdirs(Rules(), self.base_directory)
def _ApplyRules(self, existing_rules, includes, specific_includes,
cur_dir_norm):
"""Applies the given include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
includes: The list of rules from the "include_rules" section of DEPS.
specific_includes: E.g. {'.*_unittest\.cc': ['+foo', '-blat']} rules
from the "specific_include_rules" section of DEPS.
cur_dir_norm: The current directory, normalized path. We will create an
implicit rule that allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments.
"""
rules = copy.deepcopy(existing_rules)
# First apply the implicit "allow" rule for the current directory.
base_dir_norm = NormalizePath(self.base_directory)
if not cur_dir_norm.startswith(base_dir_norm):
raise Exception(
'Internal error: base directory is not at the beginning for\n'
' %s and base dir\n'
' %s' % (cur_dir_norm, base_dir_norm))
relative_dir = posixpath.relpath(cur_dir_norm, base_dir_norm)
# Make the help string a little more meaningful.
source = relative_dir or 'top level'
rules.AddRule('+' + relative_dir,
relative_dir,
'Default rule for ' + source)
def ApplyOneRule(rule_str, dependee_regexp=None):
"""Deduces a sensible description for the rule being added, and
adds the rule with its description to |rules|.
If we are ignoring temporary rules, this function does nothing
for rules beginning with the Rule.TEMP_ALLOW character.
"""
if self._ignore_temp_rules and rule_str.startswith(Rule.TEMP_ALLOW):
return
rule_block_name = 'include_rules'
if dependee_regexp:
rule_block_name = 'specific_include_rules'
if relative_dir:
rule_description = relative_dir + "'s %s" % rule_block_name
else:
rule_description = 'the top level %s' % rule_block_name
rules.AddRule(rule_str, relative_dir, rule_description, dependee_regexp)
# Apply the additional explicit rules.
for rule_str in includes:
ApplyOneRule(rule_str)
# Finally, apply the specific rules.
if self._ignore_specific_rules:
return rules
for regexp, specific_rules in specific_includes.iteritems():
for rule_str in specific_rules:
ApplyOneRule(rule_str, regexp)
return rules
def _ApplyDirectoryRules(self, existing_rules, dir_path_local_abs):
"""Combines rules from the existing rules and the new directory.
Any directory can contain a DEPS file. Top-level DEPS files can contain
module dependencies which are used by gclient. We use these, along with
additional include rules and implicit rules for the given directory, to
come up with a combined set of rules to apply for the directory.
Args:
existing_rules: The rules for the parent directory. We'll add-on to these.
dir_path_local_abs: The directory path that the DEPS file may live in (if
it exists). This will also be used to generate the
implicit rules. This is a local path.
Returns: A 2-tuple of:
(1) the combined set of rules to apply to the sub-tree,
(2) a list of all subdirectories that should NOT be checked, as specified
in the DEPS file (if any).
Subdirectories are single words, hence no OS dependence.
"""
dir_path_norm = NormalizePath(dir_path_local_abs)
# Check for a .svn directory in this directory or that this directory is
# contained in git source directories. This will tell us if it's a source
# directory and should be checked.
if not (os.path.exists(os.path.join(dir_path_local_abs, '.svn')) or
dir_path_norm in self.git_source_directories):
return None, []
# Check the DEPS file in this directory.
if self.verbose:
print 'Applying rules from', dir_path_local_abs
def FromImpl(*_):
pass # NOP function so "From" doesn't fail.
def FileImpl(_):
pass # NOP function so "File" doesn't fail.
class _VarImpl:
def __init__(self, local_scope):
self._local_scope = local_scope
def Lookup(self, var_name):
"""Implements the Var syntax."""
try:
return self._local_scope['vars'][var_name]
except KeyError:
raise Exception('Var is not defined: %s' % var_name)
local_scope = {}
global_scope = {
'File': FileImpl,
'From': FromImpl,
'Var': _VarImpl(local_scope).Lookup,
}
deps_file_path = os.path.join(dir_path_local_abs, 'DEPS')
# The second conditional here is to disregard the
# tools/checkdeps/DEPS file while running tests. This DEPS file
# has a skip_child_includes for 'testdata' which is necessary for
# running production tests, since there are intentional DEPS
# violations under the testdata directory. On the other hand when
# running tests, we absolutely need to verify the contents of that
# directory to trigger those intended violations and see that they
# are handled correctly.
if os.path.isfile(deps_file_path) and not (
self._under_test and
os.path.basename(dir_path_local_abs) == 'checkdeps'):
execfile(deps_file_path, global_scope, local_scope)
elif self.verbose:
print ' No deps file found in', dir_path_local_abs
# Even if a DEPS file does not exist we still invoke ApplyRules
# to apply the implicit "allow" rule for the current directory
include_rules = local_scope.get(INCLUDE_RULES_VAR_NAME, [])
specific_include_rules = local_scope.get(SPECIFIC_INCLUDE_RULES_VAR_NAME,
{})
skip_subdirs = local_scope.get(SKIP_SUBDIRS_VAR_NAME, [])
return (self._ApplyRules(existing_rules, include_rules,
specific_include_rules, dir_path_norm),
skip_subdirs)
def _ApplyDirectoryRulesAndSkipSubdirs(self, parent_rules,
dir_path_local_abs):
"""Given |parent_rules| and a subdirectory |dir_path_local_abs| of the
directory that owns the |parent_rules|, add |dir_path_local_abs|'s rules to
|self.directory_rules|, and add None entries for any of its
subdirectories that should be skipped.
"""
directory_rules, excluded_subdirs = self._ApplyDirectoryRules(
parent_rules, dir_path_local_abs)
dir_path_norm = NormalizePath(dir_path_local_abs)
self.directory_rules[dir_path_norm] = directory_rules
for subdir in excluded_subdirs:
subdir_path_norm = posixpath.join(dir_path_norm, subdir)
self.directory_rules[subdir_path_norm] = None
def GetDirectoryRules(self, dir_path_local):
"""Returns a Rules object to use for the given directory, or None
if the given directory should be skipped.
Also modifies |self.directory_rules| to store the Rules.
This takes care of first building rules for parent directories (up to
|self.base_directory|) if needed, which may add rules for skipped
subdirectories.
Args:
dir_path_local: A local path to the directory you want rules for.
Can be relative and unnormalized.
"""
if os.path.isabs(dir_path_local):
dir_path_local_abs = dir_path_local
else:
dir_path_local_abs = os.path.join(self.base_directory, dir_path_local)
dir_path_norm = NormalizePath(dir_path_local_abs)
if dir_path_norm in self.directory_rules:
return self.directory_rules[dir_path_norm]
parent_dir_local_abs = os.path.dirname(dir_path_local_abs)
parent_rules = self.GetDirectoryRules(parent_dir_local_abs)
# We need to check for an entry for our dir_path again, since
# GetDirectoryRules can modify entries for subdirectories, namely setting
# to None if they should be skipped, via _ApplyDirectoryRulesAndSkipSubdirs.
# For example, if dir_path == 'A/B/C' and A/B/DEPS specifies that the C
# subdirectory be skipped, GetDirectoryRules('A/B') will fill in the entry
# for 'A/B/C' as None.
if dir_path_norm in self.directory_rules:
return self.directory_rules[dir_path_norm]
if parent_rules:
self._ApplyDirectoryRulesAndSkipSubdirs(parent_rules, dir_path_local_abs)
else:
# If the parent directory should be skipped, then the current
# directory should also be skipped.
self.directory_rules[dir_path_norm] = None
return self.directory_rules[dir_path_norm]
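def _example_directory_rules(checkout_root, directory):
  """A minimal usage sketch, not part of checkdeps itself: builds the
  combined include rules for one directory of a checkout. Assumes
  checkout_root points at a real git checkout and that the Rules/Rule
  imports at the top of the full module are available.
  """
  builder = DepsBuilder(base_directory=checkout_root)
  rules = builder.GetDirectoryRules(directory)
  # None means some parent DEPS file listed this directory in
  # skip_child_includes, so it should not be checked at all.
  if rules is None:
    return None
  return rules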
|
|
import os
import pytest
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from imagesoup import ImageSoup, ImageResult
from imagesoup.utils import Blacklist
from imagesoup.reverse_search import *
def test_creating_soup():
soup = ImageSoup()
assert isinstance(soup, ImageSoup)
def test_search_query_only_returns_100_images():
soup = ImageSoup()
images = soup.search('python')
assert len(images) == 100
def test_search_n_images_set_by_user():
N_IMAGES = 20
soup = ImageSoup()
images = soup.search('python', n_images=N_IMAGES)
assert len(images) == N_IMAGES
def test_search_with_image_size_parameter():
soup = ImageSoup()
images = soup.search('python site:python.org', image_size='large')
im = images[0]
width, height = im.size
assert width > 500 and height > 500
def test_search_image_exact_size():
soup = ImageSoup()
size = (400, 400)
images = soup.search('python', image_size=size)
im = images[0]
assert im.size == size
def test_search_image_aspect_ratio():
soup = ImageSoup()
images = soup.search('python site:python.org', aspect_ratio='tall')
im = images[0]
assert im.height > im.width
def test_search_image_returns_fewer_results_than_n_images(capsys):
soup = ImageSoup()
images = soup.search('python', n_images=2000)
out, err = capsys.readouterr()
assert out.startswith('Search query "python" returned only')
def test_imageresult_url():
soup = ImageSoup()
images = soup.search('python site:python.org')
im = images[0]
assert im.URL.startswith('http')
def test_imageresult_show_image():
soup = ImageSoup()
images = soup.search('python logo png')
try:
images[0].show()
except Exception:
pytest.fail("Can't show image")
def test_imageresult_resize():
soup = ImageSoup()
images = soup.search('python logo PNG')
im = images[0]
new_size = (400, 400)
new_image = im.resize(new_size)
assert new_image.size == new_size
def test_get_image_main_color():
soup = ImageSoup()
images = soup.search('python logo PNG')
im = images[0]
main_color = im.main_color(reduce_size=True)
assert len(main_color) == 1
assert main_color[0][0] == 'white'
def test_imageresult_tofile():
soup = ImageSoup()
images = soup.search('python site:python.org')
im = images[0]
im.to_file()
STANDARD_NAME = 'image'
assert os.path.isfile(STANDARD_NAME + '.png') is True
os.remove(STANDARD_NAME + '.png')
USER_INPUT_NAME = 'pythonlogo.png'
im.to_file(USER_INPUT_NAME)
assert os.path.isfile(USER_INPUT_NAME) is True
os.remove(USER_INPUT_NAME)
def test_imageresult_verify_valid_file():
soup = ImageSoup()
images = soup.search('python site:python.org')
im = images[0]
assert im.verify() is True
def test_imageresult_verify_invalid_file():
URL = 'https://httpstat.us/200'
im = ImageResult(URL)
assert im.verify() is False
def test_blacklist():
bl = Blacklist()
assert isinstance(bl, Blacklist)
assert os.path.isfile(bl.filename) is True
os.remove(bl.filename)
def test_blacklist_add():
bl = Blacklist()
bl.add('http://www.python.org')
assert len(bl.domains) == 1
assert bl.domains == ['python.org']
def test_blacklist_delete():
bl = Blacklist()
bl.delete('python.org')
assert bl.domains == []
def test_blacklist_reset():
bl = Blacklist()
bl.add('http://www.python.org')
bl.reset()
assert bl.domains == []
def test_blacklist_query_string():
bl = Blacklist()
bl.add('http://www.python.org')
bl.add('https://github.com/')
query = '-site:python.org -site:github.com'
assert bl.query_string() == query
os.remove(bl.filename)
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||#
@pytest.fixture
def image_filepath():
TEST_IMAGE_FILE = 'test_image1.png'
here = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(here, TEST_IMAGE_FILE)
assert os.path.isfile(filepath) is True
return filepath
@pytest.mark.reverse_search
def test_reverse_search_init():
revsoup = ReverseSearch()
assert isinstance(revsoup, ReverseSearch)
@pytest.mark.reverse_search
def test_reverse_search_post_search(image_filepath):
revsoup = ReverseSearch()
HTML = revsoup.post_image_search_on_google(image_filepath)
assert isinstance(HTML, str) is True
assert 'python' in HTML
@pytest.mark.reverse_search
def test_reverse_search_search(image_filepath):
revsoup = ReverseSearch()
search_result = revsoup.search(image_filepath)
assert isinstance(search_result, ReverseSearchResult) is True
@pytest.mark.reverse_search
def test_reverse_search_result_label(image_filepath):
revsoup = ReverseSearch()
search_result = revsoup.search(image_filepath)
expected_label = 'python 3 logo'
assert search_result.label == expected_label
@pytest.mark.reverse_search
def test_rev_search_result_similar_images(image_filepath):
revsoup = ReverseSearch()
search_result = revsoup.search(image_filepath)
assert isinstance(search_result.similar_images, list)
assert len(search_result.similar_images) == 100
assert all(isinstance(i, ImageResult) for i in search_result.similar_images)
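# The tests marked reverse_search above go out to Google's reverse image
# search, so they are slow and need network access. A minimal sketch of how
# the custom mark could be registered and skipped locally (hypothetical
# pytest.ini, not shipped with this test suite):
#
#   [pytest]
#   markers =
#       reverse_search: tests that perform an online reverse image search
#
# and then run only the offline tests with:
#
#   pytest -m "not reverse_search"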
|
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, Optional
from appium.protocols.webdriver.can_execute_commands import CanExecuteCommands
from ..mobilecommand import MobileCommand as Command
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
from appium.webdriver.webdriver import WebDriver
class Applications(CanExecuteCommands):
def background_app(self, seconds: int) -> 'WebDriver':
"""Puts the application in the background on the device for a certain duration.
Args:
seconds: the duration for the application to remain in the background
Returns:
Union['WebDriver', 'Applications']: Self instance
"""
data = {
'seconds': seconds,
}
self.execute(Command.BACKGROUND, data)
return self # type: ignore
def is_app_installed(self, bundle_id: str) -> bool:
"""Checks whether the application specified by `bundle_id` is installed on the device.
Args:
bundle_id: the id of the application to query
Returns:
`True` if app is installed
"""
data = {
'bundleId': bundle_id,
}
return self.execute(Command.IS_APP_INSTALLED, data)['value']
def install_app(self, app_path: str, **options: Any) -> 'WebDriver':
"""Install the application found at `app_path` on the device.
Args:
app_path: the local or remote path to the application to install
Keyword Args:
replace (bool): [Android only] whether to reinstall/upgrade the package if it is
already present on the device under test. True by default
timeout (int): [Android only] how much time to wait for the installation to complete.
60000ms by default.
allowTestPackages (bool): [Android only] whether to allow installation of packages marked
as test in the manifest. False by default
useSdcard (bool): [Android only] whether to use the SD card to install the app. False by default
grantPermissions (bool): [Android only] whether to automatically grant application permissions
on Android 6+ after the installation completes. False by default
Returns:
Union['WebDriver', 'Applications']: Self instance
"""
data: Dict[str, Any] = {
'appPath': app_path,
}
if options:
data.update({'options': options})
self.execute(Command.INSTALL_APP, data)
return self # type: ignore
def remove_app(self, app_id: str, **options: Any) -> 'WebDriver':
"""Remove the specified application from the device.
Args:
app_id: the application id to be removed
Keyword Args:
keepData (bool): [Android only] whether to keep application data and caches after it is uninstalled.
False by default
timeout (int): [Android only] how much time to wait for the uninstall to complete.
20000ms by default.
Returns:
Union['WebDriver', 'Applications']: Self instance
"""
data: Dict[str, Any] = {
'appId': app_id,
}
if options:
data.update({'options': options})
self.execute(Command.REMOVE_APP, data)
return self # type: ignore
def launch_app(self) -> 'WebDriver':
"""Start on the device the application specified in the desired capabilities.
Returns:
Union['WebDriver', 'Applications']: Self instance
"""
self.execute(Command.LAUNCH_APP)
return self # type: ignore
def close_app(self) -> 'WebDriver':
"""Stop the running application, specified in the desired capabilities, on
the device.
Returns:
Union['WebDriver', 'Applications']: Self instance
"""
self.execute(Command.CLOSE_APP)
return self # type: ignore
def terminate_app(self, app_id: str, **options: Any) -> bool:
"""Terminates the application if it is running.
Args:
app_id: the application id to be terminated
Keyword Args:
`timeout` (int): [Android only] how much time to wait for the termination to complete.
500ms by default.
Returns:
True if the app has been successfully terminated
"""
data: Dict[str, Any] = {
'appId': app_id,
}
if options:
data.update({'options': options})
return self.execute(Command.TERMINATE_APP, data)['value']
def activate_app(self, app_id: str) -> 'WebDriver':
"""Activates the application if it is not running
or is running in the background.
Args:
app_id: the application id to be activated
Returns:
Union['WebDriver', 'Applications']: Self instance
"""
data = {
'appId': app_id,
}
self.execute(Command.ACTIVATE_APP, data)
return self # type: ignore
def query_app_state(self, app_id: str) -> int:
"""Queries the state of the application.
Args:
app_id: the application id to be queried
Returns:
One of possible application state constants. See ApplicationState
class for more details.
"""
data = {
'appId': app_id,
}
return self.execute(Command.QUERY_APP_STATE, data)['value']
def app_strings(self, language: Optional[str] = None, string_file: Optional[str] = None) -> Dict[str, str]:
"""Returns the application strings from the device for the specified
language.
Args:
language: strings language code
string_file: the name of the string file to query
Returns:
The key is string id and the value is the content.
"""
data = {}
if language is not None:
data['language'] = language
if string_file is not None:
data['stringFile'] = string_file
return self.execute(Command.GET_APP_STRINGS, data)['value']
def reset(self) -> 'WebDriver':
"""Resets the current application on the device.
Returns:
Union['WebDriver', 'Applications']: Self instance
"""
self.execute(Command.RESET)
return self # type: ignore
def _add_commands(self) -> None:
# noinspection PyProtectedMember,PyUnresolvedReferences
commands = self.command_executor._commands
commands[Command.BACKGROUND] = ('POST', '/session/$sessionId/appium/app/background')
commands[Command.IS_APP_INSTALLED] = (
'POST',
'/session/$sessionId/appium/device/app_installed',
)
commands[Command.INSTALL_APP] = ('POST', '/session/$sessionId/appium/device/install_app')
commands[Command.REMOVE_APP] = ('POST', '/session/$sessionId/appium/device/remove_app')
commands[Command.TERMINATE_APP] = (
'POST',
'/session/$sessionId/appium/device/terminate_app',
)
commands[Command.ACTIVATE_APP] = (
'POST',
'/session/$sessionId/appium/device/activate_app',
)
commands[Command.QUERY_APP_STATE] = (
'POST',
'/session/$sessionId/appium/device/app_state',
)
commands[Command.GET_APP_STRINGS] = ('POST', '/session/$sessionId/appium/app/strings')
commands[Command.RESET] = ('POST', '/session/$sessionId/appium/app/reset')
commands[Command.LAUNCH_APP] = ('POST', '/session/$sessionId/appium/app/launch')
commands[Command.CLOSE_APP] = ('POST', '/session/$sessionId/appium/app/close')
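# A minimal usage sketch, assuming an Appium server on the default local
# endpoint and an existing driver session; the capabilities, app path and
# bundle/package id below are placeholders, not values required by this
# mixin:
#
#   from appium import webdriver
#
#   driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
#   if not driver.is_app_installed('com.example.myapp'):
#       driver.install_app('/path/to/myapp.apk', replace=True)
#   driver.background_app(5)                        # background for 5 seconds
#   state = driver.query_app_state('com.example.myapp')
#   driver.terminate_app('com.example.myapp')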
|
|
"""
Read graphs in GML format.
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
for format specification.
Example graphs in GML format:
http://www-personal.umich.edu/~mejn/netdata/
Requires pyparsing: http://pyparsing.wikispaces.com/
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2008-2009 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['read_gml', 'parse_gml', 'write_gml']
import sys
import time
import networkx
from networkx.exception import NetworkXException, NetworkXError
from networkx.utils import _get_fh, is_string_like
def read_gml(path):
"""Read graph in GML format from path.
Parameters
----------
path : filename or filehandle
The filename or filehandle to read from.
Returns
-------
G : Graph or DiGraph
Raises
------
ImportError
If the pyparsing module is not available.
See Also
--------
write_gml, parse_gml
Notes
-----
This doesn't implement the complete GML specification for
nested attributes for graphs, edges, and nodes.
Requires pyparsing: http://pyparsing.wikispaces.com/
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_gml(G,'test.gml')
>>> H=nx.read_gml('test.gml')
"""
fh=_get_fh(path,mode='r')
G=parse_gml(fh)
return G
def parse_gml(lines):
"""Parse GML graph from a string or iterable.
Parameters
----------
lines : string or iterable
Data in GML format.
Returns
-------
G : Graph or DiGraph
Raises
------
ImportError
If the pyparsing module is not available.
See Also
--------
write_gml, read_gml
Notes
-----
This doesn't implement the complete GML specification for
nested attributes for graphs, edges, and nodes.
Requires pyparsing: http://pyparsing.wikispaces.com/
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_gml(G,'test.gml')
>>> fh=open('test.gml')
>>> H=nx.read_gml(fh)
"""
try:
from pyparsing import ParseException
except ImportError:
raise ImportError, \
"Import Error: not able to import pyparsing: http://pyparsing.wikispaces.com/"
try:
data = "".join(lines)
gml = pyparse_gml()
tokens =gml.parseString(data)
except ParseException, err:
print err.line
print " "*(err.column-1) + "^"
print err
graph_attr=tokens.asDict()
# determine directed or undirected and init corresponding NX class
directed=graph_attr.get('directed',0)
if directed==1:
G=networkx.DiGraph()
else:
G=networkx.Graph()
G.node_attr={} # store node attributes here
G.graph_attr=graph_attr
# first pass, nodes and labels
label={}
for item in tokens:
if item[0]=='node':
d=item.asDict()
id=d['id']
if 'label'in d:
label[id]=d['label']
del d['label']
else:
label[id]=id
del d['id']
G.add_node(label[id],**d)
# second pass, edges
for item in tokens:
if item[0]=='edge':
d=item.asDict()
source=d.pop('source')
target=d.pop('target')
G.add_edge(label[source],label[target],**d)
return G
graph = None
def pyparse_gml():
"""A pyparsing tokenizer for GML graph format.
This is not intended to be called directly.
See Also
--------
write_gml, read_gml, parse_gml
Notes
-----
This doesn't implement the complete GML specification for
nested attributes for graphs, edges, and nodes.
"""
global graph
try:
from pyparsing import \
Literal, CaselessLiteral,Word,\
ZeroOrMore, Group, Dict, Optional, Combine,\
ParseException, restOfLine, White, alphanums, nums,\
OneOrMore,quotedString,removeQuotes,dblQuotedString
except ImportError:
raise ImportError, \
"Import Error: not able to import pyparsing: http://pyparsing.wikispaces.com/"
if not graph:
creator = Literal("Creator")+ Optional( restOfLine )
graphkey = Literal("graph").suppress()
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
pound = ("#")
comment = pound + Optional( restOfLine )
white = White(" \t\n")
point = Literal(".")
e = CaselessLiteral("E")
integer = Word(nums).setParseAction(lambda s,l,t:[ int(t[0])])
real = Combine( Word("+-"+nums, nums )+
Optional(point+Optional(Word(nums)))+
Optional(e+Word("+-"+nums, nums))).setParseAction(
lambda s,l,t:[ float(t[0]) ])
key=Word(alphanums)
value=integer^real^Word(alphanums)^quotedString.setParseAction(removeQuotes)
keyvalue = Dict(Group(key+OneOrMore(white).suppress()\
+value+OneOrMore(white).suppress()))
node = Group(Literal("node") + lbrack + OneOrMore(keyvalue) + rbrack)
edge = Group(Literal("edge") + lbrack + OneOrMore(keyvalue) + rbrack)
graph = Optional(creator)+\
graphkey + lbrack + ZeroOrMore(edge|node|keyvalue) + rbrack
graph.ignore(comment)
return graph
def write_gml(G, path):
"""
Write the graph G in GML format to the file or file handle path.
Parameters
----------
path : filename or filehandle
The filename or filehandle to write. Filenames ending in
.gz or .bz2 will be compressed.
See Also
--------
read_gml, parse_gml
Notes
-----
The output file will use the default text encoding on your system.
It is possible to write files in other encodings by opening
the file with the codecs module. See doc/examples/unicode.py
for hints.
>>> G=nx.path_graph(4)
>>> import codecs
>>> fh=codecs.open('test.gml','w',encoding='iso8859-1')# use iso8859-1
>>> nx.write_gml(G,fh)
GML specifications indicate that the file should only use
7-bit ASCII text encoding (iso8859-1/latin-1 is commonly used in practice).
Only a single level of attributes for graphs, nodes, and edges
is supported.
Examples
---------
>>> G=nx.path_graph(4)
>>> nx.write_gml(G,"test.gml")
path can be a filehandle or a string with the name of the file.
>>> fh=open("test.gml",'w')
>>> nx.write_gml(G,fh)
Filenames ending in .gz or .bz2 will be compressed.
>>> nx.write_gml(G,"test.gml.gz")
"""
fh=_get_fh(path,mode='w')
# comments="#"
# pargs=comments+" "+' '.join(sys.argv)
# fh.write("%s\n" % (pargs))
# fh.write(comments+" GMT %s\n" % (time.asctime(time.gmtime())))
# fh.write(comments+" %s\n" % (G.name))
# check for attributes or assign empty dict
if hasattr(G,'graph_attr'):
graph_attr=G.graph_attr
else:
graph_attr={}
if hasattr(G,'node_attr'):
node_attr=G.node_attr
else:
node_attr={}
indent=2*' '
count=iter(range(G.number_of_nodes()))
node_id={}
fh.write("graph [\n")
if G.is_directed():
fh.write(indent+"directed 1\n")
# write graph attributes
for k,v in G.graph.items():
if is_string_like(v):
v='"'+v+'"'
fh.write(indent+"%s %s\n"%(k,v))
# write nodes
for n in G:
fh.write(indent+"node [\n")
# get id or assign number
nid=G.node[n].get('id',count.next())
node_id[n]=nid
fh.write(2*indent+"id %s\n"%nid)
fh.write(2*indent+"label \"%s\"\n"%n)
if n in G:
for k,v in G.node[n].items():
if is_string_like(v): v='"'+v+'"'
if k=='id': continue
fh.write(2*indent+"%s %s\n"%(k,v))
fh.write(indent+"]\n")
# write edges
for u,v,edgedata in G.edges_iter(data=True):
# try to guess what is on the edge and do something reasonable
fh.write(indent+"edge [\n")
fh.write(2*indent+"source %s\n"%node_id[u])
fh.write(2*indent+"target %s\n"%node_id[v])
for k,v in edgedata.items():
if k=='source': continue
if k=='target': continue
if is_string_like(v): v='"'+v+'"'
fh.write(2*indent+"%s %s\n"%(k,v))
fh.write(indent+"]\n")
fh.write("]\n")
|
|
__author__ = 'olga'
rbps = set(['ENSG00000003756',
'ENSG00000005075',
'ENSG00000011304',
'ENSG00000023734',
'ENSG00000047315',
'ENSG00000048740',
'ENSG00000049883',
'ENSG00000051596',
'ENSG00000054118',
'ENSG00000060339',
'ENSG00000060688',
'ENSG00000061936',
'ENSG00000063244',
'ENSG00000064607',
'ENSG00000065978',
'ENSG00000067596',
'ENSG00000070047',
'ENSG00000070495',
'ENSG00000071894',
'ENSG00000072501',
'ENSG00000075292',
'ENSG00000076770',
'ENSG00000077312',
'ENSG00000078328',
'ENSG00000079134',
'ENSG00000083544',
'ENSG00000083896',
'ENSG00000084463',
'ENSG00000087365',
'ENSG00000088247',
'ENSG00000088930',
'ENSG00000089280',
'ENSG00000090060',
'ENSG00000090470',
'ENSG00000091009',
'ENSG00000091542',
'ENSG00000092199',
'ENSG00000092208',
'ENSG00000096063',
'ENSG00000096401',
'ENSG00000096746',
'ENSG00000099783',
'ENSG00000099817',
'ENSG00000099995',
'ENSG00000100028',
'ENSG00000100138',
'ENSG00000100142',
'ENSG00000100296',
'ENSG00000100319',
'ENSG00000100320',
'ENSG00000100410',
'ENSG00000100461',
'ENSG00000100650',
'ENSG00000100836',
'ENSG00000101138',
'ENSG00000101161',
'ENSG00000101343',
'ENSG00000101489',
'ENSG00000101811',
'ENSG00000102978',
'ENSG00000103005',
'ENSG00000103067',
'ENSG00000104413',
'ENSG00000104824',
'ENSG00000104852',
'ENSG00000104859',
'ENSG00000104897',
'ENSG00000105258',
'ENSG00000105323',
'ENSG00000105568',
'ENSG00000105705',
'ENSG00000106344',
'ENSG00000106355',
'ENSG00000108349',
'ENSG00000108561',
'ENSG00000108654',
'ENSG00000108848',
'ENSG00000108883',
'ENSG00000109606',
'ENSG00000109819',
'ENSG00000109971',
'ENSG00000110844',
'ENSG00000111196',
'ENSG00000111605',
'ENSG00000111701',
'ENSG00000111732',
'ENSG00000111786',
'ENSG00000111880',
'ENSG00000112081',
'ENSG00000112531',
'ENSG00000112739',
'ENSG00000113575',
'ENSG00000114503',
'ENSG00000115128',
'ENSG00000115524',
'ENSG00000115875',
'ENSG00000116350',
'ENSG00000116560',
'ENSG00000116679',
'ENSG00000116752',
'ENSG00000116754',
'ENSG00000116830',
'ENSG00000116954',
'ENSG00000117360',
'ENSG00000117569',
'ENSG00000117751',
'ENSG00000119203',
'ENSG00000119314',
'ENSG00000119707',
'ENSG00000119953',
'ENSG00000120948',
'ENSG00000121067',
'ENSG00000121774',
'ENSG00000122566',
'ENSG00000124193',
'ENSG00000124380',
'ENSG00000124383',
'ENSG00000124701',
'ENSG00000125351',
'ENSG00000125651',
'ENSG00000125676',
'ENSG00000125743',
'ENSG00000125835',
'ENSG00000125870',
'ENSG00000125944',
'ENSG00000126461',
'ENSG00000126653',
'ENSG00000126698',
'ENSG00000126945',
'ENSG00000128739',
'ENSG00000129152',
'ENSG00000130520',
'ENSG00000130810',
'ENSG00000131051',
'ENSG00000131652',
'ENSG00000131795',
'ENSG00000131876',
'ENSG00000131981',
'ENSG00000132485',
'ENSG00000132792',
'ENSG00000132819',
'ENSG00000133226',
'ENSG00000134186',
'ENSG00000134398',
'ENSG00000134453',
'ENSG00000134748',
'ENSG00000135250',
'ENSG00000135316',
'ENSG00000135486',
'ENSG00000135828',
'ENSG00000135829',
'ENSG00000136450',
'ENSG00000136527',
'ENSG00000136709',
'ENSG00000136875',
'ENSG00000136937',
'ENSG00000137948',
'ENSG00000138231',
'ENSG00000138398',
'ENSG00000138433',
'ENSG00000138668',
'ENSG00000139168',
'ENSG00000139218',
'ENSG00000139343',
'ENSG00000139746',
'ENSG00000139767',
'ENSG00000139793',
'ENSG00000139910',
'ENSG00000140488',
'ENSG00000140829',
'ENSG00000140830',
'ENSG00000140939',
'ENSG00000141759',
'ENSG00000142528',
'ENSG00000143368',
'ENSG00000143889',
'ENSG00000143977',
'ENSG00000144028',
'ENSG00000144231',
'ENSG00000145216',
'ENSG00000145833',
'ENSG00000146457',
'ENSG00000147140',
'ENSG00000147274',
'ENSG00000147669',
'ENSG00000148584',
'ENSG00000148843',
'ENSG00000149187',
'ENSG00000149532',
'ENSG00000151657',
'ENSG00000152601',
'ENSG00000153006',
'ENSG00000153187',
'ENSG00000153914',
'ENSG00000154548',
'ENSG00000154743',
'ENSG00000155858',
'ENSG00000155966',
'ENSG00000158941',
'ENSG00000159140',
'ENSG00000159409',
'ENSG00000160075',
'ENSG00000160201',
'ENSG00000160710',
'ENSG00000160917',
'ENSG00000161082',
'ENSG00000161265',
'ENSG00000161547',
'ENSG00000161981',
'ENSG00000162374',
'ENSG00000162385',
'ENSG00000162664',
'ENSG00000163156',
'ENSG00000163605',
'ENSG00000163634',
'ENSG00000163882',
'ENSG00000163950',
'ENSG00000164167',
'ENSG00000164329',
'ENSG00000164610',
'ENSG00000164944',
'ENSG00000165119',
'ENSG00000165494',
'ENSG00000165630',
'ENSG00000165934',
'ENSG00000167005',
'ENSG00000167088',
'ENSG00000167258',
'ENSG00000167281',
'ENSG00000168002',
'ENSG00000168438',
'ENSG00000168566',
'ENSG00000168883',
'ENSG00000169045',
'ENSG00000169217',
'ENSG00000169249',
'ENSG00000169564',
'ENSG00000169800',
'ENSG00000169813',
'ENSG00000169976',
'ENSG00000170144',
'ENSG00000170860',
'ENSG00000170892',
'ENSG00000172062',
'ENSG00000172409',
'ENSG00000172850',
'ENSG00000173627',
'ENSG00000173914',
'ENSG00000173933',
'ENSG00000174231',
'ENSG00000174243',
'ENSG00000174891',
'ENSG00000175324',
'ENSG00000176102',
'ENSG00000177613',
'ENSG00000177700',
'ENSG00000177733',
'ENSG00000178607',
'ENSG00000179837',
'ENSG00000179950',
'ENSG00000181222',
'ENSG00000181817',
'ENSG00000182004',
'ENSG00000182173',
'ENSG00000182196',
'ENSG00000182872',
'ENSG00000183431',
'ENSG00000183684',
'ENSG00000184209',
'ENSG00000184937',
'ENSG00000185246',
'ENSG00000185272',
'ENSG00000185736',
'ENSG00000185946',
'ENSG00000185978',
'ENSG00000188342',
'ENSG00000189091',
'ENSG00000196504',
'ENSG00000197111',
'ENSG00000197381',
'ENSG00000197976',
'ENSG00000198082',
'ENSG00000198307',
'ENSG00000198563',
'ENSG00000198860',
'ENSG00000203867',
'ENSG00000204392',
'ENSG00000204560',
'ENSG00000205571',
'ENSG00000205937',
'ENSG00000206486',
'ENSG00000213079',
'ENSG00000213516',
'ENSG00000213782',
'ENSG00000214575',
'ENSG00000215425',
'ENSG00000224979',
'ENSG00000225073',
'ENSG00000225859',
'ENSG00000225998',
'ENSG00000226171',
'ENSG00000226941',
'ENSG00000229496',
'ENSG00000230624',
'ENSG00000231377',
'ENSG00000231502',
'ENSG00000233049',
'ENSG00000233418',
'ENSG00000233561',
'ENSG00000234414',
'ENSG00000235439',
'ENSG00000236826',
'ENSG00000239665',
'ENSG00000242389',
'ENSG00000242875',
'ENSG00000244395',
'ENSG00000248643',
'ENSG00000257413',
'ENSG00000259956',
'ENSG00000260485',
'ENSG00000261230',
'ENSG00000262170',
'ENSG00000262716',
'ENSG00000262868',
'ENSG00000263077',
'ENSG00000263977',
'ENSG00000265228',
'ENSG00000265241',
'ENSG00000267483',
'ENSG00000267863',
'ENSG00000267903',
'ENSG00000268562',
'ENSG00000268588',
'ENSG00000268642',
'ENSG00000268813',
'ENSG00000268901',
'ENSG00000268973',
'ENSG00000269221',
'ENSG00000269366',
'ENSG00000269384',
'ENSG00000269754'])
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
from collections import defaultdict
from typing import Dict, Generic, List, Optional, TypeVar
import numpy as np
from scipy.special import loggamma
from scipy.stats import norm
logger = logging.getLogger(__name__)
T = TypeVar("T")
class Counter(Generic[T]):
def __init__(self) -> None:
self.counts: Dict[T, int] = defaultdict(int)
self.count_sum: int = 0
def __repr__(self):
return f"counts: {dict(self.counts)}. count_sum = {self.count_sum}."
def increment(self, observation: T) -> None:
self.counts[observation] += 1
self.count_sum += 1
def decrement(self, observation: T) -> None:
if observation not in self.counts or self.counts[observation] < 1:
raise RuntimeError(
f"Trying to decrement {observation}, but was never observed"
)
self.counts[observation] -= 1
self.count_sum -= 1
def get_count(self, observation: T) -> int:
return self.counts[observation]
def get_count_sum(self) -> int:
return self.count_sum
class DirichletPrior:
def __init__(self, dimension: int, scale: float, mean_vals: List[float]) -> None:
self.dimension = dimension
self.scale = scale
self.mean_vals = mean_vals
self.vals = [self.scale * mean_val for mean_val in self.mean_vals]
self._validate()
def __repr__(self):
return (
f"dimension = {self.dimension}. "
f"scale = {self.scale}. "
f"mean = {self.mean_vals}."
)
@classmethod
def from_dim_scale(cls, dimension: int, scale: float) -> "DirichletPrior":
prior = cls(dimension, scale, mean_vals=[(1.0 / dimension)] * dimension)
return prior
@classmethod
def from_scale_mean(cls, scale: float, mean_vals: List[float]) -> "DirichletPrior":
prior = cls(len(mean_vals), scale, mean_vals=mean_vals)
return prior
def _validate(self):
if abs(sum(self.mean_vals) - 1.0) > 1e-6:
raise RuntimeError(f"Invalid DirichletPrior {self.mean_vals}")
class DirichletMultinomial:
def __init__(self, prior: DirichletPrior, data: Optional[Counter] = None) -> None:
self.prior: DirichletPrior = prior
self.data: Counter = data if data is not None else Counter()
self.posteriors: List[List[float]] = []
def __repr__(self):
return (
f"prior: {self.prior}. data: {self.data}. "
f"posterior: {self.get_posterior_dist()}"
)
@classmethod
def from_prior(cls, prior) -> "DirichletMultinomial":
return DirichletMultinomial(prior)
@classmethod
def from_dim_alpha(cls, dim, alpha) -> "DirichletMultinomial":
prior = DirichletPrior.from_dim_scale(dim, alpha)
return DirichletMultinomial(prior)
@classmethod
def from_scale_mean(cls, scale, mean) -> "DirichletMultinomial":
prior = DirichletPrior.from_scale_mean(scale, mean)
return DirichletMultinomial(prior)
def add_posterior_estimate(self) -> None:
self.posteriors.append(self.get_posterior_dist())
def summarize_posterior_estimate(self, lb: float = 2.5, ub: float = 97.5):
mean_est = np.mean(self.posteriors, axis=0)
ci_est = np.percentile(self.posteriors, [lb, ub], axis=0)
return mean_est.tolist(), ci_est.tolist()
def increment(self, observation: int) -> None:
self.data.increment(observation)
def decrement(self, observation: int) -> None:
self.data.decrement(observation)
def get_posterior_count(self, observation: int) -> float:
return self.prior.vals[observation] + self.data.get_count(observation)
def get_posterior_parameter(self) -> List[float]:
return [
self.get_posterior_count(observation)
for observation in range(self.prior.dimension)
]
def get_posterior_count_sum(self) -> float:
return self.prior.scale + self.data.get_count_sum()
def get_posterior_prob(self, observation: int) -> float:
return self.get_posterior_count(observation) / self.get_posterior_count_sum()
def get_posterior_dist(self) -> List[float]:
return [
self.get_posterior_prob(observation)
for observation in range(self.prior.dimension)
]
def sample_from_posterior(self) -> List[float]:
return np.random.dirichlet(self.get_posterior_parameter()).tolist()
def get_log_likelihood(self) -> float:
llh = loggamma(self.prior.scale)
for i_dim in range(self.prior.dimension):
prior_val = self.prior.vals[i_dim]
llh -= loggamma(prior_val)
llh += loggamma(prior_val + self.data.get_count(i_dim))
llh -= loggamma(self.prior.scale + self.data.get_count_sum())
return llh
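def _example_dirichlet_multinomial() -> List[float]:
    """A minimal usage sketch (illustrative only, not used elsewhere in this
    module): a symmetric Dirichlet prior with scale 3 over 3 categories,
    updated with five observations.
    """
    dm = DirichletMultinomial.from_dim_alpha(dim=3, alpha=3.0)
    for observation in [0, 0, 1, 2, 0]:
        dm.increment(observation)
    # Each posterior probability is (prior count + observed count) divided by
    # (prior scale + number of observations): here [4/8, 2/8, 2/8].
    return dm.get_posterior_dist()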
class NormalInverseGammaPrior:
__slots__ = ["mu", "sigma", "alpha", "beta"]
def __init__(self, mu: float, sigma: float, alpha: float, beta: float) -> None:
self.mu = mu
self.sigma = sigma
self.alpha = alpha
self.beta = beta
def __repr__(self):
return (
f"mu = {self.mu}. sigma = {self.sigma}. "
f"alpha = {self.alpha}. beta = {self.beta}."
)
@classmethod
def from_hyperparameter(
cls, mu: float, sigma: float, alpha: float, beta: float
) -> "NormalInverseGammaPrior":
prior = cls(mu, sigma, alpha, beta)
return prior
class Normal:
def __init__(self) -> None:
self.observations: List[float] = []
self.count = 0
self.sum = 0.0
self.sum_squared = 0.0
def __repr__(self):
return (
f"count = {self.count}. sum = {self.sum}. "
f"sum_squared = {self.sum_squared}"
)
def add_observation(self, observation: float) -> None:
self.observations.append(observation)
self.count += 1
self.sum += observation
self.sum_squared += observation * observation
def remove_observation(self, observation: float) -> None:
self.observations.remove(observation)
self.count -= 1
self.sum -= observation
self.sum_squared -= observation * observation
def get_count(self) -> int:
return self.count
def get_sum(self) -> float:
return self.sum
def get_sum_squared(self) -> float:
return self.sum_squared
class NormalInverseGammaNormal:
def __init__(
self,
prior: Optional[NormalInverseGammaPrior] = None,  # MLE if prior is None
data: Optional[Normal] = None,
) -> None:
self.prior: NormalInverseGammaPrior = prior
self.data: Normal = data if data is not None else Normal()
self.mean: float = 0.0
self.variance: float = 0.0
if data is not None:
self.estimate_parameters()
def __repr__(self):
return (
f"prior: {self.prior}. data: {self.data}. "
f"mean: {self.mean}. variance: {self.variance}"
)
@classmethod
def from_prior_hyperparameters(
cls, mu, sigma, alpha, beta
) -> "NormalInverseGammaNormal":
prior = NormalInverseGammaPrior.from_hyperparameter(mu, sigma, alpha, beta)
return NormalInverseGammaNormal(prior)
def add_observation(self, observation: float, estimate: bool = True) -> None:
self.data.add_observation(observation)
if estimate:
self.estimate_parameters()
def remove_observation(self, observation: float, estimate: bool = True) -> None:
self.data.remove_observation(observation)
if estimate:
self.estimate_parameters()
def estimate_parameters(self) -> None:
# MLE for the mean; unbiased (n - 1) estimator for the variance.
self.mean = self.data.sum / self.data.count
self.variance = (
self.data.sum_squared - self.data.count * self.mean * self.mean
) / (self.data.count - 1)
def get_posterior_log_prob(self, observation: float) -> float:
return norm.logpdf(observation, self.mean, math.sqrt(self.variance))
def get_posterior_prob(self, observation: float) -> float:
return norm.pdf(observation, self.mean, math.sqrt(self.variance))
def get_log_likelihood(self) -> float:
return sum(self.get_posterior_log_prob(x) for x in self.data.observations)
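def _example_normal_mle() -> NormalInverseGammaNormal:
    """A minimal usage sketch (illustrative only): with prior=None the model
    falls back to the estimates in estimate_parameters(). Observations are
    added with estimate=False and fit once at the end, because the (n - 1)
    variance denominator is zero while only one point has been observed.
    """
    model = NormalInverseGammaNormal()
    for x in [1.0, 2.0, 3.0, 4.0]:
        model.add_observation(x, estimate=False)
    model.estimate_parameters()
    # mean == 2.5 and variance == 5/3 for these four points.
    return model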
|
|
from __future__ import with_statement
from numpy import linspace, zeros
# Enthought library imports
from kiva.constants import STROKE
from traits.api import (Any, Bool, Enum, Float, Int, Property,
on_trait_change, Trait)
from traitsui.api import EnumEditor
# Local, relative imports
from colors import ColorTrait
from component import Component
from markers import MarkerNameDict, marker_names, CustomMarker
slider_marker_names = list(marker_names) + ["rect"]
SliderMarkerTrait = Trait("rect", "rect", MarkerNameDict,
editor=EnumEditor(values=slider_marker_names))
class Slider(Component):
""" A horizontal or vertical slider bar """
#------------------------------------------------------------------------
# Model traits
#------------------------------------------------------------------------
min = Float()
max = Float()
value = Float()
# The number of ticks to show on the slider.
num_ticks = Int(4)
#------------------------------------------------------------------------
# Bar and endcap appearance
#------------------------------------------------------------------------
# Whether this is a horizontal or vertical slider
orientation = Enum("h", "v")
# The thickness, in pixels, of the lines used to render the ticks,
# endcaps, and main slider bar.
bar_width = Int(4)
bar_color = ColorTrait("black")
# Whether or not to render endcaps on the slider bar
endcaps = Bool(True)
# The extent of the endcaps, in pixels. This is a read-only property,
# since the endcap size can be set as either a fixed number of pixels or
# a percentage of the widget's size in the transverse direction.
endcap_size = Property
# The extent of the tickmarks, in pixels. This is a read-only property,
# since the endcap size can be set as either a fixed number of pixels or
# a percentage of the widget's size in the transverse direction.
tick_size = Property
#------------------------------------------------------------------------
# Slider appearance
#------------------------------------------------------------------------
# The kind of marker to use for the slider.
slider = SliderMarkerTrait("rect")
# If the slider marker is "rect", this is the thickness of the slider,
# i.e. its extent in the dimension parallel to the long axis of the widget.
# For other slider markers, this has no effect.
slider_thickness = Int(9)
# The size of the slider, in pixels. This is a read-only property, since
# the slider size can be set as either a fixed number of pixels or a
# percentage of the widget's size in the transverse direction.
slider_size = Property
# For slider markers with a filled area, this is the color of the filled
# area. For slider markers that are just lines/strokes (e.g. cross, plus),
# this is the color of the stroke.
slider_color = ColorTrait("red")
# For slider markers with a filled area, this is the color of the outline
# border drawn around the filled area. For slider markers that have just
# lines/strokes, this has no effect.
slider_border = ColorTrait("none")
# For slider markers with a filled area, this is the width, in pixels,
# of the outline around the area. For slider markers that are just lines/
# strokes, this is the thickness of the stroke.
slider_outline_width = Int(1)
# The kiva.CompiledPath representing the custom path to render for the
# slider, if the **slider** trait is set to "custom".
custom_slider = Any()
#------------------------------------------------------------------------
# Interaction traits
#------------------------------------------------------------------------
# Can this slider be interacted with, or is it just a display
interactive = Bool(True)
mouse_button = Enum("left", "right")
event_state = Enum("normal", "dragging")
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
# Returns the coordinate index (0 or 1) corresponding to our orientation.
# Used internally; read-only property.
axis_ndx = Property()
_slider_size_mode = Enum("fixed", "percent")
_slider_percent = Float(0.0)
_cached_slider_size = Int(10)
_endcap_size_mode = Enum("fixed", "percent")
_endcap_percent = Float(0.0)
_cached_endcap_size = Int(20)
_tick_size_mode = Enum("fixed", "percent")
_tick_size_percent = Float(0.0)
_cached_tick_size = Int(20)
# A tuple of (dx, dy) of the difference between the mouse position and
# center of the slider.
_offset = Any((0,0))
def set_range(self, min, max):
self.min = min
self.max = max
def map_screen(self, val):
""" Returns an (x,y) coordinate corresponding to the location of
**val** on the slider.
"""
# Some local variables to handle orientation dependence
axis_ndx = self.axis_ndx
other_ndx = 1 - axis_ndx
screen_low = self.position[axis_ndx]
screen_high = screen_low + self.bounds[axis_ndx]
# The return coordinate. The return value along the non-primary
# axis will be the same in all cases.
coord = [0,0]
coord[other_ndx] = self.position[other_ndx] + self.bounds[other_ndx]/2
# Handle exceptional/boundary cases
if val <= self.min:
coord[axis_ndx] = screen_low
return coord
elif val >= self.max:
coord[axis_ndx] = screen_high
return coord
elif self.min == self.max:
coord[axis_ndx] = (screen_low + screen_high) / 2
return coord
# Handle normal cases
coord[axis_ndx] = (val - self.min) / (self.max - self.min) * self.bounds[axis_ndx] + screen_low
return coord
def map_data(self, x, y, clip=True):
""" Returns a value between min and max that corresponds to the given
x and y values.
Parameters
==========
x, y : Float
The screen coordinates to map
clip : Bool (default=True)
Whether points outside the range should be clipped to the max
or min value of the slider (depending on which it's closer to)
Returns
=======
value : Float
"""
# Some local variables to handle orientation dependence
axis_ndx = self.axis_ndx
other_ndx = 1 - axis_ndx
screen_low = self.position[axis_ndx]
screen_high = screen_low + self.bounds[axis_ndx]
if self.orientation == "h":
coord = x
else:
coord = y
# Handle exceptional/boundary cases
if coord >= screen_high:
return self.max
elif coord <= screen_low:
return self.min
elif screen_high == screen_low:
return (self.max + self.min) / 2
# Handle normal cases
return (coord - screen_low) /self.bounds[axis_ndx] * \
(self.max - self.min) + self.min
def set_slider_pixels(self, pixels):
""" Sets the width of the slider to be a fixed number of pixels
Parameters
==========
pixels : int
The number of pixels wide that the slider should be
"""
self._slider_size_mode = "fixed"
self._cached_slider_size = pixels
def set_slider_percent(self, percent):
""" Sets the width of the slider to be a percentage of the width
of the slider widget.
Parameters
==========
percent : float
The percentage, between 0.0 and 1.0
"""
self._slider_size_mode = "percent"
self._slider_percent = percent
self._update_sizes()
def set_endcap_pixels(self, pixels):
""" Sets the width of the endcap to be a fixed number of pixels
Parameters
==========
pixels : int
The number of pixels wide that the endcap should be
"""
self._endcap_size_mode = "fixed"
self._cached_endcap_size = pixels
def set_endcap_percent(self, percent):
""" Sets the width of the endcap to be a percentage of the width
of the slider widget.
Parameters
==========
percent : float
The percentage, between 0.0 and 1.0
"""
self._endcap_size_mode = "percent"
self._endcap_percent = percent
self._update_sizes()
def set_tick_pixels(self, pixels):
""" Sets the width of the tick marks to be a fixed number of pixels
Parameters
==========
pixels : int
The number of pixels wide that the tick marks should be
"""
self._tick_size_mode = "fixed"
self._cached_tick_size = pixels
def set_tick_percent(self, percent):
""" Sets the width of the tick marks to be a percentage of the width
of the slider widget.
Parameters
==========
percent : float
The percentage, between 0.0 and 1.0
"""
self._tick_size_mode = "percent"
self._tick_size_percent = percent
self._update_sizes()
#------------------------------------------------------------------------
# Rendering methods
#------------------------------------------------------------------------
def _draw_mainlayer(self, gc, view_bounds=None, mode="normal"):
start = [0,0]
end = [0,0]
axis_ndx = self.axis_ndx
other_ndx = 1 - axis_ndx
bar_x = self.x + self.width / 2
bar_y = self.y + self.height / 2
# Draw the bar and endcaps
gc.set_stroke_color(self.bar_color_)
gc.set_line_width(self.bar_width)
if self.orientation == "h":
gc.move_to(self.x, bar_y)
gc.line_to(self.x2, bar_y)
gc.stroke_path()
if self.endcaps:
start_y = bar_y - self._cached_endcap_size / 2
end_y = bar_y + self._cached_endcap_size / 2
gc.move_to(self.x, start_y)
gc.line_to(self.x, end_y)
gc.move_to(self.x2, start_y)
gc.line_to(self.x2, end_y)
if self.num_ticks > 0:
x_pts = linspace(self.x, self.x2, self.num_ticks+2).astype(int)
starts = zeros((len(x_pts), 2),dtype=int)
starts[:,0] = x_pts
starts[:,1] = bar_y - self._cached_tick_size / 2
ends = starts.copy()
ends[:,1] = bar_y + self._cached_tick_size / 2
gc.line_set(starts, ends)
else:
gc.move_to(bar_x, self.y)
gc.line_to(bar_x, self.y2)
if self.endcaps:
start_x = bar_x - self._cached_endcap_size / 2
end_x = bar_x + self._cached_endcap_size / 2
gc.move_to(start_x, self.y)
gc.line_to(end_x, self.y)
gc.move_to(start_x, self.y2)
gc.line_to(end_x, self.y2)
if self.num_ticks > 0:
y_pts = linspace(self.y, self.y2, self.num_ticks+2).astype(int)
starts = zeros((len(y_pts), 2),dtype=int)
starts[:,1] = y_pts
starts[:,0] = bar_x - self._cached_tick_size / 2
ends = starts.copy()
ends[:,0] = bar_x + self._cached_tick_size / 2
gc.line_set(starts, ends)
gc.stroke_path()
# Draw the slider
pt = self.map_screen(self.value)
if self.slider == "rect":
gc.set_fill_color(self.slider_color_)
gc.set_stroke_color(self.slider_border_)
gc.set_line_width(self.slider_outline_width)
rect = self._get_rect_slider_bounds()
gc.rect(*rect)
gc.draw_path()
else:
self._render_marker(gc, pt, self._cached_slider_size, self.slider_(), self.custom_slider)
def _get_rect_slider_bounds(self):
""" Returns the (x, y, w, h) bounds of the rectangle representing the slider.
Used for rendering and hit detection.
"""
bar_x = self.x + self.width / 2
bar_y = self.y + self.height / 2
pt = self.map_screen(self.value)
if self.orientation == "h":
slider_height = self._cached_slider_size
return (pt[0] - self.slider_thickness, bar_y - slider_height/2,
self.slider_thickness, slider_height)
else:
slider_width = self._cached_slider_size
return (bar_x - slider_width/2, pt[1] - self.slider_thickness,
slider_width, self.slider_thickness)
def _render_marker(self, gc, point, size, marker, custom_path):
with gc:
gc.begin_path()
if marker.draw_mode == STROKE:
gc.set_stroke_color(self.slider_color_)
gc.set_line_width(self.slider_thickness)
else:
gc.set_fill_color(self.slider_color_)
gc.set_stroke_color(self.slider_border_)
gc.set_line_width(self.slider_outline_width)
if hasattr(gc, "draw_marker_at_points") and \
(marker.__class__ != CustomMarker) and \
(gc.draw_marker_at_points([point], size, marker.kiva_marker) != 0):
pass
elif hasattr(gc, "draw_path_at_points"):
if marker.__class__ != CustomMarker:
path = gc.get_empty_path()
marker.add_to_path(path, size)
mode = marker.draw_mode
else:
path = custom_path
mode = STROKE
if not marker.antialias:
gc.set_antialias(False)
gc.draw_path_at_points([point], path, mode)
else:
if not marker.antialias:
gc.set_antialias(False)
if marker.__class__ != CustomMarker:
gc.translate_ctm(*point)
# Kiva GCs have a path-drawing interface
marker.add_to_path(gc, size)
gc.draw_path(marker.draw_mode)
else:
path = custom_path
gc.translate_ctm(*point)
gc.add_path(path)
gc.draw_path(STROKE)
#------------------------------------------------------------------------
# Interaction event handlers
#------------------------------------------------------------------------
def normal_left_down(self, event):
if self.mouse_button == "left":
return self._mouse_pressed(event)
def dragging_left_up(self, event):
if self.mouse_button == "left":
return self._mouse_released(event)
def normal_right_down(self, event):
if self.mouse_button == "right":
return self._mouse_pressed(event)
def dragging_right_up(self, event):
if self.mouse_button == "right":
return self._mouse_released(event)
def dragging_mouse_move(self, event):
dx, dy = self._offset
self.value = self.map_data(event.x - dx, event.y - dy)
event.handled = True
self.request_redraw()
def dragging_mouse_leave(self, event):
self.event_state = "normal"
def _mouse_pressed(self, event):
# Determine the slider bounds so we can hit test it
pt = self.map_screen(self.value)
if self.slider == "rect":
x, y, w, h = self._get_rect_slider_bounds()
x2 = x + w
y2 = y + h
else:
x, y = pt
size = self._cached_slider_size
x -= size/2
y -= size/2
x2 = x + size
y2 = y + size
# Hit test both the slider and against the bar. If the user has
# clicked on the bar but outside of the slider, we set the _offset
# and call dragging_mouse_move() to teleport the slider to the
# mouse click position.
if self.orientation == "v" and (x <= event.x <= x2):
if not (y <= event.y <= y2):
self._offset = (event.x - pt[0], 0)
self.dragging_mouse_move(event)
else:
self._offset = (event.x - pt[0], event.y - pt[1])
elif self.orientation == "h" and (y <= event.y <= y2):
if not (x <= event.x <= x2):
self._offset = (0, event.y - pt[1])
self.dragging_mouse_move(event)
else:
self._offset = (event.x - pt[0], event.y - pt[1])
else:
# The mouse click missed the bar and the slider.
return
event.handled = True
self.event_state = "dragging"
return
def _mouse_released(self, event):
self.event_state = "normal"
event.handled = True
#------------------------------------------------------------------------
# Private trait event handlers and property getters/setters
#------------------------------------------------------------------------
def _get_axis_ndx(self):
if self.orientation == "h":
return 0
else:
return 1
def _get_slider_size(self):
return self._cached_slider_size
def _get_endcap_size(self):
return self._cached_endcap_size
def _get_tick_size(self):
return self._cached_tick_size
@on_trait_change("bounds,bounds_items")
def _update_sizes(self):
if self._slider_size_mode == "percent":
if self.orientation == "h":
self._cached_slider_size = int(self.height * self._slider_percent)
else:
self._cached_slider_size = int(self.width * self._slider_percent)
if self._endcap_size_mode == "percent":
if self.orientation == "h":
self._cached_endcap_size = int(self.height * self._endcap_percent)
else:
self._cached_endcap_size = int(self.width * self._endcap_percent)
if self._tick_size_mode == "percent":
    if self.orientation == "h":
        self._cached_tick_size = int(self.height * self._tick_size_percent)
    else:
        self._cached_tick_size = int(self.width * self._tick_size_percent)
return
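# A minimal configuration sketch (illustrative only; embedding the slider in
# an enable Window/Container and wiring up events is omitted):
#
#   slider = Slider(orientation="h", bounds=[200, 30], position=[0, 0])
#   slider.set_range(0.0, 10.0)
#   slider.set_slider_percent(0.8)    # marker height: 80% of the widget
#   slider.set_endcap_percent(0.6)
#   slider.value = 2.5
#   x, y = slider.map_screen(slider.value)    # data value -> screen point
#   slider.value = slider.map_data(150, 15)   # screen point -> data value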
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import json
import re
from unittest import mock
import pytest
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.utils import dates, timezone
from airflow.utils.session import create_session
from airflow.utils.state import DagRunState, TaskInstanceState
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
from tests.test_utils.mock_operators import Dummy2TestOperator, Dummy3TestOperator
from tests.test_utils.www import check_content_in_response
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
ENDPOINT = "extra_links"
class RaiseErrorLink(BaseOperatorLink):
name = 'raise_error'
def get_link(self, operator, dttm):
raise ValueError('This is an error')
class NoResponseLink(BaseOperatorLink):
name = 'no_response'
def get_link(self, operator, dttm):
return None
class FooBarLink(BaseOperatorLink):
name = 'foo-bar'
def get_link(self, operator, dttm):
return f"http://www.example.com/{operator.task_id}/foo-bar/{dttm}"
class AirflowLink(BaseOperatorLink):
name = 'airflow'
def get_link(self, operator, dttm):
return 'https://airflow.apache.org'
class DummyTestOperator(BaseOperator):
operator_extra_links = (
RaiseErrorLink(),
NoResponseLink(),
FooBarLink(),
AirflowLink(),
)
@pytest.fixture(scope="module")
def dag():
return DAG("dag", start_date=DEFAULT_DATE)
@pytest.fixture(scope="module")
def create_dag_run(dag):
def _create_dag_run(*, execution_date, session):
return dag.create_dagrun(
state=DagRunState.RUNNING,
execution_date=execution_date,
run_type=DagRunType.MANUAL,
session=session,
)
return _create_dag_run
@pytest.fixture(scope="module", autouse=True)
def patched_app(app, dag):
with mock.patch.object(app, "dag_bag") as mock_dag_bag:
mock_dag_bag.get_dag.return_value = dag
yield
@pytest.fixture(scope="module", autouse=True)
def task_1(dag):
return DummyTestOperator(task_id="some_dummy_task", dag=dag)
@pytest.fixture(scope="module", autouse=True)
def task_2(dag):
return Dummy2TestOperator(task_id="some_dummy_task_2", dag=dag)
@pytest.fixture(scope="module", autouse=True)
def task_3(dag):
return Dummy3TestOperator(task_id="some_dummy_task_3", dag=dag)
@pytest.fixture(scope="module", autouse=True)
def init_blank_task_instances():
"""Make sure there are no runs before we test anything.
This really shouldn't be needed, but tests elsewhere leave the db dirty.
"""
clear_db_runs()
@pytest.fixture(autouse=True)
def reset_task_instances():
yield
clear_db_runs()
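# The tests below all hit the same `extra_links` endpoint with the same query
# string shape. A minimal sketch of that construction (illustrative only; the
# tests build the f-strings inline rather than calling this helper):
def _example_extra_links_url(dag_id, task_id, execution_date, link_name):
    return (
        f"{ENDPOINT}?dag_id={dag_id}&task_id={task_id}"
        f"&execution_date={execution_date}&link_name={link_name}"
    )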
def test_extra_links_works(dag, task_1, viewer_client):
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_1.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=foo-bar",
follow_redirects=True,
)
assert response.status_code == 200
assert json.loads(response.data.decode()) == {
'url': 'http://www.example.com/some_dummy_task/foo-bar/2017-01-01T00:00:00+00:00',
'error': None,
}
def test_global_extra_links_works(dag, task_1, viewer_client):
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_1.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=github",
follow_redirects=True,
)
assert response.status_code == 200
assert json.loads(response.data.decode()) == {
'url': 'https://github.com/apache/airflow',
'error': None,
}
def test_extra_link_in_gantt_view(dag, create_dag_run, viewer_client):
exec_date = dates.days_ago(2)
start_date = timezone.datetime(2020, 4, 10, 2, 0, 0)
end_date = exec_date + datetime.timedelta(seconds=30)
with create_session() as session:
dag_run = create_dag_run(execution_date=exec_date, session=session)
for ti in dag_run.task_instances:
ti.refresh_from_task(dag.get_task(ti.task_id))
ti.state = TaskInstanceState.SUCCESS
ti.start_date = start_date
ti.end_date = end_date
session.merge(ti)
url = f'gantt?dag_id={dag.dag_id}&execution_date={exec_date}'
resp = viewer_client.get(url, follow_redirects=True)
check_content_in_response('"extraLinks":', resp)
extra_links_grps = re.search(r'extraLinks\": \[(\".*?\")\]', resp.get_data(as_text=True))
extra_links = extra_links_grps.group(0)
assert 'airflow' in extra_links
assert 'github' in extra_links
def test_operator_extra_link_override_global_extra_link(dag, task_1, viewer_client):
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_1.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=airflow",
follow_redirects=True,
)
assert response.status_code == 200
response_str = response.data
if isinstance(response.data, bytes):
response_str = response_str.decode()
assert json.loads(response_str) == {'url': 'https://airflow.apache.org', 'error': None}
def test_extra_links_error_raised(dag, task_1, viewer_client):
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_1.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=raise_error",
follow_redirects=True,
)
assert 404 == response.status_code
response_str = response.data
if isinstance(response.data, bytes):
response_str = response_str.decode()
assert json.loads(response_str) == {'url': None, 'error': 'This is an error'}
def test_extra_links_no_response(dag, task_1, viewer_client):
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_1.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=no_response",
follow_redirects=True,
)
assert response.status_code == 404
response_str = response.data
if isinstance(response.data, bytes):
response_str = response_str.decode()
assert json.loads(response_str) == {'url': None, 'error': 'No URL found for no_response'}
def test_operator_extra_link_override_plugin(dag, task_2, viewer_client):
"""
    This test checks whether the Operator Link (AirflowLink) defined in the
    Dummy2TestOperator is overridden by the Airflow Plugin (AirflowLink2).
    AirflowLink returns the 'https://airflow.apache.org/' link.
    AirflowLink2 returns the 'https://airflow.apache.org/1.10.5/' link.
"""
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_2.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=airflow",
follow_redirects=True,
)
assert response.status_code == 200
response_str = response.data
if isinstance(response.data, bytes):
response_str = response_str.decode()
assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None}
def test_operator_extra_link_multiple_operators(dag, task_2, task_3, viewer_client):
"""
    This test checks whether the Operator Link (AirflowLink2) defined in the
    Airflow Plugin (AirflowLink2) is attached to all of the operators listed
    in the AirflowLink2().operators property.
    AirflowLink2 returns the 'https://airflow.apache.org/1.10.5/' link.
    GoogleLink returns 'https://www.google.com'.
"""
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_2.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=airflow",
follow_redirects=True,
)
assert response.status_code == 200
response_str = response.data
if isinstance(response.data, bytes):
response_str = response_str.decode()
assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None}
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_3.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=airflow",
follow_redirects=True,
)
assert response.status_code == 200
response_str = response.data
if isinstance(response.data, bytes):
response_str = response_str.decode()
assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None}
# Also check that the other Operator Link defined for this operator exists
response = viewer_client.get(
f"{ENDPOINT}?dag_id={dag.dag_id}&task_id={task_3.task_id}"
f"&execution_date={DEFAULT_DATE}&link_name=google",
follow_redirects=True,
)
assert response.status_code == 200
response_str = response.data
if isinstance(response.data, bytes):
response_str = response_str.decode()
assert json.loads(response_str) == {'url': 'https://www.google.com', 'error': None}
|
|
import datetime
import math
import os
from collections import namedtuple
from urllib.parse import urlparse
from django.conf import settings
from django.core import paginator
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Count, Max, Q
from django.template import loader
from django.utils import translation
from django.utils.functional import cached_property
from django.urls import reverse
from olympia import amo
from olympia.addons.models import Addon, AddonCategory
from olympia.amo.reverse import get_url_prefix, override_url_prefix
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.constants.categories import CATEGORIES
from olympia.constants.promoted import RECOMMENDED
from olympia.bandwagon.models import Collection
from olympia.promoted.models import PromotedAddon
from olympia.users.models import UserProfile
# These constants are from:
# https://github.com/mozilla/addons-frontend/blob/master/src/amo/reducers/addonsByAuthors.js
EXTENSIONS_BY_AUTHORS_PAGE_SIZE = 10
THEMES_BY_AUTHORS_PAGE_SIZE = 12
# top 10 locales by visitor from GA (as of May 2021)
FRONTEND_LANGUAGES = [
'de',
'en-GB',
'en-US',
'es',
'fr',
'ja',
'pl',
'pt-BR',
'ru',
'zh-CN',
]
# Copied over from django because we want the 3.2 version in 2.2.
# We can delete this after we upgrade to django3.2
# https://github.com/django/django/blob/3.2/django/contrib/sitemaps/__init__.py
class DjangoSitemap:
# This limit is defined by Google. See the index documentation at
# https://www.sitemaps.org/protocol.html#index.
limit = 50000
# If protocol is None, the URLs in the sitemap will use the protocol
# with which the sitemap was requested.
protocol = None
# Enables generating URLs for all languages.
i18n = False
# Override list of languages to use.
languages = None
# Enables generating alternate/hreflang links.
alternates = False
# Add an alternate/hreflang link with value 'x-default'.
x_default = False
def _get(self, name, item, default=None):
try:
attr = getattr(self, name)
except AttributeError:
return default
if callable(attr):
if self.i18n:
# Split the (item, lang_code) tuples again for the location,
# priority, lastmod and changefreq method calls.
item, lang_code = item
return attr(item)
return attr
def _languages(self):
if self.languages is not None:
return self.languages
return [lang_code for lang_code, _ in settings.LANGUAGES]
def _items(self):
if self.i18n:
# Create (item, lang_code) tuples for all items and languages.
# This is necessary to paginate with all languages already considered.
items = [
(item, lang_code)
for lang_code in self._languages()
for item in self.items()
]
return items
return self.items()
def _location(self, item, force_lang_code=None):
if self.i18n:
obj, lang_code = item
# Activate language from item-tuple or forced one before calling location.
with translation.override(force_lang_code or lang_code):
return self._get('location', item)
return self._get('location', item)
@property
def paginator(self):
return paginator.Paginator(self._items(), self.limit)
def items(self):
return []
def location(self, item):
return item.get_absolute_url()
def get_protocol(self, protocol=None):
# Determine protocol
return self.protocol or protocol or 'http'
    def get_domain(self, site=None):
        # Determine domain. This trimmed-down copy has no sites-framework
        # fallback, so a Site/RequestSite object must be supplied.
        if site is None:
            raise ImproperlyConfigured(
                'To use sitemaps, either enable the sites framework or pass '
                'a Site/RequestSite object in your view.'
            )
        return site.domain
def get_urls(self, page=1, site=None, protocol=None):
protocol = self.get_protocol(protocol)
domain = self.get_domain(site)
return self._urls(page, protocol, domain)
def _urls(self, page, protocol, domain):
urls = []
latest_lastmod = None
all_items_lastmod = True # track if all items have a lastmod
paginator_page = self.paginator.page(page)
for item in paginator_page.object_list:
loc = f'{protocol}://{domain}{self._location(item)}'
priority = self._get('priority', item)
lastmod = self._get('lastmod', item)
if all_items_lastmod:
all_items_lastmod = lastmod is not None
if all_items_lastmod and (
latest_lastmod is None or lastmod > latest_lastmod
):
latest_lastmod = lastmod
url_info = {
'item': item,
'location': loc,
'lastmod': lastmod,
'changefreq': self._get('changefreq', item),
'priority': str(priority if priority is not None else ''),
}
if self.i18n and self.alternates:
alternates = []
for lang_code in self._languages():
loc = f'{protocol}://{domain}{self._location(item, lang_code)}'
alternates.append(
{
'location': loc,
'lang_code': lang_code,
}
)
if self.x_default:
lang_code = settings.LANGUAGE_CODE
loc = f'{protocol}://{domain}{self._location(item, lang_code)}'
loc = loc.replace(f'/{lang_code}/', '/', 1)
alternates.append(
{
'location': loc,
'lang_code': 'x-default',
}
)
url_info['alternates'] = alternates
urls.append(url_info)
if all_items_lastmod and latest_lastmod:
self.latest_lastmod = latest_lastmod
return urls
class LazyTupleList:
"""Lazily emulates a generated list like:
[
(item_a, item_b)
for item_b in list_b
for item_a in list_a
]
"""
def __init__(self, list_a, list_b):
self.list_a = list_a
self.list_b = list_b
def __len__(self):
return len(self.list_a) * len(self.list_b)
def __getitem__(self, key):
a_len = len(self.list_a)
def get(index):
return (self.list_a[index % a_len], self.list_b[index // a_len])
return (
[get(idx) for idx in range(key.start, key.stop, key.step or 1)]
if isinstance(key, slice)
else get(key)
)
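# A minimal illustration (not used elsewhere) of the ordering LazyTupleList
# emulates: list_a varies fastest, list_b slowest, and slices are built
# lazily without materializing the full cross product up front.
def _lazy_tuple_list_example():
    pairs = LazyTupleList(['a1', 'a2'], ['b1', 'b2', 'b3'])
    assert len(pairs) == 6
    assert pairs[0] == ('a1', 'b1')
    assert pairs[1] == ('a2', 'b1')
    assert pairs[2:4] == [('a1', 'b2'), ('a2', 'b2')]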
class Sitemap(DjangoSitemap):
limit = 2000
i18n = True
languages = FRONTEND_LANGUAGES
alternates = True
# x_default = False # TODO: enable this when we can validate it works well
_cached_items = []
protocol = urlparse(settings.EXTERNAL_SITE_URL).scheme
def _location(self, item, force_lang_code=None):
# modified from Django implementation - we don't rely on locale for urls
if self.i18n:
obj, lang_code = item
# Doing .replace is hacky, but `override_url_prefix` is slow at scale
return self.location(obj).replace(
settings.LANGUAGE_CODE, force_lang_code or lang_code, 1
)
return self.location(item)
def _items(self):
items = self.items()
if self.i18n:
# Create (item, lang_code) tuples for all items and languages.
# This is necessary to paginate with all languages already considered.
return LazyTupleList(items, self._languages())
return items
def items(self):
return self._cached_items
def get_domain(self, site):
if not site:
if not hasattr(self, 'domain'):
self.domain = urlparse(settings.EXTERNAL_SITE_URL).netloc
return self.domain
return super().get_domain(site=site)
def get_urls(self, page=1, site=None, protocol=None, *, app_name=None):
with override_url_prefix(app_name=app_name):
return super().get_urls(page=page, site=site, protocol=protocol)
@cached_property
def template(self):
return loader.get_template('sitemap.xml')
def render(self, app_name, page):
context = {'urlset': self.get_urls(page=page, app_name=app_name)}
return self.template.render(context)
@property
def _current_app(self):
return amo.APPS[get_url_prefix().app]
def get_android_promoted_addons():
return PromotedAddon.objects.filter(
Q(application_id=amo.ANDROID.id) | Q(application_id__isnull=True),
group_id=RECOMMENDED.id,
addon___current_version__promoted_approvals__application_id=(amo.ANDROID.id),
addon___current_version__promoted_approvals__group_id=RECOMMENDED.id,
)
class AddonSitemap(Sitemap):
item_tuple = namedtuple('Item', ['last_updated', 'url', 'page'], defaults=(1,))
@cached_property
def _cached_items(self):
current_app = self._current_app
addons_qs = Addon.objects.public().filter(
_current_version__apps__application=current_app.id
)
# android is currently limited to a small number of recommended addons, so get
# the list of those and filter further
if current_app == amo.ANDROID:
promoted_addon_ids = get_android_promoted_addons().values_list(
'addon_id', flat=True
)
addons_qs = addons_qs.filter(id__in=promoted_addon_ids)
addons = list(
addons_qs.order_by('-last_updated')
.values_list(
'last_updated',
'slug',
'text_ratings_count',
named=True,
)
.iterator()
)
items = [
self.item_tuple(
addon.last_updated,
reverse('addons.detail', args=[addon.slug]),
)
for addon in addons
]
# add pages for ratings - and extra pages when needed to paginate
page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
for addon in addons:
pages_needed = math.ceil((addon.text_ratings_count or 1) / page_size)
items.extend(
self.item_tuple(
addon.last_updated,
reverse('addons.ratings.list', args=[addon.slug]),
page,
)
for page in range(1, pages_needed + 1)
)
return items
def lastmod(self, item):
return item.last_updated
def location(self, item):
return item.url + (f'?page={item.page}' if item.page > 1 else '')
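# Worked example (illustrative numbers only) of the ratings pagination above:
# with a hypothetical REST_FRAMEWORK['PAGE_SIZE'] of 25, an add-on with 60
# ratings needs math.ceil(60 / 25) == 3 ratings-list entries (the bare list
# URL plus ?page=2 and ?page=3) in addition to its detail page entry.
def _ratings_pages_example():
    page_size = 25  # hypothetical value of settings.REST_FRAMEWORK['PAGE_SIZE']
    text_ratings_count = 60
    return math.ceil((text_ratings_count or 1) / page_size)  # -> 3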
class AMOSitemap(Sitemap):
lastmod = datetime.datetime.now()
_cached_items = [
# frontend pages
('home', amo.FIREFOX),
('home', amo.ANDROID),
('pages.about', None),
('pages.review_guide', None),
('browse.extensions', amo.FIREFOX),
('browse.themes', amo.FIREFOX),
('browse.language-tools', amo.FIREFOX),
# server pages
('devhub.index', None),
('apps.appversions', amo.FIREFOX),
('apps.appversions', amo.ANDROID),
]
def location(self, item):
urlname, app = item
if app:
with override_url_prefix(app_name=app.short):
return reverse(urlname)
else:
return reverse(urlname)
class CategoriesSitemap(Sitemap):
lastmod = datetime.datetime.now()
@cached_property
def _cached_items(self):
page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
page_count_max = settings.ES_MAX_RESULT_WINDOW // page_size
def additems(type):
items = []
for category in CATEGORIES[current_app.id][type].values():
items.append((category, 1))
pages_needed = min(
math.ceil(addon_counts.get(category.id, 1) / page_size),
page_count_max,
)
for page in range(2, pages_needed + 1):
items.append((category, page))
return items
current_app = self._current_app
counts_qs = (
AddonCategory.objects.filter(
addon___current_version__isnull=False,
addon___current_version__apps__application=current_app.id,
addon__disabled_by_user=False,
addon__status__in=amo.REVIEWED_STATUSES,
)
.values('category_id')
.annotate(count=Count('addon_id'))
)
addon_counts = {cat['category_id']: cat['count'] for cat in counts_qs}
items = additems(amo.ADDON_EXTENSION)
if current_app == amo.FIREFOX:
items.extend(additems(amo.ADDON_STATICTHEME))
return items
def location(self, item):
(category, page) = item
return category.get_url_path() + (f'?page={page}' if page > 1 else '')
class CollectionSitemap(Sitemap):
@cached_property
def _cached_items(self):
return list(
Collection.objects.filter(author_id=settings.TASK_USER_ID)
.order_by('-modified')
.values_list('modified', 'slug', 'author_id', named=True)
.iterator()
)
def lastmod(self, item):
return item.modified
def location(self, item):
return Collection.get_url_path(item)
class AccountSitemap(Sitemap):
item_tuple = namedtuple(
'AccountItem',
['addons_updated', 'url', 'extension_page', 'theme_page'],
defaults=(1, 1),
)
@cached_property
def _cached_items(self):
current_app = self._current_app
addon_q = Q(
addons___current_version__isnull=False,
addons___current_version__apps__application=current_app.id,
addons__disabled_by_user=False,
addons__status__in=amo.REVIEWED_STATUSES,
addonuser__listed=True,
addonuser__role__in=(amo.AUTHOR_ROLE_DEV, amo.AUTHOR_ROLE_OWNER),
)
# android is currently limited to a small number of recommended addons, so get
# the list of those and filter further
if current_app == amo.ANDROID:
promoted_addon_ids = get_android_promoted_addons().values_list(
'addon_id', flat=True
)
addon_q = addon_q & Q(addons__id__in=promoted_addon_ids)
users = (
UserProfile.objects.filter(is_public=True, deleted=False)
.annotate(
theme_count=Count(
'addons', filter=Q(addon_q, addons__type=amo.ADDON_STATICTHEME)
)
)
.annotate(
extension_count=Count(
'addons', filter=Q(addon_q, addons__type=amo.ADDON_EXTENSION)
)
)
.annotate(addons_updated=Max('addons__last_updated', filter=addon_q))
.order_by('-addons_updated', '-modified')
.values_list(
'addons_updated', 'id', 'extension_count', 'theme_count', named=True
)
.iterator()
)
items = []
for user in users:
if not user.extension_count and not user.theme_count:
# some users have an empty page for various reasons, no need to include
continue
extension_pages_needed = math.ceil(
(user.extension_count or 1) / EXTENSIONS_BY_AUTHORS_PAGE_SIZE
)
theme_pages_needed = math.ceil(
(user.theme_count or 1) / THEMES_BY_AUTHORS_PAGE_SIZE
)
items.extend(
self.item_tuple(
user.addons_updated,
reverse('users.profile', args=[user.id]),
ext_page,
1,
)
for ext_page in range(1, extension_pages_needed + 1)
)
# start themes at 2 because we don't want (1, 1) twice
items.extend(
self.item_tuple(
user.addons_updated,
reverse('users.profile', args=[user.id]),
1,
theme_page,
)
for theme_page in range(2, theme_pages_needed + 1)
)
return items
def lastmod(self, item):
return item.addons_updated
def location(self, item):
urlargs = '&'.join(
([f'page_e={item.extension_page}'] if item.extension_page > 1 else [])
+ ([f'page_t={item.theme_page}'] if item.theme_page > 1 else [])
)
return item.url + (f'?{urlargs}' if urlargs else '')
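# For illustration: location() above turns non-default pages into page_e/page_t
# query parameters, e.g. (extension_page=2, theme_page=3) yields
# '<profile-url>?page_e=2&page_t=3', while the default (1, 1) pages give the
# bare profile URL. A tiny sketch (the profile URL below is hypothetical):
def _account_location_example():
    item = AccountSitemap.item_tuple(None, '/user/123/', 2, 3)
    return AccountSitemap().location(item)  # -> '/user/123/?page_e=2&page_t=3'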
def get_sitemaps():
return {
# because some urls are app-less, we specify per item, so don't specify an app
('amo', None): AMOSitemap(),
('addons', amo.FIREFOX): AddonSitemap(),
('addons', amo.ANDROID): AddonSitemap(),
# category pages aren't supported on android, so firefox only
('categories', amo.FIREFOX): CategoriesSitemap(),
# we don't expose collections on android, so firefox only
('collections', amo.FIREFOX): CollectionSitemap(),
('users', amo.FIREFOX): AccountSitemap(),
('users', amo.ANDROID): AccountSitemap(),
}
OTHER_SITEMAPS = [
'/blog/sitemap.xml',
]
def get_sitemap_section_pages(sitemaps):
pages = []
for (section, app), site in sitemaps.items():
if not app:
pages.extend((section, None, page) for page in site.paginator.page_range)
continue
with override_url_prefix(app_name=app.short):
# Add all pages of the sitemap section.
pages.extend(
(section, app.short, page) for page in site.paginator.page_range
)
return pages
def render_index_xml(sitemaps):
sitemap_url = reverse('amo.sitemap')
server_urls = (
f'{sitemap_url}?section={section}'
+ (f'&app_name={app_name}' if app_name else '')
+ (f'&p={page}' if page != 1 else '')
for section, app_name, page in get_sitemap_section_pages(sitemaps)
)
urls = list(server_urls) + OTHER_SITEMAPS
return loader.render_to_string(
'sitemap_index.xml',
{'sitemaps': (absolutify(url) for url in urls)},
)
def get_sitemap_path(section, app, page=1):
return os.path.join(
settings.SITEMAP_STORAGE_PATH,
'sitemap'
+ (f'-{section}' if section else '')
+ (f'-{app}' if app else '')
+ (f'-{page}' if page != 1 else '')
+ '.xml',
)
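# A quick illustration (the path prefix depends on settings.SITEMAP_STORAGE_PATH)
# of the file names produced by get_sitemap_path:
#   get_sitemap_path(None, None)              -> .../sitemap.xml  (the index)
#   get_sitemap_path('addons', 'firefox')     -> .../sitemap-addons-firefox.xml
#   get_sitemap_path('addons', 'firefox', 2)  -> .../sitemap-addons-firefox-2.xml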
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.AdamOptimizer"])
class AdamOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="Adam"):
r"""Construct a new Adam optimizer.
Initialization:
$$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$
$$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$
$$t := 0 \text{(Initialize timestep)}$$
The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section 2 of the paper:
$$t := t + 1$$
$$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
$$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
$$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
$$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
The default value of 1e-8 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta2: A float value or a constant float tensor. The exponential decay
rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam". @compatibility(eager) When eager execution is
enabled, `learning_rate`, `beta1`, `beta2`, and `epsilon` can each be a
callable that takes no arguments and returns the actual value to use.
This can be useful for changing these values across different
invocations of optimizer functions. @end_compatibility
"""
super(AdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
# Created in SparseApply if needed.
self._updated_lr = None
def _get_beta_accumulators(self):
with ops.init_scope():
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return (self._get_non_slot_variable("beta1_power", graph=graph),
self._get_non_slot_variable("beta2_power", graph=graph))
def _create_slots(self, var_list):
# Create the beta1 and beta2 accumulators on the same device as the first
# variable. Sort the var_list to make sure this device is consistent across
# workers (these need to go on the same PS, otherwise some updates are
# silently ignored).
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(initial_value=self._beta1,
name="beta1_power",
colocate_with=first_var)
self._create_non_slot_variable(initial_value=self._beta2,
name="beta2_power",
colocate_with=first_var)
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
beta1 = self._call_if_callable(self._beta1)
beta2 = self._call_if_callable(self._beta2)
epsilon = self._call_if_callable(self._epsilon)
self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.apply_adam(
var, m, v,
math_ops.cast(beta1_power, var.dtype.base_dtype),
math_ops.cast(beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad, use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.resource_apply_adam(
var.handle, m.handle, v.handle,
math_ops.cast(beta1_power, grad.dtype.base_dtype),
math_ops.cast(beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t,
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(var,
lr * m_t / (v_sqrt + epsilon_t),
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values, var, grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x, i, v, use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(
x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(
grad, var, indices, self._resource_scatter_add)
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies(update_ops):
beta1_power, beta2_power = self._get_beta_accumulators()
with ops.colocate_with(beta1_power):
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t, use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self._beta2_t, use_locking=self._use_locking)
return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
name=name_scope)
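# A minimal NumPy sketch (illustrative only, separate from the optimizer above)
# of the update rule documented in the class docstring, using the same
# "epsilon hat" formulation; `t` is the timestep after incrementing.
def _adam_update_example(var, grad, m, v, t,
                         lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
  import numpy as np
  m = beta1 * m + (1 - beta1) * grad
  v = beta2 * v + (1 - beta2) * grad * grad
  lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  var = var - lr_t * m / (np.sqrt(v) + epsilon)
  return var, m, v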
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from distantbes.proto.proto import bazel_config_pb2 as proto_dot_bazel__config__pb2
from distantbes.proto.proto import invocation_pb2 as proto_dot_invocation__pb2
from distantbes.proto.proto import user_pb2 as proto_dot_user__pb2
class BuildBuddyServiceStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetInvocation = channel.unary_unary(
'/buildbuddy.service.BuildBuddyService/GetInvocation',
request_serializer=proto_dot_invocation__pb2.GetInvocationRequest.SerializeToString,
response_deserializer=proto_dot_invocation__pb2.GetInvocationResponse.FromString,
)
self.SearchInvocation = channel.unary_unary(
'/buildbuddy.service.BuildBuddyService/SearchInvocation',
request_serializer=proto_dot_invocation__pb2.SearchInvocationRequest.SerializeToString,
response_deserializer=proto_dot_invocation__pb2.SearchInvocationResponse.FromString,
)
self.CreateUser = channel.unary_unary(
'/buildbuddy.service.BuildBuddyService/CreateUser',
request_serializer=proto_dot_user__pb2.CreateUserRequest.SerializeToString,
response_deserializer=proto_dot_user__pb2.CreateUserResponse.FromString,
)
self.GetUser = channel.unary_unary(
'/buildbuddy.service.BuildBuddyService/GetUser',
request_serializer=proto_dot_user__pb2.GetUserRequest.SerializeToString,
response_deserializer=proto_dot_user__pb2.GetUserResponse.FromString,
)
self.GetBazelConfig = channel.unary_unary(
'/buildbuddy.service.BuildBuddyService/GetBazelConfig',
request_serializer=proto_dot_bazel__config__pb2.GetBazelConfigRequest.SerializeToString,
response_deserializer=proto_dot_bazel__config__pb2.GetBazelConfigResponse.FromString,
)
self.GetInvocationStat = channel.unary_unary(
'/buildbuddy.service.BuildBuddyService/GetInvocationStat',
request_serializer=proto_dot_invocation__pb2.GetInvocationStatRequest.SerializeToString,
response_deserializer=proto_dot_invocation__pb2.GetInvocationStatResponse.FromString,
)
class BuildBuddyServiceServicer(object):
"""Missing associated documentation comment in .proto file"""
def GetInvocation(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SearchInvocation(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateUser(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetUser(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBazelConfig(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetInvocationStat(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BuildBuddyServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetInvocation': grpc.unary_unary_rpc_method_handler(
servicer.GetInvocation,
request_deserializer=proto_dot_invocation__pb2.GetInvocationRequest.FromString,
response_serializer=proto_dot_invocation__pb2.GetInvocationResponse.SerializeToString,
),
'SearchInvocation': grpc.unary_unary_rpc_method_handler(
servicer.SearchInvocation,
request_deserializer=proto_dot_invocation__pb2.SearchInvocationRequest.FromString,
response_serializer=proto_dot_invocation__pb2.SearchInvocationResponse.SerializeToString,
),
'CreateUser': grpc.unary_unary_rpc_method_handler(
servicer.CreateUser,
request_deserializer=proto_dot_user__pb2.CreateUserRequest.FromString,
response_serializer=proto_dot_user__pb2.CreateUserResponse.SerializeToString,
),
'GetUser': grpc.unary_unary_rpc_method_handler(
servicer.GetUser,
request_deserializer=proto_dot_user__pb2.GetUserRequest.FromString,
response_serializer=proto_dot_user__pb2.GetUserResponse.SerializeToString,
),
'GetBazelConfig': grpc.unary_unary_rpc_method_handler(
servicer.GetBazelConfig,
request_deserializer=proto_dot_bazel__config__pb2.GetBazelConfigRequest.FromString,
response_serializer=proto_dot_bazel__config__pb2.GetBazelConfigResponse.SerializeToString,
),
'GetInvocationStat': grpc.unary_unary_rpc_method_handler(
servicer.GetInvocationStat,
request_deserializer=proto_dot_invocation__pb2.GetInvocationStatRequest.FromString,
response_serializer=proto_dot_invocation__pb2.GetInvocationStatResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'buildbuddy.service.BuildBuddyService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class BuildBuddyService(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def GetInvocation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildbuddy.service.BuildBuddyService/GetInvocation',
proto_dot_invocation__pb2.GetInvocationRequest.SerializeToString,
proto_dot_invocation__pb2.GetInvocationResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SearchInvocation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildbuddy.service.BuildBuddyService/SearchInvocation',
proto_dot_invocation__pb2.SearchInvocationRequest.SerializeToString,
proto_dot_invocation__pb2.SearchInvocationResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildbuddy.service.BuildBuddyService/CreateUser',
proto_dot_user__pb2.CreateUserRequest.SerializeToString,
proto_dot_user__pb2.CreateUserResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildbuddy.service.BuildBuddyService/GetUser',
proto_dot_user__pb2.GetUserRequest.SerializeToString,
proto_dot_user__pb2.GetUserResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetBazelConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildbuddy.service.BuildBuddyService/GetBazelConfig',
proto_dot_bazel__config__pb2.GetBazelConfigRequest.SerializeToString,
proto_dot_bazel__config__pb2.GetBazelConfigResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetInvocationStat(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildbuddy.service.BuildBuddyService/GetInvocationStat',
proto_dot_invocation__pb2.GetInvocationStatRequest.SerializeToString,
proto_dot_invocation__pb2.GetInvocationStatResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
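# A minimal usage sketch for the generated stub above (illustrative only; the
# target address and any request fields are hypothetical):
#
#   import grpc
#   from distantbes.proto.proto import invocation_pb2
#
#   channel = grpc.insecure_channel("localhost:1985")
#   stub = BuildBuddyServiceStub(channel)
#   response = stub.GetInvocation(invocation_pb2.GetInvocationRequest())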
|
|
"""chain module.
This module contains information about a depletion chain. A depletion chain is
loaded from an .xml file and all the nuclides are linked together.
"""
from io import StringIO
from itertools import chain
import math
import re
from collections import OrderedDict, defaultdict, namedtuple
from collections.abc import Mapping, Iterable
from numbers import Real, Integral
from warnings import warn
from openmc.checkvalue import check_type, check_greater_than
from openmc.data import gnd_name, zam
from .nuclide import FissionYieldDistribution
# Try to use lxml if it is available. It preserves the order of attributes and
# provides a pretty-printer by default. If not available,
# use OpenMC function to pretty print.
try:
import lxml.etree as ET
_have_lxml = True
except ImportError:
import xml.etree.ElementTree as ET
_have_lxml = False
import scipy.sparse as sp
import openmc.data
from openmc._xml import clean_indentation
from .nuclide import Nuclide, DecayTuple, ReactionTuple
# tuple of (possible MT values, (dA, dZ), secondaries) where dA is the change in
# the mass number and dZ is the change in the atomic number
ReactionInfo = namedtuple('ReactionInfo', ('mts', 'dadz', 'secondaries'))
REACTIONS = {
'(n,2nd)': ReactionInfo({11}, (-3, -1), ('H2',)),
'(n,2n)': ReactionInfo(set(chain([16], range(875, 892))), (-1, 0), ()),
'(n,3n)': ReactionInfo({17}, (-2, 0), ()),
'(n,na)': ReactionInfo({22}, (-4, -2), ('He4',)),
'(n,n3a)': ReactionInfo({23}, (-12, -6), ('He4', 'He4', 'He4')),
'(n,2na)': ReactionInfo({24}, (-5, -2), ('He4',)),
'(n,3na)': ReactionInfo({25}, (-6, -2), ('He4',)),
'(n,np)': ReactionInfo({28}, (-1, -1), ('H1',)),
'(n,n2a)': ReactionInfo({29}, (-8, -4), ('He4', 'He4')),
'(n,2n2a)': ReactionInfo({30}, (-9, -4), ('He4', 'He4')),
'(n,nd)': ReactionInfo({32}, (-2, -1), ('H2',)),
'(n,nt)': ReactionInfo({33}, (-3, -1), ('H3',)),
'(n,n3He)': ReactionInfo({34}, (-3, -2), ('He3',)),
'(n,nd2a)': ReactionInfo({35}, (-10, -5), ('H2', 'He4', 'He4')),
'(n,nt2a)': ReactionInfo({36}, (-11, -5), ('H3', 'He4', 'He4')),
'(n,4n)': ReactionInfo({37}, (-3, 0), ()),
'(n,2np)': ReactionInfo({41}, (-2, -1), ('H1',)),
'(n,3np)': ReactionInfo({42}, (-3, -1), ('H1',)),
'(n,n2p)': ReactionInfo({44}, (-2, -2), ('H1', 'H1')),
'(n,npa)': ReactionInfo({45}, (-5, -3), ('H1', 'He4')),
'(n,gamma)': ReactionInfo({102}, (1, 0), ()),
'(n,p)': ReactionInfo(set(chain([103], range(600, 650))), (0, -1), ('H1',)),
'(n,d)': ReactionInfo(set(chain([104], range(650, 700))), (-1, -1), ('H2',)),
'(n,t)': ReactionInfo(set(chain([105], range(700, 750))), (-2, -1), ('H3',)),
'(n,3He)': ReactionInfo(set(chain([106], range(750, 800))), (-2, -2), ('He3',)),
'(n,a)': ReactionInfo(set(chain([107], range(800, 850))), (-3, -2), ('He4',)),
'(n,2a)': ReactionInfo({108}, (-7, -4), ('He4', 'He4')),
'(n,3a)': ReactionInfo({109}, (-11, -6), ('He4', 'He4', 'He4')),
'(n,2p)': ReactionInfo({111}, (-1, -2), ('H1', 'H1')),
'(n,pa)': ReactionInfo({112}, (-4, -3), ('H1', 'He4')),
'(n,t2a)': ReactionInfo({113}, (-10, -5), ('H3', 'He4', 'He4')),
'(n,d2a)': ReactionInfo({114}, (-9, -5), ('H2', 'He4', 'He4')),
'(n,pd)': ReactionInfo({115}, (-2, -2), ('H1', 'H2')),
'(n,pt)': ReactionInfo({116}, (-3, -2), ('H1', 'H3')),
'(n,da)': ReactionInfo({117}, (-5, -3), ('H2', 'He4')),
'(n,5n)': ReactionInfo({152}, (-4, 0), ()),
'(n,6n)': ReactionInfo({153}, (-5, 0), ()),
'(n,2nt)': ReactionInfo({154}, (-4, -1), ('H3',)),
'(n,ta)': ReactionInfo({155}, (-6, -3), ('H3', 'He4')),
'(n,4np)': ReactionInfo({156}, (-4, -1), ('H1',)),
'(n,3nd)': ReactionInfo({157}, (-4, -1), ('H2',)),
'(n,nda)': ReactionInfo({158}, (-6, -3), ('H2', 'He4')),
'(n,2npa)': ReactionInfo({159}, (-6, -3), ('H1', 'He4')),
'(n,7n)': ReactionInfo({160}, (-6, 0), ()),
'(n,8n)': ReactionInfo({161}, (-7, 0), ()),
'(n,5np)': ReactionInfo({162}, (-5, -1), ('H1',)),
'(n,6np)': ReactionInfo({163}, (-6, -1), ('H1',)),
'(n,7np)': ReactionInfo({164}, (-7, -1), ('H1',)),
'(n,4na)': ReactionInfo({165}, (-7, -2), ('He4',)),
'(n,5na)': ReactionInfo({166}, (-8, -2), ('He4',)),
'(n,6na)': ReactionInfo({167}, (-9, -2), ('He4',)),
'(n,7na)': ReactionInfo({168}, (-10, -2), ('He4',)),
'(n,4nd)': ReactionInfo({169}, (-5, -1), ('H2',)),
'(n,5nd)': ReactionInfo({170}, (-6, -1), ('H2',)),
'(n,6nd)': ReactionInfo({171}, (-7, -1), ('H2',)),
'(n,3nt)': ReactionInfo({172}, (-5, -1), ('H3',)),
'(n,4nt)': ReactionInfo({173}, (-6, -1), ('H3',)),
'(n,5nt)': ReactionInfo({174}, (-7, -1), ('H3',)),
'(n,6nt)': ReactionInfo({175}, (-8, -1), ('H3',)),
'(n,2n3He)': ReactionInfo({176}, (-4, -2), ('He3',)),
'(n,3n3He)': ReactionInfo({177}, (-5, -2), ('He3',)),
'(n,4n3He)': ReactionInfo({178}, (-6, -2), ('He3',)),
'(n,3n2p)': ReactionInfo({179}, (-4, -2), ('H1', 'H1')),
'(n,3n2a)': ReactionInfo({180}, (-10, -4), ('He4', 'He4')),
'(n,3npa)': ReactionInfo({181}, (-7, -3), ('H1', 'He4')),
'(n,dt)': ReactionInfo({182}, (-4, -2), ('H2', 'H3')),
'(n,npd)': ReactionInfo({183}, (-3, -2), ('H1', 'H2')),
'(n,npt)': ReactionInfo({184}, (-4, -2), ('H1', 'H3')),
'(n,ndt)': ReactionInfo({185}, (-5, -2), ('H2', 'H3')),
'(n,np3He)': ReactionInfo({186}, (-4, -3), ('H1', 'He3')),
'(n,nd3He)': ReactionInfo({187}, (-5, -3), ('H2', 'He3')),
'(n,nt3He)': ReactionInfo({188}, (-6, -3), ('H3', 'He3')),
'(n,nta)': ReactionInfo({189}, (-7, -3), ('H3', 'He4')),
'(n,2n2p)': ReactionInfo({190}, (-3, -2), ('H1', 'H1')),
'(n,p3He)': ReactionInfo({191}, (-4, -3), ('H1', 'He3')),
'(n,d3He)': ReactionInfo({192}, (-5, -3), ('H2', 'He3')),
'(n,3Hea)': ReactionInfo({193}, (-6, -4), ('He3', 'He4')),
'(n,4n2p)': ReactionInfo({194}, (-5, -2), ('H1', 'H1')),
'(n,4n2a)': ReactionInfo({195}, (-11, -4), ('He4', 'He4')),
'(n,4npa)': ReactionInfo({196}, (-8, -3), ('H1', 'He4')),
'(n,3p)': ReactionInfo({197}, (-2, -3), ('H1', 'H1', 'H1')),
'(n,n3p)': ReactionInfo({198}, (-3, -3), ('H1', 'H1', 'H1')),
'(n,3n2pa)': ReactionInfo({199}, (-8, -4), ('H1', 'H1', 'He4')),
'(n,5n2p)': ReactionInfo({200}, (-6, -2), ('H1', 'H1')),
}
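# Worked example (illustrative only) of how the (dA, dZ) entries above are
# applied when building the chain: an (n,a) reaction on Fe56 (Z=26, A=56)
# has dadz == (-3, -2), giving a daughter with A=53, Z=24, i.e. Cr53, plus
# the He4 secondary listed in the tuple.
def _reaction_daughter_example():
    delta_a, delta_z = REACTIONS['(n,a)'].dadz
    return 56 + delta_a, 26 + delta_z  # -> (53, 24)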
__all__ = ["Chain", "REACTIONS"]
def replace_missing(product, decay_data):
"""Replace missing product with suitable decay daughter.
Parameters
----------
product : str
Name of product in GND format, e.g. 'Y86_m1'.
decay_data : dict
Dictionary of decay data
Returns
-------
product : str
Replacement for missing product in GND format.
"""
# Determine atomic number, mass number, and metastable state
Z, A, state = openmc.data.zam(product)
symbol = openmc.data.ATOMIC_SYMBOL[Z]
# Replace neutron with nothing
if Z == 0:
return None
# First check if ground state is available
if state:
product = '{}{}'.format(symbol, A)
# Find isotope with longest half-life
half_life = 0.0
for nuclide, data in decay_data.items():
m = re.match(r'{}(\d+)(?:_m\d+)?'.format(symbol), nuclide)
if m:
# If we find a stable nuclide, stop search
if data.nuclide['stable']:
mass_longest_lived = int(m.group(1))
break
if data.half_life.nominal_value > half_life:
mass_longest_lived = int(m.group(1))
half_life = data.half_life.nominal_value
# If mass number of longest-lived isotope is less than that of missing
# product, assume it undergoes beta-. Otherwise assume beta+.
beta_minus = (mass_longest_lived < A)
# Iterate until we find an existing nuclide
while product not in decay_data:
if Z > 98:
# Assume alpha decay occurs for Z=99 and above
Z -= 2
A -= 4
else:
# Otherwise assume a beta- or beta+
if beta_minus:
Z += 1
else:
Z -= 1
product = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)
return product
def replace_missing_fpy(actinide, fpy_data, decay_data):
"""Replace missing fission product yields
Parameters
----------
actinide : str
Name of actinide missing FPY data
fpy_data : dict
Dictionary of FPY data
decay_data : dict
Dictionary of decay data
Returns
-------
str
Actinide that can be used as replacement for FPY purposes
"""
# Check if metastable state has data (e.g., Am242m)
Z, A, m = zam(actinide)
if m == 0:
metastable = gnd_name(Z, A, 1)
if metastable in fpy_data:
return metastable
# Try increasing Z, holding N constant
isotone = actinide
while isotone in decay_data:
Z += 1
A += 1
isotone = gnd_name(Z, A, 0)
if isotone in fpy_data:
return isotone
# Try decreasing Z, holding N constant
isotone = actinide
while isotone in decay_data:
Z -= 1
A -= 1
isotone = gnd_name(Z, A, 0)
if isotone in fpy_data:
return isotone
# If all else fails, use U235 yields
return 'U235'
class Chain:
"""Full representation of a depletion chain.
A depletion chain can be created by using the :meth:`from_endf` method which
requires a list of ENDF incident neutron, decay, and neutron fission product
yield sublibrary files. The depletion chain used during a depletion
simulation is indicated by either an argument to
:class:`openmc.deplete.Operator` or through the
``depletion_chain`` item in the :envvar:`OPENMC_CROSS_SECTIONS`
environment variable.
Attributes
----------
nuclides : list of openmc.deplete.Nuclide
Nuclides present in the chain.
reactions : list of str
Reactions that are tracked in the depletion chain
nuclide_dict : OrderedDict of str to int
Maps a nuclide name to an index in nuclides.
fission_yields : None or iterable of dict
List of effective fission yields for materials. Each dictionary
should be of the form ``{parent: {product: yield}}`` with
types ``{str: {str: float}}``, where ``yield`` is the fission product
yield for isotope ``parent`` producing isotope ``product``.
A single entry indicates yields are constant across all materials.
Otherwise, an entry can be added for each material to be burned.
Ordering should be identical to how the operator orders reaction
rates for burnable materials.
"""
def __init__(self):
self.nuclides = []
self.reactions = []
self.nuclide_dict = OrderedDict()
self._fission_yields = None
def __contains__(self, nuclide):
return nuclide in self.nuclide_dict
def __getitem__(self, name):
"""Get a Nuclide by name."""
return self.nuclides[self.nuclide_dict[name]]
def __len__(self):
"""Number of nuclides in chain."""
return len(self.nuclides)
def add_nuclide(self, nuclide):
"""Add a nuclide to the depletion chain
Parameters
----------
nuclide : openmc.deplete.Nuclide
Nuclide to add
"""
self.nuclide_dict[nuclide.name] = len(self.nuclides)
self.nuclides.append(nuclide)
# Check for reaction paths
for rx in nuclide.reactions:
if rx.type not in self.reactions:
self.reactions.append(rx.type)
@classmethod
def from_endf(cls, decay_files, fpy_files, neutron_files,
reactions=('(n,2n)', '(n,3n)', '(n,4n)', '(n,gamma)', '(n,p)', '(n,a)'),
progress=True
):
"""Create a depletion chain from ENDF files.
String arguments in ``decay_files``, ``fpy_files``, and
``neutron_files`` will be treated as file names to be read.
Alternatively, :class:`openmc.data.endf.Evaluation` instances
can be included in these arguments.
Parameters
----------
decay_files : list of str or openmc.data.endf.Evaluation
List of ENDF decay sub-library files
fpy_files : list of str or openmc.data.endf.Evaluation
List of ENDF neutron-induced fission product yield sub-library files
neutron_files : list of str or openmc.data.endf.Evaluation
List of ENDF neutron reaction sub-library files
reactions : iterable of str, optional
Transmutation reactions to include in the depletion chain, e.g.,
`["(n,2n)", "(n,gamma)"]`. Note that fission is always included if
it is present. A complete listing of transmutation reactions can be
found in :data:`openmc.deplete.chain.REACTIONS`.
progress : bool, optional
Flag to print status messages during processing. Does not
            affect warning messages
Returns
-------
Chain
Notes
-----
        When an actinide is missing fission product yield (FPY) data, yields will be
copied from a parent isotope, found according to:
1. If the nuclide is in a ground state and a metastable state exists with
fission yields, copy the yields from the metastable
2. Find an isotone (same number of neutrons) and copy those yields
3. Copy the yields of U235 if the previous two checks fail
"""
transmutation_reactions = reactions
# Create dictionary mapping target to filename
if progress:
print('Processing neutron sub-library files...')
reactions = {}
for f in neutron_files:
evaluation = openmc.data.endf.Evaluation(f)
name = evaluation.gnd_name
reactions[name] = {}
for mf, mt, nc, mod in evaluation.reaction_list:
if mf == 3:
file_obj = StringIO(evaluation.section[3, mt])
openmc.data.endf.get_head_record(file_obj)
q_value = openmc.data.endf.get_cont_record(file_obj)[1]
reactions[name][mt] = q_value
# Determine what decay and FPY nuclides are available
if progress:
print('Processing decay sub-library files...')
decay_data = {}
for f in decay_files:
data = openmc.data.Decay(f)
# Skip decay data for neutron itself
if data.nuclide['atomic_number'] == 0:
continue
decay_data[data.nuclide['name']] = data
if progress:
print('Processing fission product yield sub-library files...')
fpy_data = {}
for f in fpy_files:
data = openmc.data.FissionProductYields(f)
fpy_data[data.nuclide['name']] = data
if progress:
print('Creating depletion_chain...')
missing_daughter = []
missing_rx_product = []
missing_fpy = []
missing_fp = []
chain = cls()
for idx, parent in enumerate(sorted(decay_data, key=openmc.data.zam)):
data = decay_data[parent]
nuclide = Nuclide(parent)
if not data.nuclide['stable'] and data.half_life.nominal_value != 0.0:
nuclide.half_life = data.half_life.nominal_value
nuclide.decay_energy = data.decay_energy.nominal_value
sum_br = 0.0
for i, mode in enumerate(data.modes):
type_ = ','.join(mode.modes)
if mode.daughter in decay_data:
target = mode.daughter
else:
print('missing {} {} {}'.format(
parent, ','.join(mode.modes), mode.daughter))
target = replace_missing(mode.daughter, decay_data)
# Write branching ratio, taking care to ensure sum is unity
br = mode.branching_ratio.nominal_value
sum_br += br
if i == len(data.modes) - 1 and sum_br != 1.0:
br = 1.0 - sum(m.branching_ratio.nominal_value
for m in data.modes[:-1])
# Append decay mode
nuclide.add_decay_mode(type_, target, br)
fissionable = False
if parent in reactions:
reactions_available = set(reactions[parent].keys())
for name in transmutation_reactions:
mts, changes, _ = REACTIONS[name]
if mts & reactions_available:
delta_A, delta_Z = changes
A = data.nuclide['mass_number'] + delta_A
Z = data.nuclide['atomic_number'] + delta_Z
daughter = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)
if daughter not in decay_data:
daughter = replace_missing(daughter, decay_data)
if daughter is None:
missing_rx_product.append((parent, name, daughter))
# Store Q value
for mt in sorted(mts):
if mt in reactions[parent]:
q_value = reactions[parent][mt]
break
else:
q_value = 0.0
nuclide.add_reaction(name, daughter, q_value, 1.0)
if any(mt in reactions_available for mt in openmc.data.FISSION_MTS):
q_value = reactions[parent][18]
nuclide.add_reaction('fission', None, q_value, 1.0)
fissionable = True
if fissionable:
if parent in fpy_data:
fpy = fpy_data[parent]
if fpy.energies is not None:
yield_energies = fpy.energies
else:
yield_energies = [0.0]
yield_data = {}
for E, yield_table in zip(yield_energies, fpy.independent):
yield_replace = 0.0
yields = defaultdict(float)
for product, y in yield_table.items():
# Handle fission products that have no decay data
if product not in decay_data:
daughter = replace_missing(product, decay_data)
product = daughter
yield_replace += y.nominal_value
yields[product] += y.nominal_value
if yield_replace > 0.0:
missing_fp.append((parent, E, yield_replace))
yield_data[E] = yields
nuclide.yield_data = FissionYieldDistribution(yield_data)
else:
nuclide._fpy = replace_missing_fpy(parent, fpy_data, decay_data)
missing_fpy.append((parent, nuclide._fpy))
# Add nuclide to chain
chain.add_nuclide(nuclide)
# Replace missing FPY data
for nuclide in chain.nuclides:
if hasattr(nuclide, '_fpy'):
nuclide.yield_data = chain[nuclide._fpy].yield_data
# Display warnings
if missing_daughter:
print('The following decay modes have daughters with no decay data:')
for mode in missing_daughter:
print(' {}'.format(mode))
print('')
if missing_rx_product:
print('The following reaction products have no decay data:')
for vals in missing_rx_product:
print('{} {} -> {}'.format(*vals))
print('')
if missing_fpy:
print('The following fissionable nuclides have no fission product yields:')
for parent, replacement in missing_fpy:
print(' {}, replaced with {}'.format(parent, replacement))
print('')
if missing_fp:
print('The following nuclides have fission products with no decay data:')
for vals in missing_fp:
print(' {}, E={} eV (total yield={})'.format(*vals))
return chain
@classmethod
def from_xml(cls, filename, fission_q=None):
"""Reads a depletion chain XML file.
Parameters
----------
filename : str
The path to the depletion chain XML file.
fission_q : dict, optional
Dictionary of nuclides and their fission Q values [eV].
If not given, values will be pulled from ``filename``
"""
chain = cls()
if fission_q is not None:
check_type("fission_q", fission_q, Mapping)
else:
fission_q = {}
# Load XML tree
root = ET.parse(str(filename))
for i, nuclide_elem in enumerate(root.findall('nuclide')):
this_q = fission_q.get(nuclide_elem.get("name"))
nuc = Nuclide.from_xml(nuclide_elem, root, this_q)
chain.add_nuclide(nuc)
return chain
def export_to_xml(self, filename):
"""Writes a depletion chain XML file.
Parameters
----------
filename : str
The path to the depletion chain XML file.
"""
root_elem = ET.Element('depletion_chain')
for nuclide in self.nuclides:
root_elem.append(nuclide.to_xml_element())
tree = ET.ElementTree(root_elem)
if _have_lxml:
tree.write(str(filename), encoding='utf-8', pretty_print=True)
else:
clean_indentation(root_elem)
tree.write(str(filename), encoding='utf-8')
def get_default_fission_yields(self):
"""Return fission yields at lowest incident neutron energy
Used as the default set of fission yields for :meth:`form_matrix`
if ``fission_yields`` are not provided
Returns
-------
fission_yields : dict
Dictionary of ``{parent: {product: f_yield}}``
where ``parent`` and ``product`` are both string
names of nuclides with yield data and ``f_yield``
is a float for the fission yield.
"""
out = defaultdict(dict)
for nuc in self.nuclides:
if nuc.yield_data is None:
continue
yield_obj = nuc.yield_data[min(nuc.yield_energies)]
out[nuc.name] = dict(yield_obj)
return out
def form_matrix(self, rates, fission_yields=None):
"""Forms depletion matrix.
Parameters
----------
rates : numpy.ndarray
2D array indexed by (nuclide, reaction)
fission_yields : dict, optional
Option to use a custom set of fission yields. Expected
to be of the form ``{parent : {product : f_yield}}``
with string nuclide names for ``parent`` and ``product``,
and ``f_yield`` as the respective fission yield
Returns
-------
scipy.sparse.csr_matrix
Sparse matrix representing depletion.
See Also
--------
:meth:`get_default_fission_yields`
"""
matrix = defaultdict(float)
reactions = set()
if fission_yields is None:
fission_yields = self.get_default_fission_yields()
for i, nuc in enumerate(self.nuclides):
# Loss from radioactive decay
if nuc.half_life is not None:
decay_constant = math.log(2) / nuc.half_life
if decay_constant != 0.0:
matrix[i, i] -= decay_constant
# Gain from radioactive decay
if nuc.n_decay_modes != 0:
for _, target, branching_ratio in nuc.decay_modes:
# Allow for total annihilation for debug purposes
if target is not None:
branch_val = branching_ratio * decay_constant
if branch_val != 0.0:
k = self.nuclide_dict[target]
matrix[k, i] += branch_val
if nuc.name in rates.index_nuc:
# Extract all reactions for this nuclide in this cell
nuc_ind = rates.index_nuc[nuc.name]
nuc_rates = rates[nuc_ind, :]
for r_type, target, _, br in nuc.reactions:
# Extract reaction index, and then final reaction rate
r_id = rates.index_rx[r_type]
path_rate = nuc_rates[r_id]
# Loss term -- make sure we only count loss once for
# reactions with branching ratios
if r_type not in reactions:
reactions.add(r_type)
if path_rate != 0.0:
matrix[i, i] -= path_rate
# Gain term; allow for total annihilation for debug purposes
if r_type != 'fission':
if target is not None and path_rate != 0.0:
k = self.nuclide_dict[target]
matrix[k, i] += path_rate * br
# Determine light nuclide production, e.g., (n,d) should
# produce H2
light_nucs = REACTIONS[r_type].secondaries
for light_nuc in light_nucs:
k = self.nuclide_dict.get(light_nuc)
if k is not None:
matrix[k, i] += path_rate * br
else:
for product, y in fission_yields[nuc.name].items():
yield_val = y * path_rate
if yield_val != 0.0:
k = self.nuclide_dict[product]
matrix[k, i] += yield_val
# Clear set of reactions
reactions.clear()
# Use DOK matrix as intermediate representation, then convert to CSR and return
n = len(self)
matrix_dok = sp.dok_matrix((n, n))
dict.update(matrix_dok, matrix)
return matrix_dok.tocsr()
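# Illustrative sketch (not part of the original source): the CSR matrix A
# returned above defines dN/dt = A N, so a single depletion step of length
# dt amounts to a matrix exponential acting on the nuclide vector n0.
# ``rates``, ``n0`` and ``dt`` are assumed to come from a transport solve.
#
#   >>> from scipy.sparse.linalg import expm_multiply
#   >>> A = chain.form_matrix(rates)
#   >>> n1 = expm_multiply(A * dt, n0)
#
# OpenMC's depletion integrators use CRAM rather than a generic matrix
# exponential; this is only a conceptual example.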
def get_branch_ratios(self, reaction="(n,gamma)"):
"""Return a dictionary with reaction branching ratios
Parameters
----------
reaction : str, optional
Reaction name like ``"(n,gamma)"`` [default], or
``"(n,alpha)"``.
Returns
-------
branches : dict
nested dict of parent nuclide keys with reaction targets and
branching ratios. Consider the capture, ``"(n,gamma)"``,
reaction for Am241::
{"Am241": {"Am242": 0.91, "Am242_m1": 0.09}}
See Also
--------
:meth:`set_branch_ratios`
"""
capt = {}
for nuclide in self.nuclides:
nuc_capt = {}
for rx in nuclide.reactions:
if rx.type == reaction and rx.branching_ratio != 1.0:
nuc_capt[rx.target] = rx.branching_ratio
if len(nuc_capt) > 0:
capt[nuclide.name] = nuc_capt
return capt
def set_branch_ratios(self, branch_ratios, reaction="(n,gamma)",
strict=True, tolerance=1e-5):
"""Set the branching ratios for a given reaction
Parameters
----------
branch_ratios : dict of {str: {str: float}}
Capture branching ratios to be inserted.
First layer keys are names of parent nuclides, e.g.
``"Am241"``. The branching ratios for these
parents will be modified. Corresponding values are
dictionaries of ``{target: branching_ratio}``
reaction : str, optional
Reaction name like ``"(n,gamma)"`` [default], or
``"(n, alpha)"``.
strict : bool, optional
Error control. If this evaluates to ``True``, then errors will
be raised if inconsistencies are found. Otherwise, warnings
will be raised for most issues.
tolerance : float, optional
Tolerance on the sum of all branching ratios for a
single parent. Will be checked with::
1 - tol < sum_br < 1 + tol
Raises
------
IndexError
If no isotopes were found on the chain that have the requested
reaction
KeyError
If ``strict`` evaluates to ``True`` and a parent isotope in
``branch_ratios`` does not exist on the chain
AttributeError
If ``strict`` evaluates to ``True`` and a parent isotope in
``branch_ratios`` does not have the requested reaction
ValueError
If ``strict`` evaluates to ``True`` and the sum of one parent's
branching ratios is outside 1 +/- ``tolerance``
See Also
--------
:meth:`get_branch_ratios`
"""
# Store some useful information through the validation stage
sums = {}
rxn_ix_map = {}
grounds = {}
tolerance = abs(tolerance)
missing_parents = set()
missing_products = {}
missing_reaction = set()
bad_sums = {}
# Secondary products, like alpha particles, should not be modified
secondary = REACTIONS[reaction].secondaries
# Check for validity before manipulation
for parent, sub in branch_ratios.items():
if parent not in self:
if strict:
raise KeyError(parent)
missing_parents.add(parent)
continue
# Make sure all products are present in the chain
prod_flag = False
for product in sub:
if product not in self:
if strict:
raise KeyError(product)
missing_products[parent] = product
prod_flag = True
break
if prod_flag:
continue
# Make sure this nuclide has the reaction
indexes = []
for ix, rx in enumerate(self[parent].reactions):
if rx.type == reaction and rx.target not in secondary:
indexes.append(ix)
if "_m" not in rx.target:
grounds[parent] = rx.target
if len(indexes) == 0:
if strict:
raise AttributeError(
"Nuclide {} does not have {} reactions".format(
parent, reaction))
missing_reaction.add(parent)
continue
this_sum = sum(sub.values())
# sum of branching ratios can be lower than 1 if no ground
# target is given, but never greater
if (this_sum >= 1 + tolerance or (grounds[parent] in sub
and this_sum <= 1 - tolerance)):
if strict:
msg = ("Sum of {} branching ratios for {} "
"({:7.3f}) outside tolerance of 1 +/- "
"{:5.3e}".format(
reaction, parent, this_sum, tolerance))
raise ValueError(msg)
bad_sums[parent] = this_sum
else:
rxn_ix_map[parent] = indexes
sums[parent] = this_sum
if len(rxn_ix_map) == 0:
raise IndexError(
"No {} reactions found in this {}".format(
reaction, self.__class__.__name__))
if len(missing_parents) > 0:
warn("The following nuclides were not found in {}: {}".format(
self.__class__.__name__, ", ".join(sorted(missing_parents))))
if len(missing_reaction) > 0:
warn("The following nuclides did not have {} reactions: "
"{}".format(reaction, ", ".join(sorted(missing_reaction))))
if len(missing_products) > 0:
tail = ("{} -> {}".format(k, v)
for k, v in sorted(missing_products.items()))
warn("The following products were not found in the {} and "
"parents were unmodified: \n{}".format(
self.__class__.__name__, ", ".join(tail)))
if len(bad_sums) > 0:
tail = ("{}: {:5.3f}".format(k, s)
for k, s in sorted(bad_sums.items()))
warn("The following parent nuclides were given {} branch ratios "
"with a sum outside tolerance of 1 +/- {:5.3e}:\n{}".format(
reaction, tolerance, "\n".join(tail)))
# Insert new ReactionTuples with updated branch ratios
for parent_name, rxn_index in rxn_ix_map.items():
parent = self[parent_name]
new_ratios = branch_ratios[parent_name]
rxn_index = rxn_ix_map[parent_name]
# Assume Q value is independent of target state
rxn_Q = parent.reactions[rxn_index[0]].Q
# Remove existing reactions
for ix in reversed(rxn_index):
parent.reactions.pop(ix)
# Add new reactions
all_meta = True
for target, br in new_ratios.items():
all_meta = all_meta and ("_m" in target)
parent.add_reaction(reaction, target, rxn_Q, br)
# If branching ratios don't add to unity, add reaction to ground
# with remainder of branching ratio
if all_meta and sums[parent_name] != 1.0:
ground_br = 1.0 - sums[parent_name]
ground_target = grounds.get(parent_name)
if ground_target is None:
pz, pa, pm = zam(parent_name)
ground_target = gnd_name(pz, pa + 1, 0)
new_ratios[ground_target] = ground_br
parent.add_reaction(reaction, ground_target, rxn_Q, ground_br)
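# Illustrative sketch (not part of the original source): round-tripping the
# capture branching ratios for one parent. The numbers are placeholders, not
# evaluated data.
#
#   >>> br = chain.get_branch_ratios("(n,gamma)")
#   >>> br["Am241"] = {"Am242": 0.9, "Am242_m1": 0.1}
#   >>> chain.set_branch_ratios(br, "(n,gamma)")
#
# With ``strict=True`` (the default) an unknown parent or product, a missing
# reaction, or a sum outside 1 +/- tolerance raises immediately instead of
# only emitting a warning.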
@property
def fission_yields(self):
if self._fission_yields is None:
self._fission_yields = [self.get_default_fission_yields()]
return self._fission_yields
@fission_yields.setter
def fission_yields(self, yields):
if yields is not None:
if isinstance(yields, Mapping):
yields = [yields]
check_type("fission_yields", yields, Iterable, Mapping)
self._fission_yields = yields
def validate(self, strict=True, quiet=False, tolerance=1e-4):
"""Search for possible inconsistencies
The following checks are performed for all nuclides present:
1) For all non-fission reactions, does the sum of branching
ratios equal about one?
2) For fission reactions, does the sum of fission yield
fractions equal about two?
Parameters
----------
strict : bool, optional
Raise exceptions at the first inconsistency if true.
Otherwise issue a warning
quiet : bool, optional
Flag to suppress warnings and return immediately at
the first inconsistency. Used only if
``strict`` does not evaluate to ``True``.
tolerance : float, optional
Absolute tolerance for comparisons. Used to compare computed
value ``x`` to intended value ``y`` as::
valid = (y - tolerance <= x <= y + tolerance)
Returns
-------
valid : bool
True if no inconsistencies were found
Raises
------
ValueError
If ``strict`` evaluates to ``True`` and an inconsistency was
found
See Also
--------
openmc.deplete.Nuclide.validate
"""
check_type("tolerance", tolerance, Real)
check_greater_than("tolerance", tolerance, 0.0, True)
valid = True
# Sort through nuclides by name
for name in sorted(self.nuclide_dict):
stat = self[name].validate(strict, quiet, tolerance)
if quiet and not stat:
return stat
valid = valid and stat
return valid
def reduce(self, initial_isotopes, level=None):
"""Reduce the size of the chain by following transmutation paths
As an example, consider a simple chain with the following
isotopes and transmutation paths::
U235 (n,gamma) U236
(n,fission) (Xe135, I135, Cs135)
I135 (beta decay) Xe135 (beta decay) Cs135
Xe135 (n,gamma) Xe136
Calling ``chain.reduce(["I135"])`` will produce a depletion
chain that contains only isotopes that would originate from
I135: I135, Xe135, Cs135, and Xe136. U235 and U236 will not
be included, but multiple isotopes can be used to start
the search.
The ``level`` value controls the depth of the search.
``chain.reduce(["U235"], level=1)`` would return a chain
with all isotopes except Xe136, since it is two transmutations
removed from U235 in this case.
While targets will not be included in the new chain, the
total destruction rate and decay rate of included isotopes
will be preserved.
Parameters
----------
initial_isotopes : iterable of str
Start the search based on the contents of these isotopes
level : int, optional
Depth of transmutation path to follow. Must be greater than
or equal to zero. A value of zero returns a chain with
``initial_isotopes``. The default value of None implies
that all isotopes that appear in the transmutation paths
of the initial isotopes and their progeny should be
explored
Returns
-------
Chain
Depletion chain containing isotopes that would appear
after following up to ``level`` reactions and decay paths
"""
check_type("initial_isotopes", initial_isotopes, Iterable, str)
if level is None:
level = math.inf
else:
check_type("level", level, Integral)
check_greater_than("level", level, 0, equality=True)
all_isotopes = self._follow(set(initial_isotopes), level)
# Avoid re-sorting for fission yields
name_sort = sorted(all_isotopes)
new_chain = type(self)()
for idx, iso in enumerate(sorted(all_isotopes, key=openmc.data.zam)):
previous = self[iso]
new_nuclide = Nuclide(previous.name)
new_nuclide.half_life = previous.half_life
new_nuclide.decay_energy = previous.decay_energy
if hasattr(previous, '_fpy'):
new_nuclide._fpy = previous._fpy
for mode in previous.decay_modes:
if mode.target in all_isotopes:
new_nuclide.add_decay_mode(*mode)
else:
new_nuclide.add_decay_mode(mode.type, None, mode.branching_ratio)
for rx in previous.reactions:
if rx.target in all_isotopes:
new_nuclide.add_reaction(*rx)
elif rx.type == "fission":
new_yields = new_nuclide.yield_data = (
previous.yield_data.restrict_products(name_sort))
if new_yields is not None:
new_nuclide.add_reaction(*rx)
# Maintain total destruction rates but set no target
else:
new_nuclide.add_reaction(rx.type, None, rx.Q, rx.branching_ratio)
new_chain.add_nuclide(new_nuclide)
# Doesn't appear that the ordering matters for the reactions,
# just the contents
new_chain.reactions = sorted(new_chain.reactions)
return new_chain
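# Illustrative sketch (not part of the original source), mirroring the
# docstring example above:
#
#   >>> small = chain.reduce(["I135"])          # I135, Xe135, Cs135, Xe136
#   >>> shallow = chain.reduce(["U235"], 1)     # everything except Xe136
#   >>> small.export_to_xml("chain_reduced.xml")
#
# Because total destruction rates are preserved, nuclides whose targets fall
# outside the reduced set still deplete at the correct rate.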
def _follow(self, isotopes, level):
"""Return all isotopes present up to depth level"""
found = isotopes.copy()
remaining = set(self.nuclide_dict)
if not found.issubset(remaining):
raise IndexError(
"The following isotopes were not found in the chain: "
"{}".format(", ".join(found - remaining)))
if level == 0:
return found
remaining -= found
depth = 0
next_iso = set()
while depth < level and remaining:
# Exhaust all isotopes at this level
while isotopes:
iso = isotopes.pop()
found.add(iso)
nuclide = self[iso]
# Follow all transmutation paths for this nuclide
for rxn in nuclide.reactions + nuclide.decay_modes:
if rxn.type == "fission":
continue
# Figure out if this reaction produces light nuclides
if rxn.type in REACTIONS:
secondaries = REACTIONS[rxn.type].secondaries
else:
secondaries = []
# Only include secondaries if they are present in original chain
secondaries = [x for x in secondaries if x in self]
for product in chain([rxn.target], secondaries):
if product is None:
continue
# Skip if we've already come across this isotope
elif (product in next_iso or product in found
or product in isotopes):
continue
next_iso.add(product)
if nuclide.yield_data is not None:
for product in nuclide.yield_data.products:
if (product in next_iso
or product in found or product in isotopes):
continue
next_iso.add(product)
if not next_iso:
# No additional isotopes were found, so there is nothing left to
# process and the set of discovered isotopes is complete
return found
# Prepare for next dig
depth += 1
isotopes |= next_iso
remaining -= next_iso
next_iso.clear()
# Process isotope that would have started next depth
found.update(isotopes)
return found
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module constructs Hamiltonians for the uniform electron gas."""
from typing import Optional
import numpy
from openfermion.ops.operators import FermionOperator, QubitOperator
from openfermion.utils.grid import Grid
def wigner_seitz_length_scale(wigner_seitz_radius: float, n_particles: int,
dimension: int) -> float:
"""Function to give length_scale associated with Wigner-Seitz radius.
Args:
wigner_seitz_radius (float): The radius per particle in atomic units.
n_particles (int): The number of particles in the simulation cell.
dimension (int): The dimension of the system.
Returns:
length_scale (float): The length scale for the simulation.
Raises:
ValueError: System dimension must be a positive integer.
"""
if not isinstance(dimension, int) or dimension < 1:
raise ValueError('System dimension must be a positive integer.')
half_dimension = dimension // 2
if dimension % 2:
volume_per_particle = (2 * numpy.math.factorial(half_dimension) *
(4 * numpy.pi)**half_dimension /
numpy.math.factorial(dimension) *
wigner_seitz_radius**dimension)
else:
volume_per_particle = (numpy.pi**half_dimension /
numpy.math.factorial(half_dimension) *
wigner_seitz_radius**dimension)
volume = volume_per_particle * n_particles
length_scale = volume**(1. / dimension)
return length_scale
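# Illustrative check (not part of the original source): for two particles in
# three dimensions at a Wigner-Seitz radius of 1.0 Bohr the volume per
# particle is 4*pi/3, so the cubic cell edge is (2 * 4*pi/3)**(1/3) ~= 2.03.
#
#   >>> round(wigner_seitz_length_scale(1.0, 2, 3), 2)
#   2.03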
def plane_wave_kinetic(grid: Grid,
spinless: bool = False,
e_cutoff: Optional[float] = None) -> FermionOperator:
"""Return the kinetic energy operator in the plane wave basis.
Args:
grid (openfermion.utils.Grid): The discretization to use.
spinless (bool): Whether to use the spinless model or not.
e_cutoff (float): Energy cutoff.
Returns:
FermionOperator: The kinetic momentum operator.
"""
# Initialize.
operator = FermionOperator()
spins = [None] if spinless else [0, 1]
# Loop once through all plane waves.
for momenta_indices in grid.all_points_indices():
momenta = grid.momentum_vector(momenta_indices)
coefficient = momenta.dot(momenta) / 2.
# Energy cutoff.
if e_cutoff is not None and coefficient > e_cutoff:
continue
# Loop over spins.
for spin in spins:
orbital = grid.orbital_id(momenta_indices, spin)
# Add interaction term.
operators = ((orbital, 1), (orbital, 0))
operator += FermionOperator(operators, coefficient)
return operator
def plane_wave_potential(grid: Grid,
spinless: bool = False,
e_cutoff: Optional[float] = None,
non_periodic: bool = False,
period_cutoff: Optional[float] = None
) -> FermionOperator:
"""Return the e-e potential operator in the plane wave basis.
Args:
grid (Grid): The discretization to use.
spinless (bool): Whether to use the spinless model or not.
e_cutoff (float): Energy cutoff.
non_periodic (bool): If the system is non-periodic, default to False.
period_cutoff (float): Period cutoff, default to
grid.volume_scale() ** (1. / grid.dimensions).
Returns:
operator (FermionOperator)
"""
# Initialize.
prefactor = 2. * numpy.pi / grid.volume_scale()
operator = FermionOperator((), 0.0)
spins = [None] if spinless else [0, 1]
if non_periodic and period_cutoff is None:
period_cutoff = grid.volume_scale()**(1. / grid.dimensions)
# Pre-Computations.
shifted_omega_indices_dict = {}
shifted_indices_minus_dict = {}
shifted_indices_plus_dict = {}
orbital_ids = {}
for indices_a in grid.all_points_indices():
shifted_omega_indices = [
j - grid.length[i] // 2 for i, j in enumerate(indices_a)
]
shifted_omega_indices_dict[indices_a] = shifted_omega_indices
shifted_indices_minus_dict[indices_a] = {}
shifted_indices_plus_dict[indices_a] = {}
for indices_b in grid.all_points_indices():
shifted_indices_minus_dict[indices_a][indices_b] = tuple([
(indices_b[i] - shifted_omega_indices[i]) % grid.length[i]
for i in range(grid.dimensions)
])
shifted_indices_plus_dict[indices_a][indices_b] = tuple([
(indices_b[i] + shifted_omega_indices[i]) % grid.length[i]
for i in range(grid.dimensions)
])
orbital_ids[indices_a] = {}
for spin in spins:
orbital_ids[indices_a][spin] = grid.orbital_id(indices_a, spin)
# Loop once through all plane waves.
for omega_indices in grid.all_points_indices():
shifted_omega_indices = shifted_omega_indices_dict[omega_indices]
# Get the momenta vectors.
momenta = grid.momentum_vector(omega_indices)
momenta_squared = momenta.dot(momenta)
# Skip if momentum is zero.
if momenta_squared == 0:
continue
# Energy cutoff.
if e_cutoff is not None and momenta_squared / 2. > e_cutoff:
continue
# Compute coefficient.
coefficient = prefactor / momenta_squared
if non_periodic:
coefficient *= 1.0 - numpy.cos(
period_cutoff * numpy.sqrt(momenta_squared))
for grid_indices_a in grid.all_points_indices():
shifted_indices_d = (
shifted_indices_minus_dict[omega_indices][grid_indices_a])
for grid_indices_b in grid.all_points_indices():
shifted_indices_c = (
shifted_indices_plus_dict[omega_indices][grid_indices_b])
# Loop over spins.
for spin_a in spins:
orbital_a = orbital_ids[grid_indices_a][spin_a]
orbital_d = orbital_ids[shifted_indices_d][spin_a]
for spin_b in spins:
orbital_b = orbital_ids[grid_indices_b][spin_b]
orbital_c = orbital_ids[shifted_indices_c][spin_b]
# Add interaction term.
if ((orbital_a != orbital_b) and
(orbital_c != orbital_d)):
operators = ((orbital_a, 1), (orbital_b, 1),
(orbital_c, 0), (orbital_d, 0))
operator += FermionOperator(operators, coefficient)
# Return.
return operator
def dual_basis_jellium_model(grid: Grid,
spinless: bool = False,
kinetic: bool = True,
potential: bool = True,
include_constant: bool = False,
non_periodic: bool = False,
period_cutoff: Optional[float] = None
) -> FermionOperator:
"""Return jellium Hamiltonian in the dual basis of arXiv:1706.00023
Args:
grid (Grid): The discretization to use.
spinless (bool): Whether to use the spinless model or not.
kinetic (bool): Whether to include kinetic terms.
potential (bool): Whether to include potential terms.
include_constant (bool): Whether to include the Madelung constant.
Note constant is unsupported for non-uniform, non-cubic cells with
ions.
non_periodic (bool): If the system is non-periodic, default to False.
period_cutoff (float): Period cutoff, default to
grid.volume_scale() ** (1. / grid.dimensions).
Returns:
operator (FermionOperator)
"""
# Initialize.
n_points = grid.num_points
position_prefactor = 2.0 * numpy.pi / grid.volume_scale()
operator = FermionOperator()
spins = [None] if spinless else [0, 1]
if potential and non_periodic and period_cutoff is None:
period_cutoff = grid.volume_scale()**(1.0 / grid.dimensions)
# Pre-Computations.
position_vectors = {}
momentum_vectors = {}
momenta_squared_dict = {}
orbital_ids = {}
for indices in grid.all_points_indices():
position_vectors[indices] = grid.position_vector(indices)
momenta = grid.momentum_vector(indices)
momentum_vectors[indices] = momenta
momenta_squared_dict[indices] = momenta.dot(momenta)
orbital_ids[indices] = {}
for spin in spins:
orbital_ids[indices][spin] = grid.orbital_id(indices, spin)
# Loop once through all lattice sites.
grid_origin = (0,) * grid.dimensions
coordinates_origin = position_vectors[grid_origin]
for grid_indices_b in grid.all_points_indices():
coordinates_b = position_vectors[grid_indices_b]
differences = coordinates_b - coordinates_origin
# Compute coefficients.
kinetic_coefficient = 0.
potential_coefficient = 0.
for momenta_indices in grid.all_points_indices():
momenta = momentum_vectors[momenta_indices]
momenta_squared = momenta_squared_dict[momenta_indices]
if momenta_squared == 0:
continue
cos_difference = numpy.cos(momenta.dot(differences))
if kinetic:
kinetic_coefficient += (cos_difference * momenta_squared /
(2. * float(n_points)))
if potential:
potential_coefficient += (position_prefactor * cos_difference /
momenta_squared)
for grid_indices_shift in grid.all_points_indices():
# Loop over spins and identify interacting orbitals.
orbital_a = {}
orbital_b = {}
shifted_index_1 = tuple([
(grid_origin[i] + grid_indices_shift[i]) % grid.length[i]
for i in range(grid.dimensions)
])
shifted_index_2 = tuple([
(grid_indices_b[i] + grid_indices_shift[i]) % grid.length[i]
for i in range(grid.dimensions)
])
for spin in spins:
orbital_a[spin] = orbital_ids[shifted_index_1][spin]
orbital_b[spin] = orbital_ids[shifted_index_2][spin]
if kinetic:
for spin in spins:
operators = ((orbital_a[spin], 1), (orbital_b[spin], 0))
operator += FermionOperator(operators, kinetic_coefficient)
if potential:
for sa in spins:
for sb in spins:
if orbital_a[sa] == orbital_b[sb]:
continue
operators = ((orbital_a[sa], 1), (orbital_a[sa], 0),
(orbital_b[sb], 1), (orbital_b[sb], 0))
operator += FermionOperator(operators,
potential_coefficient)
# Include the Madelung constant if requested.
if include_constant:
# TODO: Check for other unit cell shapes
operator += (FermionOperator.identity() *
(2.8372 / grid.volume_scale()**(1. / grid.dimensions)))
# Return.
return operator
def dual_basis_kinetic(grid: Grid, spinless: bool = False) -> FermionOperator:
"""Return the kinetic operator in the dual basis of arXiv:1706.00023.
Args:
grid (Grid): The discretization to use.
spinless (bool): Whether to use the spinless model or not.
Returns:
operator (FermionOperator)
"""
return dual_basis_jellium_model(grid, spinless, True, False)
def dual_basis_potential(grid: Grid,
spinless: bool = False,
non_periodic: bool = False,
period_cutoff: Optional[float] = None
) -> FermionOperator:
"""Return the potential operator in the dual basis of arXiv:1706.00023
Args:
grid (Grid): The discretization to use.
spinless (bool): Whether to use the spinless model or not.
non_periodic (bool): If the system is non-periodic, default to False.
period_cutoff (float): Period cutoff, default to
grid.volume_scale() ** (1. / grid.dimensions).
Returns:
operator (FermionOperator)
"""
return dual_basis_jellium_model(grid, spinless, False, True, False,
non_periodic, period_cutoff)
def jellium_model(grid: Grid,
spinless: bool = False,
plane_wave: bool = True,
include_constant: bool = False,
e_cutoff: Optional[float] = None,
non_periodic: bool = False,
period_cutoff: Optional[float] = None) -> FermionOperator:
"""Return jellium Hamiltonian as FermionOperator class.
Args:
grid (openfermion.utils.Grid): The discretization to use.
spinless (bool): Whether to use the spinless model or not.
plane_wave (bool): Whether to return in momentum space (True)
or position space (False).
include_constant (bool): Whether to include the Madelung constant.
Note constant is unsupported for non-uniform, non-cubic cells with
ions.
e_cutoff (float): Energy cutoff.
non_periodic (bool): If the system is non-periodic, default to False.
period_cutoff (float): Period cutoff, default to
grid.volume_scale() ** (1. / grid.dimensions).
Returns:
FermionOperator: The Hamiltonian of the model.
"""
if plane_wave:
hamiltonian = plane_wave_kinetic(grid, spinless, e_cutoff)
hamiltonian += plane_wave_potential(grid, spinless, e_cutoff,
non_periodic, period_cutoff)
else:
hamiltonian = dual_basis_jellium_model(grid, spinless, True, True,
include_constant, non_periodic,
period_cutoff)
# Include the Madelung constant if requested.
if include_constant:
# TODO: Check for other unit cell shapes
hamiltonian += (FermionOperator.identity() *
(2.8372 / grid.volume_scale()**(1. / grid.dimensions)))
return hamiltonian
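# Illustrative usage sketch (not part of the original source): a spinless
# jellium Hamiltonian on a small one-dimensional grid, in both bases.
#
#   >>> grid = Grid(1, 3, 1.0)   # dimensions, points per dimension, scale
#   >>> h_momentum = jellium_model(grid, spinless=True, plane_wave=True)
#   >>> h_position = jellium_model(grid, spinless=True, plane_wave=False)
#
# Both objects are FermionOperator instances; the dual-basis (position-space)
# form makes the electron-electron potential diagonal, which is the property
# exploited in arXiv:1706.00023.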
def jordan_wigner_dual_basis_jellium(grid: Grid,
spinless: bool = False,
include_constant: bool = False
) -> QubitOperator:
"""Return the jellium Hamiltonian as QubitOperator in the dual basis.
Args:
grid (Grid): The discretization to use.
spinless (bool): Whether to use the spinless model or not.
include_constant (bool): Whether to include the Madelung constant.
Note constant is unsupported for non-uniform, non-cubic cells with
ions.
Returns:
hamiltonian (QubitOperator)
"""
# Initialize.
n_orbitals = grid.num_points
volume = grid.volume_scale()
if spinless:
n_qubits = n_orbitals
else:
n_qubits = 2 * n_orbitals
hamiltonian = QubitOperator()
# Compute vectors.
momentum_vectors = {}
momenta_squared_dict = {}
for indices in grid.all_points_indices():
momenta = grid.momentum_vector(indices)
momentum_vectors[indices] = momenta
momenta_squared_dict[indices] = momenta.dot(momenta)
# Compute the identity coefficient and the coefficient of local Z terms.
identity_coefficient = 0.
z_coefficient = 0.
for k_indices in grid.all_points_indices():
momenta = momentum_vectors[k_indices]
momenta_squared = momenta.dot(momenta)
if momenta_squared == 0:
continue
identity_coefficient += momenta_squared / 2.
identity_coefficient -= (numpy.pi * float(n_orbitals) /
(momenta_squared * volume))
z_coefficient += numpy.pi / (momenta_squared * volume)
z_coefficient -= momenta_squared / (4. * float(n_orbitals))
if spinless:
identity_coefficient /= 2.
# Add identity term.
identity_term = QubitOperator((), identity_coefficient)
hamiltonian += identity_term
# Add local Z terms.
for qubit in range(n_qubits):
qubit_term = QubitOperator(((qubit, 'Z'),), z_coefficient)
hamiltonian += qubit_term
# Add ZZ terms and XZX + YZY terms.
zz_prefactor = numpy.pi / volume
xzx_yzy_prefactor = .25 / float(n_orbitals)
for p in range(n_qubits):
index_p = grid.grid_indices(p, spinless)
position_p = grid.position_vector(index_p)
for q in range(p + 1, n_qubits):
index_q = grid.grid_indices(q, spinless)
position_q = grid.position_vector(index_q)
difference = position_p - position_q
skip_xzx_yzy = not spinless and (p + q) % 2
# Loop through momenta.
zpzq_coefficient = 0.
term_coefficient = 0.
for k_indices in grid.all_points_indices():
momenta = momentum_vectors[k_indices]
momenta_squared = momenta_squared_dict[k_indices]
if momenta_squared == 0:
continue
cos_difference = numpy.cos(momenta.dot(difference))
zpzq_coefficient += (zz_prefactor * cos_difference /
momenta_squared)
if skip_xzx_yzy:
continue
term_coefficient += (xzx_yzy_prefactor * cos_difference *
momenta_squared)
# Add ZZ term.
qubit_term = QubitOperator(((p, 'Z'), (q, 'Z')), zpzq_coefficient)
hamiltonian += qubit_term
# Add XZX + YZY term.
if skip_xzx_yzy:
continue
z_string = tuple((i, 'Z') for i in range(p + 1, q))
xzx_operators = ((p, 'X'),) + z_string + ((q, 'X'),)
yzy_operators = ((p, 'Y'),) + z_string + ((q, 'Y'),)
hamiltonian += QubitOperator(xzx_operators, term_coefficient)
hamiltonian += QubitOperator(yzy_operators, term_coefficient)
# Include the Madelung constant if requested.
if include_constant:
# TODO Generalize to other cells
hamiltonian += (QubitOperator(
(),) * (2.8372 / grid.volume_scale()**(1. / grid.dimensions)))
# Return Hamiltonian.
return hamiltonian
def hypercube_grid_with_given_wigner_seitz_radius_and_filling(
dimension: int,
grid_length: int,
wigner_seitz_radius: float,
filling_fraction: float = 0.5,
spinless: bool = True) -> Grid:
"""Return a Grid with the same number of orbitals along each dimension
with the specified Wigner-Seitz radius.
Args:
dimension (int): The number of spatial dimensions.
grid_length (int): The number of orbitals along each dimension.
wigner_seitz_radius (float): The Wigner-Seitz radius per particle,
in Bohr.
filling_fraction (float): The average spin-orbital occupation.
Specifies the number of particles (rounding down).
spinless (boolean): Whether to give the system without or with spin.
"""
if filling_fraction > 1:
raise ValueError("filling_fraction cannot be greater than 1.")
n_qubits = grid_length**dimension
if not spinless:
n_qubits *= 2
n_particles = int(numpy.floor(n_qubits * filling_fraction))
if not n_particles:
raise ValueError(
"filling_fraction too low for number of orbitals specified by "
"other parameters.")
# Compute appropriate length scale.
length_scale = wigner_seitz_length_scale(wigner_seitz_radius, n_particles,
dimension)
return Grid(dimension, grid_length, length_scale)
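# Illustrative end-to-end sketch (not part of the original source): build a
# grid at a target Wigner-Seitz radius and obtain both the fermionic and the
# Jordan-Wigner qubit Hamiltonians using the helpers defined above.
#
#   >>> grid = hypercube_grid_with_given_wigner_seitz_radius_and_filling(
#   ...     dimension=1, grid_length=4, wigner_seitz_radius=10., spinless=True)
#   >>> h_ferm = jellium_model(grid, spinless=True, plane_wave=False)
#   >>> h_qubit = jordan_wigner_dual_basis_jellium(grid, spinless=True)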
|
|
#!/usr/bin/env python
from rospy import init_node, get_param, loginfo, logerr, on_shutdown
from rosbridge_server import RosbridgeTcpSocket
from rosbridge_library.capabilities.advertise import Advertise
from rosbridge_library.capabilities.publish import Publish
from rosbridge_library.capabilities.subscribe import Subscribe
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
from rosbridge_library.capabilities.call_service import CallService
from functools import partial
from signal import signal, SIGINT, SIG_DFL
import SocketServer
import sys
import time
#TODO: take care of socket timeouts and make sure to close sockets after killing the program to release network ports
#TODO: add new parameters to websocket version! those of rosbridge_tcp.py might not be needed, but the others should work well when adding them to .._websocket.py
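# Example invocation (illustrative; only the parameters parsed below exist):
#
#   rosrun rosbridge_server rosbridge_tcp.py --port 9095 --host 127.0.0.1
#
# Any value not supplied on the command line falls back to the ROS parameter
# server (~port, ~host, ...) and finally to the defaults defined on
# RosbridgeTcpSocket.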
def shutdown_hook(server):
server.shutdown()
if __name__ == "__main__":
loaded = False
retry_count = 0
while not loaded:
retry_count += 1
print "trying to start rosbridge TCP server.."
try:
print ""
init_node("rosbridge_tcp")
signal(SIGINT, SIG_DFL)
"""
Parameter handling:
- try to get parameter from parameter server (..define those via launch-file)
- overwrite value if given as commandline-parameter
BEGIN...
"""
#TODO: ensure types get cast correctly after getting from parameter server
#TODO: check if ROS parameter server uses None string for 'None-value' or Null or something else, then change code accordingly
# update parameters from parameter server or use default value ( second parameter of get_param )
port = get_param('~port', 9090)
host = get_param('~host', '')
incoming_buffer = get_param('~incoming_buffer', RosbridgeTcpSocket.incoming_buffer)
socket_timeout = get_param('~socket_timeout', RosbridgeTcpSocket.socket_timeout)
retry_startup_delay = get_param('~retry_startup_delay', 5.0) # seconds
fragment_timeout = get_param('~fragment_timeout', RosbridgeTcpSocket.fragment_timeout)
delay_between_messages = get_param('~delay_between_messages', RosbridgeTcpSocket.delay_between_messages)
max_message_size = get_param('~max_message_size', RosbridgeTcpSocket.max_message_size)
if max_message_size == "None":
max_message_size = None
# Get the glob strings and parse them as arrays.
RosbridgeTcpSocket.topics_glob = [
element.strip().strip("'")
for element in get_param('~topics_glob', '')[1:-1].split(',')
if len(element.strip().strip("'")) > 0]
RosbridgeTcpSocket.services_glob = [
element.strip().strip("'")
for element in get_param('~services_glob', '')[1:-1].split(',')
if len(element.strip().strip("'")) > 0]
RosbridgeTcpSocket.params_glob = [
element.strip().strip("'")
for element in get_param('~params_glob', '')[1:-1].split(',')
if len(element.strip().strip("'")) > 0]
# update parameters if provided via commandline
# .. could be implemented 'better' (value/type checking, etc.)
if "--port" in sys.argv:
idx = sys.argv.index("--port") + 1
if idx < len(sys.argv):
port = int(sys.argv[idx])
else:
print "--port argument provided without a value."
sys.exit(-1)
if "--host" in sys.argv:
idx = sys.argv.index("--host") + 1
if idx < len(sys.argv):
host = str(sys.argv[idx])
else:
print "--host argument provided without a value."
sys.exit(-1)
if "--incoming_buffer" in sys.argv:
idx = sys.argv.index("--incoming_buffer") + 1
if idx < len(sys.argv):
incoming_buffer = int(sys.argv[idx])
else:
print "--incoming_buffer argument provided without a value."
sys.exit(-1)
if "--socket_timeout" in sys.argv:
idx = sys.argv.index("--socket_timeout") + 1
if idx < len(sys.argv):
socket_timeout = int(sys.argv[idx])
else:
print "--socket_timeout argument provided without a value."
sys.exit(-1)
if "--retry_startup_delay" in sys.argv:
idx = sys.argv.index("--retry_startup_delay") + 1
if idx < len(sys.argv):
retry_startup_delay = int(sys.argv[idx])
else:
print "--retry_startup_delay argument provided without a value."
sys.exit(-1)
if "--fragment_timeout" in sys.argv:
idx = sys.argv.index("--fragment_timeout") + 1
if idx < len(sys.argv):
fragment_timeout = int(sys.argv[idx])
else:
print "--fragment_timeout argument provided without a value."
sys.exit(-1)
if "--delay_between_messages" in sys.argv:
idx = sys.argv.index("--delay_between_messages") + 1
if idx < len(sys.argv):
delay_between_messages = float(sys.argv[idx])
else:
print "--delay_between_messages argument provided without a value."
sys.exit(-1)
if "--max_message_size" in sys.argv:
idx = sys.argv.index("--max_message_size") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
max_message_size = None
else:
max_message_size = int(value)
else:
print "--max_message_size argument provided without a value. (can be None or <Integer>)"
sys.exit(-1)
# export parameters to handler class
RosbridgeTcpSocket.incoming_buffer = incoming_buffer
RosbridgeTcpSocket.socket_timeout = socket_timeout
RosbridgeTcpSocket.fragment_timeout = fragment_timeout
RosbridgeTcpSocket.delay_between_messages = delay_between_messages
RosbridgeTcpSocket.max_message_size = max_message_size
if "--topics_glob" in sys.argv:
idx = sys.argv.index("--topics_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeTcpSocket.topics_glob = []
else:
RosbridgeTcpSocket.topics_glob = [element.strip().strip("'") for element in value[1:-1].split(',')]
else:
print "--topics_glob argument provided without a value. (can be None or a list)"
sys.exit(-1)
if "--services_glob" in sys.argv:
idx = sys.argv.index("--services_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeTcpSocket.services_glob = []
else:
RosbridgeTcpSocket.services_glob = [element.strip().strip("'") for element in value[1:-1].split(',')]
else:
print "--services_glob argument provided without a value. (can be None or a list)"
sys.exit(-1)
if "--params_glob" in sys.argv:
idx = sys.argv.index("--params_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeTcpSocket.params_glob = []
else:
RosbridgeTcpSocket.params_glob = [element.strip().strip("'") for element in value[1:-1].split(',')]
else:
print "--params_glob argument provided without a value. (can be None or a list)"
sys.exit(-1)
# To be able to access the list of topics and services, you must be able to access the rosapi services.
if RosbridgeTcpSocket.services_glob:
RosbridgeTcpSocket.services_glob.append("/rosapi/*")
Subscribe.topics_glob = RosbridgeTcpSocket.topics_glob
Advertise.topics_glob = RosbridgeTcpSocket.topics_glob
Publish.topics_glob = RosbridgeTcpSocket.topics_glob
AdvertiseService.services_glob = RosbridgeTcpSocket.services_glob
UnadvertiseService.services_glob = RosbridgeTcpSocket.services_glob
CallService.services_glob = RosbridgeTcpSocket.services_glob
"""
...END (parameter handling)
"""
# Server host is a tuple ('host', port)
# empty string for host makes server listen on all available interfaces
SocketServer.ThreadingTCPServer.allow_reuse_address = True
server = SocketServer.ThreadingTCPServer((host, port), RosbridgeTcpSocket)
on_shutdown(partial(shutdown_hook, server))
loginfo("Rosbridge TCP server started on port %d", port)
server.serve_forever()
loaded = True
except Exception, e:
time.sleep(retry_startup_delay)
print "server loaded"
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is almost entirely borrowed from
# https://github.com/Yelp/Testify/blob/master/testify/assertions.py
# The only modifications are for py3k support
from __future__ import absolute_import, with_statement
import contextlib
import re
import sys
from . import utils
try:
STRING_TYPE = basestring
except: # py3k
STRING_TYPE = str
def _val_subtract(val1, val2, dict_subtractor, list_subtractor):
"""
Find the difference between two container types
Returns:
The difference between the values as defined by list_subtractor() and
dict_subtractor() if both values are the same container type.
An empty instance of type(val1) if val1 == val2
val1 if the values are not both dicts or both lists/tuples
Otherwise - the difference between the values
"""
if val1 == val2:
# if the values are the same, return a degenerate type
# this case is not used by list_subtract or dict_subtract
return type(val1)()
if isinstance(val1, dict) and isinstance(val2, dict):
val_diff = dict_subtractor(val1, val2)
elif isinstance(val1, (list, tuple)) and isinstance(val2, (list, tuple)):
val_diff = list_subtractor(val1, val2)
else:
val_diff = val1
return val_diff
def _dict_subtract(dict1, dict2):
"""
Return key,value pairs from dict1 that are not in dict2
Returns:
A new dict 'res_dict' with the following properties:
For every key that appears in both dict1 and dict2:
if dict1[key] == dict2[key] then key is not present in res_dict
else res_dict[key] is the difference between dict1[key] and dict2[key]
(dict1[key] itself for non-container values).
If the values are themselves dictionaries the algorithm is applied recursively.
Example:
_dict_subtract({
1: 'one',
2: 'two',
3: {'a': 'A', 'b': 'B'},
4: {'c': 'C', 'd': 'D'}
},
{
2: 'two',
3: {'a': 'A', 'b': 'B'},
4: {'d': 'D'},
5: {'e': 'E'}
}) => {1: 'one', 4: {'c': 'C'}}
"""
# make a result we can edit
result = dict(dict1)
# find the common keys -- i.e., the ones we might need to subtract
common_keys = set(dict1.keys()) & set(dict2.keys())
for key in common_keys:
val1, val2 = dict1[key], dict2[key]
if val1 == val2:
# values are the same: subtract
del result[key]
else:
# values are different: set the output key to the difference between the values
result[key] = _val_subtract(val1, val2, _dict_subtract, _list_subtract)
return result
def _list_subtract(list1, list2):
"""
Returns the difference between list1 and list2.
_list_subtract([1,2,3], [3,2,1]) == [1,3]
If any items in the list are container types, the method recursively calls
itself or _dict_subtract() to subtract the child
containers.
"""
# call val_subtract on all items that are not the same
res_list = [_val_subtract(val1, val2, _dict_subtract, _list_subtract)
for val1, val2 in zip(list1, list2) if val1 != val2]
# now append items that come after any item in list1
res_list += list1[len(list2):]
# return a tuple if list1 is a tuple
if isinstance(list1, tuple):
return tuple(res_list)
else:
return res_list
def assert_raises(*args, **kwargs):
"""Assert an exception is raised as a context manager or by passing in a
callable and its arguments.
As a context manager:
>>> with assert_raises(Exception):
... raise Exception
Pass in a callable:
>>> def raise_exception(arg, kwarg=None):
... raise Exception
>>> assert_raises(Exception, raise_exception, 1, kwarg=234)
"""
if (len(args) == 1) and not kwargs:
return _assert_raises_context_manager(args[0])
else:
return _assert_raises(*args, **kwargs)
def assert_raises_and_contains(expected_exception_class, strings, callable_obj, *args, **kwargs):
"""Assert an exception is raised by passing in a callable and its
arguments and that the string representation of the exception
contains the case-insensitive list of passed-in strings.
Args
strings -- can be a string or an iterable of strings
"""
try:
callable_obj(*args, **kwargs)
except:
_, e, _ = sys.exc_info()
assert_isinstance(e, expected_exception_class)
message = str(e).lower()
if isinstance(strings, STRING_TYPE):
strings = [strings]
for string in strings:
assert_in(string.lower(), message)
else:
assert_not_reached("No exception was raised (expected %s)" % expected_exception_class)
@contextlib.contextmanager
def _assert_raises_context_manager(exception_class):
try:
yield
except:
_, ex, _ = sys.exc_info()
assert_isinstance(ex, exception_class)
else:
assert_not_reached("No exception was raised (expected %r)" %
exception_class)
def _assert_raises(exception_class, callable, *args, **kwargs):
with _assert_raises_context_manager(exception_class):
callable(*args, **kwargs)
def _diff_message(lhs, rhs):
"""If `lhs` and `rhs` are strings, return a formatted message
describing their differences. If they're not strings, describe the
differences in their `repr()`s.
NOTE: Only works well for strings not containing newlines.
"""
lhs = repr(lhs) if not isinstance(lhs, STRING_TYPE) else lhs
rhs = repr(rhs) if not isinstance(rhs, STRING_TYPE) else rhs
return 'Diff:\nl: %s\nr: %s' % utils.highlight(lhs, rhs)
def assert_equal(lval, rval, message=None):
"""Assert that lval and rval are equal."""
if message:
assert lval == rval, message
else:
assert lval == rval, \
"assertion failed: l == r\nl: %r\nr: %r\n\n%s" % \
(lval, rval, _diff_message(lval, rval))
assert_equals = assert_equal
def assert_almost_equal(lval, rval, digits, message=None):
"""Assert that lval and rval, when rounded to the specified number of digits, are the same."""
real_message = message or "%r !~= %r" % (lval, rval)
assert round(lval, digits) == round(rval, digits), real_message
def assert_within_tolerance(lval, rval, tolerance, message=None):
"""Assert that the difference between the two values, as a fraction of the left value, is smaller than the tolerance specified.
That is, abs(float(lval) - float(rval)) / float(lval) < tolerance"""
real_message = message or "%r !~= %r" % (lval, rval)
assert abs(float(lval) - float(rval)) / float(lval) < tolerance, real_message
def assert_not_equal(lval, rval, message=None):
"""Assert that lval and rval are unequal to each other."""
assert lval != rval, message or 'assertion failed: %r != %r' % (lval, rval)
def assert_lt(lval, rval, message=None):
"""Assert that lval is less than rval."""
assert lval < rval, message or 'assertion failed: %r < %r' % (lval, rval)
def assert_lte(lval, rval, message=None):
"""Assert that lval is less than or equal to rval"""
assert lval <= rval, message or 'assertion failed: %r <= %r' % (lval, rval)
def assert_gt(lval, rval, message=None):
"""Assert that lval is greater than rval."""
assert lval > rval, message or 'assertion failed: %r > %r' % (lval, rval)
def assert_gte(lval, rval, message=None):
"""Assert that lval is greater than or equal to rval"""
assert lval >= rval, message or 'assertion failed: %r >= %r' % (lval, rval)
def assert_in_range(val, start, end, message=None, inclusive=False):
"""Assert that val is greater than start and less than end. If inclusive is true, val may be equal to start or end."""
if inclusive:
real_message = message or "! %s <= %r <= %r" % (start, val, end)
assert start <= val <= end, real_message
else:
real_message = message or "! %s < %r < %r" % (start, val, end)
assert start < val < end, real_message
def assert_between(a, b, c):
"""Assert that b is between a and c, inclusive."""
assert_in_range(b, a, c, inclusive=True)
def assert_in(item, sequence, message=None):
"""Assert that the item is in the sequence."""
if not message:
message = (
"assertion failed: expected %(item)r in %(sequence)r" % locals()
)
assert item in sequence, message
def assert_not_in(item, sequence, message=None):
"""Assert that the item is not in the sequence."""
assert item not in sequence, (
"assertion failed: expected %(item)r not in %(sequence)r" % locals()
)
def assert_all_in(left, right):
"""Assert that everything in `left` is also in `right`
Note: This is different from `assert_subset()` because python sets use
`__hash__()` for comparison whereas `in` uses `__eq__()`.
"""
unmatching = []
for item in left:
if item not in right:
unmatching.append(item)
if unmatching:
raise AssertionError(
"%(unmatching)r missing from %(right)r" % locals()
)
def assert_starts_with(val, prefix, message=None):
"""Assert that val starts with prefix.
Applies to any iterable, not just strings.
"""
try:
iter(val)
except:
raise TypeError("%(val)r is not iterable" % locals())
try:
iter(prefix)
except:
raise TypeError("%(prefix)r is not iterable" % locals())
msg = message or "%(val)r does not start with %(prefix)r" % locals()
for i, (l, r) in enumerate(zip(val, prefix)):
assert_equal(l, r, msg)
msg = (
message or
"%(val)r shorter than %(prefix)r, so can't start with it" % locals()
)
length = len(list(prefix))
assert_equal(length, i + 1, msg)
def assert_not_reached(message=None):
"""Raise an AssertionError with a message."""
if message:
assert False, message
else:
assert False, 'egads! this line ought not to have been reached'
def assert_rows_equal(rows1, rows2):
"""Check that two sequences contain the same lists of dictionaries"""
def norm_row(row):
if isinstance(row, dict):
return tuple((k, row[k]) for k in sorted(row))
else:
return tuple(sorted(row))
def norm_rows(rows):
return tuple(sorted(norm_row(row) for row in rows))
assert_equal(norm_rows(rows1), norm_rows(rows2))
def assert_length(sequence, expected, message=None):
"""Assert a sequence or iterable has an expected length."""
length = len(list(sequence))
assert length == expected, (message or
"%(sequence)s has length %(length)s expected %(expected)s" % locals()
)
def assert_is(left, right, message=None):
"""Assert that left and right are the same object"""
assert left is right, (
message or "expected %(left)r is %(right)r" % locals()
)
def assert_is_not(left, right, message=None):
"""Assert that left and right are not the same object"""
assert left is not right, (
message or "expected %(left)r is not %(right)r" % locals()
)
def assert_all_match_regex(pattern, values, message=None):
"""Assert that all values in an iterable match a regex pattern.
Args:
pattern -- a regex.
values -- an iterable of values to test.
Raises AssertionError if any value does not match.
"""
for value in values:
if not message:
message = "expected %(value)r to match %(pattern)r" % locals()
assert re.match(pattern, value), message
def assert_match_regex(pattern, value, *args, **kwargs):
"""Assert that a single value matches a regex pattern."""
assert_all_match_regex(pattern, [value], *args, **kwargs)
assert_matches_regex = assert_match_regex
def assert_any_match_regex(pattern, values, message=None):
"""Assert that at least one value in an iterable matches a regex pattern.
Args:
pattern -- a regex.
values -- an iterable of values to test.
Raises AssertionError if all values don't match.
"""
for value in values:
if re.match(pattern, value) is not None:
return
if not message:
message = (
"expected at least one %(values)r to match %(pattern)r" % locals()
)
raise AssertionError(message)
def assert_all_not_match_regex(pattern, values, message=None):
"""Assert that all values don't match a regex pattern.
Args:
pattern -- a regex.
values -- an iterable of values to test.
Raises AssertionError if any values matches.
"""
for value in values:
if not message:
message = "expected %(value)r to not match %(pattern)r" % locals()
assert not re.match(pattern, value), message
assert_none_match_regex = assert_all_not_match_regex
def assert_sets_equal(left, right, message=None):
"""Assert that two sets are equal."""
if left != right:
extra_left = left - right
extra_right = right - left
if not message:
message = (
"expected %(left)r == %(right)r "
"[left has:%(extra_left)r, "
"right has:%(extra_right)r]"
) % locals()
raise AssertionError(message)
def assert_dicts_equal(left, right, ignore_keys=None, message=None):
"""Assert that two dictionaries are equal (optionally ignoring certain keys)."""
if ignore_keys is not None:
left = dict((k, left[k]) for k in left if k not in ignore_keys)
right = dict((k, right[k]) for k in right if k not in ignore_keys)
if left == right:
return
extra_left = _dict_subtract(left, right)
extra_right = _dict_subtract(right, left)
if not message:
message = (
"expected %(left)r == %(right)r "
"[left has:%(extra_left)r, "
"right has:%(extra_right)r]"
) % locals()
raise AssertionError(message)
def assert_dict_subset(left, right, message=None):
"""Assert that a dictionary is a strict subset of another dictionary.
Checks both keys and values.
"""
difference_dict = _dict_subtract(left, right)
if not difference_dict:
return
extra_left = difference_dict
small_right = dict((k, right[k]) for k in right if k in list(left.keys()))
extra_right = _dict_subtract(small_right, left)
if not message:
message = (
"[subset has:%(extra_left)r, superset has:%(extra_right)s]"
) % locals()
raise AssertionError(message)
def assert_subset(left, right, message=None):
"""Assert that the left set is a subset of the right set."""
set_left = set(left)
set_right = set(right)
if not (set_left <= set_right):
extra = set_left - set_right
if not message:
message = (
"expected %(set_left)r <= %(set_right)r [left has:%(extra)r]"
) % locals()
raise AssertionError(message)
def assert_list_prefix(left, right, message=None):
"""Assert that the left list is a prefix of the right list."""
assert_equal(left, right[:len(left)], message)
def assert_sorted_equal(left, right, **kwargs):
"""Assert equality, but without respect to ordering of elements. Basically for multisets."""
assert_equal(sorted(left), sorted(right), **kwargs)
def assert_isinstance(object_, type_, message=None):
"""Assert that an object is an instance of a given type."""
if not message:
message = "Expected type %r but got type %r" % (type_, type(object_))
assert isinstance(object_, type_), message
def assert_datetimes_equal(a, b, message=None):
"""Tests for equality of times by only testing up to the millisecond."""
assert_equal(
a.utctimetuple()[:-3],
b.utctimetuple()[:-3],
message or "%r != %r" % (a, b)
)
def assert_exactly_one(*args, **kwargs):
"""Assert that only one of the given arguments passes the provided truthy function (non-None by default).
Args:
truthy_fxn: a filter to redefine truthy behavior. Should take an object and return
True if desired conditions are satisfied. For example:
>>> assert_exactly_one(True, False, truthy_fxn=bool) # Success
>>> assert_exactly_one(0, None) # Success
>>> assert_exactly_one(True, False)
AssertionError
Returns:
The argument that passes the truthy function
"""
truthy_fxn = kwargs.pop('truthy_fxn', lambda x: x is not None)
assert not kwargs, "Unexpected kwargs: %r" % kwargs
true_args = [arg for arg in args if truthy_fxn(arg)]
if len(true_args) != 1:
raise AssertionError("Expected exactly one True (got %d) args: %r" % (len(true_args), args))
return true_args[0]
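# Illustrative usage sketch (not part of the original module):
#
#   >>> assert_equal(2 + 2, 4)
#   >>> assert_in('a', 'abc')
#   >>> assert_dict_subset({'a': 1}, {'a': 1, 'b': 2})
#   >>> with assert_raises(ZeroDivisionError):
#   ...     1 / 0
#
# Every helper raises AssertionError with a descriptive message on failure,
# so they can be mixed freely inside ordinary test functions.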
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file.
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
api_version:
required: false
choices: ['v1', 'v1beta3']
default: v1
description:
- The API version associated with cluster.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.base_cmd = [module.get_bin_path('kubectl', True)]
self.api_version = module.params.get('api_version')
if self.api_version:
self.base_cmd.append('--api-version=' + self.api_version)
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.name = module.params.get('name')
self.filename = module.params.get('filename')
self.resource = module.params.get('resource')
self.label = module.params.get('label')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True):
if check and self.exists():
return []
cmd = ['create']
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def replace(self):
if not self.force and not self.exists():
return []
cmd = ['replace']
if self.api_version != 'v1':
cmd = ['update']
if self.force:
cmd.append('--force')
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def exists(self):
cmd = ['get']
if not self.resource:
return False
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
cmd.append('--no-headers')
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
result = self._execute_nofail(cmd)
if not result:
return False
return True
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
api_version=dict(default='v1', choices=['v1', 'v1beta3']),
force=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
)
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create()
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
if manager.exists():
manager.force = True
result = manager.replace()
else:
result = manager.create(check=False)
else:
module.fail_json(msg='Unrecognized state %s.' % state)
if result:
changed = True
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Various HBase helpers
"""
import copy
import datetime
import json
import bson.json_util
from happybase.hbase import ttypes
from oslo_log import log
import six
from report.i18n import _
from report import utils
LOG = log.getLogger(__name__)
EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3,
'datetime': 4}
OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='}
# We need this additional dictionary because we have reverted timestamp in
# row-keys for stored metrics
OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<',
'ge': '<='}
def _QualifierFilter(op, qualifier):
return "QualifierFilter (%s, 'binaryprefix:m_%s')" % (op, qualifier)
def timestamp(dt, reverse=True):
"""Timestamp is count of milliseconds since start of epoch.
If reverse=True then timestamp will be reversed. Such a technique is used
in HBase rowkey design when period queries are required. Because of the
fact that rows are sorted lexicographically it's possible to vary whether
the 'oldest' entries will be on top of the table or it should be the newest
ones (reversed timestamp case).
:param dt: datetime which is translated to timestamp
:param reverse: a boolean parameter for reverse or straight count of
timestamp in milliseconds
:return: count or reversed count of milliseconds since start of epoch
"""
epoch = datetime.datetime(1970, 1, 1)
td = dt - epoch
ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000
return 0x7fffffffffffffff - ts if reverse else ts
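# Minimal illustration of the reversal (the helper below is only a sketch and
# is never called): for two datetimes d1 < d2, the straight timestamps grow
# with time while the reversed ones shrink, so newer rows sort first.
def _example_reversed_timestamp_ordering():
    d1 = datetime.datetime(2015, 1, 1)
    d2 = datetime.datetime(2015, 1, 2)
    assert timestamp(d1, reverse=False) < timestamp(d2, reverse=False)
    assert timestamp(d1, reverse=True) > timestamp(d2, reverse=True)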
def make_events_query_from_filter(event_filter):
"""Return start and stop row for filtering and a query.
Query is based on the selected parameter.
:param event_filter: storage.EventFilter object.
"""
start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False)
if event_filter.start_timestamp else "")
stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False)
if event_filter.end_timestamp else "")
kwargs = {'event_type': event_filter.event_type,
'event_id': event_filter.message_id}
res_q = make_query(**kwargs)
if event_filter.traits_filter:
for trait_filter in event_filter.traits_filter:
q_trait = make_query(trait_query=True, **trait_filter)
if q_trait:
if res_q:
res_q += " AND " + q_trait
else:
res_q = q_trait
return res_q, start, stop
def make_timestamp_query(func, start=None, start_op=None, end=None,
end_op=None, bounds_only=False, **kwargs):
"""Return a filter start and stop row for filtering and a query.
Query is based on the fact that CF-name is 'rts'.
:param start: Optional start timestamp
:param start_op: Optional start timestamp operator, like gt, ge
:param end: Optional end timestamp
:param end_op: Optional end timestamp operator, like lt, le
:param bounds_only: if True than query will not be returned
:param func: a function that provide a format of row
:param kwargs: kwargs for :param func
"""
# We don't need to dump here because get_start_end_rts returns strings
rts_start, rts_end = get_start_end_rts(start, end)
start_row, end_row = func(rts_start, rts_end, **kwargs)
if bounds_only:
return start_row, end_row
q = []
start_op = start_op or 'ge'
end_op = end_op or 'lt'
if rts_start:
q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
(OP_SIGN_REV[start_op], rts_start))
if rts_end:
q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
(OP_SIGN_REV[end_op], rts_end))
res_q = None
if len(q):
res_q = " AND ".join(q)
return start_row, end_row, res_q
def get_start_end_rts(start, end):
rts_start = str(timestamp(start)) if start else ""
rts_end = str(timestamp(end)) if end else ""
return rts_start, rts_end
def make_query(metaquery=None, trait_query=None, **kwargs):
"""Return a filter query string based on the selected parameters.
:param metaquery: optional metaquery dict
:param trait_query: optional boolean, for trait_query from kwargs
:param kwargs: key-value pairs to filter on. Key should be a real
column name in db
"""
q = []
res_q = None
# Query for traits differs from others. It is constructed with
# SingleColumnValueFilter with the possibility to choose comparison
# operator
if trait_query:
trait_name = kwargs.pop('key')
op = kwargs.pop('op', 'eq')
for k, v in kwargs.items():
if v is not None:
res_q = ("SingleColumnValueFilter "
"('f', '%s', %s, 'binary:%s', true, true)" %
(prepare_key(trait_name, EVENT_TRAIT_TYPES[k]),
OP_SIGN[op], dump(v)))
return res_q
# Note: we use extended constructor for SingleColumnValueFilter here.
# It is explicitly specified that entry should not be returned if CF is not
# found in table.
for key, value in sorted(kwargs.items()):
if value is not None:
if key == 'source':
q.append("SingleColumnValueFilter "
"('f', 's_%s', =, 'binary:%s', true, true)" %
(value, dump('1')))
elif key == 'trait_type':
q.append("ColumnPrefixFilter('%s')" % value)
elif key == 'event_id':
q.append("RowFilter ( = , 'regexstring:\d*:%s')" % value)
else:
q.append("SingleColumnValueFilter "
"('f', '%s', =, 'binary:%s', true, true)" %
(quote(key), dump(value)))
res_q = None
if len(q):
res_q = " AND ".join(q)
if metaquery:
meta_q = []
for k, v in metaquery.items():
meta_q.append(
"SingleColumnValueFilter ('f', '%s', =, 'binary:%s', "
"true, true)"
% ('r_' + k, dump(v)))
meta_q = " AND ".join(meta_q)
# join query and metaquery
if res_q is not None:
res_q += " AND " + meta_q
else:
res_q = meta_q # metaquery only
return res_q
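# Minimal illustration of the generated filter string (sketch only, never
# called): values are json-dumped, so string values keep their quotes.
def _example_make_query():
    q = make_query(resource_id='foo')
    assert q == ("SingleColumnValueFilter "
                 "('f', 'resource_id', =, 'binary:\"foo\"', true, true)")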
def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs):
"""Return a list of required columns in meter table to be scanned.
SingleColumnFilter has 'columns' filter that should be used to determine
what columns we are interested in. But if we want to use 'filter' and
'columns' together we have to include columns we are filtering by
to columns list.
Please see an example: If we make scan with filter
"SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')"
and columns ['f:rts'], the output will be always empty
because only 'rts' will be returned and filter will be applied
to this data so 's_test-1' cannot be find.
To make this request correct it should be fixed as follows:
filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')",
columns = ['f:rts','f:s_test-1']}
:param metaquery: optional metaquery dict
:param need_timestamp: flag, which defines the need for timestamp columns
:param kwargs: key-value pairs to filter on. Key should be a real
column name in db
"""
columns = ['f:message', 'f:recorded_at']
columns.extend("f:%s" % k for k, v in kwargs.items()
if v is not None)
if metaquery:
columns.extend("f:r_%s" % k for k, v in metaquery.items()
if v is not None)
source = kwargs.get('source')
if source:
columns.append("f:s_%s" % source)
if need_timestamp:
columns.extend(['f:rts', 'f:timestamp'])
return columns
def make_sample_query_from_filter(sample_filter, require_meter=True):
"""Return a query dictionary based on the settings in the filter.
:param sample_filter: SampleFilter instance
:param require_meter: If true and the filter does not have a meter,
raise an error.
"""
#meter = sample_filter.meter
meter = None
if not meter and require_meter:
raise RuntimeError('Missing required meter specifier')
start_row, end_row, ts_query = make_timestamp_query(
make_general_rowkey_scan,
start=sample_filter.start_timestamp,
start_op=sample_filter.start_timestamp_op,
end=sample_filter.end_timestamp,
end_op=sample_filter.end_timestamp_op,
some_id=meter)
kwargs = dict(user_id=sample_filter.user,
project_id=sample_filter.project,
counter_name=meter,
resource_id=sample_filter.resource,
source=sample_filter.source,
message_id=sample_filter.message_id)
q = make_query(metaquery=sample_filter.metaquery, **kwargs)
if q:
res_q = q + " AND " + ts_query if ts_query else q
else:
res_q = ts_query if ts_query else None
need_timestamp = (sample_filter.start_timestamp or
sample_filter.end_timestamp) is not None
columns = get_meter_columns(metaquery=sample_filter.metaquery,
need_timestamp=need_timestamp, **kwargs)
return res_q, start_row, end_row, columns
def make_sample_query_from_filterdict(sample_filterdict, require_meter=True):
"""Return a query dictionary based on the settings in the filter.
:param sample_filter: filter dict
:param require_meter: If true and the filter does not have a meter,
raise an error.
"""
#meter = sample_filter.meter
meter = None
if not meter and require_meter:
raise RuntimeError('Missing required meter specifier')
"""
start_row, end_row, ts_query = make_timestamp_query(
make_general_rowkey_scan,
start=sample_filter.start_timestamp,
start_op=sample_filter.start_timestamp_op,
end=sample_filter.end_timestamp,
end_op=sample_filter.end_timestamp_op,
some_id=meter)
"""
start_row, end_row, ts_query = None, None, None
kwargs = dict(user_id=sample_filterdict['user'],
project_id=sample_filterdict['project'],
counter_name=sample_filterdict['meter'],
resource_id=sample_filterdict['resource'],
source=sample_filterdict['source'],
message_id=sample_filterdict['message'])
q = make_query(**kwargs)
if q:
res_q = q + " AND " + ts_query if ts_query else q
else:
res_q = ts_query if ts_query else None
"""
need_timestamp = (sample_filter.start_timestamp or
sample_filter.end_timestamp) is not None
columns = get_meter_columns(metaquery=sample_filter.metaquery,
need_timestamp=need_timestamp, **kwargs)
"""
columns = ['f:resource_id', 'f:user_id', 'f:project_id', 'f:counter_name', 'f:counter_volume', 'f:counter_unit', 'f:timestamp']
return res_q, start_row, end_row, columns
def make_meter_query_for_resource(start_timestamp, start_timestamp_op,
end_timestamp, end_timestamp_op, source,
query=None):
"""This method is used when Resource table should be filtered by meters.
In this method we are looking into all qualifiers with m_ prefix.
:param start_timestamp: meter's timestamp start range.
:param start_timestamp_op: meter's start time operator, like ge, gt.
:param end_timestamp: meter's timestamp end range.
:param end_timestamp_op: meter's end time operator, like lt, le.
:param source: source filter.
:param query: a query string to concatenate with.
"""
start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp)
mq = []
start_op = start_timestamp_op or 'ge'
end_op = end_timestamp_op or 'lt'
if start_rts:
filter_value = (start_rts + ':' + quote(source) if source
else start_rts)
mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value))
if end_rts:
filter_value = (end_rts + ':' + quote(source) if source
else end_rts)
mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value))
if mq:
meter_q = " AND ".join(mq)
# If there is filtering on a time range we need to require that
# qualifiers start with m_. Otherwise, for e.g.
# QualifierFilter (>=, 'binaryprefix:m_9222030811134775808')
# the qualifier 's_test' would satisfy the filter and be returned.
meter_q = _QualifierFilter("=", '') + " AND " + meter_q
query = meter_q if not query else query + " AND " + meter_q
return query
def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None):
"""If it's filter on some_id without start and end.
start_row = some_id while end_row = some_id + MAX_BYTE.
"""
if some_id is None:
return None, None
if not rts_start:
# NOTE(idegtiarov): We cannot use a character above chr(122) here, because
# chr(123) and above get quoted, turning the character into a sequence
# starting with '%' (chr(37)), which sorts lexicographically lower than the
# characters used for digits.
rts_start = chr(122)
end_row = prepare_key(some_id, rts_start)
start_row = prepare_key(some_id, rts_end)
return start_row, end_row
def prepare_key(*args):
"""Prepares names for rows and columns with correct separator.
:param args: strings or numbers that we want to construct the key from
:return: key with quoted args that are separated with character ":"
"""
key_quote = []
for key in args:
if isinstance(key, six.integer_types):
key = str(key)
key_quote.append(quote(key))
return ":".join(key_quote)
def timestamp_from_record_tuple(record):
"""Extract timestamp from HBase tuple record."""
return record[0]['timestamp']
def resource_id_from_record_tuple(record):
"""Extract resource_id from HBase tuple record."""
return record[0]['resource_id']
def deserialize_entry(entry, get_raw_meta=True):
"""Return a list of flatten_result, sources, meters and metadata.
Flatten_result contains a dict of simple structures such as 'resource_id':1
sources/meters are the lists of sources and meters correspondingly.
metadata is metadata dict. This dict may be returned as flattened if
get_raw_meta is False.
:param entry: entry from HBase, without row name and timestamp
:param get_raw_meta: If true then raw metadata will be returned,
if False metadata will be constructed from
'f:r_metadata.' fields
"""
flatten_result = {}
sources = []
meters = []
metadata_flattened = {}
for k, v in entry.items():
if k.startswith('f:s_'):
sources.append(decode_unicode(k[4:]))
elif k.startswith('f:r_metadata.'):
qualifier = decode_unicode(k[len('f:r_metadata.'):])
metadata_flattened[qualifier] = load(v)
elif k.startswith("f:m_"):
meter = ([unquote(i) for i in k[4:].split(':')], load(v))
meters.append(meter)
else:
if ':' in k[2:]:
key = tuple([unquote(i) for i in k[2:].split(':')])
else:
key = unquote(k[2:])
flatten_result[key] = load(v)
if get_raw_meta:
metadata = flatten_result.get('resource_metadata', {})
else:
metadata = metadata_flattened
return flatten_result, sources, meters, metadata
def serialize_entry(data=None, **kwargs):
"""Return a dict that is ready to be stored to HBase
:param data: dict to be serialized
:param kwargs: additional args
"""
data = data or {}
entry_dict = copy.copy(data)
entry_dict.update(**kwargs)
result = {}
for k, v in entry_dict.items():
if k == 'source':
# user, project and resource tables may contain several sources.
# Besides, resource table may contain several meters.
# To make insertion safe we need to store all meters and sources in
# a separate cell. For this purpose s_ and m_ prefixes are
# introduced.
qualifier = encode_unicode('f:s_%s' % v)
result[qualifier] = dump('1')
elif k == 'meter':
for meter, ts in v.items():
qualifier = encode_unicode('f:m_%s' % meter)
result[qualifier] = dump(ts)
elif k == 'resource_metadata':
# Keep the raw metadata as well as a flattened copy to provide
# compatibility with API v2. It will be flattened in another
# way at the API level, but the flattened form is also needed for quick
# filtering.
flattened_meta = dump_metadata(v)
for key, m in flattened_meta.items():
metadata_qualifier = encode_unicode('f:r_metadata.' + key)
result[metadata_qualifier] = dump(m)
result['f:resource_metadata'] = dump(v)
else:
result['f:' + quote(k, ':')] = dump(v)
return result
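# Minimal round-trip illustration (sketch only, never called): sources become
# s_-prefixed marker cells, plain fields are json-dumped under 'f:'.
def _example_serialize_roundtrip():
    row = serialize_entry({'counter_name': 'cpu', 'source': 'openstack'})
    assert row == {'f:counter_name': '"cpu"', 'f:s_openstack': '"1"'}
    flat, sources, meters, metadata = deserialize_entry(row)
    assert flat == {'counter_name': 'cpu'} and sources == ['openstack']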
def dump_metadata(meta):
resource_metadata = {}
for key, v in utils.dict_to_keyval(meta):
resource_metadata[key] = v
return resource_metadata
def dump(data):
return json.dumps(data, default=bson.json_util.default)
def load(data):
return json.loads(data, object_hook=object_hook)
def encode_unicode(data):
return data.encode('utf-8') if isinstance(data, six.text_type) else data
def decode_unicode(data):
return data.decode('utf-8') if isinstance(data, six.string_types) else data
# We don't want tzinfo in the decoded json. This object_hook overrides
# json_util.object_hook for $date.
def object_hook(dct):
if "$date" in dct:
dt = bson.json_util.object_hook(dct)
return dt.replace(tzinfo=None)
return bson.json_util.object_hook(dct)
def create_tables(conn, tables, column_families):
for table in tables:
try:
conn.create_table(table, column_families)
except ttypes.AlreadyExists:
if conn.table_prefix:
table = ("%(table_prefix)s"
"%(separator)s"
"%(table_name)s" %
dict(table_prefix=conn.table_prefix,
separator=conn.table_prefix_separator,
table_name=table))
LOG.warn(_("Cannot create table %(table_name)s "
"it already exists. Ignoring error")
% {'table_name': table})
def quote(s, *args):
"""Return quoted string even if it is unicode one.
:param s: string that should be quoted
:param args: any symbol we want to stay unquoted
"""
s_en = s.encode('utf8')
return six.moves.urllib.parse.quote(s_en, *args)
def unquote(s):
"""Return unquoted and decoded string.
:param s: string that should be unquoted
"""
s_de = six.moves.urllib.parse.unquote(s)
return s_de.decode('utf8')
|
|
import argparse
import json
import shlex
import sys
import textwrap
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from pip._internal.network.session import PipSession
from pip._internal.req import constructors
from pip._internal.req.req_file import (
RequirementsFileParser,
get_file_content,
get_line_parser,
preprocess,
)
from pip._internal.req.req_install import InstallRequirement
from python.pip_install.extract_wheels.lib import annotation, arguments, bazel
def parse_install_requirements(
requirements_lock: str, extra_pip_args: List[str]
) -> List[Tuple[InstallRequirement, str]]:
ps = PipSession()
# This is roughly taken from pip._internal.req.req_file.parse_requirements
# (https://github.com/pypa/pip/blob/21.0.1/src/pip/_internal/req/req_file.py#L127) in order to keep
# the original line (sort of, it's preprocessed) from the requirements_lock file around, to pass to sub repos
# as the requirement.
line_parser = get_line_parser(finder=None)
parser = RequirementsFileParser(ps, line_parser)
install_req_and_lines: List[Tuple[InstallRequirement, str]] = []
_, content = get_file_content(requirements_lock, ps)
for parsed_line, (_, line) in zip(
parser.parse(requirements_lock, constraint=False), preprocess(content)
):
if parsed_line.is_requirement:
install_req_and_lines.append(
(constructors.install_req_from_line(parsed_line.requirement), line)
)
else:
extra_pip_args.extend(shlex.split(line))
return install_req_and_lines
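# Rough sketch of the result shape (the lock file line below is hypothetical):
# for a line such as
#   requests==2.28.1 --hash=sha256:<...>
# the list contains the pair (InstallRequirement for "requests==2.28.1",
# original line), while non-requirement option lines are shlex-split into
# extra_pip_args.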
def repo_names_and_requirements(
install_reqs: List[Tuple[InstallRequirement, str]], repo_prefix: str
) -> List[Tuple[str, str]]:
return [
(
bazel.sanitise_name(ir.name, prefix=repo_prefix),
line,
)
for ir, line in install_reqs
]
def parse_whl_library_args(args: argparse.Namespace) -> Dict[str, Any]:
whl_library_args = dict(vars(args))
whl_library_args = arguments.deserialize_structured_args(whl_library_args)
whl_library_args.setdefault("python_interpreter", sys.executable)
# These arguments are not used by `whl_library`
for arg in ("requirements_lock", "annotations"):
if arg in whl_library_args:
whl_library_args.pop(arg)
return whl_library_args
def generate_parsed_requirements_contents(
requirements_lock: Path,
repo_prefix: str,
whl_library_args: Dict[str, Any],
annotations: Dict[str, str] = dict(),
) -> str:
"""
Parse each requirement from the requirements_lock file, and prepare arguments for each
repository rule, which will represent the individual requirements.
Generates a requirements.bzl file containing a macro (install_deps()) which instantiates
a repository rule for each requirement in the lock file.
"""
install_req_and_lines = parse_install_requirements(
requirements_lock, whl_library_args["extra_pip_args"]
)
repo_names_and_reqs = repo_names_and_requirements(
install_req_and_lines, repo_prefix
)
all_requirements = ", ".join(
[
bazel.sanitised_repo_library_label(ir.name, repo_prefix=repo_prefix)
for ir, _ in install_req_and_lines
]
)
all_whl_requirements = ", ".join(
[
bazel.sanitised_repo_file_label(ir.name, repo_prefix=repo_prefix)
for ir, _ in install_req_and_lines
]
)
return textwrap.dedent(
"""\
load("@rules_python//python/pip_install:pip_repository.bzl", "whl_library")
all_requirements = [{all_requirements}]
all_whl_requirements = [{all_whl_requirements}]
_packages = {repo_names_and_reqs}
_config = {args}
_annotations = {annotations}
def _clean_name(name):
return name.replace("-", "_").replace(".", "_").lower()
def requirement(name):
return "@{repo_prefix}" + _clean_name(name) + "//:{py_library_label}"
def whl_requirement(name):
return "@{repo_prefix}" + _clean_name(name) + "//:{wheel_file_label}"
def data_requirement(name):
return "@{repo_prefix}" + _clean_name(name) + "//:{data_label}"
def dist_info_requirement(name):
return "@{repo_prefix}" + _clean_name(name) + "//:{dist_info_label}"
def entry_point(pkg, script = None):
if not script:
script = pkg
return "@{repo_prefix}" + _clean_name(pkg) + "//:{entry_point_prefix}_" + script
def _get_annotation(requirement):
# This expects to parse `setuptools==58.2.0 --hash=sha256:2551203ae6955b9876741a26ab3e767bb3242dafe86a32a749ea0d78b6792f11`
# down to `setuptools`.
name = requirement.split(" ")[0].split("=")[0]
return _annotations.get(name)
def install_deps():
for name, requirement in _packages:
whl_library(
name = name,
requirement = requirement,
annotation = _get_annotation(requirement),
**_config,
)
""".format(
all_requirements=all_requirements,
all_whl_requirements=all_whl_requirements,
annotations=json.dumps(annotations),
args=whl_library_args,
data_label=bazel.DATA_LABEL,
dist_info_label=bazel.DIST_INFO_LABEL,
entry_point_prefix=bazel.WHEEL_ENTRY_POINT_PREFIX,
py_library_label=bazel.PY_LIBRARY_LABEL,
repo_names_and_reqs=repo_names_and_reqs,
repo_prefix=repo_prefix,
wheel_file_label=bazel.WHEEL_FILE_LABEL,
)
)
def coerce_to_bool(option):
return str(option).lower() == "true"
def main() -> None:
parser = argparse.ArgumentParser(
description="Create rules to incrementally fetch needed \
dependencies from a fully resolved requirements lock file."
)
parser.add_argument(
"--requirements_lock",
action="store",
required=True,
help="Path to fully resolved requirements.txt to use as the source of repos.",
)
parser.add_argument(
"--python_interpreter",
help="The python interpreter that will be used to download and unpack the wheels.",
)
parser.add_argument(
"--python_interpreter_target",
help="Bazel target of a python interpreter.\
It will be used in repository rules so it must be an already built interpreter.\
If set, it will take precedence over python_interpreter.",
)
parser.add_argument(
"--quiet",
type=coerce_to_bool,
default=True,
required=True,
help="Whether to print stdout / stderr from child repos.",
)
parser.add_argument(
"--timeout",
type=int,
action="store",
required=True,
help="timeout to use for pip operation.",
)
parser.add_argument(
"--annotations",
type=annotation.annotations_map_from_str_path,
help="A json encoded file containing annotations for rendered packages.",
)
arguments.parse_common_args(parser)
args = parser.parse_args()
whl_library_args = parse_whl_library_args(args)
# Check for any annotations which match packages in the locked requirements file
install_requirements = parse_install_requirements(
args.requirements_lock, whl_library_args["extra_pip_args"]
)
req_names = sorted([req.name for req, _ in install_requirements])
annotations = args.annotations.collect(req_names)
# Write all rendered annotation files and generate a list of the labels to write to the requirements file
annotated_requirements = dict()
for name, content in annotations.items():
annotation_path = Path(name + ".annotation.json")
annotation_path.write_text(json.dumps(content, indent=4))
annotated_requirements.update(
{
name: "@{}//:{}.annotation.json".format(
args.repo_prefix.rstrip("_"), name
)
}
)
with open("requirements.bzl", "w") as requirement_file:
requirement_file.write(
generate_parsed_requirements_contents(
requirements_lock=args.requirements_lock,
repo_prefix=args.repo_prefix,
whl_library_args=whl_library_args,
annotations=annotated_requirements,
)
)
|
|
"""A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
class Error(Exception):
pass
class Done(Exception):
pass
class Incomplete(Exception):
pass
def a2b_uu(s):
if not s:
return ''
length = (ord(s[0]) - 0x20) % 64
def quadruplets_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
except IndexError:
s += ' '
yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
return
s = s[4:]
try:
result = [''.join(
[chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
except ValueError:
raise Error('Illegal char')
result = ''.join(result)
trailingdata = result[length:]
# if trailingdata.strip('\x00'):
# raise Error('Trailing garbage')
result = result[:length]
if len(result) < length:
result += ((length - len(result)) * '\x00')
return result
def b2a_uu(s):
length = len(s)
if length > 45:
raise Error('At most 45 bytes at once')
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
s += '\0\0'
yield ord(s[0]), ord(s[1]), ord(s[2])
return
s = s[3:]
result = [''.join(
[chr(0x20 + (( A >> 2 ) & 0x3F)),
chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
chr(0x20 + (( C ) & 0x3F))])
for A, B, C in triples_gen(s)]
return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n'
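# Minimal illustration (sketch only, never called); the values were worked out
# by hand and should match the C binascii module for the same input.
def _example_uu_roundtrip():
    assert b2a_uu('Cat') == '#0V%T\n'
    assert a2b_uu('#0V%T') == 'Cat'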
table_a2b_base64 = {
'A': 0,
'B': 1,
'C': 2,
'D': 3,
'E': 4,
'F': 5,
'G': 6,
'H': 7,
'I': 8,
'J': 9,
'K': 10,
'L': 11,
'M': 12,
'N': 13,
'O': 14,
'P': 15,
'Q': 16,
'R': 17,
'S': 18,
'T': 19,
'U': 20,
'V': 21,
'W': 22,
'X': 23,
'Y': 24,
'Z': 25,
'a': 26,
'b': 27,
'c': 28,
'd': 29,
'e': 30,
'f': 31,
'g': 32,
'h': 33,
'i': 34,
'j': 35,
'k': 36,
'l': 37,
'm': 38,
'n': 39,
'o': 40,
'p': 41,
'q': 42,
'r': 43,
's': 44,
't': 45,
'u': 46,
'v': 47,
'w': 48,
'x': 49,
'y': 50,
'z': 51,
'0': 52,
'1': 53,
'2': 54,
'3': 55,
'4': 56,
'5': 57,
'6': 58,
'7': 59,
'8': 60,
'9': 61,
'+': 62,
'/': 63,
'=': 0,
}
def a2b_base64(s):
if not isinstance(s, (str, unicode)):
raise TypeError("expected string or unicode, got %r" % (s,))
s = s.rstrip()
# clean out all invalid characters, this also strips the final '=' padding
# check for correct padding
def next_valid_char(s, pos):
for i in range(pos + 1, len(s)):
c = s[i]
if c < '\x7f':
try:
table_a2b_base64[c]
return c
except KeyError:
pass
return None
quad_pos = 0
leftbits = 0
leftchar = 0
res = []
for i, c in enumerate(s):
if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
continue
if c == '=':
if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
continue
else:
leftbits = 0
break
try:
next_c = table_a2b_base64[c]
except KeyError:
continue
quad_pos = (quad_pos + 1) & 0x03
leftchar = (leftchar << 6) | next_c
leftbits += 6
if leftbits >= 8:
leftbits -= 8
res.append((leftchar >> leftbits & 0xff))
leftchar &= ((1 << leftbits) - 1)
if leftbits != 0:
raise Error('Incorrect padding')
return ''.join([chr(i) for i in res])
table_b2a_base64 = \
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def b2a_base64(s):
length = len(s)
final_length = length % 3
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
s += '\0\0'
yield ord(s[0]), ord(s[1]), ord(s[2])
return
s = s[3:]
a = triples_gen(s[ :length - final_length])
result = [''.join(
[table_b2a_base64[( A >> 2 ) & 0x3F],
table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
table_b2a_base64[( C ) & 0x3F]])
for A, B, C in a]
final = s[length - final_length:]
if final_length == 0:
snippet = ''
elif final_length == 1:
a = ord(final[0])
snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
table_b2a_base64[(a << 4 ) & 0x3F] + '=='
else:
a = ord(final[0])
b = ord(final[1])
snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
table_b2a_base64[(b << 2) & 0x3F] + '='
return ''.join(result) + snippet + '\n'
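# Minimal illustration (sketch only, never called): standard base64 alphabet,
# '=' padding and a trailing newline.
def _example_base64_roundtrip():
    assert b2a_base64('hi') == 'aGk=\n'
    assert a2b_base64('aGk=') == 'hi'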
def a2b_qp(s, header=False):
inp = 0
odata = []
while inp < len(s):
if s[inp] == '=':
inp += 1
if inp >= len(s):
break
# Soft line breaks
if (s[inp] == '\n') or (s[inp] == '\r'):
if s[inp] != '\n':
while inp < len(s) and s[inp] != '\n':
inp += 1
if inp < len(s):
inp += 1
elif s[inp] == '=':
# broken case from broken python qp
odata.append('=')
inp += 1
elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
ch = chr(int(s[inp:inp+2], 16))
inp += 2
odata.append(ch)
else:
odata.append('=')
elif header and s[inp] == '_':
odata.append(' ')
inp += 1
else:
odata.append(s[inp])
inp += 1
return ''.join(odata)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
"""quotetabs=True means that tab and space characters are always
quoted.
istext=False means that \r and \n are treated as regular characters
header=True encodes space characters with '_' and requires
real '_' characters to be quoted.
"""
MAXLINESIZE = 76
# See if this string is using CRLF line ends
lf = data.find('\n')
crlf = lf > 0 and data[lf-1] == '\r'
inp = 0
linelen = 0
odata = []
while inp < len(data):
c = data[inp]
if (c > '~' or
c == '=' or
(header and c == '_') or
(c == '.' and linelen == 0 and (inp+1 == len(data) or
data[inp+1] == '\n' or
data[inp+1] == '\r')) or
(not istext and (c == '\r' or c == '\n')) or
((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
(c <= ' ' and c != '\r' and c != '\n' and
(quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
linelen += 3
if linelen >= MAXLINESIZE:
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 3
odata.append('=' + two_hex_digits(ord(c)))
inp += 1
else:
if (istext and
(c == '\n' or (inp+1 < len(data) and c == '\r' and
data[inp+1] == '\n'))):
linelen = 0
# Protect against whitespace on end of line
if (len(odata) > 0 and
(odata[-1] == ' ' or odata[-1] == '\t')):
ch = ord(odata[-1])
odata[-1] = '='
odata.append(two_hex_digits(ch))
if crlf: odata.append('\r')
odata.append('\n')
if c == '\r':
inp += 2
else:
inp += 1
else:
if (inp + 1 < len(data) and
data[inp+1] != '\n' and
(linelen + 1) >= MAXLINESIZE):
odata.append('=')
if crlf: odata.append('\r')
odata.append('\n')
linelen = 0
linelen += 1
if header and c == ' ':
c = '_'
odata.append(c)
inp += 1
return ''.join(odata)
hex_numbers = '0123456789ABCDEF'
def hex(n):
if n == 0:
return '0'
if n < 0:
n = -n
sign = '-'
else:
sign = ''
arr = []
def hex_gen(n):
""" Yield a nibble at a time. """
while n:
yield n % 0x10
n = n / 0x10
for nibble in hex_gen(n):
arr = [hex_numbers[nibble]] + arr
return sign + ''.join(arr)
def two_hex_digits(n):
return hex_numbers[n / 0x10] + hex_numbers[n % 0x10]
def strhex_to_int(s):
i = 0
for c in s:
i = i * 0x10 + hex_numbers.index(c)
return i
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
DONE = 0x7f
SKIP = 0x7e
FAIL = 0x7d
table_a2b_hqx = [
#^@ ^A ^B ^C ^D ^E ^F ^G
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#\b \t \n ^K ^L \r ^N ^O
FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
#^P ^Q ^R ^S ^T ^U ^V ^W
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#^X ^Y ^Z ^[ ^\ ^] ^^ ^_
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
# ! " # $ % & '
FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
#( ) * + , - . /
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
#0 1 2 3 4 5 6 7
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
#8 9 : ; < = > ?
0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
#@ A B C D E F G
0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
#H I J K L M N O
0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
#P Q R S T U V W
0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
#X Y Z [ \ ] ^ _
0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
#` a b c d e f g
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
#h i j k l m n o
0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
#p q r s t u v w
0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
#x y z { | } ~ ^?
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
def a2b_hqx(s):
result = []
def quadruples_gen(s):
t = []
for c in s:
res = table_a2b_hqx[ord(c)]
if res == SKIP:
continue
elif res == FAIL:
raise Error('Illegal character')
elif res == DONE:
yield t
raise Done
else:
t.append(res)
if len(t) == 4:
yield t
t = []
yield t
done = 0
try:
for snippet in quadruples_gen(s):
length = len(snippet)
if length == 4:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
elif length == 3:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
elif length == 2:
result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
except Done:
done = 1
except Error:
raise
return (''.join(result), done)
def b2a_hqx(s):
result =[]
def triples_gen(s):
while s:
try:
yield ord(s[0]), ord(s[1]), ord(s[2])
except IndexError:
yield tuple([ord(c) for c in s])
s = s[3:]
for snippet in triples_gen(s):
length = len(snippet)
if length == 3:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
result.append(hqx_encoding[snippet[2] & 0x3f])
elif length == 2:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
result.append(hqx_encoding[
(snippet[1] & 0x0f) << 2])
elif length == 1:
result.append(
hqx_encoding[(snippet[0] & 0xfc) >> 2])
result.append(hqx_encoding[
((snippet[0] & 0x03) << 4)])
return ''.join(result)
crctab_hqx = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
for c in s:
crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
return crc
def rlecode_hqx(s):
"""
Run length encoding for binhex4.
The CPython implementation does not do run length encoding
of \x90 characters. This implementation does.
"""
if not s:
return ''
result = []
prev = s[0]
count = 1
# Add a dummy character to get the loop to go one extra round.
# The dummy must be different from the last character of s.
# In the same step we remove the first character, which has
# already been stored in prev.
if s[-1] == '!':
s = s[1:] + '?'
else:
s = s[1:] + '!'
for c in s:
if c == prev and count < 255:
count += 1
else:
if count == 1:
if prev != '\x90':
result.append(prev)
else:
result += ['\x90', '\x00']
elif count < 4:
if prev != '\x90':
result += [prev] * count
else:
result += ['\x90', '\x00'] * count
else:
if prev != '\x90':
result += [prev, '\x90', chr(count)]
else:
result += ['\x90', '\x00', '\x90', chr(count)]
count = 1
prev = c
return ''.join(result)
def rledecode_hqx(s):
s = s.split('\x90')
result = [s[0]]
prev = s[0]
for snippet in s[1:]:
count = ord(snippet[0])
if count > 0:
result.append(prev[-1] * (count-1))
prev = snippet
else:
result.append('\x90')
prev = '\x90'
result.append(snippet[1:])
return ''.join(result)
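# Minimal illustration (sketch only, never called): a run of four identical
# bytes collapses to <byte> \x90 <count>, and rledecode_hqx reverses it.
def _example_rle_hqx_roundtrip():
    assert rlecode_hqx('aaaa') == 'a\x90\x04'
    assert rledecode_hqx('a\x90\x04') == 'aaaa'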
crc_32_tab = [
0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
0x2d02ef8dL
]
def crc32(s, crc=0):
result = 0
crc = ~long(crc) & 0xffffffffL
for c in s:
crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
#/* Note: (crc >> 8) MUST zero fill on left
result = crc ^ 0xffffffffL
if result > (1 << 31):
result = ((result + (1<<31)) % (1<<32)) - (1<<31)
return result
def b2a_hex(s):
result = []
for char in s:
c = (ord(char) >> 4) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
c = ord(char) & 0xf
if c > 9:
c = c + ord('a') - 10
else:
c = c + ord('0')
result.append(chr(c))
return ''.join(result)
hexlify = b2a_hex
table_hex = [
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
result = []
def pairs_gen(s):
while s:
try:
yield table_hex[ord(s[0])], table_hex[ord(s[1])]
except IndexError:
if len(s):
raise TypeError('Odd-length string')
return
s = s[2:]
for a, b in pairs_gen(t):
if a < 0 or b < 0:
raise TypeError('Non-hexadecimal digit found')
result.append(chr((a << 4) + b))
return ''.join(result)
unhexlify = a2b_hex
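# Minimal illustration (sketch only, never called) of the hex round trip.
def _example_hex_roundtrip():
    assert b2a_hex('\xff\x01') == 'ff01'
    assert a2b_hex('ff01') == '\xff\x01'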
|
|
from __future__ import with_statement
from datetime import datetime, timedelta
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import SessionStore as CookieSession
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import timezone
from django.utils import unittest
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.session.accessed = False
self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertTrue('some key' in self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(self.session.values(), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(self.session.values(), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iterkeys()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.itervalues()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iteritems()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x',1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(self.session.items(), [('x',1)])
self.session.clear()
self.assertEqual(self.session.items(), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = self.session.items()
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(self.session.items(), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail("The session object did not save properly. Middleware may be saving cache items without namespaces.")
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
# Using seconds
self.session.set_expiry(10)
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_timedelta(self):
# Using timedelta
self.session.set_expiry(timedelta(seconds=10))
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_datetime(self):
# Using fixed datetime
self.session.set_expiry(timezone.now() + timedelta(seconds=10))
delta = self.session.get_expiry_date() - timezone.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_get_decoded(self):
"""
Test that we can use Session.get_decoded to retrieve data stored
in the normal way.
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y':2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
DatabaseSessionWithTimeZoneTests = override_settings(USE_TZ=True)(DatabaseSessionTests)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
CacheDBSessionWithTimeZoneTests = override_settings(USE_TZ=True)(CacheDBSessionTests)
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
super(FileSessionTests, self).setUp()
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
def tearDown(self):
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
super(FileSessionTests, self).tearDown()
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(SuspiciousOperation,
self.backend("a\\b\\c").load)
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(SuspiciousOperation,
self.backend("a/b/c").load)
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
class SessionMiddlewareTests(unittest.TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# If it isn't in the cookie, that's fine (Python 2.5)
if 'httponly' in settings.SESSION_COOKIE_NAME:
self.assertFalse(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
class CookieSessionTests(SessionTestsMixin, TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import logging
from datetime import datetime
from six.moves.urllib.error import HTTPError
import six
from io import BytesIO
from tg import (
expose,
flash,
redirect
)
from tg.decorators import (
with_trailing_slash,
without_trailing_slash
)
from allura import model as M
from allura.lib import helpers as h
from allura.lib import validators as v
from allura.lib.plugin import ImportIdConverter
from allura.lib.decorators import require_post
from ming.orm import session, ThreadLocalORMSession
from tg import tmpl_context as c
from tg import app_globals as g
from forgetracker import model as TM
from forgeimporters.base import (
ToolImporter,
ToolImportForm,
ToolImportController,
)
from forgeimporters.github import (
GitHubProjectExtractor,
GitHubOAuthMixin,
GitHubProjectNameValidator,
)
from forgeimporters.github.utils import GitHubMarkdownConverter
log = logging.getLogger(__name__)
class GitHubTrackerImportForm(ToolImportForm):
gh_project_name = GitHubProjectNameValidator()
gh_user_name = v.UnicodeString(not_empty=True)
class GitHubTrackerImportController(ToolImportController, GitHubOAuthMixin):
import_form = GitHubTrackerImportForm
@with_trailing_slash
@expose('jinja:forgeimporters.github:templates/tracker/index.html')
def index(self, **kw):
self.oauth_begin()
return dict(importer=self.importer,
target_app=self.target_app)
@without_trailing_slash
@expose()
@require_post()
def create(self, gh_project_name, gh_user_name, mount_point, mount_label, **kw):
if self.importer.enforce_limit(c.project):
self.importer.post(
project_name=gh_project_name,
user_name=gh_user_name,
mount_point=mount_point,
mount_label=mount_label)
flash('Ticket import has begun. Your new tracker will be available '
'when the import is complete.')
else:
flash(
'There are too many imports pending at this time. Please wait and try again.', 'error')
redirect(c.project.url() + 'admin/')
class GitHubTrackerImporter(ToolImporter):
source = 'GitHub'
target_app_ep_names = 'tickets'
controller = GitHubTrackerImportController
tool_label = 'Issues'
max_ticket_num = 0
open_milestones = set()
def import_tool(self, project, user, project_name, mount_point=None,
mount_label=None, **kw):
import_id_converter = ImportIdConverter.get()
project_name = '{}/{}'.format(kw['user_name'], project_name)
extractor = GitHubProjectExtractor(project_name, user=user)
if not extractor.has_tracker():
return
app = project.install_app('tickets', mount_point, mount_label,
EnableVoting=False,
open_status_names='open',
closed_status_names='closed',
import_id={
'source': self.source,
'project_name': project_name,
}
)
self.github_markdown_converter = GitHubMarkdownConverter(
kw['user_name'], project_name)
ThreadLocalORMSession.flush_all()
try:
M.session.artifact_orm_session._get().skip_mod_date = True
with h.push_config(c, user=M.User.anonymous(), app=app):
for ticket_num, issue in extractor.iter_issues():
self.max_ticket_num = max(ticket_num, self.max_ticket_num)
ticket = TM.Ticket(
app_config_id=app.config._id,
custom_fields=dict(),
ticket_num=ticket_num,
import_id=import_id_converter.expand(ticket_num, app)
)
self.process_fields(extractor, ticket, issue)
self.process_comments(extractor, ticket, issue)
self.process_events(extractor, ticket, issue)
self.process_milestones(ticket, issue)
session(ticket).flush(ticket)
session(ticket).expunge(ticket)
app.globals.custom_fields = self.postprocess_milestones()
app.globals.last_ticket_num = self.max_ticket_num
ThreadLocalORMSession.flush_all()
M.AuditLog.log(
'import tool {} from {} on {}'.format(
app.config.options.mount_point,
project_name, self.source),
project=project, user=user, url=app.url)
g.post_event('project_updated')
app.globals.invalidate_bin_counts()
return app
finally:
M.session.artifact_orm_session._get().skip_mod_date = False
def parse_datetime(self, datetime_string):
return datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%SZ')
def get_user_link(self, user):
return '[{0}](https://github.com/{0})'.format(user)
def process_fields(self, extractor, ticket, issue):
ticket.summary = issue['title']
ticket.status = issue['state']
ticket.created_date = self.parse_datetime(issue['created_at'])
ticket.mod_date = self.parse_datetime(issue['updated_at'])
if issue['assignee']:
owner_line = '*Originally owned by:* {}\n'.format(
self.get_user_link(issue['assignee']['login']))
else:
owner_line = ''
# body processing happens here
body, attachments = self._get_attachments(extractor, issue['body'])
ticket.add_multiple_attachments(attachments)
ticket.description = (
'*Originally created by:* {creator}\n'
'{owner}'
'\n'
'{body}').format(
creator=self.get_user_link(issue['user']['login']),
owner=owner_line,
body=self.github_markdown_converter.convert(body),
)
ticket.labels = [label['name'] for label in issue['labels']]
def process_comments(self, extractor, ticket, issue):
for comment in extractor.iter_comments(issue):
body, attachments = self._get_attachments(
extractor, comment['body'])
if comment['user']:
posted_by = '*Originally posted by:* {}\n\n'.format(
self.get_user_link(comment['user']['login']))
body = posted_by + body
p = ticket.discussion_thread.add_post(
text=self.github_markdown_converter.convert(body),
ignore_security=True,
timestamp=self.parse_datetime(comment['created_at']),
)
p.add_multiple_attachments(attachments)
def process_events(self, extractor, ticket, issue):
for event in extractor.iter_events(issue):
prefix = text = ''
actor = event['actor']
if event['event'] in ('reopened', 'closed'):
prefix = '*Ticket changed by:* {}\n\n'.format(
self.get_user_link(actor['login'] if actor else 'ghost'))
if event['event'] == 'reopened':
text = '- **status**: closed --> open'
elif event['event'] == 'closed':
text = '- **status**: open --> closed'
elif event['event'] == 'assigned':
text = '- **assigned_to**: {}'.format(
self.get_user_link(actor['login'] if actor else 'ghost'))
text = prefix + text
if not text:
continue
ticket.discussion_thread.add_post(
text=text,
ignore_security=True,
timestamp=self.parse_datetime(event['created_at'])
)
def process_milestones(self, ticket, issue):
if issue['milestone']:
title = issue['milestone']['title']
due = None
if issue['milestone']['due_on']:
due = self.parse_datetime(issue['milestone']['due_on'])
ticket.custom_fields = {
'_milestone': title,
}
self.open_milestones.add((title, due,))
def postprocess_milestones(self):
global_milestones = {
'milestones': [],
'type': 'milestone',
'name': '_milestone',
'label': 'Milestone'
}
for milestone in self.open_milestones:
global_milestones['milestones'].append({
'name': milestone[0],
'due_date': str(milestone[1].date()) if milestone[1] else None,
'complete': False,
})
return [global_milestones]
def _get_attachments(self, extractor, body):
# at github, attachments are images only and are included into the comment's body
# the usual syntax is a Markdown image link followed by a newline, e.g.
# ![name](https://host/path/image.png)\r\n
REGEXP = r'!\[[\w0-9]+?\]\(((?:https?:\/\/)?[\da-z\.-]+\.[a-z\.]{2,6}'\
'[\\/%\\w\\.-]*.(jpg|jpeg|png|gif))\\)[\r\n]*'
attachments = []
try:
found_matches = re.finditer(REGEXP, body, re.IGNORECASE)
except TypeError:
found_matches = re.finditer(REGEXP, str(body), re.IGNORECASE)
for i, match in enumerate(found_matches):
# removing attach text from comment
body = body.replace(match.group(0), '')
# build an attachment from the matched url and extension
attachments.append(Attachment(
extractor,
match.group(1), # url
f'attach{i + 1}.{match.group(2)}' # generated filename (extension from the match)
))
return (body, attachments)
class Attachment:
def __init__(self, extractor, url, filename):
self.url = url
self.filename = filename
self.type = None
file = self.get_file(extractor)
if file:
# don't set unless valid (add_multiple_attachments uses hasattr)
self.file = file
def get_file(self, extractor):
try:
fp_ish = extractor.urlopen(self.url)
fp = BytesIO(fp_ish.read())
return fp
except HTTPError as e:
if e.code == 404:
log.error('Unable to load attachment: %s', self.url)
return None
else:
raise
|
|
#!/usr/bin/env python
import unittest
import xml.etree.cElementTree as eTree
from latex2mathml import converter
__author__ = "Ronie Martinez"
__copyright__ = "Copyright 2016-2017, Ronie Martinez"
__credits__ = ["Ronie Martinez"]
__license__ = "MIT"
__maintainer__ = "Ronie Martinez"
__email__ = "[email protected]"
__status__ = "Development"
class CommandTest(unittest.TestCase):
def setUp(self):
self.math = eTree.Element('math')
self.row = eTree.SubElement(self.math, 'mrow')
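# Each test below builds the MathML element tree expected for a given LaTeX
# input under the shared <math><mrow> wrapper created in setUp(), then
# compares its serialization against the output of converter.convert().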
def test_subscript(self):
sub = eTree.SubElement(self.row, 'msub')
mi = eTree.SubElement(sub, 'mi')
mi.text = 'a'
mi = eTree.SubElement(sub, 'mi')
mi.text = 'b'
self.assertEqual(eTree.tostring(self.math), converter.convert('a_b'))
def test_superscript(self):
sup = eTree.SubElement(self.row, 'msup')
mi = eTree.SubElement(sup, 'mi')
mi.text = 'a'
mi = eTree.SubElement(sup, 'mi')
mi.text = 'b'
self.assertEqual(eTree.tostring(self.math), converter.convert('a^b'))
def test_subscript_and_superscript(self):
subsup = eTree.SubElement(self.row, 'msubsup')
mi = eTree.SubElement(subsup, 'mi')
mi.text = 'a'
mi = eTree.SubElement(subsup, 'mi')
mi.text = 'b'
mi = eTree.SubElement(subsup, 'mi')
mi.text = 'c'
self.assertEqual(eTree.tostring(self.math), converter.convert('a_b^c'))
def test_superscript_and_subscript(self):
subsup = eTree.SubElement(self.row, 'msubsup')
mi = eTree.SubElement(subsup, 'mi')
mi.text = 'a'
mi = eTree.SubElement(subsup, 'mi')
mi.text = 'c'
mi = eTree.SubElement(subsup, 'mi')
mi.text = 'b'
self.assertEqual(eTree.tostring(self.math), converter.convert('a^b_c'))
def test_subscript_within_curly_braces(self):
row = eTree.SubElement(self.row, 'mrow')
sub = eTree.SubElement(row, 'msub')
mi = eTree.SubElement(sub, 'mi')
mi.text = 'a'
mi = eTree.SubElement(sub, 'mi')
mi.text = 'b'
self.assertEqual(eTree.tostring(self.math), converter.convert('{a_b}'))
def test_superscript_within_curly_braces(self):
row = eTree.SubElement(self.row, 'mrow')
sup = eTree.SubElement(row, 'msup')
mi = eTree.SubElement(sup, 'mi')
mi.text = 'a'
mi = eTree.SubElement(sup, 'mi')
mi.text = 'b'
self.assertEqual(eTree.tostring(self.math), converter.convert('{a^b}'))
def test_superscript_with_curly_braces(self):
subsup = eTree.SubElement(self.row, 'msubsup')
mi = eTree.SubElement(subsup, 'mi')
mi.text = 'a'
mn = eTree.SubElement(subsup, 'mn')
mn.text = '3'
row = eTree.SubElement(subsup, 'mrow')
mi = eTree.SubElement(row, 'mi')
mi.text = 'i'
mo = eTree.SubElement(row, 'mo')
mo.text = '+'
mn = eTree.SubElement(row, 'mn')
mn.text = '1'
self.assertEqual(eTree.tostring(self.math), converter.convert('a^{i+1}_3'))
def test_simple_fraction(self):
frac = eTree.SubElement(self.row, 'mfrac')
row = eTree.SubElement(frac, 'mrow')
mn = eTree.SubElement(row, 'mn')
mn.text = '1'
row = eTree.SubElement(frac, 'mrow')
mn = eTree.SubElement(row, 'mn')
mn.text = '2'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\frac{1}{2}'))
def test_square_root(self):
sqrt = eTree.SubElement(self.row, 'msqrt')
row = eTree.SubElement(sqrt, 'mrow')
mn = eTree.SubElement(row, 'mn')
mn.text = '2'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\sqrt{2}'))
def test_root(self):
root = eTree.SubElement(self.row, 'mroot')
row = eTree.SubElement(root, 'mrow')
mn = eTree.SubElement(row, 'mn')
mn.text = '2'
row = eTree.SubElement(root, 'mrow')
mn = eTree.SubElement(row, 'mn')
mn.text = '3'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\sqrt[3]{2}'))
def test_binomial(self):
mo = eTree.SubElement(self.row, 'mo')
mo.text = '('
frac = eTree.SubElement(self.row, 'mfrac', linethickness="0")
row = eTree.SubElement(frac, 'mrow')
mn = eTree.SubElement(row, 'mn')
mn.text = '2'
row = eTree.SubElement(frac, 'mrow')
mn = eTree.SubElement(row, 'mn')
mn.text = '3'
mo = eTree.SubElement(self.row, 'mo')
mo.text = ')'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\binom{2}{3}'))
def test_left_and_right(self):
mo = eTree.SubElement(self.row, 'mo', stretchy='true', form='prefix', fence='true')
mo.text = '('
mi = eTree.SubElement(self.row, 'mi')
mi.text = 'x'
mo = eTree.SubElement(self.row, 'mo', stretchy='true', form='postfix', fence='true')
mo.text = ')'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\left(x\right)'))
def test_space(self):
eTree.SubElement(self.row, 'mspace', width='0.167em')
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\,'))
def test_overline(self):
over = eTree.SubElement(self.row, 'mover')
row = eTree.SubElement(over, 'mrow')
mi = eTree.SubElement(row, 'mi')
mi.text = 'a'
mo = eTree.SubElement(over, 'mo', stretchy='true')
mo.text = '¯'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\overline{a}'))
def test_underline(self):
under = eTree.SubElement(self.row, 'munder')
mrow = eTree.SubElement(under, 'mrow')
mi = eTree.SubElement(mrow, 'mi')
mi.text = 'a'
mo = eTree.SubElement(under, 'mo', stretchy='true')
mo.text = '̲'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\underline{a}'))
def test_matrix(self):
table = eTree.SubElement(self.row, 'mtable')
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'a'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'b'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'c'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'd'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\begin{matrix}a & b \\ c & d \end{matrix}'))
def test_matrix_without_begin_and_end(self): # taken from MathJax
table = eTree.SubElement(self.row, 'mtable')
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'a'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'b'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'c'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'd'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\matrix{a & b \\ c & d}'))
def test_matrix_with_alignment(self):
table = eTree.SubElement(self.row, 'mtable')
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mi = eTree.SubElement(td, 'mi')
mi.text = 'a'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mi = eTree.SubElement(td, 'mi')
mi.text = 'b'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mi = eTree.SubElement(td, 'mi')
mi.text = 'c'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mi = eTree.SubElement(td, 'mi')
mi.text = 'd'
self.assertEqual(eTree.tostring(self.math),
converter.convert(r'\begin{matrix*}[r]a & b \\ c & d \end{matrix*}'))
def test_matrix_with_negative_sign(self):
table = eTree.SubElement(self.row, 'mtable')
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mo = eTree.SubElement(td, 'mo')
mo.text = '−'
mi = eTree.SubElement(td, 'mi')
mi.text = 'a'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'b'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'c'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'd'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\begin{matrix}-a & b \\ c & d \end{matrix}'))
def test_pmatrix(self):
mo = eTree.SubElement(self.row, 'mo')
mo.text = '('
table = eTree.SubElement(self.row, 'mtable')
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'a'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'b'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'c'
td = eTree.SubElement(tr, 'mtd')
mi = eTree.SubElement(td, 'mi')
mi.text = 'd'
mo = eTree.SubElement(self.row, 'mo')
mo.text = ')'
self.assertEqual(eTree.tostring(self.math), converter.convert(r'\begin{pmatrix}a & b \\ c & d \end{pmatrix}'))
def test_simple_array(self):
table = eTree.SubElement(self.row, 'mtable')
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='center')
mn = eTree.SubElement(td, 'mn')
mn.text = '1'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mn = eTree.SubElement(td, 'mn')
mn.text = '2'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='center')
mn = eTree.SubElement(td, 'mn')
mn.text = '3'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mn = eTree.SubElement(td, 'mn')
mn.text = '4'
self.assertEqual(eTree.tostring(self.math),
converter.convert(r'\begin{array}{cr} 1 & 2 \\ 3 & 4 \end{array}'''))
def test_array_with_vertical_bars(self):
table = eTree.SubElement(self.row, 'mtable', columnlines='solid none')
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='center')
mn = eTree.SubElement(td, 'mn')
mn.text = '1'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mn = eTree.SubElement(td, 'mn')
mn.text = '2'
td = eTree.SubElement(tr, 'mtd', columnalign='left')
mn = eTree.SubElement(td, 'mn')
mn.text = '3'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='center')
mn = eTree.SubElement(td, 'mn')
mn.text = '4'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mn = eTree.SubElement(td, 'mn')
mn.text = '5'
td = eTree.SubElement(tr, 'mtd', columnalign='left')
mn = eTree.SubElement(td, 'mn')
mn.text = '6'
self.assertEqual(eTree.tostring(self.math),
converter.convert(r'\begin{array}{c|rl} 1 & 2 & 3 \\ 4 & 5 & 6 \end{array}'''))
def test_array_with_horizontal_lines(self):
table = eTree.SubElement(self.row, 'mtable', rowlines="none solid")
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='center')
mn = eTree.SubElement(td, 'mn')
mn.text = '1'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mn = eTree.SubElement(td, 'mn')
mn.text = '2'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='center')
mn = eTree.SubElement(td, 'mn')
mn.text = '3'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mn = eTree.SubElement(td, 'mn')
mn.text = '4'
tr = eTree.SubElement(table, 'mtr')
td = eTree.SubElement(tr, 'mtd', columnalign='center')
mn = eTree.SubElement(td, 'mn')
mn.text = '5'
td = eTree.SubElement(tr, 'mtd', columnalign='right')
mn = eTree.SubElement(td, 'mn')
mn.text = '6'
self.assertEqual(eTree.tostring(self.math),
converter.convert(r'\begin{array}{cr} 1 & 2 \\ 3 & 4 \\ \hline 5 & 6 \end{array}'''))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import os
import jsonpath_rw
from oslo_config import cfg
from oslo_utils import timeutils
import six
import yaml
from ceilometer.event.storage import models
from ceilometer.i18n import _
from ceilometer.openstack.common import log
OPTS = [
cfg.StrOpt('definitions_cfg_file',
default="event_definitions.yaml",
help="Configuration file for event definitions."
),
cfg.BoolOpt('drop_unmatched_notifications',
default=False,
help='Drop notifications if no event definition matches. '
'(Otherwise, we convert them with just the default traits)'),
]
cfg.CONF.register_opts(OPTS, group='event')
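# Illustrative ceilometer.conf snippet for the options registered above
# (the values shown are the defaults):
#
#   [event]
#   definitions_cfg_file = event_definitions.yaml
#   drop_unmatched_notifications = False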
LOG = log.getLogger(__name__)
class EventDefinitionException(Exception):
def __init__(self, message, definition_cfg):
super(EventDefinitionException, self).__init__(message)
self.definition_cfg = definition_cfg
def __str__(self):
return '%s %s: %s' % (self.__class__.__name__,
self.definition_cfg, self.message)
class TraitDefinition(object):
def __init__(self, name, trait_cfg, plugin_manager):
self.cfg = trait_cfg
self.name = name
type_name = trait_cfg.get('type', 'text')
if 'plugin' in trait_cfg:
plugin_cfg = trait_cfg['plugin']
if isinstance(plugin_cfg, six.string_types):
plugin_name = plugin_cfg
plugin_params = {}
else:
try:
plugin_name = plugin_cfg['name']
except KeyError:
raise EventDefinitionException(
_('Plugin specified, but no plugin name supplied for '
'trait %s') % name, self.cfg)
plugin_params = plugin_cfg.get('parameters')
if plugin_params is None:
plugin_params = {}
try:
plugin_ext = plugin_manager[plugin_name]
except KeyError:
raise EventDefinitionException(
_('No plugin named %(plugin)s available for '
'trait %(trait)s') % dict(plugin=plugin_name,
trait=name), self.cfg)
plugin_class = plugin_ext.plugin
self.plugin = plugin_class(**plugin_params)
else:
self.plugin = None
if 'fields' not in trait_cfg:
raise EventDefinitionException(
_("Required field in trait definition not specified: "
"'%s'") % 'fields',
self.cfg)
fields = trait_cfg['fields']
if not isinstance(fields, six.string_types):
# NOTE(mdragon): if not a string, we assume a list.
if len(fields) == 1:
fields = fields[0]
else:
fields = '|'.join('(%s)' % path for path in fields)
try:
self.fields = jsonpath_rw.parse(fields)
except Exception as e:
raise EventDefinitionException(
_("Parse error in JSONPath specification "
"'%(jsonpath)s' for %(trait)s: %(err)s")
% dict(jsonpath=fields, trait=name, err=e), self.cfg)
self.trait_type = models.Trait.get_type_by_name(type_name)
if self.trait_type is None:
raise EventDefinitionException(
_("Invalid trait type '%(type)s' for trait %(trait)s")
% dict(type=type_name, trait=name), self.cfg)
def _get_path(self, match):
if match.context is not None:
for path_element in self._get_path(match.context):
yield path_element
yield str(match.path)
def to_trait(self, notification_body):
values = [match for match in self.fields.find(notification_body)
if match.value is not None]
if self.plugin is not None:
value_map = [('.'.join(self._get_path(match)), match.value) for
match in values]
value = self.plugin.trait_value(value_map)
else:
value = values[0].value if values else None
if value is None:
return None
# NOTE(mdragon): some openstack projects (mostly Nova) emit ''
# for null fields for things like dates.
if self.trait_type != models.Trait.TEXT_TYPE and value == '':
return None
value = models.Trait.convert_value(self.trait_type, value)
return models.Trait(self.name, self.trait_type, value)
class EventDefinition(object):
DEFAULT_TRAITS = dict(
service=dict(type='text', fields='publisher_id'),
request_id=dict(type='text', fields='_context_request_id'),
tenant_id=dict(type='text', fields=['payload.tenant_id',
'_context_tenant']),
)
def __init__(self, definition_cfg, trait_plugin_mgr):
self._included_types = []
self._excluded_types = []
self.traits = dict()
self.cfg = definition_cfg
try:
event_type = definition_cfg['event_type']
traits = definition_cfg['traits']
except KeyError as err:
raise EventDefinitionException(
_("Required field %s not specified") % err.args[0], self.cfg)
if isinstance(event_type, six.string_types):
event_type = [event_type]
for t in event_type:
if t.startswith('!'):
self._excluded_types.append(t[1:])
else:
self._included_types.append(t)
if self._excluded_types and not self._included_types:
self._included_types.append('*')
for trait_name in self.DEFAULT_TRAITS:
self.traits[trait_name] = TraitDefinition(
trait_name,
self.DEFAULT_TRAITS[trait_name],
trait_plugin_mgr)
for trait_name in traits:
self.traits[trait_name] = TraitDefinition(
trait_name,
traits[trait_name],
trait_plugin_mgr)
def included_type(self, event_type):
for t in self._included_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def excluded_type(self, event_type):
for t in self._excluded_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def match_type(self, event_type):
return (self.included_type(event_type)
and not self.excluded_type(event_type))
@property
def is_catchall(self):
return '*' in self._included_types and not self._excluded_types
@staticmethod
def _extract_when(body):
"""Extract the generated datetime from the notification."""
# NOTE: I am keeping the logic the same as it was in the collector,
# However, *ALL* notifications should have a 'timestamp' field, it's
# part of the notification envelope spec. If this was put here because
# some openstack project is generating notifications without a
# timestamp, then that needs to be filed as a bug with the offending
# project (mdragon)
when = body.get('timestamp', body.get('_context_timestamp'))
if when:
return timeutils.normalize_time(timeutils.parse_isotime(when))
return timeutils.utcnow()
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
when = self._extract_when(notification_body)
traits = (self.traits[t].to_trait(notification_body)
for t in self.traits)
# Only accept non-None value traits ...
traits = [trait for trait in traits if trait is not None]
event = models.Event(message_id, event_type, when, traits)
return event
class NotificationEventsConverter(object):
"""Notification Event Converter
The NotificationEventsConverter handles the conversion of Notifications
from openstack systems into Ceilometer Events.
The conversion is handled according to event definitions in a config file.
The config is a list of event definitions. Order is significant, a
notification will be processed according to the LAST definition that
matches its event_type. (We use the last matching definition because that
allows you to use YAML merge syntax in the definitions file.)
Each definition is a dictionary with the following keys (all are
required):
- event_type: this is a list of notification event_types this definition
will handle. These can be wildcarded with unix shell glob (not regex!)
wildcards.
An exclusion listing (starting with a '!') will exclude any types listed
from matching. If ONLY exclusions are listed, the definition will match
anything not matching the exclusions.
This item can also be a string, which will be taken as equivalent to a
1-item list.
Examples:
* ['compute.instance.exists'] will only match
compute.instance.exists notifications
* "compute.instance.exists" Same as above.
* ["image.create", "image.delete"] will match
image.create and image.delete, but not anything else.
* "compute.instance.*" will match
compute.instance.create.start but not image.upload
* ['*.start','*.end', '!scheduler.*'] will match
compute.instance.create.start, and image.delete.end,
but NOT compute.instance.exists or
scheduler.run_instance.start
* '!image.*' matches any notification except image
notifications.
* ['*', '!image.*'] same as above.
- traits: (dict) The keys are trait names, the values are the trait
definitions. Each trait definition is a dictionary with the following
keys:
- type (optional): The data type for this trait. (as a string)
Valid options are: 'text', 'int', 'float' and 'datetime', defaults to
'text' if not specified.
- fields: a path specification for the field(s) in the notification you
wish to extract. The paths can be specified with a dot syntax
(e.g. 'payload.host'); dictionary syntax (e.g. 'payload[host]') is
also supported.
In either case, if the key for the field you are looking for contains
special characters, like '.', it will need to be quoted (with double
or single quotes) like so::
"payload.image_meta.'org.openstack__1__architecture'"
The syntax used for the field specification is a variant of JSONPath,
and is fairly flexible.
(see: https://github.com/kennknowles/python-jsonpath-rw for more info)
Specifications can be written to match multiple possible fields; the
value for the trait will be derived from the matching fields that
exist and have non-null (i.e. not None) values in the
notification.
By default the value will be the first such field. (plugins can alter
that, if they wish)
This configuration value is normally a string; for convenience, it can
be specified as a list of specifications, which will be OR'ed together
(a union query in jsonpath terms)
- plugin (optional): (dictionary) with the following keys:
- name: (string) name of a plugin to load
- parameters: (optional) Dictionary of keyword args to pass
to the plugin on initialization. See documentation on each plugin to
see what arguments it accepts.
For convenience, this value can also be specified as a string, which is
interpreted as a plugin name to be loaded with no parameters.
"""
def __init__(self, events_config, trait_plugin_mgr, add_catchall=True):
self.definitions = [
EventDefinition(event_def, trait_plugin_mgr)
for event_def in reversed(events_config)]
if add_catchall and not any(d.is_catchall for d in self.definitions):
event_def = dict(event_type='*', traits={})
self.definitions.append(EventDefinition(event_def,
trait_plugin_mgr))
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
edef = None
for d in self.definitions:
if d.match_type(event_type):
edef = d
break
if edef is None:
msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)')
% dict(type=event_type, msgid=message_id))
if cfg.CONF.event.drop_unmatched_notifications:
LOG.debug(msg)
else:
# If drop_unmatched_notifications is False, this should
# never happen. (mdragon)
LOG.error(msg)
return None
return edef.to_event(notification_body)
def get_config_file():
config_file = cfg.CONF.event.definitions_cfg_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
return config_file
def setup_events(trait_plugin_mgr):
"""Setup the event definitions from yaml config file."""
config_file = get_config_file()
if config_file is not None:
LOG.debug(_("Event Definitions configuration file: %s"), config_file)
with open(config_file) as cf:
config = cf.read()
try:
events_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (_("Invalid YAML syntax in Event Definitions file "
"%(file)s at line: %(line)s, column: %(column)s.")
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = (_("YAML error reading Event Definitions file "
"%(file)s")
% dict(file=config_file))
LOG.error(errmsg)
raise
else:
LOG.debug(_("No Event Definitions configuration file found!"
" Using default config."))
events_config = []
LOG.info(_("Event Definitions: %s"), events_config)
allow_drop = cfg.CONF.event.drop_unmatched_notifications
return NotificationEventsConverter(events_config,
trait_plugin_mgr,
add_catchall=not allow_drop)
|
|
import sys
import os
import glob
import string
import logging
import yaml
#logging.basicConfig(filename=os.path.join(os.getcwd(), 'extract_data_logs.log'), filemode='w', level=logging.DEBUG,\
# format="%(levelname)s %(asctime)s %(lineno)d %(message)s")
def extract_data(c, param_map):
"""
Method to generate a CPAC input subject list
python file. The method extracts anatomical
and functional data for each site( if multiple site)
and/or scan parameters for each site and put it into
a data structure read by python
Example:
subjects_list =[
{
'subject_id' : '0050386',
'unique_id' : 'session_1',
'anat': '/Users/home/data/NYU/0050386/session_1/anat_1/anat.nii.gz',
'rest':{
'rest_1_rest' : '/Users/home/data/NYU/0050386/session_1/rest_1/rest.nii.gz',
'rest_2_rest' : '/Users/home/data/NYU/0050386/session_1/rest_2/rest.nii.gz',
}
'scan_parameters':{
'tr': '2',
'acquisition': 'alt+z2',
'reference': '17',
'first_tr': '',
'last_tr': '',
}
},
]
or
subjects_list =[
{
'subject_id' : '0050386',
'unique_id' : 'session_1',
'anat': '/Users/home/data/NYU/0050386/session_1/anat_1/anat.nii.gz',
'rest':{
'rest_1_rest' : '/Users/home/data/NYU/0050386/session_1/rest_1/rest.nii.gz',
'rest_2_rest' : '/Users/home/data/NYU/0050386/session_1/rest_2/rest.nii.gz',
}
},
]
"""
#method to read each line of the file into list
#returns list
def get_list(arg):
if isinstance(arg, list):
ret_list = arg
else:
ret_list = [fline.rstrip('\r\n') for fline in open(arg, 'r').readlines()]
return ret_list
exclusion_list = []
if c.exclusionSubjectList is not None:
exclusion_list = get_list(c.exclusionSubjectList)
subject_list = []
if c.subjectList is not None:
subject_list = get_list(c.subjectList)
#check if Template is correct
def checkTemplate(template):
if template.count('%s') != 2:
msg = "Please provide '%s' in the template" \
"where your site and subjects are present"\
"Please see examples"
logging.exception(msg)
raise Exception(msg)
filename, ext = os.path.splitext(os.path.basename(template))
ext = os.path.splitext(filename)[1] + ext
if ext not in [".nii", ".nii.gz"]:
msg = "Invalid file name", os.path.basename(template)
logging.exception(msg)
raise Exception(msg)
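# For example, a valid (hypothetical) template would look like
# '/data/sites/%s/%s/session_1/anat_1/anat.nii.gz', where the first '%s'
# stands in for the site and the second for the subject directory.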
def get_site_list(path):
base, relative = path.split('%s')
sites = os.listdir(base)
return sites
def check_length(scan_name, file_name):
if len(file_name) > 30:
msg = "filename- %s is too long."\
"It should not be more than 30 characters."%(file_name)
logging.exception(msg)
raise Exception(msg)
if len(scan_name) - len(os.path.splitext(os.path.splitext(file_name)[0])[0])>= 40:
msg = "scan name %s is too long."\
"It should not be more than 20 characters"\
%(scan_name.replace("_"+os.path.splitext(os.path.splitext(file_name)[0])[0], ''))
logging.exception(msg)
raise Exception(msg)
def create_site_subject_mapping(base, relative):
#mapping between site and subject
site_subject_map = {}
base_path_list = []
if c.siteList is not None:
site_list = get_list(c.siteList)
else:
site_list = get_site_list(base)
for site in site_list:
paths = glob.glob(string.replace(base, '%s', site))
base_path_list.extend(paths)
for path in paths:
for sub in os.listdir(path):
#check if subject is present in subject_list
if subject_list:
if sub in subject_list and sub not in exclusion_list:
site_subject_map[sub] = site
elif sub not in exclusion_list:
if sub != '.DS_Store':
site_subject_map[sub] = site
return base_path_list, site_subject_map
#method to split the input template path
#into base, path before subject directory
#and relative, path after subject directory
def getPath(template):
checkTemplate(template)
base, relative = template.rsplit("%s", 1)
base, subject_map = create_site_subject_mapping(base, relative)
base.sort()
relative = relative.lstrip("/")
return base, relative, subject_map
#get anatomical base path and anatomical relative path
anat_base, anat_relative = getPath(c.anatomicalTemplate)[:2]
#get functional base path, functional relative path and site-subject map
func_base, func_relative, subject_map = getPath(c.functionalTemplate)
if not anat_base:
msg = "Anatomical Data template incorrect. No such file or directory %s", anat_base
logging.exception(msg)
raise Exception(msg)
if not func_base:
msg = "Functional Data template incorrect. No such file or directory %s, func_base"
logging.exception(msg)
raise Exception(msg)
if len(anat_base) != len(func_base):
msg1 = "Some sites are missing, Please check your template"\
, anat_base, "!=", func_base
logging.exception(msg1)
msg2 = " Base length Unequal. Some sites are missing."\
"extract_data doesn't script support this.Please" \
"Provide your own subjects_list file"
logging.exception(msg2)
raise Exception(msg2)
#calculate the length of relative paths(path after subject directory)
func_relative_len = len(func_relative.split('/'))
anat_relative_len = len(anat_relative.split('/'))
def check_for_sessions(relative_path, path_length):
"""
Method to check if there are sessions present
"""
#default
session_present = False
session_path = 'session_1'
#session present if path_length is equal to 3
if path_length == 3:
relative_path_list = relative_path.split('/')
session_path = relative_path_list[0]
relative_path = string.join(relative_path_list[1:], "/")
session_present = True
elif path_length > 3:
msg = "extract_data script currently doesn't support this directory structure."\
"Please provide the subjects_list file to run CPAC."\
"For more information refer to manual"
logging.exception(msg)
raise Exception(msg)
return session_present, session_path, relative_path
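# For example, a relative path of 'session_1/rest_1/rest.nii.gz' has a path
# length of 3, so 'session_1' is treated as the session directory and
# 'rest_1/rest.nii.gz' as the remaining relative path.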
func_session_present, func_session_path, func_relative = \
check_for_sessions(func_relative, func_relative_len)
anat_session_present, anat_session_path, anat_relative = \
check_for_sessions(anat_relative, anat_relative_len)
f = open(os.path.join(c.outputSubjectListLocation, "CPAC_subject_list_%s.yml" % c.subjectListName[0]), 'wb')
def fetch_path(i, anat_sub, func_sub, session_id):
"""
Method to extract anatomical and functional
path for a session and print to file
Parameters
----------
i : int
index of site
anat_sub : string
string containing subject/ concatenated
subject-session path for anatomical file
func_sub: string
string containing subject/ concatenated
subject-session path for functional file
session_id: string
session
Raises
------
Exception
"""
try:
def print_begin_of_file(sub, session_id):
print >> f, "-"
print >> f, " subject_id: '" + sub + "'"
print >> f, " unique_id: '" + session_id + "'"
def print_end_of_file(sub):
if param_map is not None:
try:
logging.debug("site for sub %s -> %s" %(sub, subject_map.get(sub)))
logging.debug("scan parameters for the above site %s"%param_map.get(subject_map.get(sub)))
print >> f, " scan_parameters:"
print >> f, " tr: '" + param_map.get(subject_map.get(sub))[4] + "'"
print >> f, " acquisition: '" + param_map.get(subject_map.get(sub))[0] + "'"
print >> f, " reference: '" + param_map.get(subject_map.get(sub))[3] + "'"
print >> f, " first_tr: '" + param_map.get(subject_map.get(sub))[1] + "'"
print >> f, " last_tr: '" + param_map.get(subject_map.get(sub))[2] + "'"
except:
msg = " No Parameter values for the %s site is defined in the scan"\
" parameters csv file" %subject_map.get(sub)
raise ValueError(msg)
#get anatomical file
anat_base_path = os.path.join(anat_base[i], anat_sub)
func_base_path = os.path.join(func_base[i], func_sub)
anat = None
func = None
anat = glob.glob(os.path.join(anat_base_path, anat_relative))
func = glob.glob(os.path.join(func_base_path, func_relative))
if anat and func:
print_begin_of_file(anat_sub.split("/")[0], session_id)
print >> f, " anat: '" + os.path.realpath(anat[0]) + "'"
print >> f, " rest: "
#iterate for each rest session
for iter in func:
#get scan_id
iterable = os.path.splitext(os.path.splitext(iter.replace(func_base_path, '').lstrip("/"))[0])[0]
iterable = iterable.replace("/", "_")
check_length(iterable, os.path.basename(os.path.realpath(iter)))
print>>f, " " + iterable + ": '" + os.path.realpath(iter) + "'"
print_end_of_file(anat_sub.split("/")[0])
else:
logging.debug("skipping subject %s"%anat_sub.split("/")[0])
except ValueError, e:
logging.exception(str(e))
raise
except Exception, e:
msg = "Exception while fetching anatomical and functional paths. " + str(e)
logging.exception(msg)
raise Exception(msg)
def walk(index, sub):
"""
Method which walks across each subject
path in the data site path
Parameters
----------
index : int
index of site
sub : string
subject_id
Raises
------
Exception
"""
try:
if func_session_present:
#if there are sessions
if "*" in func_session_path:
session_list = glob.glob(os.path.join(func_base[index], os.path.join(sub, func_session_path)))
else:
session_list = [func_session_path]
if session_list:
for session in session_list:
session_id = os.path.basename(session)
if anat_session_present:
if func_session_path == anat_session_path:
fetch_path(index, os.path.join(sub, session_id), os.path.join(sub, session_id), session_id)
else:
fetch_path(index, os.path.join(sub, anat_session_path), os.path.join(sub, session_id), session_id)
else:
fetch_path(index, sub, os.path.join(sub, session_id), session_id)
else:
logging.debug("Skipping subject %s", sub)
else:
logging.debug("No sessions")
session_id = ''
fetch_path(index, sub, sub, session_id)
except Exception, e:
logging.exception(str(e))
raise
except:
msg = "Please make sessions are consistent across all subjects"
logging.exception(msg)
raise Exception(msg)
try:
for i in range(len(anat_base)):
for sub in os.listdir(anat_base[i]):
#check if subject is present in subject_list
if subject_list:
if sub in subject_list and sub not in exclusion_list:
logging.debug("extracting data for subject: %s", sub)
walk(i, sub)
#check that subject is not in exclusion list
elif sub not in exclusion_list and sub != '.DS_Store':
logging.debug("extracting data for subject: %s", sub)
walk(i, sub)
name = os.path.join(c.outputSubjectListLocation, 'CPAC_subject_list.yml')
print "Extraction Successfully Completed...Input Subjects_list for CPAC - %s" % name
except Exception, e:
logging.exception(str(e))
raise
finally:
f.close()
def generate_supplementary_files(output_path, subject_list_name):
"""
Method to generate phenotypic template file
and subject list for group analysis
"""
from sets import Set
import csv
subject_list_name = subject_list_name[0]
try:
subjects_list = yaml.load(open(os.path.join(output_path, 'CPAC_' \
'subject_list_%s.yml' % subject_list_name), 'r'))
except:
print 'Subject list couldn\'t be read!'
print 'path: ', os.path.join(output_path, 'CPAC_subject_list_%s.yml' \
% subject_list_name)
raise Exception
subject_scan_set = Set()
subID_set = Set()
session_set = Set()
subject_set = Set()
scan_set = Set()
data_list = []
for sub in subjects_list:
if sub['unique_id']:
subject_id = sub['subject_id'] + "_" + sub['unique_id']
else:
subject_id = sub['subject_id']
for scan in sub['rest'].keys():
subject_scan_set.add((subject_id, scan))
subID_set.add(sub['subject_id'])
session_set.add(sub['unique_id'])
subject_set.add(subject_id)
scan_set.add(scan)
for item in subject_scan_set:
list1 = []
list1.append(item[0] + "/" + item[1])
for val in subject_set:
if val in item:
list1.append(1)
else:
list1.append(0)
for val in scan_set:
if val in item:
list1.append(1)
else:
list1.append(0)
data_list.append(list1)
# generate the phenotypic file templates for group analysis
file_name = os.path.join(output_path, 'phenotypic_template_%s.csv' \
% subject_list_name)
try:
f = open(file_name, 'wb')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
writer = csv.writer(f)
writer.writerow(['subject_id', 'EV1', '..'])
for sub in sorted(subID_set):
writer.writerow([sub, ''])
f.close()
print "Template Phenotypic file for group analysis - %s" % file_name
# generate the phenotypic file templates for repeated measures
if (len(session_set) > 1) and (len(scan_set) > 1):
file_name = os.path.join(output_path, 'phenotypic_template_repeated' \
'_measures_mult_sessions_and_scans_%s.csv' \
% subject_list_name)
try:
f = open(file_name, 'wb')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
writer = csv.writer(f)
writer.writerow(['subject_id', 'EV1', '..'])
for session in sorted(session_set):
for scan in sorted(scan_set):
for sub in sorted(subID_set):
writer.writerow([sub + '_' + scan + '_' + session, ''])
f.close()
if (len(session_set) > 1):
file_name = os.path.join(output_path, 'phenotypic_template_repeated' \
'_measures_multiple_sessions_%s.csv' % subject_list_name)
try:
f = open(file_name, 'wb')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
writer = csv.writer(f)
writer.writerow(['subject_id', 'EV1', '..'])
for session in sorted(session_set):
for sub in sorted(subID_set):
writer.writerow([sub + '_' + session, ''])
f.close()
if (len(scan_set) > 1):
file_name = os.path.join(output_path, 'phenotypic_template_repeated' \
'_measures_multiple_scans_%s.csv' % subject_list_name)
try:
f = open(file_name, 'wb')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
writer = csv.writer(f)
writer.writerow(['subject_id', 'EV1', '..'])
for scan in sorted(scan_set):
for sub in sorted(subID_set):
writer.writerow([sub + '_' + scan, ''])
f.close()
# generate the group analysis subject lists
file_name = os.path.join(output_path, 'subject_list_group_analysis' \
'_%s.txt' % subject_list_name)
try:
f = open(file_name, 'w')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
for sub in sorted(subID_set):
print >> f, sub
print "Subject list required later for group analysis - %s" % file_name
f.close()
# generate the group analysis subject lists for repeated measures
if (len(session_set) > 1) and (len(scan_set) > 1):
file_name = os.path.join(output_path, 'subject_list_group_analysis_' \
'repeated_measures_mult_sessions_and_scans_%s.txt' \
% subject_list_name)
try:
f = open(file_name, 'w')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
for session in sorted(session_set):
for scan in sorted(scan_set):
for sub in sorted(subID_set):
print >> f, sub + ',' + scan + ',' + session
f.close()
if (len(session_set) > 1):
file_name = os.path.join(output_path, 'subject_list_group_analysis_' \
'repeated_measures_multiple_sessions_%s.txt' \
% subject_list_name)
try:
f = open(file_name, 'w')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
for session in sorted(session_set):
for sub in sorted(subID_set):
print >> f, sub + ',' + session
f.close()
if (len(scan_set) > 1):
file_name = os.path.join(output_path, 'subject_list_group_analysis_' \
'repeated_measures_multiple_scans_%s.txt' \
% subject_list_name)
try:
f = open(file_name, 'w')
except:
print '\n\nCPAC says: I couldn\'t save this file to your drive:\n'
print file_name, '\n\n'
print 'Make sure you have write access? Then come back. Don\'t ' \
'worry.. I\'ll wait.\n\n'
raise IOError
for scan in sorted(scan_set):
for sub in sorted(subID_set):
print >> f, sub + ',' + scan
f.close()
def read_csv(csv_input):
"""
Method to read csv file
'Acquisition'
'Reference'
'Site'
'TR (seconds)'
"""
import csv
from collections import defaultdict
try:
reader = csv.DictReader(open(csv_input, "U"))
dict_labels = defaultdict(list)
for line in reader:
csv_dict = dict((k.lower(), v) for k, v in line.iteritems())
dict_labels[csv_dict.get('site')] = [csv_dict[key] for key in sorted(csv_dict.keys()) \
if key != 'site' and key != 'scan']
if len(dict_labels.keys()) < 1:
msg ="Scan Parameters File is either empty"\
"or missing header"
logging.exception(msg)
raise Exception(msg)
return dict_labels
except IOError:
msg = "Error reading the csv file %s", csv_input
logging.exception(msg)
raise Exception(msg)
except:
msg = "Error reading scan parameters csv. Make sure you are using the correct template"
logging.exception(msg)
raise Exception(msg)
"""
Class to set dictionary keys as map attributes
"""
class Configuration(object):
def __init__(self, config_map):
for key in config_map:
if config_map[key] == 'None':
config_map[key] = None
setattr(self, key, config_map[key])
def run(data_config):
"""
Run method takes data_config
file as the input argument
"""
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
logging.basicConfig(filename=os.path.join(os.getcwd(), 'extract_data_logs.log'), filemode='w', level=logging.DEBUG,\
format="%(levelname)s %(asctime)s %(lineno)d %(message)s")
print "For any errors or messages check the log file - %s"\
% os.path.join(os.getcwd(), 'extract_data_logs.log')
c = Configuration(yaml.load(open(os.path.realpath(data_config), 'r')))
if c.scanParametersCSV is not None:
s_param_map = read_csv(c.scanParametersCSV)
else:
logging.debug("no scan parameters csv included"\
"make sure you turn off slice timing correction option"\
"in CPAC configuration")
s_param_map = None
extract_data(c, s_param_map)
generate_supplementary_files(c.outputSubjectListLocation, c.subjectListName)
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Usage: python extract_data.py data_config.yml"
sys.exit()
else:
run(sys.argv[1])
|
|
#!/usr/bin/env python3
# Copyright 2014-present Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import asyncio
import os
import subprocess
try:
from pywatchman import bser
except ImportError:
from pywatchman import pybser as bser
from pywatchman import encoding
from pywatchman import CommandError, WatchmanError
# 2 bytes marker, 1 byte int size, 8 bytes int64 value
SNIFF_LEN = 13
# TODO: Fix this when https://github.com/python/asyncio/issues/281 is resolved.
# tl;dr is that you cannot have different event loops running in different
# threads all fork subprocesses and listen for child events. The current
# workaround is to do the old fashioned blocking process communication using a
# ThreadPool.
def _resolve_sockname_helper():
# if invoked via a trigger, watchman will set this env var; we
# should use it unless explicitly set otherwise
path = os.getenv('WATCHMAN_SOCK')
if path:
return path
cmd = ['watchman', '--output-encoding=bser', 'get-sockname']
try:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=os.name != 'nt')
except OSError as e:
raise WatchmanError('"watchman" executable not in PATH (%s)', e)
stdout, stderr = p.communicate()
exitcode = p.poll()
if exitcode:
raise WatchmanError('watchman exited with code %d' % exitcode)
result = bser.loads(stdout)
if 'error' in result:
raise WatchmanError(str(result['error']))
return result['sockname']
async def _resolve_sockname():
'''Find the Unix socket path to the global Watchman instance.'''
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, _resolve_sockname_helper)
class AsyncTransport(object):
'''Communication transport to the Watchman Service.'''
async def activate(self, **kwargs):
'''Make the transport ready for use. Optional for subclasses.'''
pass
async def read(self, size):
'''Read 'size' bytes from the transport.'''
raise NotImplementedError()
async def write(self, buf):
'''Write 'buf' bytes to the transport.'''
raise NotImplementedError()
def close(self):
'''Close the transport. Optional for subclasses.'''
pass
class AsyncUnixSocketTransport(AsyncTransport):
'''Local Unix domain socket transport supporting asyncio.'''
def __init__(self):
self.sockname = None
self.reader = None
self.writer = None
async def activate(self, **kwargs):
# Requires keyword-argument 'sockname'
reader, writer = await asyncio.open_unix_connection(kwargs['sockname'])
self.reader = reader
self.writer = writer
async def write(self, data):
self.writer.write(data)
await self.writer.drain()
async def read(self, size):
res = await self.reader.read(size)
return res
def close(self):
if self.writer:
self.writer.close()
class AsyncCodec(object):
'''Communication encoding for the Watchman service.'''
def __init__(self, transport):
self.transport = transport
async def receive(self):
'''Read from the underlying transport, parse and return the message.'''
raise NotImplementedError()
async def send(self, *args):
'''Send the given message via the underlying transport.'''
raise NotImplementedError()
def close(self):
'''Close the underlying transport.'''
self.transport.close()
# This requires BSERv2 support of the server, but doesn't gracefully check
# for the requisite capability being present in older versions.
class AsyncBserCodec(AsyncCodec):
'''Use the BSER encoding.'''
async def receive(self):
sniff = await self.transport.read(SNIFF_LEN)
if not sniff:
raise WatchmanError('empty watchman response')
_1, _2, elen = bser.pdu_info(sniff)
rlen = len(sniff)
buf = bytearray(elen)
buf[:rlen] = sniff
while elen > rlen:
b = await self.transport.read(elen - rlen)
buf[rlen:rlen + len(b)] = b
rlen += len(b)
response = bytes(buf)
try:
res = self._loads(response)
return res
except ValueError as e:
raise WatchmanError('watchman response decode error: %s' % e)
async def send(self, *args):
cmd = bser.dumps(*args, version=2, capabilities=0)
await self.transport.write(cmd)
def _loads(self, response):
''' Parse the BSER packet '''
return bser.loads(response,
True,
value_encoding=encoding.get_local_encoding(),
value_errors=encoding.default_local_errors)
class ReceiveLoopError(Exception):
pass
class AIOClient(object):
def __init__(self, connection):
self.connection = connection
self.log_queue = asyncio.Queue()
self.sub_by_root = {}
self.bilateral_response_queue = asyncio.Queue()
self.receive_task = None
self.receive_task_exception = None
def stop(self):
self.should_stop = True
async def receive_bilateral_response(self):
'''Receive the response to a request made to the Watchman service.'''
self._check_receive_loop()
resp = await self.bilateral_response_queue.get()
self._check_error(resp)
return resp
async def query(self, *args):
'''Send a query to the Watchman service and return the response.'''
self._check_receive_loop()
try:
await self.connection.send(args)
return await self.receive_bilateral_response()
except CommandError as ex:
ex.setCommand(args)
raise ex
async def capability_check(self, optional=None, required=None):
'''Perform a server capability check.'''
self._check_receive_loop()
# If the returned response is an error, self.query will raise an error
await self.query('version', {
'optional': optional or [],
'required': required or []
})
async def get_subscription(self, name, root):
''' Retrieve the data associated with a named subscription
Returns None if there is no data associated with `name`
If root is not None, then only return the subscription
data that matches both root and name. When used in this way,
remove processing impacts both the unscoped and scoped stores
for the subscription data.
'''
self._check_receive_loop()
self._ensure_subscription_queue_exists(name, root)
return await self.sub_by_root[root][name].get()
async def pop_log(self):
'''Get one log from the log queue.'''
self._check_receive_loop()
return await self.log_queue.get()
def close(self):
'''Close the underlying connection.'''
if self.receive_task:
self.receive_task.cancel()
if self.connection:
self.connection.close()
def enable_receiving(self, loop=None):
'''Schedules the receive loop to run on the given loop.'''
self.receive_task = asyncio.ensure_future(self._receive_loop(),
loop=loop)
def do_if_done(fut):
try:
fut.result()
except asyncio.CancelledError:
pass
except Exception as ex:
self.receive_task_exception = ex
self.receive_task.add_done_callback(do_if_done)
@classmethod
async def from_socket(cls, sockname=None):
'''Create a new AIOClient using Unix transport and BSER Codec
connecting to the specified socket. If the specified socket is None,
then resolve the socket path automatically.
This method also schedules the receive loop to run on the event loop.
This method is a coroutine.'''
if not sockname:
sockname = await _resolve_sockname()
transport = AsyncUnixSocketTransport()
await transport.activate(sockname=sockname)
connection = AsyncBserCodec(transport)
obj = cls(connection)
obj.enable_receiving()
return obj
async def _receive_loop(self):
        '''Continuously receive PDUs from the Watchman service.
        Unilateral responses (subscription and log PDUs) are routed to their
        own queues for later retrieval; all other responses are placed on the
        bilateral response queue. The loop runs until the task is cancelled.'''
while True:
response = await self.connection.receive()
if self._is_unilateral(response):
await self._process_unilateral_response(response)
else:
await self.bilateral_response_queue.put(response)
def _check_error(self, res):
if 'error' in res:
raise CommandError(res['error'])
def _check_receive_loop(self):
if self.receive_task is None:
raise ReceiveLoopError('Receive loop was not started.')
def _is_unilateral(self, res):
return res.get('unilateral') or 'subscription' in res or 'log' in res
def _ensure_subscription_queue_exists(self, name, root):
# Note this function must be called from an async function on only one
# event loop.
self.sub_by_root.setdefault(root, {}).setdefault(name, asyncio.Queue())
async def _process_unilateral_response(self, response):
if 'log' in response:
            await self.log_queue.put(response['log'])
elif 'subscription' in response:
sub = response['subscription']
root = os.path.normcase(response['root'])
self._ensure_subscription_queue_exists(sub, root)
await self.sub_by_root[root][sub].put(response)
elif self._is_unilateral(response):
raise WatchmanError('Unknown unilateral response: ' +
str(response))
else:
raise WatchmanError('Not a unilateral response: ' +
str(response))
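# Usage sketch (illustrative only, not part of the original module). It
# assumes a watchman service is reachable through the default socket; the
# helper name `_example` is invented for this demonstration.
async def _example():
    client = await AIOClient.from_socket()
    try:
        # Any watchman command can be issued through query(); 'version' is a
        # harmless round trip that exercises the codec and the receive loop.
        print(await client.query('version'))
    finally:
        client.close()
if __name__ == '__main__':
    # asyncio.run requires Python 3.7+; asyncio is already imported above.
    asyncio.run(_example())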
|
|
"""SPSSINC SPLIT DATASET extension command"""
#Licensed Materials - Property of IBM
#IBM SPSS Products: Statistics General
#(c) Copyright IBM Corp. 2010, 2020
#US Government Users Restricted Rights - Use, duplication or disclosure
#restricted by GSA ADP Schedule Contract with IBM Corp.
__author__ = 'spss, JKP'
__version__= '1.2.2'
# history
# 08-feb-2010 original version
# 31-mar-2010 expand file handles for file list log if V18 or later
# 24-jan-2011 allow multiple split variables and patterns in directory paths
# 08-feb-2011 add FILENAMESUBS keyword
# 07-mar-2011 file handle support in DIRECTORY keyword in class Filename
# 09-dec-2011 bug fix for delete
# 23-jul-2013 prevent multiple missing value definitions from causing file conflict
helptext = """SPSSINC SPLIT DATASET
SPLITVAR=variable-names
/OUTPUT
[DIRECTORY="directory-specification-for-output"] [DELETECONTENTS={NO* | YES}]
[MAKETEMPDIR={NO* | YES}]] [FILENAME="filename"]
[/OPTIONS [NAMES={VALUES*|LABELS|NUMBERS}] FILENAMESUBS={ASDIR* | VALUES | LABELS}
[NAMEPREFIX="nameprefix"]
[PRINTLIST={YES* | NO}]
[FILELIST="filespec"]]
[/HELP]
This command writes a collection of datasets with all the cases for each value of the split variable(s)
in a different dataset. It facilitates executing arbitrary blocks of syntax for each distinct split value.
The extension command SPSSINC PROCESS FILES can be used to apply syntax to each of the
files written by this command.
Example:
spssinc split dataset splitvar= educ
/output directory="c:/temp/target" deletecontents=yes
/options names=values filelist="c:/temp/target/files.txt" printlist=yes.
Data do not need to be sorted by the split variable for this command.
The output file name is by default constructed from the variable values or labels
with an underscore(_) between parts if multiple variables are used.
FILENAME can be used to override these names. It can be a pattern such as
described below for directories. It is always given an sav extension.
System-missing values are written to a dataset named $Sysmis.sav.
A blank string value will be written to a dataset named .sav (no rootname).
Such names are legal, but on some operating systems a directory listing will not show
names starting with a period by default.
SPLITVAR names one or more string or numeric variables that define the splits.
If numeric, the values must be integers.
DIRECTORY optionally names a directory for the output. If not specified, the current
working directory for the SPSS Processor is used. The specified directory will be created
if it does not already exist.
The directory can be a simple path such as "c:\project\splits" or it can refer to some or
all of the variables used in splitting. To refer to a variable value, use the variable name
in the path like this "${varname}". For example, if splitting by variables xvar and yvar,
you could write
DIRECTORY = "c:/mydata/abc${xvar}/${yvar}". if xvar has values "xyz" and "KLM" and
yvar has values 1 and 2, then directories
c:/mydata/abcxyz/1, c:/mydata/abcxyz/2, c:/mydata/abcKLM/1, and c:/mydata/abcKLM/2
could be created and used. Only value combinations actually occurring in the data will
be created or used.
Notes:
- On a case-insensitive file system, such as the one used by Windows,
the files created could differ from those created on other systems.
- Characters typically not allowed in directory names are replaced with underscores (_).
These are " * ? < > |
If MAKETEMPDIR is specified, a new temporary directory is created and used for the
output. DIRECTORY must not be specified if this option is used.
DELETECONTENTS specifies whether all SPSS sav files are removed before new files are
written. Use with caution! It cannot be used if DIRECTORY is not specified. If the directory specification
includes variable substitutions, only directory references for values in the dataset being split
will be cleared.
NAMES can specify VALUES, the default, or LABELS or NUMBERS. It determines how
the output directories and files are named. For VALUES the file names are like value.sav;
for LABELS, they are like label.sav; for NUMBERS, the file names are sequential numbers.
If NAMES=NUMBERS and there is a directory pattern, the variable values are used
to expand that pattern. The NAMES setting determines whether values or labels are
displayed in the detailed output pivot table.
Characters in the values or labels that would be illegal in a file name, including those listed
above and / or \, are replaced by the underscore character (_).
If using LABELS, the value is used if there is no label.
By default, the same NAMES choice is used for file names. You can override this by
specifying FILENAMESUBS = VALUES or LABELS.
If NAMEPREFIX is specified, that text plus _ will be prefixed to the output file names.
NAMEPREFIX cannot be combined with FILENAME.
If specified, FILELIST names a file that will contain a list of all the files that were written
along with the associated values. This file can be used as input to SPSSINC PROCESS FILES.
If file handles are in use, the paths in the log are expanded to the true names if using V18
or later, but Viewer output is left in terms of the handles
/HELP displays this help and does nothing else.
"""
# Notes: V17 max XSAVES is 10. V18 is 64.
import spss, spssaux
from extension import Template, Syntax, processcmd
import sys, locale, tempfile, random, os, glob, re, codecs, string
v18okay = spssaux.getSpssMajorVersion() >= 18
v19okay = spssaux.getSpssMajorVersion() >= 19
# Version 18 has a bug related to inserting Unicode text in a pivot table data cell.
# The following code monkey patches the problematic method just for V18 and earlier
if not v19okay:
from spss import CellText, Dimension
import time, datetime
def SimplePivotTable(self,rowdim="",rowlabels=[],coldim="",collabels=[],cells=None):
"""Monkey Patch for unicode-in-cells bug for V18 and earlier"""
error.Reset()
try:
# If cells is None, neither rowlabels nor collabels is allowed.
if cells is None:
if len(rowlabels) > 0:
error.SetErrorCode(1032)
if error.IsError():
raise SpssError(error)
elif len(collabels) > 0:
error.SetErrorCode(1032)
if error.IsError():
raise SpssError(error)
# Make a local copy. We don't want to change the original labels.
tmpRowLabels = list(rowlabels)
tmpColLabels = list(collabels)
except TypeError:
error.SetErrorCode(1004)
if error.IsError():
raise SpssError(error)
# Check the structure of cells.
nRows = 0
nCols = 0
# None or empty cells is okay at this point.
if cells is not None:
nRows = len(cells)
if nRows > 0:
if not isinstance(cells[0],str):
try:
#check if cells[0] is iterable.
nCols = len([(i, x) for (i,x) in enumerate(cells[0])])
except TypeError:
nCols = 1
else:
nCols = 1
if tmpRowLabels != [] and tmpColLabels != []:
nRows = len(tmpRowLabels)
nCols = len(tmpColLabels)
elif tmpRowLabels != []:
nRows = len(tmpRowLabels)
# If there are no labels for the column dimension, the length of the first cells item is used.
tmpColLabels.extend(["col"+str(x) for x in range(nCols)])
elif tmpColLabels != []:
nCols = len(tmpColLabels)
# If there are no labels for the row dimension, the number of rows in Cells is used.
tmpRowLabels.extend(["row"+str(x) for x in range(nRows)])
else:
tmpRowLabels.extend(["row"+str(x) for x in range(nRows)])
tmpColLabels.extend(["col"+str(x) for x in range(nCols)])
tmpRowLabels = list(map(CellText._CellText__ToCellText,tmpRowLabels))
tmpColLabels = list(map(CellText._CellText__ToCellText,tmpColLabels))
tmpCells = []
# cells must match the label structure if the labels are given.
if nRows > 0 and nCols > 0:
try:
# Check cells length and if cells can be indexed as cells[i][j] or cells[i].
if nCols > 1:
try:
x = []
for c in cells:
if isinstance(c, (tuple,list)):
x.append(len(c))
else:
x.append(1)
maxlen = max(x)
except TypeError:
maxlen = 1
if ( 1 == maxlen ):
assert (len(cells) == nCols * nRows)
tmpCells = [cells[x*nCols + y] for x in range(nRows) for y in range(nCols)]
else:
assert(maxlen == nCols)
assert(len(cells) == nRows)
tmpCells = [cells[x][y] for x in range(nRows) for y in range(nCols)]
else:
assert(len(cells) == nCols * nRows)
tmpCells = [cells[x*nCols + y] for x in range(nRows) for y in range(nCols)]
except:
error.SetErrorCode(1032)
if error.IsError():
raise SpssError(error)
# Check if cells[i][j] or cells[i] is scalar (such as sequence).
for x in tmpCells:
if not isinstance(x,(str, time.struct_time, datetime.datetime, datetime.date)):
try:
[(i, x) for (i,x) in enumerate(x)]
error.SetErrorCode(1032)
if error.IsError():
raise SpssError(error)
except TypeError:
pass
tmpCells = list(map(CellText._CellText__ToCellText,tmpCells))
# If dimension is empty, the dimension label is hidden.
if rowdim == "":
rowdim = self.Append(Dimension.Place.row,"rowdim",True,False)
else:
rowdim = self.Append(Dimension.Place.row,rowdim,False,False)
if coldim == "":
coldim = self.Append(Dimension.Place.column,"coldim",True,False)
else:
coldim = self.Append(Dimension.Place.column,coldim,False,False)
if tmpCells != []:
categories = [(row,col) for row in tmpRowLabels for col in tmpColLabels]
for (i,cats) in enumerate(categories):
self[cats] = tmpCells[i]
# monkey patch BasePivotTable class
import spss.errMsg
error = spss.errMsg.errCode()
spss.BasePivotTable.SimplePivotTable = SimplePivotTable
def _safeval(val, quot='"'):
"return safe value for quoting with quot, which may be single or double quote or blank"
return quot == " " and val or val.replace(quot, quot+quot)
def Run(args):
"""Execute the SPSSINC SPLIT DATASETS extension command"""
args = args[list(args.keys())[0]]
oobj = Syntax([
Template("SPLITVAR", subc="", ktype="existingvarlist", var="varnames", islist=True),
Template("DIRECTORY", subc="OUTPUT", ktype="literal", var="directory"),
Template("DELETECONTENTS", subc="OUTPUT", ktype="bool", var="deletecontents"),
Template("MAKETEMPDIR", subc="OUTPUT", ktype="bool", var="maketempdir"),
Template("FILENAME", subc="OUTPUT", ktype="literal", var="fnspec"),
Template("NAMES", subc="OPTIONS", ktype="str", var="names", vallist=["values", "labels","numbers"]),
Template("FILENAMESUBS", subc="OPTIONS", ktype="str", var="filenamesubs", vallist=["values", "labels", "numbers", "asdir"]),
Template("NAMEPREFIX", subc="OPTIONS", ktype="literal", var="nameprefix"),
Template("FILELIST", subc="OPTIONS", ktype="literal", var="filelist"),
Template("PRINTLIST", subc="OPTIONS", ktype="bool", var="printlist"),
Template("HELP", subc="", ktype="bool")])
#try:
#import wingdbstub
#if wingdbstub.debugger != None:
#if wingdbstub.debugger.ChannelClosed():
#import time
#wingdbstub.debugger.StopDebug()
#time.sleep(2)
#wingdbstub.debugger.StartDebug()
#import thread
#wingdbstub.debugger.SetDebugThreads({thread.get_ident(): 1}, default_policy=0)
#except:
#pass
#enable localization
global _
try:
_("---")
except:
def _(msg):
return msg
# A HELP subcommand overrides all else
if "HELP" in args:
#print helptext
helper()
else:
processcmd(oobj, args, makesplits, vardict=spssaux.VariableDict())
def helper():
"""open html help in default browser window
The location is computed from the current module name"""
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = "file://" + path + os.path.sep + \
"markdown.html"
# webbrowser.open seems not to work well
browser = webbrowser.get()
if not browser.open_new(helpspec):
print(("Help file not found:" + helpspec))
try: #override
from extension import helper
except:
pass
def makesplits(varnames, directory=None, deletecontents=False, maketempdir = False,
names="values", nameprefix="", filelist=None, printlist=True, fnspec="", filenamesubs="asdir"):
"""Create split data files and reports"""
myenc = locale.getlocale()[1] # get current encoding in case conversions needed
if fnspec and nameprefix:
raise ValueError(_("FILENAME and NAMEPREFIX cannot be used together"))
if fnspec and names == "numbers":
raise ValueError(_("FILENAME and Names = Numbers cannot be used together"))
def unicodeit(value, keepnumeric=False):
"""Convert singleton or sequence to Unicode
if keepnumeric, then numbers are left as numeric"""
isseq = spssaux._isseq(value)
if not isseq:
value = [value]
for i, v in enumerate(value):
if isinstance(v, (int, float)):
if not keepnumeric:
value[i]= str(v)
elif v is None:
pass
elif not isinstance(v, str):
value[i] = str(v, myenc)
if isseq:
return value
else:
return value[0]
cwd = unicodeit(spssaux.GetSHOW("DIRECTORY"))
# output files will go into or under a temporary directory, the cwd of the backend,
# or a specified path. Deleting contents is not allowed in cwd.
#if spssaux._isseq(varname):
#varname = varname[0]
varnames = unicodeit(varnames)
directory = unescape(directory)
root = None
delcount = 0
if maketempdir:
root=tempfile.mkdtemp()
elif directory is None:
root = cwd
elif deletecontents: # Needs update for subtrees
        if not (directory.endswith("/") or directory.endswith("\\")):
directory = directory + os.sep
if directory and root:
directory = os.path.join(root, directory)
elif root:
directory = root
directory = unicodeit(directory)
dsn = spss.ActiveDataset()
if dsn == "*":
dsn = "D" + str(random.uniform(0,1))
spss.Submit("DATASET NAME " + dsn)
varnamestr = " ".join(varnames)
# get the list of values for the splitting variable.
# AGGREGATE will fail if there are any undefined variables
dsname = "D" + str(random.uniform(0,1))
cmd = """DATASET DECLARE %(dsname)s.
AGGREGATE /OUTFILE = "%(dsname)s"
/BREAK = %(varnamestr)s
/%(dsname)s=N.
DATASET ACTIVATE %(dsname)s.""" % locals()
spss.Submit(cmd)
# cases is a list of tuples of values and counts.
# By default, user missing values become None and can produce
# multiple cases with the same apparent break value, so we turn that off.
cur = spss.Cursor()
cur.SetUserMissingInclude(True)
cases = cur.fetchall()
cur.close()
spss.Submit("""DATASET CLOSE %(dsname)s.
DATASET ACTIVATE %(dsn)s.""" % locals())
# get all but last variable and convert from tuple to list
cases = [list(item[:-1]) for item in cases] # we just need the break values
if names == "labels" or filenamesubs == "labels":
vardict = spssaux.VariableDict(varnames)
if len(vardict) != len(varnames):
raise ValueError(_("One or more of the split variables was not found. Note that names are case sensitive"))
vldict = [vardict[v.VariableName].ValueLabels for v in vardict] # a list of value label dictionaries
# ensure that everything is properly unicoded
vldict = [dict((unicodeit(k), unicodeit(v)) for k, v in item.items()) for item in vldict]
else:
vldict = [{}]
# set up values for use in syntax by quoting and converting values
for row, case in enumerate(cases):
for v, vval in enumerate(case):
if not isinstance(vval, str) and vval is not None:
if int(vval) != vval:
raise ValueError(_("Split variable contains non-integer value: %f") % vval)
valuecount = len(cases)
fnc = filename(varnames, names, vldict, directory, nameprefix, myenc, unicodeit,
deletecontents, fnspec, filenamesubs)
xsavetemplate = """ XSAVE OUTFILE=%s."""
xsavelimit = v18okay and 64 or 10
remaining = valuecount
fns = []
for v in range(0, valuecount, xsavelimit):
if v > 0:
spss.Submit("EXECUTE.") # must execute transformation block in order to submit new ones
for g in range(min(xsavelimit, remaining)):
if g == 0:
cmd = ["DO IF "]
else:
cmd.append("ELSE IF ")
values = unicodeit(cases[v+g], keepnumeric=True)
spssexpr = makeexpr(varnames, values)
cmd[-1] = cmd[-1] + ("(" + spssexpr + ").")
valuestr, fnv, fn, thefile = fnc.genfn(values)
fns.append([fn, valuestr, thefile])
cmd.append(xsavetemplate % fn)
cmd.append("END IF.")
spss.Submit("\n".join(cmd))
remaining -= xsavelimit
for i in range(valuecount):
for j in range(3):
fns[i][j] = unistr(fns[i][j], myenc)
if not filelist is None:
filelist = unescape(filelist)
fh = Handles() # only functional for V18 or later.
# filelist itself is already resolved by the UP because of its parameter type
filelist = fixloc(unicodeit(filelist), cwd)
fh.resolvehandles(fns)
with codecs.open(filelist, "wb", encoding="utf_8_sig") as f:
f.writelines([item[0] + ' ' + item[1] + os.linesep for item in fns])
spss.StartProcedure("SPSSINC SPLIT DATASET")
pt = NonProcPivotTable("INFORMATION", tabletitle=_("Split File Information"),
columnlabels=[_("Settings and Statistics")])
pt.addrow(rowlabel=_("Split Variable Names"), cvalues=[", ".join(varnames)])
pt.addrow(rowlabel=_("Output Directory"), cvalues=[directory])
pt.addrow(rowlabel=_("Files Deleted"), cvalues=[str(fnc.delcount)])
pt.addrow(rowlabel=_("Files Written"), cvalues=[str(len(fns))])
pt.addrow(rowlabel=_("File List"), cvalues=[filelist or _("None")])
pt.addrow(rowlabel=_("Directories Cleared"), cvalues=[deletecontents and _("Yes") or _("No")])
pt.generate()
if printlist:
pt = NonProcPivotTable("FILEOUTPUT", _("Split Files Written"),
tabletitle=_("Values and File Names for Split Files Written"),
columnlabels=[_("Values or Labels"), _("Directory"), _("Data File")],
caption=_("Based on Variables: %s") % ", ".join(varnames))
for i, f in enumerate(fns):
row = [f[1]]
row.extend((os.path.split(f[0].strip('"'))))
pt.addrow(rowlabel=str(i+1), cvalues=row)
pt.generate()
spss.EndProcedure
def unistr(value, myenc):
"""return unicode value for a unicode object, a number, or a code page object"""
if isinstance(value, str):
return value
if isinstance(value, (float, int)):
return str(value)
if value is None:
return "$Sysmis"
return str(value, myenc)
def str18(item):
if v19okay:
return item
class Strtemplate(object):
"""class for pattern substitution like string.Template but working with arbitrary nonidentifier strings"""
reexpr = re.compile(r"(\$\{.+?\})")
def __init__(self, s):
"""s is a string possibly containing patterns of the form ${...}"""
self.s = s
def substitute(self, d):
"""substitute all patterns from dictionary d"""
def repl(mo):
try:
return d[mo.group()[2:-1]]
except:
raise ValueError(_("A variable reference was found in a directory or file name pattern that is not listed as a split variable: %s") % mo.group()[2:-1])
return re.sub(Strtemplate.reexpr, repl, self.s)
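# Illustrative sketch (not part of the original command): how Strtemplate
# expands ${varname} references in a DIRECTORY or FILENAME pattern. The
# variable names and values below are invented for the example.
#
#   >>> Strtemplate("c:/mydata/abc${xvar}/${yvar}").substitute(
#   ...     {"xvar": "xyz", "yvar": "1"})
#   'c:/mydata/abcxyz/1'
#
# Referencing a variable that is not in the dictionary raises ValueError,
# mirroring the "not listed as a split variable" error above.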
class filename(object):
"""Generate file names and paths for the split files"""
def __init__(self, varnames, names, vldict, directory, nameprefix, myenc, unicodeit,
deletecontents, fnspec, filenamesubs):
"""varnames is a sequence of variable names
        names indicates whether value labels or values are used
vldict is a sequence of value label dictionaries
directory is a simple path or a template in which variable values may be substituted
nameprefix is a prefix for all the file names
myenc is the character encoding
deletecontents indicates whether target directories should be cleared of sav files
fnspec can be used to override the generated file names. It can be a template.
filenamesubs can override the names mode."""
attributesFromDict(locals())
self.first = True
if nameprefix and not nameprefix.endswith("_"):
self.nameprefix = nameprefix + "_"
self.used = set()
self.pat = re.compile(r"""[/\\:"*?<>|]""")
self.dirpat = re.compile(r"""[:"*?<>|]""")
        if not (directory.endswith("/") or directory.endswith("\\")):
            directory = directory + os.sep
        self.directory = Strtemplate(directory) # template for split variable substitutions
self.fnspec = Strtemplate(fnspec)
self.seq = 0
self.numvar = len(varnames)
self.delcount = 0
if filenamesubs == "asdir":
self.fnames = names
else:
self.fnames = filenamesubs
self.handles = Handles()
def genfn(self, values):
"""generate a quoted filespec for values and manage directories
values is a sequence of one or more values to combine. It may be a mixture of strings and numbers"""
nvalues = []
for v in values:
if isinstance(v, str):
nvalues.append(v.rstrip())
elif v is None:
nvalues.append("$Sysmis")
else:
nvalues.append(str(int(v)))
values = nvalues
if self.names in ["values", "numbers"]:
d = dict(list(zip(self.varnames, values)))
valuelist = ", ".join(values)
else:
labels = [self.vldict[i].get(value, value) for i, value in enumerate(values)]
d = dict(list(zip(self.varnames, labels)))
valuelist = ", ".join(labels)
if self.names == self.fnames: # same substitution mode for directories and filenames
fd = d
else:
if self.fnames in ["values", "numbers"]:
fd = dict(list(zip(self.varnames, values)))
else:
labels = [self.vldict[i].get(value, value) for i, value in enumerate(values)]
fd = dict(list(zip(self.varnames, labels)))
if self.fnspec.s:
fn = self.fnspec.substitute(fd)
else:
if self.fnames == "labels":
fn = "_".join([self.vldict[i].get(value, value) for i, value in enumerate(values)]) # use value label if available; else name
elif self.fnames == "values":
fn = "_".join(values)
else:
self.seq += 1
fn = "%05d" % self.seq
if fn is None:
fn = "$Sysmis"
value = fn
fn = unistr(fn, self.myenc)
###fnv = unistr(value, self.myenc)
fnv = fn
fn = re.sub(self.pat, "_", fn) # chars illegal in file name (Windows) or at least undesirable on other platforms
#if fn.lower() in self.used:
#raise ValueError(_("Output file names are not unique: %s") % fn)
self.used.add(fn.lower())
# substitution for illegal characters allows ":" as a drive separator but nowhere else
actualdir = self.directory.substitute(d)
# check for file handle and resolve if possible
if self.handles:
actualdir = self.handles.resolve(actualdir)
dirparts = list(os.path.splitdrive(actualdir)) # first item will be empty if no drive letter
dirparts[-1] = re.sub(self.dirpat, "_", dirparts[-1])
actualdir = "".join(dirparts)
if not os.path.isdir(actualdir): # does directory exist?
if os.path.isfile(actualdir): # don't overwrite a file
raise ValueError("Error: A file exists with the same name as a target directory: %s" % actualdir)
else:
os.makedirs(actualdir)
else:
if self.deletecontents and self.first: # 12/8/2011
self.first = False
for f in glob.iglob(os.path.join(actualdir, "*.sav")):
os.remove(f)
self.delcount += 1
return valuelist, fnv, '"' + _safeval(os.path.join(actualdir, self.nameprefix + fn + ".sav")) + '"', self.nameprefix + fn + ".sav"
def makeexpr(varnames, values):
"""Return conditional for this split and value string for output
varnames is the list of criterion variables
values is a list of values"""
crit = []
for var, value in zip(varnames, values):
if isinstance(value, str):
expression = var + ' EQ "' + _safeval(value) +'"'
elif value is None:
expression = "SYSMIS(%s)" % var
else:
expression = var + " EQ " + str(value)
crit.append(expression)
return " AND ".join(crit)
def fixloc(filelist, cwd):
"""return filelist aligned with SPSS process
filelist is a filespec
cwd is the SPSS process current working directory"""
if os.path.isabs(filelist):
return filelist
parts = os.path.splitdrive(filelist)
if not parts[0] == "":
raise ValueError(_("Relative paths cannot be specified with a drive letter: %s") % filelist)
return os.path.join(cwd, parts[1])
class Handles(object):
"""Version-guarded file handle resolver"""
def __init__(self):
try:
self.fh = spssaux.FileHandles()
except:
self.fh = None
def resolvehandles(self, fns):
"""resolve file handles in spec if V18 or later
fns is a list where each list element is a list whose first element is the filespec.
Each filespec actually starts with a double quote"""
if self.fh:
for item in fns:
item[0] = '"' + self.fh.resolve(item[0][1:])
def resolve(self, filespec):
"Ordinary handle resolver but guarded. Returns expanded filespec if possible"
if self.fh:
return self.fh.resolve(filespec)
else:
return filespec
class NonProcPivotTable(object):
"""Accumulate an object that can be turned into a basic pivot table once a procedure state can be established"""
def __init__(self, omssubtype, outlinetitle="", tabletitle="", caption="", rowdim="", coldim="", columnlabels=[],
procname="Messages"):
"""omssubtype is the OMS table subtype.
caption is the table caption.
tabletitle is the table title.
columnlabels is a sequence of column labels.
If columnlabels is empty, this is treated as a one-column table, and the rowlabels are used as the values with
the label column hidden
procname is the procedure name. It must not be translated."""
attributesFromDict(locals())
self.rowlabels = []
self.columnvalues = []
self.rowcount = 0
def addrow(self, rowlabel=None, cvalues=None):
"""Append a row labelled rowlabel to the table and set value(s) from cvalues.
rowlabel is a label for the stub.
        cvalues is a sequence of values with the same number of values as there are columns in the table."""
if cvalues is None:
cvalues = []
self.rowcount += 1
if rowlabel is None:
self.rowlabels.append(str(self.rowcount))
else:
self.rowlabels.append(rowlabel)
self.columnvalues.extend(cvalues)
def generate(self):
"""Produce the table assuming that a procedure state is now in effect if it has any rows."""
privateproc = False
if self.rowcount > 0:
try:
table = spss.BasePivotTable(self.tabletitle, self.omssubtype)
except:
spss.StartProcedure(self.procname)
privateproc = True
table = spss.BasePivotTable(self.tabletitle, self.omssubtype)
if self.caption:
table.Caption(self.caption)
if self.columnlabels != []:
table.SimplePivotTable(self.rowdim, self.rowlabels, self.coldim, self.columnlabels, self.columnvalues)
else:
table.Append(spss.Dimension.Place.row,"rowdim",hideName=True,hideLabels=True)
table.Append(spss.Dimension.Place.column,"coldim",hideName=True,hideLabels=True)
colcat = spss.CellText.String("Message")
for r in self.rowlabels:
cellr = spss.CellText.String(r)
table[(cellr, colcat)] = cellr
if privateproc:
spss.EndProcedure()
def attributesFromDict(d):
"""build self attributes from a dictionary d."""
self = d.pop('self')
for name, value in d.items():
setattr(self, name, value)
escapemapping = \
{"\t": r"\t", "\n":r"\n", "\r": r"\r", "\'":r"\'", "\a":r"\a","\b":r"\b", "\f":r"\f","\\N":r"\N", "\v":r"\v"}
def unescape(item):
"repair any escape sequences generated by the UP"
if item is None:
return item
return "".join([escapemapping.get(ch, ch) for ch in item])
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __init__ import AvroTester
from avro.ipc import AvroRemoteException
import avro_utils
import time
def _make_write_params(key, cf, sc, c, v, ts=0, cl='ONE'):
params = dict()
params['key'] = key
params['column_parent'] = dict()
params['column_parent']['column_family'] = cf
params['column_parent']['super_column'] = sc
params['column'] = dict()
params['column']['name'] = c
params['column']['value'] = v
params['column']['timestamp'] = ts
params['consistency_level'] = cl
return params
def _make_read_params(key, cf, sc, c, cl):
params = dict()
params['key'] = key
column_path = dict()
column_path['column_family'] = cf
column_path['super_column'] = sc
column_path['column'] = c
params['column_path'] = column_path
params['consistency_level'] = cl
return params
def _super_col(name, columns):
return {'name': name, 'columns': columns}
def Mutation(**kwargs):
return kwargs
def SlicePredicate(**kwargs):
return kwargs
def SliceRange(start='', finish='', reversed=False, count=10):
return {'start': start, 'finish': finish, 'reversed':reversed, 'count': count}
def ColumnParent(*args, **kwargs):
cp = {}
if args and len(args) > 0:
cp['column_family'] = args[0]
if args and len(args) > 1:
cp['super_column'] = args[1]
for k,v in kwargs.items():
cp[k] = v
return cp
def Deletion(*args, **kwargs):
cp = {}
if args and len(args) > 0:
cp['timestamp'] = args[0]
for k,v in kwargs.items():
cp[k] = v
return cp
def ColumnPath(*args, **kwargs):
cp = {}
if args and len(args) > 0:
cp['column_family'] = args[0]
for k,v in kwargs.items():
cp[k] = v
return cp
def Column(name, value, timestamp, ttl=None):
return {'name':name, 'value':value, 'timestamp': timestamp, 'ttl': ttl}
def _i64(i):
return avro_utils.i64(i)
_SUPER_COLUMNS = [_super_col('sc1', [Column(avro_utils.i64(4), 'value4', 0)]),
_super_col('sc2', [Column(avro_utils.i64(5), 'value5', 0),
Column(avro_utils.i64(6), 'value6', 0)])]
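# Illustrative sketch (not part of the original tests): the dict builders
# above simply mirror the Avro record shapes, e.g. (names invented):
#
#   Mutation(deletion=Deletion(20, super_column='sc1',
#                              predicate=SlicePredicate(column_names=['c1'])))
#   == {'deletion': {'timestamp': 20, 'super_column': 'sc1',
#                    'predicate': {'column_names': ['c1']}}}
#
#   ColumnPath('Super1', super_column='sc1', column='c1')
#   == {'column_family': 'Super1', 'super_column': 'sc1', 'column': 'c1'}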
class TestSuperOperations(AvroTester):
    """
    Operations on Super column families
    """
    def _set_keyspace(self, keyspace):
        self.client.request('set_keyspace', {'keyspace': keyspace})
def test_super_insert(self):
"simple super column insert"
self._set_keyspace('Keyspace1')
self._insert_super()
self._verify_super()
def test_slice_super(self):
"tests simple insert and get_slice"
self._set_keyspace('Keyspace1')
self._insert_super()
p = {'slice_range': {'start': '', 'finish': '', 'reversed': False, 'count': 10}}
parent = {'column_family': 'Super1', 'super_column': 'sc1'}
cosc = self.client.request('get_slice', {'key': 'key1', 'column_parent': parent, 'predicate': p, 'consistency_level': 'ONE'})
avro_utils.assert_cosc(cosc[0])
def test_missing_super(self):
"verifies that inserting doesn't yield false positives."
self._set_keyspace('Keyspace1')
avro_utils.assert_raises(AvroRemoteException,
self.client.request,
'get',
_make_read_params('key1', 'Super1', 'sc1', avro_utils.i64(1), 'ONE'))
self._insert_super()
avro_utils.assert_raises(AvroRemoteException,
self.client.request,
'get',
_make_read_params('key1', 'Super1', 'sc1', avro_utils.i64(1), 'ONE'))
def test_super_get(self):
"read back a super column"
self._set_keyspace('Keyspace1')
self._insert_super()
result = self.client.request('get', _make_read_params('key1', 'Super1', 'sc2', None, 'ONE'))['super_column']
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
"test get_slice honors subcolumn reversal and limit"
self._set_keyspace('Keyspace1')
self._insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_time_uuid(self):
"test operation on timeuuid subcolumns in super columns"
import uuid
L = []
self._set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
self.client.request('insert', {'key': 'key1', 'column_parent': ColumnParent('Super4', 'sc1'), 'column': Column(L[-1].bytes, 'value%s' % i, i), 'consistency_level': 'ONE'})
slice = self._big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i]['column']
assert u['value'] == 'value%s' % i
assert u['name'] == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result['column'] for result in self.client.request('get_slice', {'key': 'key1', 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_batch_mutate_remove_super_columns_with_standard_under(self):
"batch mutate with deletions in super columns"
self._set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
self._insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc['columns']:
names.append(c['name'])
mutations.append(Mutation(deletion=Deletion(20, super_column=c['name'], predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = [{'key': key, 'mutations': mutation_map} for key in keys]
def _assert_no_columnpath(key, column_path):
self._assert_no_columnpath(key, column_path)
self.client.request('batch_mutate', {'mutation_map': keyed_mutations, 'consistency_level': 'ONE'})
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc['columns']:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc['name'], column=c['name']))
# internal helper functions.
def _insert_super(self, key='key1'):
self.client.request('insert', _make_write_params(key, 'Super1', 'sc1', avro_utils.i64(4), 'value4', 0, 'ONE'))
self.client.request('insert', _make_write_params(key, 'Super1', 'sc2', avro_utils.i64(5), 'value5', 0, 'ONE'))
self.client.request('insert', _make_write_params(key, 'Super1', 'sc2', avro_utils.i64(6), 'value6', 0, 'ONE'))
def _big_slice(self, key, column_parent):
p = {'slice_range': {'start': '', 'finish': '', 'reversed': False, 'count': 1000}}
return self.client.request('get_slice', {'key': key, 'column_parent': column_parent, 'predicate': p, 'consistency_level': 'ONE'})
def _verify_super(self, supercf='Super1', key='key1'):
col = self.client.request('get', _make_read_params(key, supercf, 'sc1', avro_utils.i64(4), 'ONE'))['column']
avro_utils.assert_columns_match(col, {'name': avro_utils.i64(4), 'value': 'value4', 'timestamp': 0})
slice = [result['super_column'] for result in self._big_slice(key, {'column_family': supercf})]
assert slice == _SUPER_COLUMNS, _SUPER_COLUMNS
def _assert_no_columnpath(self, key, column_path):
try:
self.client.request('get', {'key': key, 'column_path': column_path, 'consistency_level': 'ONE'})
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except AvroRemoteException:
assert True, 'column did not exist'
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for supernova light curve I/O"""
import math
import os
import sys
import re
import json
from collections import OrderedDict
import numpy as np
from astropy.table import Table
from astropy.io import fits
from astropy import wcs
from .utils import dict_to_array
from .bandpasses import get_bandpass
__all__ = ['read_lc', 'write_lc', 'load_example_data', 'read_griddata_ascii',
'read_griddata_fits', 'write_griddata_ascii', 'write_griddata_fits']
def _stripcomment(line, char='#'):
pos = line.find(char)
if pos == -1:
return line
else:
return line[:pos]
def _cast_str(s):
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s.strip()
def read_griddata_ascii(name_or_obj):
"""Read 2-d grid data from a text file.
Each line has values `x0 x1 y`. Space separated.
x1 values are only read for first x0 value. Others are assumed
to match.
Parameters
----------
name_or_obj : str or file-like object
Returns
-------
x0 : numpy.ndarray
1-d array.
x1 : numpy.ndarray
1-d array.
y : numpy.ndarray
2-d array of shape (len(x0), len(x1)).
"""
if isinstance(name_or_obj, str):
f = open(name_or_obj, 'r')
else:
f = name_or_obj
x0 = [] # x0 values.
x1 = None # x1 values for first x0 value, assume others are the same.
y = [] # 2-d array of internal values
x0_current = None
x1_current = []
y1_current = []
for line in f:
stripped_line = _stripcomment(line)
if len(stripped_line) == 0:
continue
x0_tmp, x1_tmp, y_tmp = map(float, stripped_line.split())
if x0_current is None:
x0_current = x0_tmp # Initialize first time
# If there is a new x0 value, ingest the old one and reset values
if x0_tmp != x0_current:
x0.append(x0_current)
if x1 is None:
x1 = x1_current
y.append(y1_current)
x0_current = x0_tmp
x1_current = []
y1_current = []
x1_current.append(x1_tmp)
y1_current.append(y_tmp)
# Ingest the last x0 value and y1 array
x0.append(x0_current)
y.append(y1_current)
f.close()
return np.array(x0), np.array(x1), np.array(y)
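# Illustrative sketch (not part of the original module): reading a small
# grid from an in-memory file. The numbers are invented for the example.
#
#   >>> from io import StringIO
#   >>> f = StringIO("0 10 1.0\n0 20 2.0\n1 10 3.0\n1 20 4.0\n")
#   >>> x0, x1, y = read_griddata_ascii(f)
#   >>> x0, x1
#   (array([0., 1.]), array([10., 20.]))
#   >>> y
#   array([[1., 2.],
#          [3., 4.]])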
def read_multivector_griddata_ascii(name_or_obj):
"""Read 2-d grid data from a text file.
Each line has values `x0 x1 y0 y1 ...`. Space separated.
    The values are assumed to form a complete grid.
Parameters
----------
name_or_obj : str or file-like object
The name of the file or a file-like object containing the
data.
Returns
-------
x0 : numpy.ndarray
1-d array.
x1 : numpy.ndarray
1-d array.
y : numpy.ndarray
3-d array of shape ``(n, len(x0), len(x1))`` where ``n`` is
the number of y values on each line.
"""
data = np.loadtxt(name_or_obj)
x0 = np.sort(np.unique(data[:, 0]))
x1 = np.sort(np.unique(data[:, 1]))
y = np.zeros((len(data[0]) - 2, len(x0), len(x1)))
for i0, p in enumerate(x0):
for i1, q in enumerate(x1):
ind = (data[:, 0] == p) & (data[:, 1] == q)
y[:, i0, i1] = data[ind, 2:]
return x0, x1, y
def read_griddata_fits(name_or_obj, ext=0):
"""Read a multi-dimensional grid of data from a FITS file, where the
grid coordinates are encoded in the FITS-WCS header keywords.
Parameters
----------
name_or_obj : str or file-like object
Returns
-------
x0, x1, ... : `~numpy.ndarray`
1-d arrays giving coordinates of grid. The number of these arrays will
depend on the dimension of the data array. For example, if the data
have two dimensions, a total of three arrays will be returned:
``x0, x1, y``, with ``x0`` giving the coordinates of the first axis
of ``y``. If the data have three dimensions, a total of four arrays
will be returned: ``x0, x1, x2, y``, and so on with higher dimensions.
y : `~numpy.ndarray`
n-d array of shape ``(len(x0), len(x1), ...)``. For three dimensions
for example, the value at ``y[i, j, k]`` corresponds to coordinates
``(x0[i], x1[j], x2[k])``.
"""
hdulist = fits.open(name_or_obj)
w = wcs.WCS(hdulist[ext].header)
y = hdulist[ext].data
    # get abscissa values (coordinates at grid values)
xs = []
for i in range(y.ndim):
j = y.ndim - i # The i-th axis (in Python) corresponds to FITS AXISj
coords = np.zeros((y.shape[i], y.ndim), dtype=np.float32)
coords[:, j-1] = np.arange(y.shape[i])
x = w.wcs_pix2world(coords, 0)[:, j-1]
xs.append(x)
hdulist.close()
return tuple(xs) + (y,)
def write_griddata_ascii(x0, x1, y, name_or_obj):
"""Write 2-d grid data to a text file.
Each line has values `x0 x1 y`. Space separated.
Parameters
----------
x0 : numpy.ndarray
1-d array.
x1 : numpy.ndarray
1-d array.
y : numpy.ndarray
2-d array of shape (len(x0), len(x1)).
name_or_obj : str or file-like object
Filename to write to or open file.
"""
if isinstance(name_or_obj, str):
f = open(name_or_obj, 'w')
else:
f = name_or_obj
for j in range(len(x0)):
for i in range(len(x1)):
f.write("{0:.7g} {1:.7g} {2:.7g}\n".format(x0[j], x1[i], y[j, i]))
if isinstance(name_or_obj, str):
f.close()
def write_griddata_fits(x0, x1, y, name_or_obj):
"""Write a 2-d grid of data to a FITS file
The grid coordinates are encoded in the FITS-WCS header keywords.
Parameters
----------
x0 : numpy.ndarray
1-d array.
x1 : numpy.ndarray
1-d array.
y : numpy.ndarray
2-d array of shape (len(x0), len(x1)).
name_or_obj : str or file-like object
Filename to write to or open file.
"""
d0, d1 = np.ediff1d(x0), np.ediff1d(x1)
if not (np.allclose(d0, d0[0]) and np.allclose(d1, d1[0])):
raise ValueError('grid must be regularly spaced in both x0 and x1')
if not (len(x0), len(x1)) == y.shape:
raise ValueError('length of x0 and x1 do not match shape of y')
w = wcs.WCS(naxis=2)
w.wcs.crpix = [1, 1]
w.wcs.crval = [x1[0], x0[0]]
w.wcs.cdelt = [d1[0], d0[0]]
hdu = fits.PrimaryHDU(y, header=w.to_header())
hdu.writeto(name_or_obj)
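# Illustrative sketch (not part of the original module): round-tripping a
# regular grid through FITS. The file name and values are invented, and the
# target file must not already exist (fits writeto will not overwrite it).
#
#   >>> x0 = np.array([0., 1., 2.])
#   >>> x1 = np.array([10., 20.])
#   >>> y = np.arange(6.).reshape(3, 2)
#   >>> write_griddata_fits(x0, x1, y, 'grid.fits')
#   >>> rx0, rx1, ry = read_griddata_fits('grid.fits')
#   >>> np.allclose(rx0, x0), np.allclose(rx1, x1), np.allclose(ry, y)
#   (True, True, True)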
# -----------------------------------------------------------------------------
# Reader: ascii
def _read_ascii(f, delim=None, metachar='@', commentchar='#'):
meta = OrderedDict()
colnames = []
cols = []
readingdata = False
for line in f:
# strip leading & trailing whitespace, newline, and comments
line = line.strip()
pos = line.find(commentchar)
if pos > -1:
line = line[:pos]
if len(line) == 0:
continue
if not readingdata:
# Read metadata
if line[0] == metachar:
pos = line.find(' ') # Find first space.
if pos in [-1, 1]: # Space must exist and key must exist.
raise ValueError('Incorrectly formatted metadata line: ' +
line)
meta[line[1:pos]] = _cast_str(line[pos:])
continue
# Read header line
for item in line.split(delim):
colnames.append(item.strip())
cols.append([])
readingdata = True
continue
# Now we're reading data
items = line.split(delim)
for col, item in zip(cols, items):
col.append(_cast_str(item))
data = OrderedDict(zip(colnames, cols))
return meta, data
# -----------------------------------------------------------------------------
# Reader: salt2
def _expand_bands(band_list, meta):
"""Given a list containing band names, return a list of Bandpass objects"""
    # Treat position-dependent bandpasses based on metadata contents
# TODO: need a way to figure out which bands are position dependent!
# for now, we assume *all* or none are.
if "X_FOCAL_PLANE" in meta and "Y_FOCAL_PLANE" in meta:
r = math.sqrt(meta["X_FOCAL_PLANE"]**2 + meta["Y_FOCAL_PLANE"]**2)
# map name to object for unique bands
name_to_band = {name: get_bandpass(name, r)
for name in set(band_list)}
return [name_to_band[name] for name in band_list]
else:
# For other bandpasses, get_bandpass will return the same object
# on each call, so just use it directly.
return [get_bandpass(name) for name in band_list]
def _read_salt2(name_or_obj, read_covmat=False, expand_bands=False):
"""Read a new-style SALT2 file.
Such a file has metadata on lines starting with '@' and column names
on lines starting with '#' and containing a ':' after the column name.
There is optionally a line containing '#end' before the start of data.
"""
if isinstance(name_or_obj, str):
f = open(name_or_obj, 'r')
else:
f = name_or_obj
meta = OrderedDict()
colnames = []
cols = []
readingdata = False
for line in f:
# strip leading & trailing whitespace & newline
line = line.strip()
if len(line) == 0:
continue
if not readingdata:
# Read metadata
if line[0] == '@':
pos = line.find(' ') # Find first space.
if pos in [-1, 1]: # Space must exist and key must exist.
raise ValueError('Incorrectly formatted metadata line: ' +
line)
meta[line[1:pos]] = _cast_str(line[pos:])
continue
# Read header line
if line[0] == '#':
pos = line.find(':')
if pos in [-1, 1]:
continue # comment line
colname = line[1:pos].strip()
if colname == 'end':
continue
colnames.append(colname)
cols.append([])
continue
# If the first non-whitespace character is not '@' or '#',
# assume the line is the first data line.
readingdata = True
# strip comments
pos = line.find('#')
if pos > -1:
line = line[:pos]
if len(line) == 0:
continue
# Now we're reading data
items = line.split()
for col, item in zip(cols, items):
col.append(_cast_str(item))
if isinstance(name_or_obj, str):
f.close()
# read covariance matrix file, if requested and present
if read_covmat and 'COVMAT' in meta:
fname = os.path.join(os.path.dirname(f.name), meta['COVMAT'])
# use skiprows=1 because first row has array dimensions
fluxcov = np.loadtxt(fname, skiprows=1)
        # aesthetics: capitalize 'Fluxcov' to match salt2 colnames
# such as 'Fluxerr'
colnames.append('Fluxcov')
cols.append(fluxcov)
data = OrderedDict(zip(colnames, cols))
if expand_bands:
data['Filter'] = _expand_bands(data['Filter'], meta)
return meta, data
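# Illustrative sketch (not part of the original module): the minimal shape of
# a new-style SALT2 file that _read_salt2 understands (all values invented).
#
#   @SN 2006xx
#   @Z_HELIO 0.01
#   #Date :
#   #FluxPsf :
#   #Filter :
#   #MagSys :
#   #end :
#   53900.0 1.25 MEGACAM::g AB_B12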
# -----------------------------------------------------------------------------
# Reader: salt2-old
def _read_salt2_old(dirname, filenames=None):
"""Read old-style SALT2 files from a directory.
A file named 'lightfile' must exist in the directory.
"""
# Get list of files in directory.
if not (os.path.exists(dirname) and os.path.isdir(dirname)):
raise IOError("Not a directory: '{0}'".format(dirname))
dirfilenames = os.listdir(dirname)
# Read metadata from lightfile.
if 'lightfile' not in dirfilenames:
raise IOError("no lightfile in directory: '{0}'".format(dirname))
with open(os.path.join(dirname, 'lightfile'), 'r') as lightfile:
meta = OrderedDict()
for line in lightfile.readlines():
line = line.strip()
if len(line) == 0:
continue
try:
key, val = line.split()
except ValueError:
raise ValueError('expected space-separated key value pairs in '
'lightfile: {0}'
.format(os.path.join(dirname, 'lightfile')))
meta[key] = _cast_str(val)
# Get list of filenames to read.
if filenames is None:
filenames = dirfilenames
if 'lightfile' in filenames:
filenames.remove('lightfile') # We already read the lightfile.
fullfilenames = [os.path.join(dirname, f) for f in filenames]
# Read data from files.
data = None
for fname in fullfilenames:
with open(fname, 'r') as f:
filemeta, filedata = _read_salt2(f)
# Check that all necessary file metadata was defined.
if not ('INSTRUMENT' in filemeta and 'BAND' in filemeta and
'MAGSYS' in filemeta):
raise ValueError('not all necessary global keys (INSTRUMENT, '
'BAND, MAGSYS) are defined in file {0}'
.format(fname))
# Add the instrument/band to the file data, in anticipation of
# aggregating it with other files.
firstcol = next(iter(filedata.values()))
data_length = len(firstcol)
filter_name = '{0}::{1}'.format(filemeta.pop('INSTRUMENT'),
filemeta.pop('BAND'))
filedata['Filter'] = data_length * [filter_name]
filedata['MagSys'] = data_length * [filemeta.pop('MAGSYS')]
        # If this is the first file, initialize data lists, otherwise if keys
# match, append this file's data to the main data.
if data is None:
data = filedata
elif set(filedata.keys()) == set(data.keys()):
for key in data:
data[key].extend(filedata[key])
else:
raise ValueError('column names do not match between files')
# Append any extra metadata in this file to the master metadata.
if len(filemeta) > 0:
meta[filter_name] = filemeta
return meta, data
# -----------------------------------------------------------------------------
# Reader: json
def _read_json(f):
t = json.load(f)
    # Copy the data into a plain dict so the keys can later be used as
    # numpy structured array names.
d = {}
for key, value in t['data'].items():
d[key] = value
return t['meta'], d
# -----------------------------------------------------------------------------
# All readers
READERS = {'ascii': _read_ascii,
'json': _read_json,
'salt2': _read_salt2,
'salt2-old': _read_salt2_old}
def read_lc(file_or_dir, format='ascii', **kwargs):
"""Read light curve data for a single supernova.
Parameters
----------
file_or_dir : str
Filename (formats 'ascii', 'json', 'salt2') or directory name
(format 'salt2-old'). For 'salt2-old' format, directory must contain
a file named 'lightfile'. All other files in the directory are
assumed to be photometry files, unless the `filenames` keyword argument
is set.
format : {'ascii', 'json', 'salt2', 'salt2-old'}, optional
Format of file. Default is 'ascii'. 'salt2' is the new format available
in snfit version >= 2.3.0.
read_covmat : bool, optional
**[salt2 only]** If True, and if a ``COVMAT`` keyword is present in
header, read the covariance matrix from the filename specified
by ``COVMAT`` (assumed to be in the same directory as the lightcurve
file) and include it as a column named ``Fluxcov`` in the returned
table. Default is False.
*New in version 1.5.0*
expand_bands : bool, optional
**[salt2 only]** If True, convert band names into equivalent Bandpass
objects. This is particularly useful for position-dependent
bandpasses in the salt2 file format: the position information is
read from the header and used when creating the bandpass objects.
*New in version 1.5.0*
delim : str, optional
**[ascii only]** Used to split entries on a line. Default is `None`.
Extra whitespace is ignored.
metachar : str, optional
**[ascii only]** Lines whose first non-whitespace character is
`metachar` are treated as metadata lines, where the key and value
are split on the first whitespace. Default is ``'@'``
commentchar : str, optional
**[ascii only]** One-character string indicating a comment. Default is
'#'.
filenames : list, optional
**[salt2-old only]** Only try to read the given filenames as
photometry files. Default is to try to read all files in directory.
Returns
-------
t : astropy `~astropy.table.Table`
Table of data. Metadata (as an `OrderedDict`) can be accessed via
the ``t.meta`` attribute. For example: ``t.meta['key']``. The key
is case-sensitive.
Examples
--------
Read an ascii format file that includes metadata (``StringIO``
behaves like a file object):
>>> from io import StringIO
>>> f = StringIO('''
... @id 1
... @RA 36.0
... @description good
... time band flux fluxerr zp zpsys
... 50000. g 1. 0.1 25. ab
... 50000.1 r 2. 0.1 25. ab
... ''')
>>> t = read_lc(f, format='ascii')
>>> print(t)
time band flux fluxerr zp zpsys
------- ---- ---- ------- ---- -----
50000.0 g 1.0 0.1 25.0 ab
50000.1 r 2.0 0.1 25.0 ab
>>> t.meta
OrderedDict([('id', 1), ('RA', 36.0), ('description', 'good')])
"""
try:
readfunc = READERS[format]
except KeyError:
raise ValueError("Reader not defined for format {0!r}. Options: "
.format(format) + ", ".join(READERS.keys()))
if format == 'salt2-old':
meta, data = readfunc(file_or_dir, **kwargs)
elif isinstance(file_or_dir, str):
with open(file_or_dir, 'r') as f:
meta, data = readfunc(f, **kwargs)
else:
meta, data = readfunc(file_or_dir, **kwargs)
return Table(data, meta=meta)
# =========================================================================== #
# Writers #
# =========================================================================== #
# -----------------------------------------------------------------------------
# Writer: ascii
def _write_ascii(f, data, meta, **kwargs):
delim = kwargs.get('delim', ' ')
metachar = kwargs.get('metachar', '@')
if meta is not None:
for key, val in meta.items():
f.write('{0}{1}{2}{3}\n'.format(metachar, key, delim, str(val)))
keys = data.dtype.names
length = len(data)
f.write(delim.join(keys))
f.write('\n')
for i in range(length):
f.write(delim.join([str(data[key][i]) for key in keys]))
f.write('\n')
# -----------------------------------------------------------------------------
# Writer: salt2
KEY_TO_SALT2KEY_META = {
'Z': 'REDSHIFT', # Not sure if this is used.
'Z_HELIOCENTRIC': 'Z_HELIO',
'MAGSYS': 'MagSys',
'Z_SOURCE': 'z_source'}
KEY_TO_SALT2KEY_COLUMN = {
'Mjd': 'Date',
'Time': 'Date',
'Flux': 'FluxPsf',
'Fluxpsf': 'FluxPsf',
'Fluxerr': 'FluxPsferr',
'Fluxpsferr': 'FluxPsferr',
'Airmass': 'AirMass',
'Zp': 'ZP',
'Zpsys': 'MagSys',
'Magsys': 'MagSys',
'Band': 'Filter'}
def _write_salt2(f, data, meta, **kwargs):
raw = kwargs.get('raw', False)
pedantic = kwargs.get('pedantic', True)
if meta is not None:
for key, val in meta.items():
if not raw:
key = key.upper()
key = KEY_TO_SALT2KEY_META.get(key, key)
f.write('@{0} {1}\n'.format(key, str(val)))
keys = data.dtype.names
length = len(data)
# Write column names
keys_as_written = []
for key in keys:
if not raw:
key = key.capitalize()
key = KEY_TO_SALT2KEY_COLUMN.get(key, key)
f.write('#{0} :\n'.format(key))
keys_as_written.append(key)
f.write('#end :\n')
# Check that necessary fields exist
if pedantic:
if not ('Filter' in keys_as_written and 'MagSys' in keys_as_written):
            raise ValueError('photometry data missing some required fields'
                             ': Filter, MagSys')
# Write the data itself
for i in range(length):
f.write(' '.join([str(data[key][i]) for key in keys]))
f.write('\n')
# -----------------------------------------------------------------------------
# Writer: snana
KEY_TO_SNANAKEY_COLUMN = {
'TIME': 'MJD',
'DATE': 'MJD',
'FILTER': 'FLT',
'BAND': 'FLT',
'FLUX': 'FLUXCAL',
'FLUXERR': 'FLUXCALERR',
'ZP': 'ZPT',
'ZEROPOINT': 'ZPT'}
KEY_TO_SNANAKEY_META = {
'DEC': 'DECL'}
SNANA_REQUIRED_META = ['RA', 'DECL', 'SURVEY', 'FILTERS', 'MWEBV']
SNANA_REQUIRED_COLUMN = ['MJD', 'FLT', 'FLUXCAL', 'FLUXCALERR', 'ZPT']
def _write_snana(f, data, meta, **kwargs):
raw = kwargs.get('raw', False)
pedantic = kwargs.get('pedantic', True)
# Write metadata
keys_as_written = []
if meta is not None:
for key, val in meta.items():
if not raw:
key = key.upper()
key = KEY_TO_SNANAKEY_META.get(key, key)
f.write('{0}: {1}\n'.format(key, str(val)))
keys_as_written.append(key)
# Check that necessary metadata was written
if pedantic:
for key in SNANA_REQUIRED_META:
if key not in keys_as_written:
raise ValueError('Missing required metadata kw: ' + key)
# Get column names and data length
keys = data.dtype.names
length = len(data)
# Convert column names
keys_to_write = []
for key in keys:
if not raw:
key = key.upper()
key = KEY_TO_SNANAKEY_COLUMN.get(key, key)
keys_to_write.append(key)
# Check that necessary column names are included
if pedantic:
for key in SNANA_REQUIRED_COLUMN:
if key not in keys_to_write:
raise ValueError('Missing required column name: ' + key)
# Write the header
f.write('\n'
'# ==========================================\n'
'# TERSE LIGHT CURVE OUTPUT:\n'
'#\n'
'NOBS: {0:d}\n'
'NVAR: {1:d}\n'
'VARLIST: {2}\n'
.format(length, len(keys), ' '.join(keys_to_write)))
# Write data
for i in range(length):
f.write('OBS: ')
f.write(' '.join([str(data[key][i]) for key in keys]))
f.write('\n')
# -----------------------------------------------------------------------------
# Writer: json
def _write_json(f, data, meta, **kwargs):
# Build a dictionary of pure-python objects
output = OrderedDict([('meta', meta),
('data', OrderedDict())])
for key in data.dtype.names:
output['data'][key] = data[key].tolist()
json.dump(output, f)
del output
# -----------------------------------------------------------------------------
# All writers
WRITERS = {'ascii': _write_ascii,
'salt2': _write_salt2,
'snana': _write_snana,
'json': _write_json}
def write_lc(data, fname, format='ascii', **kwargs):
"""Write light curve data.
Parameters
----------
data : `~astropy.table.Table`
Light curve data.
fname : str
Filename.
format : {'ascii', 'salt2', 'snana', 'json'}, optional
Format of file. Default is 'ascii'. 'salt2' is the new format available
in snfit version >= 2.3.0.
delim : str, optional
**[ascii only]** Character used to separate entries on a line.
Default is ' '.
metachar : str, optional
**[ascii only]** Metadata designator. Default is '@'.
raw : bool, optional
**[salt2, snana]** By default, the SALT2 and SNANA writers rename
some metadata keys and column names in order to comply with what
snfit and SNANA expect. Set to True to override this.
Default is False.
pedantic : bool, optional
**[salt2, snana]** If True, check that output column names and header
keys comply with expected formatting, and raise a ValueError if not.
It is probably a good idea to set to False when raw is True.
Default is True.
"""
if format not in WRITERS:
raise ValueError("Writer not defined for format {0!r}. Options: "
.format(format) + ", ".join(WRITERS.keys()))
if isinstance(data, Table):
meta = data.meta
data = np.asarray(data)
else:
meta = OrderedDict()
if not isinstance(data, np.ndarray):
data = dict_to_array(data)
with open(fname, 'w') as f:
WRITERS[format](f, data, meta, **kwargs)
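# A minimal usage sketch for write_lc (illustrative only; this function is
# never called, and the column names are simply ones the converters above
# happen to recognize).
def _example_write_lc():
    data = {'time': [55070.0, 55072.0],
            'band': ['sdssg', 'sdssr'],
            'flux': [1.2, 1.4],
            'fluxerr': [0.1, 0.1],
            'zp': [27.5, 27.5],
            'zpsys': ['ab', 'ab']}
    # Plain dicts are converted to a structured array via dict_to_array().
    write_lc(data, 'lc_ascii.dat', format='ascii')
    # The SNANA writer renames columns (e.g. TIME -> MJD); pedantic=False is
    # needed here because the required metadata keys are not supplied.
    write_lc(data, 'lc_snana.dat', format='snana', pedantic=False)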
def load_example_data():
"""
Load an example photometric data table.
Returns
-------
data : `~astropy.table.Table`
"""
from astropy.utils.data import get_pkg_data_filename
filename = get_pkg_data_filename(
'data/examples/example_photometric_data.dat')
return read_lc(filename, format='ascii')
|
|
# -*- coding: utf-8 -*-
"""
zask.ext.zerorpc
~~~~~~~~~~~~~~~~
Add zerorpc support to zask.
:copyright: (c) 2015 by the J5.
:license: BSD, see LICENSE for more details.
"""
import inspect
import gevent
import time
import uuid
import zerorpc
from zerorpc.heartbeat import HeartBeatOnChannel
from zerorpc.channel import BufferedChannel, logger as channel_logger
from zerorpc.gevent_zmq import logger as gevent_logger
from zerorpc.core import logger as core_logger
from logging import DEBUG, ERROR, Formatter, getLogger, INFO, StreamHandler
from logging.handlers import TimedRotatingFileHandler
from zask import _request_ctx
from zask.logging import debug_handler, production_handler
# Because the time module has a problem with timezones, we now format all log
# message dates in UTC. We tried replacing the Formatter's converter using
# tzlocal, but the first call was very slow; the delay is somewhere in the
# range of 3-4 seconds, which is not acceptable in a production application.
# So until we find a better solution, this is the compromise.
Formatter.converter = time.gmtime
access_logger = getLogger(__name__)
# NCSA Combined Log Format + request time + uuid
ACCESS_LOG_FORMAT = (
'%(host)s %(identifier)s %(username)s %(asctime)s %(message)s ' +
'%(status_code)s %(bytes)s %(referrer)s %(user_agent)s %(cookies)s ' +
'%(request_time)d %(uuid)s'
)
ACCESS_LOG_DATETIME_FORMAT = '[%d/%b/%Y:%H:%M:%S +0000]' # Hard coded for UTC
CONFIG_ENDPOINT_MIDDLEWARE = 'file'
CONFIG_CUSTOME_HEADER_MIDDLEWARE = 'header'
ACCESS_LOG_MIDDLEWARE = 'access_log'
REQUEST_CHAIN_MIDDLEWARE = 'uuid'
REQUEST_EVENT_MIDDLEWARE = 'event'
DEFAULT_MIDDLEWARES = [
CONFIG_CUSTOME_HEADER_MIDDLEWARE,
REQUEST_CHAIN_MIDDLEWARE,
ACCESS_LOG_MIDDLEWARE,
REQUEST_EVENT_MIDDLEWARE
]
def _milli_time():
"""get millionsecond of time.
"""
return int(round(time.time() * 1000))
def _log(cls_name, func):
"""[Deprecated]
    Decorator applied to every server method to record a simple access log.
"""
def wrapped(*args, **kwargs):
start = _milli_time()
result = func(*args, **kwargs)
log = '"%s" - "%s" - OK - %dms' % (cls_name,
func.__name__,
_milli_time() - start)
access_logger.info(log, extra={'access_key': None})
return result
return wrapped
def access_log(cls):
"""[Deprecated]
    A decorator for a zerorpc server class to generate access logs::
        @access_log
        class MySrv(object):
            def foo(self):
                return "bar"
    Every request from a client will create a log entry::
[2014-12-18 13:33:16,433] - None - "MySrv" - "foo" - OK - 1ms
:param cls: the class object
"""
for name, m in inspect.getmembers(cls, inspect.ismethod):
setattr(cls, name, _log(cls.__name__, m))
return cls
def init_zerorpc(app):
"""Baskward compatibility.
"""
return ZeroRPC(app)
class ConfigMiddleware(object):
"""A middleware work with configure of zask application.
This is the base class for all the config based middlewares.
"""
def __init__(self, app):
self.app = app
def _get_config_name(self, name):
config_name = "ZERORPC_%s" % (name.upper())
if self.app.config.get(config_name) is None:
raise MissingConfigException(config_name)
return config_name
def get_version(self, name, version):
config_name = self._get_config_name(name)
        if version is None:
            try:
                version = self.app.config[config_name]['default']
            except KeyError:
                raise ClientMissingVersionException()
if self.app.config.get(config_name).get(version) is None:
raise MissingConfigException(config_name + '["' + version + '"]')
return version
def get_endpoint(self, name, version):
config_name = self._get_config_name(name)
version = self.get_version(name, version)
return self.app.config[config_name][version]
def get_access_key(self, name):
config_name = self._get_config_name(name)
if self.app.config.get(config_name).get('access_key') is None:
raise MissingAccessKeyException(config_name)
return self.app.config[config_name]['access_key']
def get_client_keys(self, name):
config_name = self._get_config_name(name)
return self.app.config.get(config_name).get('client_keys', None)
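# A hedged sketch of the configuration shape the helpers above expect for a
# service named "some_service" (the endpoint and keys are made up; this
# function is illustrative only and never called):
def _example_service_config(app):
    app.config['ZERORPC_SOME_SERVICE'] = {
        '1.0': 'tcp://127.0.0.1:8888',      # endpoint(s) for version 1.0
        'default': '1.0',                   # used when no version is given
        'access_key': 'this_client_key',    # sent by ConfigCustomHeaderMiddleware
        'client_keys': ['allowed_key_1'],   # keys accepted on the server side
    }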
class ConfigEndpointMiddleware(ConfigMiddleware):
"""Resolve the endpoint by service name.
"""
def resolve_endpoint(self, endpoint):
        # When multiple endpoints are configured, the sub-endpoints should
        # not be decoded as well, so ignore them and return directly.
try:
name, version = HandleEndpoint.decode(endpoint)
except ValueError:
return endpoint
else:
return self.get_endpoint(name, version)
class ConfigCustomHeaderMiddleware(ConfigEndpointMiddleware):
"""Besides resolve the endpoint by service name,
add custome header to the client.
Server side will do the validation for the access key and service version.
"""
_server_version = None
def set_server_version(self, version):
self._server_version = version
def client_before_request(self, event):
if event.header.get('service_name'):
event.header.update(
{
'access_key': self.get_access_key(
event.header['service_name']),
'service_version': self.get_version(
event.header['service_name'],
event.header['service_version'])})
def load_task_context(self, event_header):
if event_header.get('service_version') and event_header.get(
'service_version') != self._server_version:
raise VersionNotMatchException(event_header.get('access_key'),
event_header.get('service_version'),
self._server_version)
if event_header.get('access_key'):
keys = self.get_client_keys(event_header['service_name'])
if keys and event_header.get('access_key') not in keys:
raise NoSuchAccessKeyException(event_header.get('access_key'))
class RequestChainMiddleware(object):
"""Generate UUID for requests and store in greenlet's local storage
"""
def __init__(self, app):
self.app = app
def get_uuid(self):
if not hasattr(_request_ctx.stash, 'uuid'):
setattr(_request_ctx.stash, 'uuid', str(uuid.uuid1()))
return _request_ctx.stash.uuid
def set_uuid(self, uuid):
setattr(_request_ctx.stash, 'uuid', uuid)
def clear_uuid(self):
if hasattr(_request_ctx.stash, 'uuid'):
delattr(_request_ctx.stash, 'uuid')
def server_before_exec(self, request_event):
if not request_event.header.get('uuid'):
request_event.header.update({
'uuid': self.get_uuid(),
})
else:
self.set_uuid(request_event.header.get('uuid'))
def server_after_exec(self, request_event, reply_event):
self.clear_uuid()
def server_inspect_exception(
self,
request_event,
reply_event,
task_context,
exc_infos):
self.clear_uuid()
def client_before_request(self, event):
if not event.header.get('uuid'):
event.header.update({
'uuid': self.get_uuid(),
})
class RequestEventMiddleware(object):
"""Exposes the request_event to the object being passed to Server()
via self.get_request_event() from a service endpoint.
"""
def server_before_exec(self, request_event):
"""Injects the request_event into greenlet's local storage context.
"""
setattr(_request_ctx.stash, 'request_event', request_event)
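# Hedged sketch (illustrative only, never registered anywhere): with
# RequestEventMiddleware enabled, a service object passed to rpc.Server()
# can read the current request event from inside an endpoint method.
class _ExampleRequestEventSrv(object):
    __version__ = "1.0"
    __service_name__ = "example_request_event"
    def request_uuid(self):
        # get_request_event() is injected onto the methods object by
        # _Server.__init__ (defined later in this module).
        event = self.get_request_event()
        return event.header.get('uuid')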
class AccessLogMiddleware(object):
"""This can't be used before initialize the logger.
"""
_class_name = None
def __init__(self, app):
self.app = app
def set_class_name(self, class_name):
self._class_name = class_name
def server_before_exec(self, request_event):
request_event.header.update({
'started_at': _milli_time()
})
def server_after_exec(self, request_event, reply_event):
start = request_event.header.get('started_at')
message = '"%s %s"' % (self._class_name, request_event.name)
access_key = request_event.header.get('access_key', '-')
uuid = request_event.header.get('uuid', '-')
access_logger.info(message, extra={
'host': '-',
'identifier': '-',
'username': access_key,
'status_code': 'OK',
'bytes': '-',
'referrer': '-',
'user_agent': '-',
'cookies': '-',
'request_time': _milli_time() - start,
'uuid': uuid,
})
def server_inspect_exception(
self,
request_event,
reply_event,
task_context,
exc_infos):
start = request_event.header.get('started_at')
message = '"%s %s"' % (self._class_name, request_event.name)
access_key = request_event.header.get('access_key', '-')
uuid = request_event.header.get('uuid', '-')
access_logger.info(message, extra={
'host': '-',
'identifier': '-',
'username': access_key,
'status_code': 'ERROR',
'bytes': '-',
'referrer': '-',
'user_agent': '-',
'cookies': '-',
'request_time': _milli_time() - start if start else 0,
'uuid': uuid,
})
class ZeroRPC(object):
"""This is a class used to integrate zerorpc to the Zask application.
ZeroRPC extention provides a few powful middlewares.
Take ``CONFIG_ENDPOINT_MIDDLEWARE`` as example,
which will resolve endpoint according to the
zask application configuration. To use that you can setup a
ZeroRPC like this::
app = Zask(__name__)
app.config['ZERORPC_SOME_SERVICE'] = {
'1.0': endpoint,
}
rpc = ZeroRPC(app, middlewares=[CONFIG_ENDPOINT_MIDDLEWARE])
Then create a server and a client::
class Srv(object):
__version__ = "1.0"
__service_name__ = "some_service"
def hello(self):
return 'world'
client = rpc.Client('some_service', version='1.0')
client.hello()
    The application will look for the ``ZERORPC_SOME_SERVICE`` config. You can
    set a default version to make client initialization easier::
app.config['ZERORPC_SOME_SERVICE'] = {
'1.0': endpoint,
'2.0': [ # set list if you have multiple endpoints
endpoint1,
endpoint2
        ],
'default': '1.0'
}
client = rpc.Client('some_service')
client.hello()
But if you don't want to use the middlewares, just set ``middlewares``
to ``None``::
app = Zask(__name__)
rpc = ZeroRPC(app, middlewares=None)
    Or set a new context for the Server/Client at runtime::
app = Zask(__name__)
rpc = ZeroRPC(app, middlewares=[CONFIG_ENDPOINT_MIDDLEWARE])
default_context = zerorpc.Context().get_instance()
srv = rpc.Server(Srv(), context=default_context)
client = rpc.Client(context=default_context)
"""
def __init__(self, app=None, middlewares=DEFAULT_MIDDLEWARES):
self._middlewares = middlewares
self.Server = _Server
self.Client = _Client
if app is not None:
self.init_app(app)
else:
self.app = None
def init_app(self, app):
"""Initial the access logger and zerorpc exception handlers.
:param app: current zask application
"""
self.app = app
app.config.setdefault('ZERORPC_ACCESS_LOG', '/tmp/zerorpc.access.log')
self._init_zerorpc_logger()
if self._middlewares:
self._init_zerorpc_context()
else:
_Server.__context__ = _Client.__context__ = None
def _init_zerorpc_context(self):
context = zerorpc.Context()
        # There is a conflict when binding the endpoint,
        # so don't register both middlewares.
if CONFIG_CUSTOME_HEADER_MIDDLEWARE in self._middlewares:
context.register_middleware(ConfigCustomHeaderMiddleware(self.app))
elif CONFIG_ENDPOINT_MIDDLEWARE in self._middlewares:
context.register_middleware(ConfigEndpointMiddleware(self.app))
if REQUEST_CHAIN_MIDDLEWARE in self._middlewares:
context.register_middleware(RequestChainMiddleware(self.app))
if ACCESS_LOG_MIDDLEWARE in self._middlewares:
context.register_middleware(AccessLogMiddleware(self.app))
if REQUEST_EVENT_MIDDLEWARE in self._middlewares:
context.register_middleware(RequestEventMiddleware())
_Server.__context__ = _Client.__context__ = context
def register_middleware(self, middleware):
context = _Server.__context__ or zerorpc.Context()
context.register_middleware(middleware)
_Server.__context__ = _Client.__context__ = context
def _init_zerorpc_logger(self):
if self.app.config['DEBUG']:
access_handler = StreamHandler()
error_handler = debug_handler()
else:
access_handler = TimedRotatingFileHandler(
self.app.config['ZERORPC_ACCESS_LOG'],
when='D',
interval=1,
backupCount=15)
error_handler = production_handler(self.app.config)
access_handler.setLevel(INFO)
access_handler.setFormatter(Formatter(ACCESS_LOG_FORMAT,
ACCESS_LOG_DATETIME_FORMAT))
access_logger.setLevel(INFO)
del access_logger.handlers[:]
access_logger.addHandler(access_handler)
channel_logger.addHandler(error_handler)
gevent_logger.addHandler(error_handler)
core_logger.addHandler(error_handler)
class _Server(zerorpc.Server):
"""Extends zerorpc.Server by the middlewares
"""
__version__ = None
__service_name__ = None
__context__ = None
def __init__(self, methods=None, context=None, **kargs):
if methods is None:
methods = self
context_ = context \
or _Server.__context__ \
or zerorpc.Context.get_instance()
heartbeat = kargs.pop('heartbeat', None)
zerorpc.Server.__init__(self,
methods,
context=context_,
heartbeat=heartbeat,
**kargs)
# Inject get_request_event *after* Server constructor so that
# it's not exposed to the RPC from the outside.
methods.get_request_event = self._get_request_event
for instance in context_._middlewares:
if isinstance(instance, ConfigEndpointMiddleware):
if methods.__version__ is None:
raise NoVersionException()
if methods.__service_name__ is None:
raise NoNameException()
self.bind(HandleEndpoint.encode(methods.__service_name__,
methods.__version__))
if isinstance(instance, ConfigCustomHeaderMiddleware):
instance.set_server_version(methods.__version__)
if isinstance(instance, AccessLogMiddleware):
instance.set_class_name(methods.__class__.__name__)
def _get_request_event(self):
"""Returns the request_event from the local greenlet storage.
Requires RequestEventMiddleware to be enabled to work.
"""
enabled_middlewares = [mw.__class__.__name__ for mw in
self.__context__._middlewares]
if 'RequestEventMiddleware' not in enabled_middlewares:
raise MissingMiddlewareException('RequestEventMiddleware')
return getattr(_request_ctx.stash, 'request_event')
class _Client(zerorpc.Client):
"""Extends zerorpc.Client by the middlewares
"""
__context__ = None
def __init__(self, connect_to=None, context=None, version=None, **kargs):
self._connect_to = connect_to
self._service_version = version
heartbeat = kargs.pop('heartbeat', None)
context_ = context \
or _Client.__context__ \
or zerorpc.Context.get_instance()
        # Let this class handle connecting itself by always passing
        # connect_to=None to the parent constructor.
zerorpc.Client.__init__(
self,
connect_to=None,
context=context_,
heartbeat=heartbeat,
**kargs)
if connect_to:
connected = False
            # This is tricky: hook_resolve_endpoint only accepts an endpoint,
            # so the service name and version are encoded into one here and
            # decoded again by the endpoint-resolving middleware.
for instance in context_._middlewares:
if isinstance(instance, ConfigMiddleware):
self.connect(HandleEndpoint.encode(connect_to, version))
connected = True
break
if not connected:
self.connect(connect_to)
def _generate_request_event(self, channel, method, args):
xheader = self._context.hook_get_task_context()
if self._context._hooks['client_before_request']:
xheader.update({
'service_name': self._connect_to,
'service_version': self._service_version
})
request_event = channel.new_event(method, args, xheader)
self._context.hook_client_before_request(request_event)
return request_event
def __call__(self, method, *args, **kargs):
timeout = kargs.get('timeout', self._timeout)
channel = self._multiplexer.channel()
hbchan = HeartBeatOnChannel(channel, freq=self._heartbeat_freq,
passive=self._passive_heartbeat)
bufchan = BufferedChannel(hbchan, inqueue_size=kargs.get('slots', 100))
request_event = self._generate_request_event(bufchan, method, args)
bufchan.emit_event(request_event)
try:
if kargs.get('async', False) is False:
return self._process_response(request_event, bufchan, timeout)
async_result = gevent.event.AsyncResult()
gevent.spawn(self._process_response, request_event, bufchan,
timeout).link(async_result)
return async_result
except:
            # XXX: This is going to be closed twice if async is false and
            # _process_response raises an exception. It is unclear whether the
            # async branch above can raise an exception too; if not, this
            # block can simply be removed.
bufchan.close()
raise
class HandleEndpoint(object):
@staticmethod
def encode(name, version):
# TODO: validate the name. only [A-Za-z] and _ are acceptable
return [name, version]
@staticmethod
def decode(endpoint):
[name, version] = endpoint
return name, version
class NoSuchAccessKeyException(Exception):
def __init__(self, access_key):
self.access_key = access_key
def __str__(self):
return "No such key '%s'." % self.access_key
class VersionNotMatchException(Exception):
def __init__(self, access_key, request_version, server_version):
self.access_key = access_key
self.request_version = request_version
self.server_version = server_version
def __str__(self):
return "The request version %s from client %s is not match %s." % \
(self.request_version, self.access_key, self.server_version)
class MissingAccessKeyException(Exception):
def __init__(self, config_name):
self.config_name = config_name
def __str__(self):
return "Missing 'access_key' in the '%s'." % self.config_name
class MissingConfigException(Exception):
def __init__(self, config_name):
self.config_name = config_name
def __str__(self):
return "Missing config '%s' in your application." % self.config_name
class ClientMissingVersionException(Exception):
def __str__(self):
return "Client missing version. " \
"You can set a default one or specify one when request."
class NoVersionException(Exception):
def __str__(self):
return "__version__ is needed for ZeroRPC server"
class NoNameException(Exception):
def __str__(self):
return "__service_name__ is needed for ZeroRPC server"
class MissingMiddlewareException(Exception):
"""Raised when Zask tries to invoke a functionality provided
by a specific middleware, but that middleware is not loaded.
"""
def __init__(self, middleware):
self.middleware = middleware
def __str__(self):
return 'Missing required middleware {}.'.format(self.middleware)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
import numpy as np
from ..extern import six
from ..extern.six.moves import range
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
from ..time import Time
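# Hedged, partial sketch of the engine interface described in the module
# docstring (illustrative only; real engines such as SortedArray implement
# the full interface, and only a few of the required methods are shown here).
class _ExampleDictEngine(object):
    def __init__(self, data, row_index, unique=False):
        # ``data`` holds the key columns and ``row_index`` the matching row
        # numbers, both already in lexicographically sorted order.
        self.rows = {}
        key_tuples = zip(*[data[name] for name in data.colnames])
        for key, row in zip(key_tuples, row_index):
            self.rows.setdefault(tuple(key), []).append(int(row))
    def add(self, key, row):
        self.rows.setdefault(key, []).append(row)
    def find(self, key):
        return sorted(self.rows.get(key, []))
    def sorted_data(self):
        # row numbers ordered by key, as required by sorted_data()
        return [r for k in sorted(self.rows) for r in sorted(self.rows[k])]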
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index(object):
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
FastBST, and FastRBT) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __new__(cls, *args, **kwargs):
self = super(Index, cls).__new__(cls)
# If (and only if) unpickling for protocol >= 2, then args and kwargs
# are both empty. The class __init__ requires at least the `columns`
# arg. In this case return a bare `Index` object which is then morphed
# by the unpickling magic into the correct SlicedIndex object.
if not args and not kwargs:
return self
self.__init__(*args, **kwargs)
return SlicedIndex(self, slice(0, 0, None), original=True)
def __init__(self, columns, engine=None, unique=False):
from .table import Table, Column
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd')
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError("Column does not belong to index: {0}".format(col_name))
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[i] = vals[self.col_position(col.info.name)]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {0} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple([col[row] for col in self.columns]), row):
raise ValueError("Could not remove row {0} from index".format(row))
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
        # (x, y) search corresponds to ((x, max), (y, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
num_cols = self.data.num_cols if self.engine == SortedArray else None
# create an actual Index, not a SlicedIndex
index = super(Index, Index).__new__(Index)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex(object):
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
else: # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.copy().insert_row(self.orig_coords(pos), vals,
columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.copy().sort()
def __repr__(self):
if self.original:
return repr(self.index)
return 'Index slice {0} of\n{1}'.format(
(self.start, self.stop, self.step), self.index)
def __str__(self):
return repr(self)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
return Index([col_slice], engine=self.data.__class__)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
return Index(new_cols, engine=self.data.__class__)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy):
'''
Inputs a table and some subset of its columns, and
returns an index corresponding to this subset or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`
Subset of the columns in the table argument
'''
cols = set(table_copy.columns)
indices = set()
for column in cols:
for index in table[column].info.indices:
if set([x.info.name for x in index.columns]) == cols:
return index
return None
class _IndexModeContext(object):
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{0}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = '_{0}WithIndexCopy'.format(cls.__name__)
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super(TableIndices, self).__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
'''
if isinstance(item, six.string_types):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError("No index found for {0}".format(item))
return super(TableIndices, self).__getitem__(item)
class TableLoc(object):
'''
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
'''
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def __getitem__(self, item):
'''
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
'''
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
rows.extend(index.find((key,)))
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
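# Hedged usage sketch for TableLoc (illustrative; ``t`` is a Table indexed on
# column 'a', accessed through the ``Table.loc`` attribute defined elsewhere):
#
#   t.loc['x']          # rows whose primary-key value equals 'x'
#   t.loc[['x', 'y']]   # rows matching any of the listed key values
#   t.loc['b':'f']      # value slice; both endpoints are included
#   t.loc['a', 'x']     # explicit (index columns, value) tuple
#
# A KeyError is raised when no rows match.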
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
'''
def __init__(self, table):
super(TableILoc, self).__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError('Invalid index for iloc: {0}'.format(item))
return table_slice
|
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
api_group = cfg.OptGroup('api',
title='API options',
help="""
Options under this group are used to define Nova API.
""")
auth_opts = [
cfg.StrOpt("auth_strategy",
default="keystone",
choices=("keystone", "noauth2"),
deprecated_group="DEFAULT",
help="""
This determines the strategy to use for authentication: keystone or noauth2.
'noauth2' is designed for testing only, as it does no actual credential
checking. 'noauth2' provides administrative credentials only if 'admin' is
specified as the username.
"""),
cfg.BoolOpt("use_forwarded_for",
default=False,
deprecated_group="DEFAULT",
help="""
When True, the 'X-Forwarded-For' header is treated as the canonical remote
address. When False (the default), the 'remote_address' header is used.
You should only enable this if you have an HTML sanitizing proxy.
"""),
]
metadata_opts = [
cfg.StrOpt("config_drive_skip_versions",
default=("1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 "
"2007-12-15 2008-02-01 2008-09-01"),
deprecated_group="DEFAULT",
help="""
When gathering the existing metadata for a config drive, the EC2-style
metadata is returned for all versions that don't appear in this option.
As of the Liberty release, the available versions are:
* 1.0
* 2007-01-19
* 2007-03-01
* 2007-08-29
* 2007-10-10
* 2007-12-15
* 2008-02-01
* 2008-09-01
* 2009-04-04
The option is in the format of a single string, with each version separated
by a space.
Possible values:
* Any string that represents zero or more versions, separated by spaces.
"""),
cfg.ListOpt('vendordata_providers',
item_type=cfg.types.String(choices=['StaticJSON', 'DynamicJSON']),
default=['StaticJSON'],
deprecated_group="DEFAULT",
help="""
A list of vendordata providers.
vendordata providers are how deployers can provide metadata via configdrive
and metadata that is specific to their deployment. There are currently two
supported providers: StaticJSON and DynamicJSON.
StaticJSON reads a JSON file configured by the flag vendordata_jsonfile_path
and places the JSON from that file into vendor_data.json and
vendor_data2.json.
DynamicJSON is configured via the vendordata_dynamic_targets flag, which is
documented separately. For each of the endpoints specified in that flag, a
section is added to the vendor_data2.json.
For more information on the requirements for implementing a vendordata
dynamic endpoint, please see the vendordata.rst file in the nova developer
reference.
Possible values:
* A list of vendordata providers, with StaticJSON and DynamicJSON being
current options.
Related options:
* vendordata_dynamic_targets
* vendordata_dynamic_ssl_certfile
* vendordata_dynamic_connect_timeout
* vendordata_dynamic_read_timeout
* vendordata_dynamic_failure_fatal
"""),
cfg.ListOpt('vendordata_dynamic_targets',
default=[],
deprecated_group="DEFAULT",
help="""
A list of targets for the dynamic vendordata provider. These targets are of
the form <name>@<url>.
The dynamic vendordata provider collects metadata by contacting external REST
services and querying them for information about the instance. This behaviour
is documented in the vendordata.rst file in the nova developer reference.
"""),
cfg.StrOpt('vendordata_dynamic_ssl_certfile',
default='',
deprecated_group="DEFAULT",
help="""
Path to an optional certificate file or CA bundle to verify dynamic
vendordata REST services ssl certificates against.
Possible values:
* An empty string, or a path to a valid certificate file
Related options:
* vendordata_providers
* vendordata_dynamic_targets
* vendordata_dynamic_connect_timeout
* vendordata_dynamic_read_timeout
* vendordata_dynamic_failure_fatal
"""),
cfg.IntOpt('vendordata_dynamic_connect_timeout',
default=5,
min=3,
deprecated_group="DEFAULT",
help="""
Maximum wait time for an external REST service to connect.
Possible values:
* Any integer with a value greater than three (the TCP packet retransmission
timeout). Note that instance start may be blocked during this wait time,
so this value should be kept small.
Related options:
* vendordata_providers
* vendordata_dynamic_targets
* vendordata_dynamic_ssl_certfile
* vendordata_dynamic_read_timeout
* vendordata_dynamic_failure_fatal
"""),
cfg.IntOpt('vendordata_dynamic_read_timeout',
default=5,
min=0,
deprecated_group="DEFAULT",
help="""
Maximum wait time for an external REST service to return data once connected.
Possible values:
* Any integer. Note that instance start is blocked during this wait time,
so this value should be kept small.
Related options:
* vendordata_providers
* vendordata_dynamic_targets
* vendordata_dynamic_ssl_certfile
* vendordata_dynamic_connect_timeout
* vendordata_dynamic_failure_fatal
"""),
cfg.BoolOpt('vendordata_dynamic_failure_fatal',
default=False,
help="""
Should failures to fetch dynamic vendordata be fatal to instance boot?
Related options:
* vendordata_providers
* vendordata_dynamic_targets
* vendordata_dynamic_ssl_certfile
* vendordata_dynamic_connect_timeout
* vendordata_dynamic_read_timeout
"""),
cfg.IntOpt("metadata_cache_expiration",
default=15,
min=0,
deprecated_group="DEFAULT",
help="""
This option is the time (in seconds) to cache metadata. When set to 0,
metadata caching is disabled entirely; this is generally not recommended for
performance reasons. Increasing this setting should improve response times
of the metadata API when under heavy load. Higher values may increase memory
usage, and result in longer times for host metadata changes to take effect.
"""),
]
file_opts = [
cfg.StrOpt("vendordata_jsonfile_path",
deprecated_group="DEFAULT",
help="""
Cloud providers may store custom data in a vendor data file that will then be
available to the instances via the metadata service, and to the rendering of
config-drive. The default class for this, JsonFileVendorData, loads this
information from a JSON file, whose path is configured by this option. If
there is no path set by this option, the class returns an empty dictionary.
Possible values:
* Any string representing the path to the data file, or an empty string
(default).
""")
]
osapi_opts = [
cfg.IntOpt("max_limit",
default=1000,
min=0,
deprecated_group="DEFAULT",
deprecated_name="osapi_max_limit",
help="""
As a query can potentially return many thousands of items, you can limit the
maximum number of items in a single response by setting this option.
"""),
cfg.StrOpt("compute_link_prefix",
deprecated_group="DEFAULT",
deprecated_name="osapi_compute_link_prefix",
help="""
This string is prepended to the normal URL that is returned in links to the
OpenStack Compute API. If it is empty (the default), the URLs are returned
unchanged.
Possible values:
* Any string, including an empty string (the default).
"""),
cfg.StrOpt("glance_link_prefix",
deprecated_group="DEFAULT",
deprecated_name="osapi_glance_link_prefix",
help="""
This string is prepended to the normal URL that is returned in links to
Glance resources. If it is empty (the default), the URLs are returned
unchanged.
Possible values:
* Any string, including an empty string (the default).
"""),
cfg.BoolOpt("instance_list_per_project_cells",
default=False,
help="""
When enabled, this will cause the API to only query cell databases
in which the tenant has mapped instances. This requires an additional
(fast) query in the API database before each list, but also
(potentially) limits the number of cell databases that must be queried
to provide the result. If you have a small number of cells, or tenants
are likely to have instances in all cells, then this should be
False. If you have many cells, especially if you confine tenants to a
small subset of those cells, this should be True.
"""),
]
# NOTE(edleafe): I would like to import the value directly from
# nova.compute.vm_states, but that creates a circular import. Since this value
# is not likely to be changed, I'm copy/pasting it here.
BUILDING = "building" # VM only exists in DB
osapi_hide_opts = [
cfg.ListOpt("hide_server_address_states",
default=[BUILDING],
deprecated_group="DEFAULT",
deprecated_name="osapi_hide_server_address_states",
deprecated_for_removal=True,
deprecated_since="17.0.0",
deprecated_reason="This option hide the server address in server "
"representation for configured server states. "
"Which makes GET server API controlled by this "
"config options. Due to this config options, user "
"would not be able to discover the API behavior on "
"different clouds which leads to the interop issue.",
help="""
This option is a list of all instance states for which network address
information should not be returned from the API.
Possible values:
A list of strings, where each string is a valid VM state, as defined in
nova/compute/vm_states.py. As of the Newton release, they are:
* "active"
* "building"
* "paused"
* "suspended"
* "stopped"
* "rescued"
* "resized"
* "soft-delete"
* "deleted"
* "error"
* "shelved"
* "shelved_offloaded"
""")
]
fping_path_opts = [
cfg.StrOpt("fping_path",
default="/usr/sbin/fping",
deprecated_group="DEFAULT",
deprecated_for_removal=True,
deprecated_since="18.0.0",
deprecated_reason="""
This option is only used by the /os-fping API, which was itself deprecated
at version 2.36 (the Newton release). The API is also based on nova-network,
which is deprecated as well.
""",
help="The full path to the fping binary.")
]
os_network_opts = [
cfg.BoolOpt("use_neutron_default_nets",
default=False,
deprecated_group="DEFAULT",
help="""
When True, the TenantNetworkController will query the Neutron API to get the
default networks to use.
Related options:
* neutron_default_tenant_id
"""),
cfg.StrOpt("neutron_default_tenant_id",
default="default",
deprecated_group="DEFAULT",
help="""
Tenant ID (also referred to in some places as the 'project ID') to use when
getting the default network from the Neutron API.
Related options:
* use_neutron_default_nets
"""),
]
enable_inst_pw_opts = [
cfg.BoolOpt("enable_instance_password",
default=True,
deprecated_group="DEFAULT",
help="""
Enables returning of the instance password by the relevant server API calls
such as create, rebuild, evacuate, or rescue. If the hypervisor does not
support password injection, then the password returned will not be correct,
and in that case this option should be set to False.
""")
]
API_OPTS = (auth_opts +
metadata_opts +
file_opts +
osapi_opts +
osapi_hide_opts +
fping_path_opts +
os_network_opts +
enable_inst_pw_opts)
def register_opts(conf):
conf.register_group(api_group)
conf.register_opts(API_OPTS, group=api_group)
def list_opts():
return {api_group: API_OPTS}
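# Hedged usage sketch (illustrative only; this helper is never called). Once
# registered, the options are read from the 'api' group of a config object,
# e.g. conf.api.auth_strategy ('keystone' by default) or conf.api.max_limit
# (1000 by default).
def _example_usage(conf):
    register_opts(conf)
    return conf.api.auth_strategy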
|
|
__doc__ = """Code by Benjamin S. Murphy
[email protected]
Dependencies:
numpy
Callable Methods:
write_asc_grid(X, Y, Z, filename='output.asc', style=1): Writes an MxN data grid
to an ASCII grid file (.*asc).
Inputs:
X (array-like, dim Nx1): X-coordinates of grid points at center
of cells.
Y (array-like, dim Mx1): Y-coordinates of grid points at center
of cells.
Z (array-like, dim MxN): Gridded data values. May be a masked array.
filename (string, optional): Name of output *.asc file.
style (int, optional): Determines how to write the *.asc file
header. Specifying 1 writes out DX, DY, XLLCENTER, YLLCENTER.
Specifying 2 writes out CELLSIZE (note DX must be the same
as DY), XLLCORNER, YLLCORNER. Default is 1.
read_asc_grid(filename, footer=0): Reads ASCII grid file (*.asc).
Inputs:
filename (string): Name of *.asc file.
footer (int, optional): Number of lines at bottom of *.asc file to skip.
Outputs:
grid_array (numpy array): MxN array of grid values,
where M is number of Y-coordinates and N is number
of X-coordinates. The array entry corresponding to
the lower-left coordinates is at index [M, 0], so that
the array is oriented as it would be in X-Y space.
x (numpy array): 1D array of N X-coordinates.
y (numpy array): 1D array of M Y-coordinates.
CELLSIZE (tuple or float): Either a two-tuple of (x-cell size,
y-cell size), or a float that specifies the uniform cell size.
NODATA (float): Value that specifies which entries are not
actual data.
Copyright (c) 2015 Benjamin S. Murphy
"""
import numpy as np
def write_asc_grid(x, y, z, filename='output.asc', style=1):
"""Writes gridded data to ASCII grid file (*.asc)"""
if np.ma.is_masked(z):
z = np.array(z.tolist(-999.))
x = np.squeeze(np.array(x))
y = np.squeeze(np.array(y))
z = np.squeeze(np.array(z))
    # Check dimensionality before indexing into z.shape.
    if z.ndim != 2:
        raise ValueError("Two-dimensional grid is required to write *.asc grid.")
    nrows = z.shape[0]
    ncols = z.shape[1]
if x.ndim > 1 or y.ndim > 1:
raise ValueError("Dimensions of X and/or Y coordinate arrays are not as "
"expected. Could not write *.asc grid.")
if z.shape != (y.size, x.size):
print "WARNING: Grid dimensions are not as expected. " \
"Incorrect *.asc file generation may result."
if np.amin(x) != x[0] or np.amin(y) != y[0]:
print "WARNING: Order of X or Y coordinates is not as expected. " \
"Incorrect *.asc file generation may result."
dx = abs(x[1] - x[0])
dy = abs(y[1] - y[0])
if abs((x[-1] - x[0])/(x.shape[0] - 1)) != dx or \
abs((y[-1] - y[0])/(y.shape[0] - 1)) != dy:
raise ValueError("X or Y spacing is not constant; *.asc grid cannot "
"be written.")
cellsize = -1
if style == 2:
if dx != dy:
raise ValueError("X and Y spacing is not the same. Cannot write "
"*.asc file in the specified format.")
cellsize = dx
xllcenter = x[0]
yllcenter = y[0]
xllcorner = -1 # Note that these values are flagged as -1. If there is a problem in trying
yllcorner = -1 # to write out style 2, the -1 value will appear in the output file.
if style == 2:
xllcorner = xllcenter - dx/2.0
yllcorner = yllcenter - dy/2.0
no_data = -999.
with open(filename, 'w') as f:
if style == 1:
f.write("NCOLS " + '{:<10n}'.format(ncols) + '\n')
f.write("NROWS " + '{:<10n}'.format(nrows) + '\n')
f.write("XLLCENTER " + '{:<10.2f}'.format(xllcenter) + '\n')
f.write("YLLCENTER " + '{:<10.2f}'.format(yllcenter) + '\n')
f.write("DX " + '{:<10.2f}'.format(dx) + '\n')
f.write("DY " + '{:<10.2f}'.format(dy) + '\n')
f.write("NODATA_VALUE " + '{:<10.2f}'.format(no_data) + '\n')
elif style == 2:
f.write("NCOLS " + '{:<10n}'.format(ncols) + '\n')
f.write("NROWS " + '{:<10n}'.format(nrows) + '\n')
f.write("XLLCORNER " + '{:<10.2f}'.format(xllcorner) + '\n')
f.write("YLLCORNER " + '{:<10.2f}'.format(yllcorner) + '\n')
f.write("CELLSIZE " + '{:<10.2f}'.format(cellsize) + '\n')
f.write("NODATA_VALUE " + '{:<10.2f}'.format(no_data) + '\n')
else:
raise ValueError("style kwarg must be either 1 or 2.")
for m in range(z.shape[0] - 1, -1, -1):
for n in range(z.shape[1]):
f.write('{:<16.2f}'.format(z[m, n]))
if m != 0:
f.write('\n')
def read_asc_grid(filename, footer=0):
"""Reads ASCII grid file (*.asc).
footer kwarg specifies how many lines at end of *.asc file to skip.
Returns a NumPy array of the values (dim MxN, where M is
the number of Y-coordinates and N is the number of
X-coordinates); a NumPy array of the X-coordinates (dim N);
a NumPy array of the Y-coordinates (dim M); either a tuple
of the grid cell size in the x direction and the grid cell
size in the y direction (DX, DY) or the uniform grid cell size;
and the NO_DATA value.
"""
ncols = None
nrows = None
xllcorner = None
xllcenter = None
yllcorner = None
yllcenter = None
cellsize = None
dx = None
dy = None
no_data = None
header_lines = 0
with open(filename, 'rU') as f:
while True:
string, value = f.readline().split()
header_lines += 1
if string.lower() == 'ncols':
ncols = int(value)
elif string.lower() == 'nrows':
nrows = int(value)
elif string.lower() == 'xllcorner':
xllcorner = float(value)
elif string.lower() == 'xllcenter':
xllcenter = float(value)
elif string.lower() == 'yllcorner':
yllcorner = float(value)
elif string.lower() == 'yllcenter':
yllcenter = float(value)
elif string.lower() == 'cellsize':
cellsize = float(value)
elif string.lower() == 'cell_size':
cellsize = float(value)
elif string.lower() == 'dx':
dx = float(value)
elif string.lower() == 'dy':
dy = float(value)
elif string.lower() == 'nodata_value':
no_data = float(value)
elif string.lower() == 'nodatavalue':
no_data = float(value)
else:
raise IOError("could not read *.asc file. Error in header.")
if (ncols is not None) and (nrows is not None) and \
(((xllcorner is not None) and (yllcorner is not None)) or
((xllcenter is not None) and (yllcenter is not None))) and \
((cellsize is not None) or ((dx is not None) and (dy is not None))) and \
(no_data is not None):
break
raw_grid_array = np.genfromtxt(filename, skip_header=header_lines, skip_footer=footer)
grid_array = np.flipud(raw_grid_array)
if nrows != grid_array.shape[0] or ncols != grid_array.shape[1]:
raise IOError("Error reading *.asc file. Encountered problem "
"with header: NCOLS and/or NROWS does not match "
"number of columns/rows in data file body.")
if xllcorner is not None and yllcorner is not None:
if dx is not None and dy is not None:
xllcenter = xllcorner + dx/2.0
yllcenter = yllcorner + dy/2.0
else:
xllcenter = xllcorner + cellsize/2.0
yllcenter = yllcorner + cellsize/2.0
if dx is not None and dy is not None:
x = np.arange(xllcenter, xllcenter + ncols*dx, dx)
y = np.arange(yllcenter, yllcenter + nrows*dy, dy)
else:
x = np.arange(xllcenter, xllcenter + ncols*cellsize, cellsize)
y = np.arange(yllcenter, yllcenter + nrows*cellsize, cellsize)
    # Sometimes x and y can each be an entry too long due to imprecision in
    # calculating the upper cutoff for np.arange(); this takes care of that.
if x.size == ncols + 1:
x = x[:-1]
if y.size == nrows + 1:
y = y[:-1]
if cellsize is None:
cellsize = (dx, dy)
return grid_array, x, y, cellsize, no_data
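# Illustrative sketch (not part of the original module, never called by it):
# builds a tiny CELLSIZE-based *.asc grid on disk and parses it back with
# read_asc_grid() defined above. The file name 'example_grid.asc' is
# hypothetical.
def _example_read_asc_grid(path='example_grid.asc'):
    header = ("NCOLS 3\n"
              "NROWS 2\n"
              "XLLCORNER 0.0\n"
              "YLLCORNER 0.0\n"
              "CELLSIZE 1.0\n"
              "NODATA_VALUE -999.0\n")
    body = "1.0 2.0 3.0\n4.0 5.0 6.0\n"
    with open(path, 'w') as f:
        f.write(header + body)
    z, x, y, cellsize, no_data = read_asc_grid(path)
    # read_asc_grid flips the rows, so z[0] is the southernmost (smallest Y) row.
    assert z.shape == (2, 3) and cellsize == 1.0 and no_data == -999.0
    return z, x, y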
|
|
# Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for EMC XtremIO Storage.
Supports XtremIO version 2.4 and up.
1.0.0 - initial release
1.0.1 - enable volume extend
1.0.2 - added FC support, improved error handling
1.0.3 - update logging level, add translation
1.0.4 - support for FC zones
1.0.5 - add support for XtremIO 4.0
"""
import base64
import json
import math
import random
import string
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEFAULT_PROVISIONING_FACTOR = 20.0
XTREMIO_OPTS = [
cfg.StrOpt('xtremio_cluster_name',
default='',
help='XMS cluster id in multi-cluster environment')]
CONF.register_opts(XTREMIO_OPTS)
RANDOM = random.Random()
OBJ_NOT_FOUND_ERR = 'obj_not_found'
VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique'
VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found'
ALREADY_MAPPED_ERR = 'already_mapped'
class XtremIOClient(object):
def __init__(self, configuration, cluster_id):
self.configuration = configuration
self.cluster_id = cluster_id
self.base64_auth = (base64
.encodestring('%s:%s' %
(self.configuration.san_login,
self.configuration.san_password))
.replace('\n', ''))
self.base_url = ('https://%s/api/json/types' %
self.configuration.san_ip)
def _create_request(self, request_typ, data, url, url_data):
if request_typ in ('GET', 'DELETE'):
data.update(url_data)
self.update_url(data, self.cluster_id)
query = urllib.parse.urlencode(data, doseq=True)
url = '%(url)s?%(query)s' % {'query': query, 'url': url}
request = urllib.request.Request(url)
else:
if url_data:
url = ('%(url)s?%(query)s' %
{'query': urllib.parse.urlencode(url_data, doseq=True),
'url': url})
self.update_data(data, self.cluster_id)
LOG.debug('data: %s', data)
request = urllib.request.Request(url, json.dumps(data))
LOG.debug('%(type)s %(url)s', {'type': request_typ, 'url': url})
def get_request_type():
return request_typ
request.get_method = get_request_type
request.add_header("Authorization", "Basic %s" % (self.base64_auth, ))
return request
def _send_request(self, object_type, key, request):
try:
response = urllib.request.urlopen(request)
except (urllib.error.HTTPError, ) as exc:
if exc.code == 400 and hasattr(exc, 'read'):
error = json.load(exc)
err_msg = error['message']
if err_msg.endswith(OBJ_NOT_FOUND_ERR):
LOG.warning(_LW("object %(key)s of "
"type %(typ)s not found"),
{'key': key, 'typ': object_type})
raise exception.NotFound()
elif err_msg == VOL_NOT_UNIQUE_ERR:
LOG.error(_LE("can't create 2 volumes with the same name"))
msg = (_('Volume by this name already exists'))
raise exception.VolumeBackendAPIException(data=msg)
elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
LOG.error(_LE("Can't find volume to map %s"), key)
raise exception.VolumeNotFound(volume_id=key)
elif ALREADY_MAPPED_ERR in err_msg:
raise exception.XtremIOAlreadyMappedError()
LOG.error(_LE('Bad response from XMS, %s'), exc.read())
msg = (_('Exception: %s') % six.text_type(exc))
raise exception.VolumeDriverException(message=msg)
if response.code >= 300:
LOG.error(_LE('bad API response, %s'), response.msg)
msg = (_('bad response from XMS got http code %(code)d, %(msg)s') %
{'code': response.code, 'msg': response.msg})
raise exception.VolumeBackendAPIException(data=msg)
return response
def req(self, object_type='volumes', request_typ='GET', data=None,
name=None, idx=None):
if not data:
data = {}
if name and idx:
msg = _("can't handle both name and index in req")
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
url = '%s/%s' % (self.base_url, object_type)
url_data = {}
key = None
if name:
url_data['name'] = name
key = name
elif idx:
url = '%s/%d' % (url, idx)
key = str(idx)
request = self._create_request(request_typ, data, url, url_data)
response = self._send_request(object_type, key, request)
str_result = response.read()
if str_result:
try:
return json.loads(str_result)
except Exception:
LOG.exception(_LE('querying %(typ)s, %(req)s failed to '
'parse result, return value = %(res)s'),
{'typ': object_type,
'req': request_typ,
'res': str_result})
def update_url(self, data, cluster_id):
return
def update_data(self, data, cluster_id):
return
def get_cluster(self):
return self.req('clusters', idx=1)['content']
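# Illustrative sketch only (the host and volume name are hypothetical): shows
# how _create_request() above composes a GET URL from the name filter using
# urllib's urlencode, without touching the network.
def _example_get_url():
    base_url = 'https://xms.example.com/api/json/types'
    url = '%s/%s' % (base_url, 'volumes')
    data = {'name': 'vol-1', 'cluster-name': 'cluster-1'}
    query = urllib.parse.urlencode(data, doseq=True)
    # e.g. 'https://xms.example.com/api/json/types/volumes?name=vol-1&...'
    return '%(url)s?%(query)s' % {'query': query, 'url': url}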
class XtremIOClient3(XtremIOClient):
def find_lunmap(self, ig_name, vol_name):
try:
for lm_link in self.req('lun-maps')['lun-maps']:
idx = lm_link['href'].split('/')[-1]
lm = self.req('lun-maps', idx=int(idx))['content']
if lm['ig-name'] == ig_name and lm['vol-name'] == vol_name:
return lm
except exception.NotFound:
raise (exception.VolumeDriverException
(_("can't find lunmap, ig:%(ig)s vol:%(vol)s") %
{'ig': ig_name, 'vol': vol_name}))
def num_of_mapped_volumes(self, initiator):
cnt = 0
for lm_link in self.req('lun-maps')['lun-maps']:
idx = lm_link['href'].split('/')[-1]
lm = self.req('lun-maps', idx=int(idx))['content']
if lm['ig-name'] == initiator:
cnt += 1
return cnt
def get_iscsi_portal(self):
iscsi_portals = [t['name'] for t in self.req('iscsi-portals')
['iscsi-portals']]
# Get a random portal
portal_name = RANDOM.choice(iscsi_portals)
try:
portal = self.req('iscsi-portals',
name=portal_name)['content']
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("iscsi portal, %s, not found") % portal_name))
return portal
class XtremIOClient4(XtremIOClient):
def find_lunmap(self, ig_name, vol_name):
try:
return (self.req('lun-maps',
data={'full': 1,
'filter': ['vol-name:eq:%s' % vol_name,
'ig-name:eq:%s' % ig_name]})
['lun-maps'][0])
except (KeyError, IndexError):
raise exception.VolumeNotFound(volume_id=vol_name)
def num_of_mapped_volumes(self, initiator):
return len(self.req('lun-maps',
data={'filter': 'ig-name:eq:%s' % initiator})
['lun-maps'])
def update_url(self, data, cluster_id):
if cluster_id:
data['cluster-name'] = cluster_id
def update_data(self, data, cluster_id):
if cluster_id:
data['cluster-id'] = cluster_id
def get_iscsi_portal(self):
iscsi_portals = self.req('iscsi-portals',
data={'full': 1})['iscsi-portals']
return RANDOM.choice(iscsi_portals)
def get_cluster(self):
if self.cluster_id:
return self.req('clusters', name=self.cluster_id)['content']
else:
name = self.req('clusters')['clusters'][0]['name']
return self.req('clusters', name=name)['content']
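# Illustrative sketch only (volume and initiator-group names are hypothetical):
# the v4 API filter list used by XtremIOClient4.find_lunmap() is encoded with
# doseq=True, so the 'filter' key is repeated once per expression.
def _example_v4_filter_query():
    data = {'full': 1,
            'filter': ['vol-name:eq:vol-1', 'ig-name:eq:host-1']}
    # e.g. 'filter=vol-name%3Aeq%3Avol-1&filter=ig-name%3Aeq%3Ahost-1&full=1'
    # (key order may vary by Python version)
    return urllib.parse.urlencode(data, doseq=True)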
class XtremIOVolumeDriver(san.SanDriver):
"""Executes commands relating to Volumes."""
VERSION = '1.0.5'
driver_name = 'XtremIO'
MIN_XMS_VERSION = [3, 0, 0]
def __init__(self, *args, **kwargs):
super(XtremIOVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(XTREMIO_OPTS)
self.protocol = None
self.backend_name = (self.configuration.safe_get('volume_backend_name')
or self.driver_name)
self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name')
or '')
self.provisioning_factor = (self.configuration.
safe_get('max_over_subscription_ratio')
or DEFAULT_PROVISIONING_FACTOR)
self._stats = {}
self.client = XtremIOClient3(self.configuration, self.cluster_id)
def _obj_from_result(self, res):
typ, idx = res['links'][0]['href'].split('/')[-2:]
return self.client.req(typ, idx=int(idx))['content']
def check_for_setup_error(self):
try:
try:
xms = self.client.req('xms', idx=1)['content']
version_text = xms['version']
except exception.VolumeDriverException:
cluster = self.client.req('clusters', idx=1)['content']
version_text = cluster['sys-sw-version']
except exception.NotFound:
msg = _("XtremIO not initialized correctly, no clusters found")
raise (exception.VolumeBackendAPIException
(data=msg))
ver = [int(n) for n in version_text.split('-')[0].split('.')]
if ver < self.MIN_XMS_VERSION:
msg = (_('Invalid XtremIO version %(cur)s,'
' version %(min)s or up is required') %
{'min': self.MIN_XMS_VERSION,
'cur': ver})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI('XtremIO SW version %s'), version_text)
if ver[0] >= 4:
self.client = XtremIOClient4(self.configuration, self.cluster_id)
def create_volume(self, volume):
"Creates a volume"
data = {'vol-name': volume['id'],
'vol-size': str(volume['size']) + 'g'
}
self.client.req('volumes', 'POST', data)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
data = {'snap-vol-name': volume['id'],
'ancestor-vol-id': snapshot.id}
self.client.req('snapshots', 'POST', data)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
data = {'snap-vol-name': volume['id'],
'ancestor-vol-id': src_vref['id']}
self.client.req('snapshots', 'POST', data)
def delete_volume(self, volume):
"""Deletes a volume."""
try:
self.client.req('volumes', 'DELETE', name=volume['id'])
except exception.NotFound:
LOG.info(_LI("volume %s doesn't exist"), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
data = {'snap-vol-name': snapshot.id,
'ancestor-vol-id': snapshot.volume_id}
self.client.req('snapshots', 'POST', data)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
self.client.req('volumes', 'DELETE', name=snapshot.id)
except exception.NotFound:
LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)
def _update_volume_stats(self):
sys = self.client.get_cluster()
physical_space = int(sys["ud-ssd-space"]) / units.Mi
used_physical_space = int(sys["ud-ssd-space-in-use"]) / units.Mi
free_physical = physical_space - used_physical_space
actual_prov = int(sys["vol-size"]) / units.Mi
self._stats = {'volume_backend_name': self.backend_name,
'vendor_name': 'EMC',
'driver_version': self.VERSION,
'storage_protocol': self.protocol,
'total_capacity_gb': physical_space,
'free_capacity_gb': (free_physical *
self.provisioning_factor),
'provisioned_capacity_gb': actual_prov,
'max_over_subscription_ratio': self.provisioning_factor,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'reserved_percentage':
self.configuration.reserved_percentage,
'QoS_support': False}
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV."""
lv_name = existing_ref['source-name']
# Attempt to locate the volume.
try:
vol_obj = self.client.req('volumes', name=lv_name)['content']
except exception.NotFound:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# Attempt to rename the LV to match the OpenStack internal name.
self.client.req('volumes', 'PUT', data={'vol-name': volume['id']},
idx=vol_obj['index'])
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing."""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
# Attempt to locate the volume.
try:
vol_obj = self.client.req('volumes', name=lv_name)['content']
except exception.NotFound:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
lv_size = int(math.ceil(int(vol_obj['vol-size']) / units.Mi))
return lv_size
def unmanage(self, volume):
"""Removes the specified volume from Cinder management."""
# trying to rename the volume to [cinder name]-unmanged
try:
self.client.req('volumes', 'PUT', name=volume['id'],
data={'vol-name': volume['name'] + '-unmanged'})
except exception.NotFound:
LOG.info(_LI("Volume with the name %s wasn't found,"
" can't unmanage"),
volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
data = {'vol-size': six.text_type(new_size) + 'g'}
try:
self.client.req('volumes', 'PUT', data, name=volume['id'])
except exception.NotFound:
msg = _("can't find the volume to extend")
raise (exception.VolumeDriverException(message=msg))
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector"""
try:
ig = self.client.req('initiator-groups',
name=self._get_ig(connector))['content']
tg = self.client.req('target-groups', name='Default')['content']
vol = self.client.req('volumes', name=volume['id'])['content']
lm_name = '%s_%s_%s' % (six.text_type(vol['index']),
six.text_type(ig['index'])
if ig else 'any',
six.text_type(tg['index']))
LOG.debug('removing lun map %s', lm_name)
self.client.req('lun-maps', 'DELETE', name=lm_name)
except exception.NotFound:
LOG.warning(_LW("terminate_connection: lun map not found"))
def _get_password(self):
return ''.join(RANDOM.choice
(string.ascii_uppercase + string.digits)
for _ in range(12))
def create_lun_map(self, volume, ig):
try:
res = self.client.req('lun-maps', 'POST',
{'ig-id': ig['ig-id'][2],
'vol-id': volume['id']})
lunmap = self._obj_from_result(res)
LOG.info(_LI('created lunmap\n%s'), lunmap)
except exception.XtremIOAlreadyMappedError:
LOG.info(_LI('volume already mapped,'
                         ' trying to retrieve it %(ig)s, %(vol)s'),
{'ig': ig['ig-id'][1], 'vol': volume['id']})
lunmap = self.client.find_lunmap(ig['ig-id'][1], volume['id'])
return lunmap
def _get_ig(self, connector):
raise NotImplementedError()
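# Illustrative sketch only (the version string below is hypothetical): how
# check_for_setup_error() reduces the reported version text to a list of
# integers and compares it element-wise against MIN_XMS_VERSION.
def _example_version_gate(version_text='4.0.2-80'):
    ver = [int(n) for n in version_text.split('-')[0].split('.')]
    # Python compares lists lexicographically, so [4, 0, 2] >= [3, 0, 0].
    return ver >= XtremIOVolumeDriver.MIN_XMS_VERSION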
class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
"""Executes commands relating to ISCSI volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSCSI target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
driver_name = 'XtremIO_ISCSI'
def __init__(self, *args, **kwargs):
super(XtremIOISCSIDriver, self).__init__(*args, **kwargs)
self.protocol = 'iSCSI'
def initialize_connection(self, volume, connector):
try:
sys = self.client.get_cluster()
except exception.NotFound:
msg = _("XtremIO not initialized correctly, no clusters found")
raise exception.VolumeBackendAPIException(data=msg)
use_chap = (sys.get('chap-authentication-mode', 'disabled') !=
'disabled')
discovery_chap = (sys.get('chap-discovery-mode', 'disabled') !=
'disabled')
initiator = self._get_initiator(connector)
try:
# check if the IG already exists
ig = self.client.req('initiator-groups', 'GET',
name=self._get_ig(connector))['content']
except exception.NotFound:
            # create an initiator group to hold the initiator
data = {'ig-name': self._get_ig(connector)}
self.client.req('initiator-groups', 'POST', data)
try:
ig = self.client.req('initiator-groups',
name=self._get_ig(connector))['content']
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("Failed to create IG, %s") %
self._get_ig(connector)))
try:
init = self.client.req('initiators', 'GET',
name=initiator)['content']
if use_chap:
chap_passwd = init['chap-authentication-initiator-'
'password']
# delete the initiator to create a new one with password
if not chap_passwd:
                    LOG.info(_LI('initiator has no password while using chap, '
                                 'removing it'))
self.client.req('initiators', 'DELETE', name=initiator)
# check if the initiator already exists
raise exception.NotFound()
except exception.NotFound:
# create an initiator
data = {'initiator-name': initiator,
'ig-id': initiator,
'port-address': initiator}
if use_chap:
data['initiator-authentication-user-name'] = 'chap_user'
chap_passwd = self._get_password()
data['initiator-authentication-password'] = chap_passwd
if discovery_chap:
data['initiator-discovery-user-name'] = 'chap_user'
data['initiator-discovery-'
'password'] = self._get_password()
self.client.req('initiators', 'POST', data)
        # lun mapping
lunmap = self.create_lun_map(volume, ig)
properties = self._get_iscsi_properties(lunmap)
if use_chap:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = 'chap_user'
properties['auth_password'] = chap_passwd
LOG.debug('init conn params:\n%s', properties)
return {
'driver_volume_type': 'iscsi',
'data': properties
}
def _get_iscsi_properties(self, lunmap):
"""Gets iscsi configuration
:target_discovered: boolean indicating whether discovery was used
:target_iqn: the IQN of the iSCSI target
:target_portal: the portal of the iSCSI target
:target_lun: the lun of the iSCSI target
:volume_id: the id of the volume (currently used by xen)
:auth_method:, :auth_username:, :auth_password:
the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
        :access_mode:    the access mode the client may use
                         ('rw' or 'ro' currently supported)
"""
portal = self.client.get_iscsi_portal()
ip = portal['ip-addr'].split('/')[0]
properties = {'target_discovered': False,
'target_iqn': portal['port-address'],
'target_lun': lunmap['lun'],
'target_portal': '%s:%d' % (ip, portal['ip-port']),
'access_mode': 'rw'}
return properties
def _get_initiator(self, connector):
return connector['initiator']
def _get_ig(self, connector):
return connector['initiator']
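# Illustrative sketch only (all values are hypothetical): the string formats
# described in the XtremIOISCSIDriver docstring for provider_location and
# provider_auth, alongside the shape of the dict _get_iscsi_properties() builds.
def _example_iscsi_model_properties():
    provider_location = '10.0.0.1:3260,1 iqn.2008-05.com.xtremio:001e675b8ee1'
    provider_auth = 'CHAP chap_user s3cr3tpassword'
    properties = {'target_discovered': False,
                  'target_iqn': provider_location.split(' ')[1],
                  'target_portal': provider_location.split(',')[0],
                  'target_lun': 1,
                  'access_mode': 'rw'}
    return provider_location, provider_auth, properties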
class XtremIOFibreChannelDriver(XtremIOVolumeDriver,
driver.FibreChannelDriver):
def __init__(self, *args, **kwargs):
super(XtremIOFibreChannelDriver, self).__init__(*args, **kwargs)
self.protocol = 'FC'
self._targets = None
def get_targets(self):
if not self._targets:
try:
target_list = self.client.req('targets')["targets"]
targets = [self.client.req('targets',
name=target['name'])['content']
for target in target_list
if '-fc' in target['name']]
self._targets = [target['port-address'].replace(':', '')
for target in targets
if target['port-state'] == 'up']
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("Failed to get targets")))
return self._targets
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
initiators = self._get_initiator(connector)
ig_name = self._get_ig(connector)
i_t_map = {}
# get or create initiator group
try:
# check if the IG already exists
ig = self.client.req('initiator-groups', name=ig_name)['content']
except exception.NotFound:
            # create an initiator group to hold the initiator
data = {'ig-name': ig_name}
self.client.req('initiator-groups', 'POST', data)
try:
ig = self.client.req('initiator-groups',
name=ig_name)['content']
except exception.NotFound:
raise (exception.VolumeBackendAPIException
(data=_("Failed to create IG, %s") % ig_name))
# get or create all initiators
for initiator in initiators:
try:
self.client.req('initiators', name=initiator)['content']
except exception.NotFound:
# create an initiator
data = {'initiator-name': initiator,
'ig-id': ig['name'],
'port-address': initiator}
self.client.req('initiators', 'POST', data)
i_t_map[initiator] = self.get_targets()
lunmap = self.create_lun_map(volume, ig)
return {'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': lunmap['lun'],
'target_wwn': self.get_targets(),
'access_mode': 'rw',
'initiator_target_map': i_t_map}}
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
(super(XtremIOFibreChannelDriver, self)
.terminate_connection(volume, connector, **kwargs))
num_vols = self.client.num_of_mapped_volumes(self._get_ig(connector))
if num_vols > 0:
data = {}
else:
i_t_map = {}
for initiator in self._get_initiator(connector):
i_t_map[initiator] = self.get_targets()
data = {'target_wwn': self.get_targets(),
'initiator_target_map': i_t_map}
return {'driver_volume_type': 'fibre_channel',
'data': data}
def _get_initiator(self, connector):
return connector['wwpns']
def _get_ig(self, connector):
return connector['host']
|
|
"""
Common narrative logging functions.
To log an event with proper metadata and formatting, use 'log_event'.
You can also do free-form logs, but these will be ignored by
most upstream consumers.
"""
import collections
import logging
from logging import handlers
import os
import threading
import time
# Local
from .util import kbase_env
from . import log_proxy
from .log_common import format_event
from .narrative_logger import NarrativeLogger
__author__ = 'Dan Gunter <[email protected]>'
__date__ = '2014-07-31'
## Constants
KBASE_TMP_DIR = "/tmp"
KBASE_TMP_LOGFILE = os.path.join(KBASE_TMP_DIR, "kbase-narrative.log")
# env var with location of proxy config file
KBASE_PROXY_ENV = 'KBASE_PROXY_CONFIG'
## Internal logging
_log = logging.getLogger("tornado.application")
_narr_log = NarrativeLogger()
# Low-level debugging helper (normally disabled)
#def _logdbg(m):
# open("/tmp/wtf", "a").write(m + "\n")
## External functions
def get_narrative_logger():
"""Get a Narrative logger that talks to the Logstash store.
    Unlike the other loggers here, this does not write to syslog; it opens a
    socket to Logstash and writes a JSON record to it, bypassing the standard
    Python logging handlers.
    It is intended for specific events that should be collated and used for metrics.
"""
return _narr_log
def get_logger(name="", init=False):
"""Get a given KBase log obj.
:param name: name (a.b.c) of the logging namespace, which may be
relative or absolute (starting with 'biokbase.'), or
empty in which case the 'biokbase' logger is returned
:param init: If true, re-initialize the file/socket log handlers
:return: Log object
:rtype: LogAdapter
"""
if init:
reset_handlers()
return logging.getLogger(_kbase_log_name(name))
def log_event(log, event, mapping):
"""Log an event and a mapping.
For example::
log_event(_log, "collision", {"who":"unstoppable force",
"with":"immovable object", "where":"kbase"})
"""
msg = format_event(event, mapping)
log.info(msg)
## Internal functions and classes
def _kbase_log_name(name):
"""Smarter name of KBase logger."""
# no name => root
if not name:
return "biokbase"
# absolute name
if name.startswith("biokbase."):
return name
# relative name
return "biokbase." + name
def _has_handler_type(log, type_):
return any([isinstance(h, type_) for h in log.handlers])
## Custom handlers
class BufferedSocketHandler(handlers.SocketHandler):
"""Buffer up messages to a socket, sending them asynchronously.
Starts a separate thread to pull messages off and send them.
Ignores any messages that did not come from `log_event()`, above.
"""
def __init__(self, *args):
handlers.SocketHandler.__init__(self, *args)
self._dbg = _log.isEnabledFor(logging.DEBUG)
if self._dbg:
_log.debug("Created SocketHandler with args = {}".format(args))
self.buf = collections.deque([], 100)
self.buf_lock = threading.Lock()
# start thread to send data from buffer
self.thr = threading.Thread(target=self.emitter)
self.thr.daemon = True
self._stop = False
self.extra = {}
self.thr.start()
def close(self):
if self.thr:
self._stop = True
self.thr.join()
self.thr = None
handlers.SocketHandler.close(self)
def emitter(self):
while not self._stop:
try:
self.buf_lock.acquire()
item = self.buf.popleft()
if not self._emit(item):
self.buf.appendleft(item)
self.buf_lock.release()
time.sleep(0.1)
else:
self.buf_lock.release()
except IndexError:
self.buf_lock.release()
time.sleep(0.1)
def emit(self, record):
if self._skip(record):
return
# stuff 'extra' from environment into record
#_logdbg("@@ stuffing into record: {}".format(kbase_env))
record.__dict__.update(kbase_env)
if 'auth_token' in record.__dict__:
del record.__dict__['auth_token']
self.buf_lock.acquire()
try:
self.buf.append(record)
finally:
self.buf_lock.release()
def _skip(self, record):
"""Return True if this record should not go to a socket"""
# Do not forward records that didn't get logged through
# kblogging.log_event
if record.funcName != 'log_event':
if self._dbg:
_log.debug("Skip: funcName {} != log_event"
.format(record.funcName))
            return True
def _emit(self, record):
"""Re-implement to return a success code."""
success = False
try:
s = self.makePickle(record)
self.send(s)
success = True
except (KeyboardInterrupt, SystemExit):
raise
except Exception as err:
_log.debug("Emit record to socket failed: {}".format(err))
self.handleError(record)
if success and _log.isEnabledFor(logging.DEBUG):
_log.debug("Record sent to socket")
return success
def init_handlers():
"""Initialize and add the log handlers.
We only allow one FileHandler and one SocketHandler to exist,
no matter how many times this is called.
"""
# Turn on debugging by setting environment variable KBASE_DEBUG.
if os.environ.get("KBASE_DEBUG", None):
g_log.setLevel(logging.DEBUG)
else:
g_log.setLevel(logging.INFO)
if not _has_handler_type(g_log, logging.FileHandler):
hndlr = logging.FileHandler(KBASE_TMP_LOGFILE)
fmtr = logging.Formatter("%(levelname)s %(asctime)s %(name)s %(message)s")
hndlr.setFormatter(fmtr)
g_log.addHandler(hndlr)
if not _has_handler_type(g_log, handlers.SocketHandler):
cfg = get_proxy_config()
g_log.debug("Opening socket to proxy at {}:{}".format(
cfg.host, cfg.port))
sock_handler = BufferedSocketHandler(cfg.host, cfg.port)
g_log.addHandler(sock_handler)
def get_proxy_config():
config_file = os.environ.get(KBASE_PROXY_ENV, None)
if config_file:
_log.info("Configuring KBase logging from file '{}'".format(config_file))
else:
_log.warn("Configuring KBase logging from defaults ({} is empty, or not found)"
.format(KBASE_PROXY_ENV))
# return log_proxy.ProxyConfiguration(config_file)
return log_proxy.ProxyConfigurationWrapper(config_file)
def reset_handlers():
"""Remove & re-add all handlers."""
while g_log.handlers:
g_log.removeHandler(g_log.handlers.pop())
init_handlers()
## Run the rest of this on import
# Get root log obj.
g_log = get_logger()
# If no handlers, initialize them
if not g_log.handlers:
init_handlers()
class NarrativeUIError(object):
"""Created by Narrative UI javascript on an error.
"""
ui_log = get_logger("narrative_ui")
def __init__(self, is_fatal, where="unknown location", what="unknown condition"):
info = {"function": where, "msg": what}
msg = format_event("ui.error", info)
log_method = (self.ui_log.error, self.ui_log.critical)[is_fatal]
log_method(msg)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Different gradient implementations for benchmark purposes
def SpecializedGrad(l, grad):
return gen_linalg_ops.cholesky_grad(l, grad)
def _GradWithInverseL(l, l_inverse, grad):
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def TriAngSolveCompositeGrad(l, grad):
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
# Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
# Compute l^{-H} @ middle = z
l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)
# We need to compute z @ l^{-1}. With matrix_triangular_solve we
# actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
  # we can omit the conjugate transpose here.
z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
grad_a += linalg.adjoint(grad_a)
return grad_a * 0.5
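# Illustrative NumPy-only sketch (not part of the test suite): evaluates the
# reverse-mode formula in the comment above,
#   l^{-H} @ ((l^{H} @ grad) * (tril(ones) - 1/2 eye)) @ l^{-1},
# once with an explicit inverse and once with triangular solves, and checks
# that the two agree. Matrix size and seed are arbitrary.
def _ExampleCholeskyGradFormula(n=4, seed=0):
  rng = np.random.RandomState(seed)
  a = rng.randn(n, n)
  l = np.linalg.cholesky(a.dot(a.T) + n * np.eye(n))
  grad = rng.randn(n, n)
  mask = np.tril(np.ones((n, n))) - 0.5 * np.eye(n)
  middle = l.T.dot(grad) * mask
  l_inv = np.linalg.inv(l)
  via_inverse = l_inv.T.dot(middle).dot(l_inv)
  via_solves = np.linalg.solve(l.T, np.linalg.solve(l.T, middle.T).T)
  assert np.allclose(via_inverse, via_solves)
  # Symmetrize, matching the helpers above (real-valued case).
  return 0.5 * (via_inverse + via_inverse.T)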
def MatrixInverseCompositeGrad(l, grad):
l_inverse = linalg_ops.matrix_inverse(l)
return _GradWithInverseL(l, l_inverse, grad)
def TriAngInvCompositeGrad(l, grad):
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
return _GradWithInverseL(l, l_inverse, grad)
class CholeskyOpTest(test.TestCase):
def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = self.evaluate([chol, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix))
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.cached_session(use_gpu=True) as sess:
chol = linalg_ops.cholesky(x)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in (np.float32, np.float64):
self._verifyCholesky(data.astype(dtype))
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyCholesky(complex_data)
def testBatch(self):
simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices)
# Generate random complex valued positive-definite matrices.
matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
self._verifyCholesky(matrices)
@test_util.run_deprecated_v1
def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))
@test_util.run_v1_only("b/120545219")
def testWrongDimensions(self):
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
def testNotInvertibleCPU(self):
# The input should be invertible.
with self.session(use_gpu=True):
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful. The"
" input might not be valid."):
# All rows of the matrix below add to zero
self._verifyCholesky(
np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))
def testEmpty(self):
self._verifyCholesky(np.empty([0, 2, 2]))
self._verifyCholesky(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
matrix1 = math_ops.matmul(matrix1, matrix1, adjoint_a=True)
matrix2 = math_ops.matmul(matrix2, matrix2, adjoint_a=True)
c1 = linalg_ops.cholesky(matrix1)
c2 = linalg_ops.cholesky(matrix2)
c1_val, c2_val = self.evaluate([c1, c2])
self.assertAllClose(c1_val, c2_val)
class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32
def getShapes(self, shapeList):
return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)
@test_util.run_deprecated_v1
def testSmallMatrices(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))
@test_util.run_deprecated_v1
def testSmallMatricesComplex(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))
@test_util.run_deprecated_v1
def testOneBlockMatrices(self):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=True)
@test_util.run_deprecated_v1
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)
@test_util.run_deprecated_v1
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
@test_util.run_v1_only("b/120545219")
def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64,), scalarTest=True)
@test_util.run_deprecated_v1
def testTwoBlockMatrixComplexDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex128,), scalarTest=True)
def testAgainstSpecialized(self):
np.random.seed(0)
data = np.random.randn(33, 33).astype(np.float32)
data = np.matmul(data, data.T)
grad_data = np.random.randn(*data.shape).astype(np.float32)
with ops.Graph().as_default(), self.session(use_gpu=False) as s:
x = constant_op.constant(data, dtypes_lib.float32)
chol = linalg_ops.cholesky(x)
composite_grad = gradients_impl.gradients(chol, x, grad_data)[0]
specialized_grad = SpecializedGrad(chol, grad_data)
reference, actual = s.run([specialized_grad, composite_grad])
self.assertAllClose(reference, actual)
def runFiniteDifferences(self,
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128),
scalarTest=False):
with self.session(use_gpu=True):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
data = np.random.randn(shape[0], shape[1])
if dtype.is_complex:
data = data.astype(np.complex64)
data += 1j * np.random.randn(shape[0], shape[1])
x = constant_op.constant(data, dtype)
tensor = math_ops.matmul(
x, math_ops.conj(array_ops.transpose(x))) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
data = np.random.randn()
if dtype.is_complex:
data = np.complex64(data)
data += 1j * np.random.randn()
x = constant_op.constant(data, dtype)
R = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.multiply(R, x)
tensor = math_ops.matmul(
e, math_ops.conj(array_ops.transpose(e))) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
tensor = array_ops.tile(
array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(
x, x._shape_as_list(), y, y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
elif dtype == dtypes_lib.complex128:
self.assertLess(error, 5e-5)
else:
self.assertLess(error, 5e-3)
class CholeskyBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkCholeskyOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_gpu_{shape}".format(shape=shape))
def benchmarkGradVariants(self):
def _BenchmarkGrad(grad_fn, name, device):
for shape in self.shapes:
matrix = self._GenerateMatrix(shape)
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device):
l = variables.Variable(np.linalg.cholesky(matrix))
grad_matrix = variables.Variable(
np.random.randn(*matrix.shape).astype(np.float32))
grad = grad_fn(l, grad_matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
grad,),
min_iters=25,
name="{name}_{dev}_{shape}".format(
name=name, dev=grad.device, shape=shape))
if test.is_gpu_available(True):
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/device:GPU:0")
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/cpu:0")
_BenchmarkGrad(SpecializedGrad, "specialized", "/cpu:0")
if __name__ == "__main__":
test.main()
|
|
"""
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
import os, time, sys
from .monitor import TextMonitor
from subprocess import Popen, PIPE
import numpy as np
from logging import debug
from .hosts import Host
from shutil import rmtree
from stat import S_ISDIR
from glob import glob
from threading import Thread, Event
class SubmitHost(Host):
"""
Create a host object that uses the hub submit command.
Args:
cpus: Number of cpus each process uses. Default=1.
cpus_per_node: How many cpus to use on each node. Default=1.
"""
def __init__(self, venue=None, cpus=1, cpus_per_node=1, walltime=60):
Host.__init__(self)
self.cpus = cpus
self.cpus_per_node = cpus_per_node
self.hostname = venue
self.jobs = []
# Creates a CSV file compatible with the HubZero submit command
def add_jobs(self, fname, args):
import shlex
self.fname = fname
first = True
try:
os.mkdir(fname)
except:
pass
f = open(os.path.join(fname, 'input.csv'), 'w')
for a in args:
if first:
first = False
print(', '.join(['@@'+b[0] for b in a]), file=f)
cmds = [(x[0], '@@'+x[0]) for x in a]
print(','.join([str(b[1]) for b in a]), file=f)
f.close()
        venue = ''
if self.hostname is not None:
if self.hostname == 'local':
venue = '--local'
else:
venue = '--venue %s' % self.hostname
scmd = "submit %s --runName=puq -d input.csv %s" % (venue, self.prog.cmd(cmds))
self.add_job(shlex.split(scmd), '', 0, '')
    # run, monitor and status return
    # True (1) if successful
    # False (0) for errors or unfinished jobs
def run(self):
""" Run all the jobs in the queue """
self._running = []
self._monitor = TextMonitor()
cwd = os.path.abspath(os.getcwd())
os.chdir(self.fname)
err = self._run()
os.chdir(cwd)
if err == False:
rmtree(self.fname, ignore_errors=True)
try:
os.remove(self.fname+'.hdf5')
except:
pass
return False
return True
def peg_parse(self):
# parse the contents of the pegasusstatus.txt file
done = 0
filename = 'pegasusstatus.txt'
with open(filename) as f:
for line in f:
if line.startswith('%DONE'):
done = float(line.split()[1])
break
return done
def status_monitor(self):
# Watch pegasusstatus.txt for status changes.
# This could possibly be done more efficiently
# using filesystem notification but in practice
# this turned out to be more reliable across
# different OS versions.
found = False
while not found and not self.stop.is_set():
try:
os.chdir('puq/work')
found = True
except:
self.stop.wait(10)
done = -1
while not self.stop.is_set():
try:
d = self.peg_parse()
except:
d = done
if d > done:
print('=RAPPTURE-PROGRESS=>%d Running' % (int(d)))
sys.stdout.flush()
done = d
if int(d) >= 100:
self.stop.set()
else:
self.stop.wait(10)
def _run(self):
j = self.jobs[0]
print('=RAPPTURE-PROGRESS=>0 Starting')
sys.stdout.flush()
try:
myprocess = Popen(j['cmd'], bufsize=0)
        except Exception as e:
            print('Command %s failed: %s' % (' '.join(j['cmd']), e))
            sys.stdout.flush()
            return False
self.stop = Event()
p2 = Thread(target=self.status_monitor)
p2.daemon = True
p2.start()
# wait for command to finish
err = True
try:
ret = myprocess.wait()
if ret:
err = False
print('Submit failed with error %s' % ret)
whocares = os.listdir(os.getcwd())
if os.path.exists('puq'):
fn = glob('puq/*.stderr')
if fn:
with open(fn[0]) as f:
print(f.read())
sys.stdout.flush()
except KeyboardInterrupt:
print('\nPUQ interrupted. Cleaning up. Please wait...\n')
err = False
myprocess.kill()
j['status'] = 'F'
self.stop.set()
if p2 and p2.is_alive():
p2.join()
return err
# Collect the data from individual stdout and stderr files into
# the HDF5 file. Remove files when finished.
def collect(self, hf):
# Collect results from output files
debug("Collecting")
cwd = os.path.abspath(os.getcwd())
os.chdir(self.fname)
hf.require_group('output')
jobs_grp = hf.require_group('output/jobs')
# find the jobs that are completed and, if the stdout/stderr files are there,
# move them to hdf5
finished_jobs = []
os.chdir('puq')
# Get the job stats. Do this in a loop because it looks like
# sometimes this code gets run before pegasus generates the file.
tries = 2
while tries > 0:
try:
data = np.genfromtxt('pegasusjobstats.csv', usecols=(2,3,4,7,15,16), dtype='string',
skip_header=26, comments='#', delimiter=',')
tries = 0
except:
tries -= 1
if tries > 0:
time.sleep(30)
job = {}
for j, _try, site, _time, exitcode, host in data:
if site == 'local':
continue
j = j[j.rfind('_')+1:]
job[j] = (int(_try), site, float(_time), int(exitcode), host)
times = np.empty((len(job)))
for j in job:
jobnum = int(j)-1
times[jobnum] = job[j][2]
finished_jobs.append(jobnum)
if not S_ISDIR(os.stat(j).st_mode):
print("ERROR: job %s directory not found" % j)
continue
os.chdir(j)
grp = jobs_grp.require_group(str(jobnum))
for ext in ['out', 'err']:
outfile = glob('*.std%s' % ext)
if outfile:
f = open(outfile[0], 'r')
fdata = f.read()
grp.create_dataset('std%s' % ext, data=fdata)
if job[j][3] != 0:
# error code was set
print("ERROR: Job %s failed: %s" % (j, fdata))
f.close()
for fn in self.prog.outfiles:
try:
f = open(fn, 'r')
grp.create_dataset(fn, data=f.read())
f.close()
except:
pass
os.chdir('..')
if 'time' in jobs_grp:
del jobs_grp['time']
jobs_grp['time'] = times
os.chdir(cwd)
rmtree(self.fname)
return finished_jobs
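# Illustrative sketch only (parameter names 'x' and 'y' are hypothetical):
# the layout of the input.csv that add_jobs() writes for two jobs, built with
# the same join logic used above.
def _example_input_csv():
    args = [[('x', 1.0), ('y', 2.0)], [('x', 3.0), ('y', 4.0)]]
    lines = [', '.join(['@@' + name for name, _ in args[0]])]
    for a in args:
        lines.append(','.join([str(value) for _, value in a]))
    # -> '@@x, @@y\n1.0,2.0\n3.0,4.0'
    return '\n'.join(lines)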
|
|
################################################################################
#
# Copyright 2015 Crown copyright (c)
# Land Information New Zealand and the New Zealand Government.
# All rights reserved
#
# This program is released under the terms of the 3 clause BSD license. See the
# LICENSE file for more information.
#
################################################################################
import sys
import Resources
from os.path import dirname, abspath
sys.path.append('.qgis2/python/plugins/QGIS-AIMS-Plugin')
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from DockWindow import DockWindow
from AimsUI.LayerManager import LayerManager
from AimsUI.DelAddressTool import DelAddressTool
from AimsUI.MoveAddressTool import MoveAddressTool
from AimsUI.CreateNewAddressTool import CreateNewAddressTool
from AimsUI.UpdateAddressTool import UpdateAddressTool
#from AimsUI.LineageTool import LineageTool
from AimsUI.GetRclTool import GetRcl
from AimsUI.UpdateReviewPosition import UpdateReviewPosition
from AimsQueueWidget import AimsQueueWidget
from AimsUI.AimsClient.Gui.UiDataManager import UiDataManager
from AimsUI.AimsClient.Gui.ResponseHandler import ResponseHandler
from AimsUI.AimsClient.Gui.FeatureHighlighter import FeatureHighlighter
from AIMSDataManager.AimsLogging import Logger
uilog = None
class Controller( QObject ):
'''
    Manages all UI components and plugin tools, as well as the
    initialisation and dissemination of singleton instances
'''
# log
global uilog
uilog = Logger.setup(lf='uiLog')
_instance = None
def __init__(self, iface):
"""
Initialise UI Data Manager and Response Handler
@param iface: QgisInterface Abstract base class defining interfaces exposed by QgisApp
@type iface: Qgisinterface Object
"""
QObject.__init__(self)
self.iface = iface
self._queues = None
self._dockWindow = None
self._currentMapTool = None
self.rclParent = None
self.currentRevItem = None
self.actions = []
if Controller._instance == None:
Controller._instance = self
self.uidm = UiDataManager(self.iface, self)
self.RespHandler = ResponseHandler(self.iface, self.uidm)
self.refLayer = None
self.adrlayer = None
self.revLayer = None
def initGui(self):
"""
Set up UI within QGIS
"""
# set srs
self._displayCrs = QgsCoordinateReferenceSystem()
self._displayCrs.createFromOgcWmsCrs('EPSG:4167')
self.iface.mapCanvas().mapSettings().setDestinationCrs(self._displayCrs)
# init layerManager
self._layerManager = LayerManager(self.iface, self)
# init Highlighter
self.highlighter = FeatureHighlighter(self.iface, self._layerManager, self)
# Build an action list from QGIS navigation toolbar
actionList = self.iface.mapNavToolToolBar().actions()
self.actions = self.iface.mapNavToolToolBar().actions()
# Main address editing window
self._loadaction = QAction(QIcon(':/plugins/QGIS-AIMS-Plugin/resources/loadaddress.png'),
'QGIS-AIMS-Plugin', self.iface.mainWindow())
self._loadaction.setWhatsThis('Open the QGIS-AIMS-Plugin')
self._loadaction.setStatusTip('Open the QGIS-AIMS-Plugin')
self._loadaction.triggered.connect(self.loadQueues)
self._loadaction.triggered.connect(self.loadLayers)
self._loadaction.triggered.connect(self.enableAddressLayer)
self._loadaction.triggered.connect(self.startDM)
# Create new address tool
self._createnewaddressaction = QAction(QIcon(':/plugins/QGIS-AIMS-Plugin/resources/newaddresspoint.png'),
'Create AIMS Feature', self.iface.mainWindow())
self._createnewaddressaction.setWhatsThis('Create AIMS Feature')
self._createnewaddressaction.setStatusTip('Create AIMS Feature')
self._createnewaddressaction.setEnabled(False)
self._createnewaddressaction.setCheckable(True)
self._createnewaddressaction.triggered.connect( self.startNewAddressTool )
self._createnewaddresstool = CreateNewAddressTool( self.iface, self._layerManager, self)
self._createnewaddresstool.setAction( self._createnewaddressaction )
self.actions.append(self._createnewaddressaction)
# Delete address point
self._deladdressaction = QAction(QIcon(':/plugins/QGIS-AIMS-Plugin/resources/deleteaddress.png'),
'Delete AIMS Feature', self.iface.mainWindow())
self._deladdressaction.setWhatsThis('Delete AIMS Feature')
self._deladdressaction.setStatusTip('Delete AIMS Feature')
self._deladdressaction.setEnabled(False)
self._deladdressaction.setCheckable(True)
self._deladdressaction.triggered.connect( self.startDelAddressTool )
self._deladdtool = DelAddressTool( self.iface, self._layerManager, self)
self._deladdtool.setAction( self._deladdressaction )
self.actions.append(self._deladdressaction)
# Move address
self._moveaddressaction = QAction(QIcon(':/plugins/QGIS-AIMS-Plugin/resources/moveaddress.png'),
'Move AIMS Feature(s)', self.iface.mainWindow())
self._moveaddressaction.setWhatsThis('Move AIMS Feature(s)')
self._moveaddressaction.setStatusTip('Move AIMS Feature(s)')
self._moveaddressaction.setEnabled(False)
self._moveaddressaction.setCheckable(True)
self._moveaddressaction.triggered.connect( self.startMoveAddressTool )
self._moveaddtool = MoveAddressTool( self.iface, self._layerManager, self)
self._moveaddtool.setAction( self._moveaddressaction )
self.actions.append(self._moveaddressaction)
# Update address
self._updateaddressaction = QAction(QIcon(':/plugins/QGIS-AIMS-Plugin/resources/updateaddress.png'),
'Update AIMS Feature', self.iface.mainWindow())
self._updateaddressaction.setWhatsThis('Update AIMS Feature')
self._updateaddressaction.setStatusTip('Update AIMS Feature')
self._updateaddressaction.setEnabled(False)
self._updateaddressaction.setCheckable(True)
self._updateaddressaction.triggered.connect( self.startUpdateAddressTool )
self._updateaddtool = UpdateAddressTool( self.iface, self._layerManager, self)
self._updateaddtool.setAction( self._updateaddressaction )
self.actions.append(self._updateaddressaction)
        # RCL tool -- Not a QAction as it is triggered from many places but not the toolbar
self._rcltool = GetRcl(self.iface, self._layerManager, self)
        # Update Review Position tool -- Not a QAction as it is initiated from the review queue form
self._updateReviewPos = UpdateReviewPosition(self.iface, self._layerManager, self)
# Address lineage
"""
self._lineageaction = QAction(QIcon(':/plugins/QGIS-AIMS-Plugin/resources/lineage.png'),
'Build Lineage Relationships Between Features', self.iface.mainWindow())
self._lineageaction.setWhatsThis('Build Lineage Relationships Between Features')
self._lineageaction.setStatusTip('Build Lineage Relationships Between Features')
self._lineageaction.setEnabled(False)
self._lineageaction.setCheckable(True)
self._lineagetool = LineageTool( self.iface, self._layerManager, self)
self._lineageaction.triggered.connect(self._lineagetool.setEnabled)
self.actions.append(self._lineageaction)
"""
# Address highlighter
self._highlightaction = QAction(QIcon(":/plugins/QGIS-AIMS-Plugin/resources/addresshighlight.png"),
"Electoral address highlighter", self.iface.mainWindow())
self._highlightaction.setWhatsThis("Turn the electoral address highlighter on or off")
self._highlightaction.setStatusTip("Turn the electoral address highlighter on or off")
self._highlightaction.setText('Highlightaction')
self._highlightaction.setEnabled(False)
self._highlightaction.setCheckable(True)
self._highlightaction.toggled.connect( self.highlighter.setEnabled )
# Add to own toolbar
self._toolbar = self.iface.addToolBar('QGIS-AIMS-Plugin')
self._toolbar.addAction(self._createnewaddressaction)
self._toolbar.addAction(self._deladdressaction)
self._toolbar.addAction(self._updateaddressaction)
self._toolbar.addAction(self._moveaddressaction)
#self._toolbar.addAction(self._lineageaction)
self._toolbar.addAction(self._highlightaction)
# Add actions to menu
self.iface.addToolBarIcon(self._loadaction)
self.iface.addPluginToMenu('&QGIS-AIMS-Plugin', self._loadaction)
self.iface.addPluginToMenu('&QGIS-AIMS-Plugin', self._createnewaddressaction)
self.iface.addPluginToMenu('&QGIS-AIMS-Plugin', self._deladdressaction)
self.iface.addPluginToMenu('&QGIS-AIMS-Plugin', self._updateaddressaction)
self.iface.addPluginToMenu('&QGIS-AIMS-Plugin', self._moveaddressaction)
self.iface.addPluginToMenu('&QGIS-AIMS-Plugin', self._highlightaction)
# capture maptool selection changes
QObject.connect(self.iface.mapCanvas(), SIGNAL( "mapToolSet(QgsMapTool *)" ), self.mapToolChanged)
# Add actions from QGIS attributes toolbar (handling QWidgetActions)
tmpActionList = self.iface.attributesToolBar().actions()
for action in tmpActionList:
if isinstance(action, QWidgetAction):
actionList.extend( action.defaultWidget().actions() )
else:
actionList.append( action )
# ... could add other toolbars' action lists...
# Build a group with actions from actionList
group = QActionGroup( self.iface.mainWindow() )
group.setExclusive(True)
for qgisAction in actionList:
group.addAction( qgisAction )
# Add our own actions
for action in self.actions:
if action.text() == 'Highlightaction': continue
group.addAction( action )
# Plugin Management
def loadQueues( self ):
"""
Initialise Loading of the queue widgets into QGIS
"""
queues = self.Queues()
if not queues.isVisible():
queues.parent().show()
def Queues(self):
"""
Load of the queue widgets into QGIS
@rtype: QtGui.QTabWidget
        @return: Docked QTabWidget with UI components for displaying and
editing AIMS features
"""
if not self._queues:
queues = AimsQueueWidget(self.iface.mainWindow())
self._dockWindow = DockWindow(self.iface.mainWindow(),queues,"AimsQueues","Aims Queues")
self._queues = queues
self._dockWindow.unloadPlugin.connect(self.unload)
return self._queues
def startDM(self):
"""
Start the Data Manager only once the user enables the Plugin
"""
self.uidm.startDM()
def enableAddressLayer(self):
"""
        Enable tools that depend on the address layer
        only when the address layer exists
"""
self._deladdressaction.setEnabled(True)
self._createnewaddressaction.setEnabled(True)
self._moveaddressaction.setEnabled(True)
self._updateaddressaction.setEnabled(True)
self._highlightaction.setEnabled(True)
def loadLayers(self):
"""
Install map layers
"""
if not self.refLayer:
self.refLayer = self._layerManager.installRefLayers()
if not self.adrlayer:
self._layerManager.installAimsLayer('adr', 'AIMS Features')
if not self.revLayer:
self._layerManager.installAimsLayer('rev', 'AIMS Review')
self._layerManager.initialiseExtentEvent()
def mapToolChanged(self):
"""
Track the current maptool (excluding the RCL tool) to allow
rollback to the previous tool when the RCL tool is deactivated
"""
if (isinstance(self.iface.mapCanvas().mapTool(), GetRcl) == False and
isinstance(self.iface.mapCanvas().mapTool(), UpdateReviewPosition) == False):
self._currentMapTool = self.iface.mapCanvas().mapTool()
#self.highlighter.hideAll()
# logging
uilog.info('*** TOOL CHANGE *** {0} started'.format(self.iface.mapCanvas().mapTool()))
def setPreviousMapTool(self):
"""
Roll back to the previous maptool
"""
if self.iface.mapCanvas().mapTool() != self._currentMapTool:
self.iface.mapCanvas().setMapTool(self._currentMapTool)
def startNewAddressTool(self):
"""
Enable the 'create new address' map tool
"""
self.iface.mapCanvas().setMapTool(self._createnewaddresstool)
self._createnewaddresstool.setEnabled(True)
def startRclTool(self, parent = None):
"""
Enable the 'get rcl tool' map tool
@param parent: The map tool that enabled the RCL tool. Based on the RCL tool's
parent, different feature highlighting is performed
@type parent: string
"""
self.rclParent = parent
self.iface.mapCanvas().setMapTool(self._rcltool)
self._rcltool.setEnabled(True)
def startUpdateReviewPosTool(self, revItem = None):
"""
Enable the 'update review position' map tool
@param revItem: The current Review Item that is assigned to self.currentRevItem
@type revItem: AIMSDataManager.Address.AddressResolution() Object
"""
self.currentRevItem = revItem
self.iface.mapCanvas().setMapTool(self._updateReviewPos)
self._rcltool.setEnabled(True)
def startMoveAddressTool(self):
"""
Enable the 'move address' map tool
"""
self.iface.mapCanvas().setMapTool(self._moveaddtool)
self._moveaddtool.setEnabled(True)
def startUpdateAddressTool(self):
"""
Enable the "update address" map tool
"""
self.iface.mapCanvas().setMapTool(self._updateaddtool)
self._updateaddtool.setEnabled(True)
def startDelAddressTool(self):
"""
Enable the "delete address" map tool
"""
self.iface.mapCanvas().setMapTool(self._deladdtool)
self._deladdtool.setEnabled(True)
'''
def startLineageTool(self):
"""
Enable the "lineage" map tool
"""
self.iface.mapCanvas().setMapTool(self._lineagetool)
self._deladdtool.setEnabled(True)
'''
def unload(self):
"""
Remove the plugin's UI elements from QGIS
"""
self._layerManager.disconnectExtentEvent()
if self._queues:
self._queues.close()
self._queues = None
self.uidm.killDm()
self.iface.mainWindow().removeToolBar(self._toolbar)
self.iface.removeToolBarIcon(self._loadaction)
self.iface.removePluginMenu('&QGIS-AIMS-Plugin',self._loadaction)
self.iface.removePluginMenu('&QGIS-AIMS-Plugin', self._createnewaddressaction)
self.iface.removePluginMenu('&QGIS-AIMS-Plugin', self._deladdressaction)
self.iface.removePluginMenu('&QGIS-AIMS-Plugin', self._updateaddressaction)
self.iface.removePluginMenu('&QGIS-AIMS-Plugin', self._moveaddressaction)
#self.iface.removePluginMenu('&QGIS-AIMS-Plugin', self._lineageaction)
self.iface.removePluginMenu('&QGIS-AIMS-Plugin', self._highlightaction)
@pyqtSlot()
def rDataChanged(self):
"""
Review data changed, update review layer and table
"""
self._queues.uResolutionTab.refreshData()
self._layerManager.updateReviewLayer()
@pyqtSlot()
def fDataChanged(self):
"""
Feature data changed, update review layer and table
"""
self._layerManager.getAimsFeatures()
# Singleton instance
def instance():
"""
Return instance of the Controller
@return: The single Controller Instance
@rtype: AimsUI.AimsClient.Gui.Controller() Instance
"""
if Controller._instance is None:
Controller._instance = Controller()
return Controller._instance
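# --- Illustrative usage sketch (not part of the plugin) ----------------------
# A minimal helper showing the typical activation sequence once a Controller
# has been constructed against a live QGIS iface: start the data manager,
# install the AIMS layers and dock the queue widgets. The `controller`
# argument is assumed to be an already-initialised Controller instance.
def activate_controller(controller):
    controller.startDM()     # start the AIMS data manager
    controller.loadLayers()  # install reference, feature and review layers
    controller.loadQueues()  # dock the queue widgets in QGIS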
|
|
# This file is part of the MapProxy project.
# Copyright (C) 2010-2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement, division
import os
import time
try:
import cPickle as pickle
except ImportError:
import pickle
from mapproxy.seed.seeder import TileWalker, SeedTask, SeedProgress
from mapproxy.cache.dummy import DummyLocker
from mapproxy.cache.tile import TileManager
from mapproxy.source.tile import TiledSource
from mapproxy.grid import tile_grid_for_epsg
from mapproxy.grid import TileGrid
from mapproxy.srs import SRS
from mapproxy.util.coverage import BBOXCoverage, GeomCoverage
from mapproxy.seed.config import before_timestamp_from_options, SeedConfigurationError
from mapproxy.seed.config import LevelsList, LevelsRange, LevelsResolutionList, LevelsResolutionRange
from mapproxy.seed.util import ProgressStore
from mapproxy.test.helper import TempFile
from collections import defaultdict
from nose.tools import eq_, assert_almost_equal, raises
from nose.plugins.skip import SkipTest
try:
from shapely.wkt import loads as load_wkt
load_wkt # prevent lint warning
except ImportError:
load_wkt = None
class MockSeedPool(object):
def __init__(self):
self.seeded_tiles = defaultdict(set)
def process(self, tiles, progress):
for x, y, level in tiles:
self.seeded_tiles[level].add((x, y))
class MockCache(object):
def is_cached(self, tile):
return False
class TestSeeder(object):
def setup(self):
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90])
self.source = TiledSource(self.grid, None)
self.tile_mgr = TileManager(self.grid, MockCache(), [self.source], 'png',
locker=DummyLocker())
self.seed_pool = MockSeedPool()
def make_bbox_task(self, bbox, srs, levels):
md = dict(name='', cache_name='', grid_name='')
coverage = BBOXCoverage(bbox, srs)
return SeedTask(md, self.tile_mgr, levels, refresh_timestamp=None, coverage=coverage)
def make_geom_task(self, geom, srs, levels):
md = dict(name='', cache_name='', grid_name='')
coverage = GeomCoverage(geom, srs)
return SeedTask(md, self.tile_mgr, levels, refresh_timestamp=None, coverage=coverage)
def test_seed_full_bbox(self):
task = self.make_bbox_task([-180, -90, 180, 90], SRS(4326), [0, 1, 2])
seeder = TileWalker(task, self.seed_pool, handle_uncached=True)
seeder.walk()
eq_(len(self.seed_pool.seeded_tiles), 3)
eq_(self.seed_pool.seeded_tiles[0], set([(0, 0)]))
eq_(self.seed_pool.seeded_tiles[1], set([(0, 0), (1, 0)]))
eq_(self.seed_pool.seeded_tiles[2], set([(0, 0), (1, 0), (2, 0), (3, 0),
(0, 1), (1, 1), (2, 1), (3, 1)]))
def test_seed_small_bbox(self):
task = self.make_bbox_task([-45, 0, 180, 90], SRS(4326), [0, 1, 2])
seeder = TileWalker(task, self.seed_pool, handle_uncached=True)
seeder.walk()
eq_(len(self.seed_pool.seeded_tiles), 3)
eq_(self.seed_pool.seeded_tiles[0], set([(0, 0)]))
eq_(self.seed_pool.seeded_tiles[1], set([(0, 0), (1, 0)]))
eq_(self.seed_pool.seeded_tiles[2], set([(1, 1), (2, 1), (3, 1)]))
def test_seed_small_bbox_irregular_levels(self):
task = self.make_bbox_task([-45, 0, 180, 90], SRS(4326), [0, 2])
seeder = TileWalker(task, self.seed_pool, handle_uncached=True)
seeder.walk()
eq_(len(self.seed_pool.seeded_tiles), 2)
eq_(self.seed_pool.seeded_tiles[0], set([(0, 0)]))
eq_(self.seed_pool.seeded_tiles[2], set([(1, 1), (2, 1), (3, 1)]))
def test_seed_small_bbox_transformed(self):
bbox = SRS(4326).transform_bbox_to(SRS(900913), [-45, 0, 179, 80])
task = self.make_bbox_task(bbox, SRS(900913), [0, 1, 2])
seeder = TileWalker(task, self.seed_pool, handle_uncached=True)
seeder.walk()
eq_(len(self.seed_pool.seeded_tiles), 3)
eq_(self.seed_pool.seeded_tiles[0], set([(0, 0)]))
eq_(self.seed_pool.seeded_tiles[1], set([(0, 0), (1, 0)]))
eq_(self.seed_pool.seeded_tiles[2], set([(1, 1), (2, 1), (3, 1)]))
def test_seed_with_geom(self):
if not load_wkt: raise SkipTest('no shapely installed')
# box from 10 10 to 80 80 with small spike/corner to -10 60 (upper left)
geom = load_wkt("POLYGON((10 10, 10 50, -10 60, 10 80, 80 80, 80 10, 10 10))")
task = self.make_geom_task(geom, SRS(4326), [0, 1, 2, 3, 4])
seeder = TileWalker(task, self.seed_pool, handle_uncached=True)
seeder.walk()
eq_(len(self.seed_pool.seeded_tiles), 5)
eq_(self.seed_pool.seeded_tiles[0], set([(0, 0)]))
eq_(self.seed_pool.seeded_tiles[1], set([(0, 0), (1, 0)]))
eq_(self.seed_pool.seeded_tiles[2], set([(1, 1), (2, 1)]))
eq_(self.seed_pool.seeded_tiles[3], set([(4, 2), (5, 2), (4, 3), (5, 3), (3, 3)]))
eq_(len(self.seed_pool.seeded_tiles[4]), 4*4+2)
def test_seed_with_res_list(self):
if not load_wkt: raise SkipTest('no shapely installed')
# box from 10 10 to 80 80 with small spike/corner to -10 60 (upper left)
geom = load_wkt("POLYGON((10 10, 10 50, -10 60, 10 80, 80 80, 80 10, 10 10))")
self.grid = TileGrid(SRS(4326), bbox=[-180, -90, 180, 90],
res=[360/256, 360/720, 360/2000, 360/5000, 360/8000])
self.tile_mgr = TileManager(self.grid, MockCache(), [self.source], 'png',
locker=DummyLocker())
task = self.make_geom_task(geom, SRS(4326), [0, 1, 2, 3, 4])
seeder = TileWalker(task, self.seed_pool, handle_uncached=True)
seeder.walk()
eq_(len(self.seed_pool.seeded_tiles), 5)
eq_(self.seed_pool.seeded_tiles[0], set([(0, 0)]))
eq_(self.grid.grid_sizes[1], (3, 2))
eq_(self.seed_pool.seeded_tiles[1], set([(1, 0), (1, 1), (2, 0), (2, 1)]))
eq_(self.grid.grid_sizes[2], (8, 4))
eq_(self.seed_pool.seeded_tiles[2], set([(4, 2), (5, 2), (4, 3), (5, 3), (3, 3)]))
eq_(self.grid.grid_sizes[3], (20, 10))
eq_(len(self.seed_pool.seeded_tiles[3]), 5*5+2)
def test_seed_full_bbox_continue(self):
task = self.make_bbox_task([-180, -90, 180, 90], SRS(4326), [0, 1, 2])
seed_progress = SeedProgress([(0, 1), (0, 2)])
seeder = TileWalker(task, self.seed_pool, handle_uncached=True, seed_progress=seed_progress)
seeder.walk()
eq_(len(self.seed_pool.seeded_tiles), 3)
eq_(self.seed_pool.seeded_tiles[0], set([(0, 0)]))
eq_(self.seed_pool.seeded_tiles[1], set([(0, 0), (1, 0)]))
eq_(self.seed_pool.seeded_tiles[2], set([(2, 0), (3, 0),
(2, 1), (3, 1)]))
class TestLevels(object):
def test_level_list(self):
levels = LevelsList([-10, 3, 1, 3, 5, 7, 50])
eq_(levels.for_grid(tile_grid_for_epsg(4326)),
[1, 3, 5, 7])
def test_level_range(self):
levels = LevelsRange([1, 5])
eq_(levels.for_grid(tile_grid_for_epsg(4326)),
[1, 2, 3, 4, 5])
def test_level_range_open_from(self):
levels = LevelsRange([None, 5])
eq_(levels.for_grid(tile_grid_for_epsg(4326)),
[0, 1, 2, 3, 4, 5])
def test_level_range_open_to(self):
levels = LevelsRange([13, None])
eq_(levels.for_grid(tile_grid_for_epsg(4326)),
[13, 14, 15, 16, 17, 18, 19])
def test_level_range_open_tos_range(self):
levels = LevelsResolutionRange([1000, 100])
eq_(levels.for_grid(tile_grid_for_epsg(900913)),
[8, 9, 10, 11])
def test_res_range_open_from(self):
levels = LevelsResolutionRange([None, 100])
eq_(levels.for_grid(tile_grid_for_epsg(900913)),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
def test_res_range_open_to(self):
levels = LevelsResolutionRange([1000, None])
eq_(levels.for_grid(tile_grid_for_epsg(900913)),
[8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
def test_resolution_list(self):
levels = LevelsResolutionList([1000, 100, 500])
eq_(levels.for_grid(tile_grid_for_epsg(900913)),
[8, 9, 11])
class TestProgressStore(object):
def test_load_empty(self):
store = ProgressStore('doesnotexist.no_realy.txt')
store.load()
assert store.get(('foo', 'bar', 'baz')) == None
def test_load_store(self):
with TempFile(no_create=True) as tmp:
with open(tmp, 'wb') as f:
f.write(pickle.dumps({("view", "cache", "grid"): [(0, 1), (2, 4)]}))
store = ProgressStore(tmp)
assert store.get(('view', 'cache', 'grid')) == [(0, 1), (2, 4)]
assert store.get(('view', 'cache', 'grid2')) == None
store.add(('view', 'cache', 'grid'), [])
store.add(('view', 'cache', 'grid2'), [(0, 1)])
store.write()
store = ProgressStore(tmp)
assert store.get(('view', 'cache', 'grid')) == []
assert store.get(('view', 'cache', 'grid2')) == [(0, 1)]
def test_load_broken(self):
with TempFile(no_create=True) as tmp:
with open(tmp, 'wb') as f:
f.write(b'##invaliddata')
f.write(pickle.dumps({("view", "cache", "grid"): [(0, 1), (2, 4)]}))
store = ProgressStore(tmp)
assert store.status == {}
class TestRemoveBeforeTimestamp(object):
def test_from_time(self):
ts = before_timestamp_from_options({'time': '2010-12-01T20:12:00'})
# we don't know the timezone this test will run in
assert (1291230720.0 - 14 * 3600) < ts < (1291230720.0 + 14 * 3600)
def test_from_mtime(self):
with TempFile() as tmp:
os.utime(tmp, (12376512, 12376512))
eq_(before_timestamp_from_options({'mtime': tmp}), 12376512)
@raises(SeedConfigurationError)
def test_from_mtime_missing_file(self):
before_timestamp_from_options({'mtime': '/tmp/does-not-exist-at-all,really'})
def test_from_empty(self):
assert_almost_equal(
before_timestamp_from_options({}),
time.time(), -1
)
def test_from_delta(self):
assert_almost_equal(
before_timestamp_from_options({'minutes': 15}) + 60 * 15,
time.time(), -1
)
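# --- Illustrative sketch (not part of the test suite) ------------------------
# How the pieces exercised above can be combined to make a seeding run
# resumable: a ProgressStore persists the last position and a SeedProgress
# built from it lets the TileWalker skip already-seeded tiles, as in
# test_seed_full_bbox_continue. Passing seed_progress=None for a first run is
# assumed to behave like omitting the argument; the store path and key are
# placeholders.
def resume_walk(task, seed_pool, store_path, key):
    store = ProgressStore(store_path)
    store.load()
    old_progress = store.get(key)  # e.g. [(0, 1), (0, 2)], or None on a first run
    seed_progress = SeedProgress(old_progress) if old_progress else None
    walker = TileWalker(task, seed_pool, handle_uncached=True,
                        seed_progress=seed_progress)
    walker.walk()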
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'WanphyAlarmRepStatusEnum' : _MetaInfoEnum('WanphyAlarmRepStatusEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper',
{
'disable':'DISABLE',
'enable':'ENABLE',
}, 'Cisco-IOS-XR-wanphy-ui-oper', _yang_ns._namespaces['Cisco-IOS-XR-wanphy-ui-oper']),
'WanphyModeInfoEnum' : _MetaInfoEnum('WanphyModeInfoEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper',
{
'lan':'LAN',
'wan':'WAN',
}, 'Cisco-IOS-XR-wanphy-ui-oper', _yang_ns._namespaces['Cisco-IOS-XR-wanphy-ui-oper']),
'Wanphy.Controllers.Controller.Info' : {
'meta_info' : _MetaInfoClass('Wanphy.Controllers.Controller.Info',
False,
[
_MetaInfoClassMember('admin-mode', REFERENCE_ENUM_CLASS, 'WanphyModeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper', 'WanphyModeInfoEnum',
[], [],
''' Configuration Mode
''',
'admin_mode',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('line-ais', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Line AIS
''',
'line_ais',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('line-bip', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Line BIP(B2)
''',
'line_bip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('line-febe', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Line FEBE
''',
'line_febe',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('line-rdi', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Line RDI
''',
'line_rdi',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('operational-mode', REFERENCE_ENUM_CLASS, 'WanphyModeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper', 'WanphyModeInfoEnum',
[], [],
''' Operational Mode
''',
'operational_mode',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-ais', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Path AIS
''',
'path_ais',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-bip', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Path BIP(B3)
''',
'path_bip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-febe', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Path FEBE
''',
'path_febe',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-lop', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Path LOP
''',
'path_lop',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-newptr', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Path NEWPTR
''',
'path_newptr',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-nse', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Path NSE
''',
'path_nse',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-pse', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Path PSE
''',
'path_pse',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('path-rdi', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Path RDI
''',
'path_rdi',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('port-state', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Port State
''',
'port_state',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx0', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx0
''',
'register_j1_rx0',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx1', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx1
''',
'register_j1_rx1',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx2', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx2
''',
'register_j1_rx2',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx3', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx3
''',
'register_j1_rx3',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx4', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx4
''',
'register_j1_rx4',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx5', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx5
''',
'register_j1_rx5',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx6', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx6
''',
'register_j1_rx6',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-j1-rx7', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register J1-Rx7
''',
'register_j1_rx7',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-l-bip', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register L_BIP
''',
'register_l_bip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-l-fe-bip', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register L_FE_BIP
''',
'register_l_fe_bip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-p-bec', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register P_BEC
''',
'register_p_bec',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-p-febe', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register P_FEBE
''',
'register_p_febe',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('register-s-bip', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Register S_BIP
''',
'register_s_bip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('remote-ip', ATTRIBUTE, 'str' , None, None,
[], [],
''' Remote IP Address
''',
'remote_ip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('sd-ber-report', REFERENCE_ENUM_CLASS, 'WanphyAlarmRepStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper', 'WanphyAlarmRepStatusEnum',
[], [],
''' SD_BER Report
''',
'sd_ber_report',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('sd-ber-threshold', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' BER thresholds: SD. Value 'd' in 10e-%d
''',
'sd_ber_threshold',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('section-bip', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Section BIP(B1)
''',
'section_bip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('section-lof', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Section LOF
''',
'section_lof',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('section-los', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Section LOS
''',
'section_los',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('sf-ber-report', REFERENCE_ENUM_CLASS, 'WanphyAlarmRepStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper', 'WanphyAlarmRepStatusEnum',
[], [],
''' SF_BER Report
''',
'sf_ber_report',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('sf-ber-threshold', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' BER thresholds: SF. Value 'd' in 10e-%d
''',
'sf_ber_threshold',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-feaisp', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' WIS Alarms FEAISP
''',
'wis_alarms_feaisp',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-felcdp', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' WIS Alarms FELCDP
''',
'wis_alarms_felcdp',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-lfebip', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' WIS Alarms LFEBIP
''',
'wis_alarms_lfebip',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-pbec', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' WIS Alarms PBEC
''',
'wis_alarms_pbec',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-plcd', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' WIS Alarms PLCD
''',
'wis_alarms_plcd',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-plmp', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' WIS Alarms PLMP
''',
'wis_alarms_plmp',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-ser', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' WIS Alarms SER
''',
'wis_alarms_ser',
'Cisco-IOS-XR-wanphy-ui-oper', False),
_MetaInfoClassMember('wis-alarms-wlos', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' WIS Alarms WLOS
''',
'wis_alarms_wlos',
'Cisco-IOS-XR-wanphy-ui-oper', False),
],
'Cisco-IOS-XR-wanphy-ui-oper',
'info',
_yang_ns._namespaces['Cisco-IOS-XR-wanphy-ui-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper'
),
},
'Wanphy.Controllers.Controller' : {
'meta_info' : _MetaInfoClass('Wanphy.Controllers.Controller',
False,
[
_MetaInfoClassMember('controller-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Controller name
''',
'controller_name',
'Cisco-IOS-XR-wanphy-ui-oper', True),
_MetaInfoClassMember('info', REFERENCE_CLASS, 'Info' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper', 'Wanphy.Controllers.Controller.Info',
[], [],
''' WANPHY controller operational data
''',
'info',
'Cisco-IOS-XR-wanphy-ui-oper', False),
],
'Cisco-IOS-XR-wanphy-ui-oper',
'controller',
_yang_ns._namespaces['Cisco-IOS-XR-wanphy-ui-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper'
),
},
'Wanphy.Controllers' : {
'meta_info' : _MetaInfoClass('Wanphy.Controllers',
False,
[
_MetaInfoClassMember('controller', REFERENCE_LIST, 'Controller' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper', 'Wanphy.Controllers.Controller',
[], [],
''' WANPHY controller operational data
''',
'controller',
'Cisco-IOS-XR-wanphy-ui-oper', False),
],
'Cisco-IOS-XR-wanphy-ui-oper',
'controllers',
_yang_ns._namespaces['Cisco-IOS-XR-wanphy-ui-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper'
),
},
'Wanphy' : {
'meta_info' : _MetaInfoClass('Wanphy',
False,
[
_MetaInfoClassMember('controllers', REFERENCE_CLASS, 'Controllers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper', 'Wanphy.Controllers',
[], [],
''' All WANPHY controller operational data
''',
'controllers',
'Cisco-IOS-XR-wanphy-ui-oper', False),
],
'Cisco-IOS-XR-wanphy-ui-oper',
'wanphy',
_yang_ns._namespaces['Cisco-IOS-XR-wanphy-ui-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_wanphy_ui_oper'
),
},
}
_meta_table['Wanphy.Controllers.Controller.Info']['meta_info'].parent =_meta_table['Wanphy.Controllers.Controller']['meta_info']
_meta_table['Wanphy.Controllers.Controller']['meta_info'].parent =_meta_table['Wanphy.Controllers']['meta_info']
_meta_table['Wanphy.Controllers']['meta_info'].parent =_meta_table['Wanphy']['meta_info']
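# --- Illustrative helper (not part of the generated bindings) ----------------
# The parent assignments above link each meta_info entry to its container.
# This sketch walks that chain from any entry up to the root; it relies only
# on the 'parent' attribute set above and tolerates entries without one.
def _meta_parent_chain(entry_name, meta_table=_meta_table):
    chain = []
    info = meta_table[entry_name]['meta_info']
    while info is not None:
        chain.append(info)
        info = getattr(info, 'parent', None)
    return chain
# e.g. _meta_parent_chain('Wanphy.Controllers.Controller.Info') yields the
# Info, Controller, Controllers and Wanphy meta entries, in that order.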
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import gc
import django
from django.db import transaction
django.setup()
from basics.models import *
from commons import pos_freq_dic
def create_file_of_general_word_freqs():
l_columns = ['str_adjectives', 'str_nouns', 'str_verbs', 'str_partizip_i', 'str_other_words', 'str_persons', 'str_word_pairs']
pos_dicts = RegularStatement.objects.values('str_adjectives', 'str_nouns', 'str_verbs', 'str_partizip_i', 'str_other_words', 'str_persons', 'str_word_pairs')
no_stmts = RegularStatement.objects.all().count() + 0.0
l_pos_freq_dicts = {}, {}, {}, {}, {}, {}, {}
no_all_words = 0
l_no_pos_words = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
for pos_dict in pos_dicts:
for index, column in enumerate(l_columns):
l_words = pos_dict[column].split('|')
for word in l_words:
if word:
l_no_pos_words[index] += 1
no_all_words += 1
if word in l_pos_freq_dicts[index]:
l_pos_freq_dicts[index][word] += 1
else:
l_pos_freq_dicts[index][word] = 1
# print l_no_pos_words
path = os.getcwd() + '/data'
for index, pos_freq_dict in enumerate(l_pos_freq_dicts):
l_freq_word = sorted([(v, k) for k, v in pos_freq_dict.iteritems()])[::-1]
#print l_freq_word
file_path = path + '/' + l_columns[index] + '.txt'
no_pos_words = l_no_pos_words[index]
with open(file_path, 'w') as f:
for abs_freq, word in l_freq_word:
relative_freq = (abs_freq/no_stmts) * 100
line = '|'.join([word, str(relative_freq)])
f.write(line.encode('utf8'))
f.write('\n')
# print create_file_of_general_word_freqs()
def create_file_of_speaker_function_party_stmts_counts():
path = os.getcwd() + '/data/'
parties = [party for party in Party.objects.all() if party.abbrev]
speakers = [speaker for speaker in Speaker.objects.all()]
d_party_counts = {}
for party in parties:
d_party_counts[party.abbrev] = RegularStatement.objects.filter(speaker__party=party).count()
file_path = path + 'party_stmt_count' + '.txt'
with open(file_path, 'w') as f:
for party, count in d_party_counts.iteritems():
line = '|'.join([party, str(count)])
f.write(line.encode('utf8'))
f.write('\n')
d_speaker_counts = {}
for speaker in speakers:
d_speaker_counts[unicode(speaker)] = RegularStatement.objects.filter(speaker=speaker).count()
file_path = path + 'speaker_stmt_count' + '.txt'
with open(file_path, 'w') as f:
for speaker, count in d_speaker_counts.iteritems():
line = '|'.join([speaker, str(count)])
f.write(line.encode('utf8'))
f.write('\n')
# create_file_of_speaker_function_party_stmts_counts()
def get_l_significant_words(stmts, d_gen_pos_freq):
l_part_of_speech_order = ['Substantive', 'Adjektive', 'Verben', 'Partizip I', 'Andere', 'Wort Paare']
l_d_gen_pos_freq = []
for pos in ['str_nouns', 'str_adjectives', 'str_verbs', 'str_partizip_i', 'str_other_words', 'str_word_pairs']:
l_d_gen_pos_freq.append(d_gen_pos_freq[pos])
d_part_of_speech_freqs = {'Substantive': {}, 'Adjektive': {}, 'Verben': {}, 'Partizip I': {}, 'Andere': {}, 'Wort Paare' : {}}
l_pos_no_words = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
no_stmts = float(stmts.count())
for stmt in stmts:
l_nouns, l_adjectives, l_verbs, l_other_words, l_word_pairs = stmt.str_nouns.split('|'), stmt.str_adjectives.split(
'|'), stmt.str_verbs.split('|'), stmt.str_other_words.split('|'), stmt.str_word_pairs.split('|')
if stmt.str_partizip_i:
l_partizip_i = stmt.str_partizip_i.split('|')
else:
l_partizip_i = []
d_l_words = {'Substantive': l_nouns, 'Adjektive': l_adjectives, 'Verben': l_verbs, 'Partizip I': l_partizip_i,
'Andere': l_other_words, 'Wort Paare': l_word_pairs}
for index, part_of_speech in enumerate(l_part_of_speech_order):
l_words = d_l_words[part_of_speech]
for word in l_words:
l_pos_no_words[index] += 1
if word in d_part_of_speech_freqs[part_of_speech]:
d_part_of_speech_freqs[part_of_speech][word] += 1
else:
d_part_of_speech_freqs[part_of_speech][word] = 1
l_significant_words = []
for index, part_of_speech in enumerate(l_part_of_speech_order):
pos_no_words = l_pos_no_words[index]
d_gen_pos_freq = l_d_gen_pos_freq[index]
#print d_gen_pos_freq
# print(d_part_of_speech_freqs[part_of_speech])
l_part_of_speech_freq = sorted(
[(v, k) for k, v in d_part_of_speech_freqs[part_of_speech].iteritems()])[::-1]
# print l_part_of_speech_freq
l_part_of_speech_freq = [(round(v/no_stmts*100, 1), v, k) for v, k in l_part_of_speech_freq if k]
# print l_part_of_speech_freq
# l_part_of_speech_freq = [(round(rel_v, 1), v, k) for rel_v, v, k in l_part_of_speech_freq if rel_v > d_gen_pos_freq[k] *2]
counter = 0
for index_2, (rel_freq, abs_freq, word) in enumerate(l_part_of_speech_freq):
gen_word_pos_freq = d_gen_pos_freq[word]
if counter > 99:
break
if rel_freq > 2 * gen_word_pos_freq:
counter += 1
if abs_freq > 10:
# if rel_freq > 4 * gen_word_pos_freq:
# word = ''.join(['<strong>', word, '</strong>'])
l_significant_words.append((word))
# print l_part_of_speech_freq
return l_significant_words
def create_l_word_maps():
l_part_of_speechs = ['str_nouns', 'str_adjectives', 'str_verbs', 'str_partizip_i', 'str_other_words', 'str_word_pairs']
d_word_l_stmt_ids = {}
for leg_period in xrange(12, 19):
stmts = RegularStatement.objects.filter(**{'document__legislative_period': leg_period})
print leg_period
for stmt in stmts:
stmt_id = getattr(stmt, 'pk')
str_all_words = '|'.join([getattr(stmt, part_of_speech) for part_of_speech in l_part_of_speechs])
l_all_words = str_all_words.split('|')
for word in l_all_words:
try:
d_word_l_stmt_ids[word].append(stmt_id)
except KeyError:
d_word_l_stmt_ids[word] = [stmt_id]
WordMap.objects.all().delete()
print 'deleted word_maps_object'
l_word_maps = []
print 'created d_word_l_stmt_ids'
for word, l_ids in d_word_l_stmt_ids.iteritems():
str_pks = '|'.join([str(i) for i in l_ids])
no_words_stmts = len(l_ids)
l_word_maps.append(
WordMap(
word=word,
str_stmt_pks=str_pks,
no_stmts=no_words_stmts)
)
print 'created l_word_maps'
return l_word_maps
def map_words_to_texts():
l_word_maps = create_l_word_maps()
start_index = 0
while True:
temp_l_word_maps = l_word_maps[start_index : start_index + 1000]
start_index += 1000
if not temp_l_word_maps:
break
print start_index
WordMap.objects.bulk_create(temp_l_word_maps)
#WordMap.objects.get_or_create(
# word=word,
# str_stmt_pks=str_pks,
# no_stmts=no_words_stmts
#)
#map_words_to_texts()
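# --- Illustrative companion sketch (not part of the original script) ---------
# The files written by create_file_of_general_word_freqs() contain one
# 'word|relative_freq' pair per line, encoded as UTF-8. This reader turns such
# a file back into a {word: relative_freq} dict; the file path argument is a
# placeholder.
def read_pos_freq_file(file_path):
    d_freq = {}
    with open(file_path) as f:
        for line in f:
            line = line.decode('utf8').strip()
            if not line:
                continue
            word, str_freq = line.rsplit('|', 1)
            d_freq[word] = float(str_freq)
    return d_freq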
|
|
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Cloud Spanner Database."""
import copy
import functools
import re
import threading
from google.api_core.gapic_v1 import client_info
import google.auth.credentials
from google.protobuf.struct_pb2 import Struct
from google.cloud.exceptions import NotFound
import six
# pylint: disable=ungrouped-imports
from google.cloud.spanner_v1 import __version__
from google.cloud.spanner_v1._helpers import _make_value_pb
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
from google.cloud.spanner_v1.batch import Batch
from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1.pool import BurstyPool
from google.cloud.spanner_v1.pool import SessionCheckout
from google.cloud.spanner_v1.session import Session
from google.cloud.spanner_v1.snapshot import _restart_on_unavailable
from google.cloud.spanner_v1.snapshot import Snapshot
from google.cloud.spanner_v1.streamed import StreamedResultSet
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionSelector,
TransactionOptions,
)
# pylint: enable=ungrouped-imports
_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__)
SPANNER_DATA_SCOPE = "https://www.googleapis.com/auth/spanner.data"
_DATABASE_NAME_RE = re.compile(
r"^projects/(?P<project>[^/]+)/"
r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/"
r"databases/(?P<database_id>[a-z][a-z0-9_\-]*[a-z0-9])$"
)
class Database(object):
"""Representation of a Cloud Spanner Database.
We can use a :class:`Database` to:
* :meth:`create` the database
* :meth:`reload` the database
* :meth:`update` the database
* :meth:`drop` the database
:type database_id: str
:param database_id: The ID of the database.
:type instance: :class:`~google.cloud.spanner_v1.instance.Instance`
:param instance: The instance that owns the database.
:type ddl_statements: list of string
:param ddl_statements: (Optional) DDL statements, excluding the
CREATE DATABASE statement.
:type pool: concrete subclass of
:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database. If not
passed, the database will construct an instance of
:class:`~google.cloud.spanner_v1.pool.BurstyPool`.
"""
_spanner_api = None
def __init__(self, database_id, instance, ddl_statements=(), pool=None):
self.database_id = database_id
self._instance = instance
self._ddl_statements = _check_ddl_statements(ddl_statements)
self._local = threading.local()
if pool is None:
pool = BurstyPool()
self._pool = pool
pool.bind(self)
@classmethod
def from_pb(cls, database_pb, instance, pool=None):
"""Creates an instance of this class from a protobuf.
:type database_pb:
:class:`google.spanner.v2.spanner_instance_admin_pb2.Instance`
:param database_pb: An instance protobuf object.
:type instance: :class:`~google.cloud.spanner_v1.instance.Instance`
:param instance: The instance that owns the database.
:type pool: concrete subclass of
:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database.
:rtype: :class:`Database`
:returns: The database parsed from the protobuf response.
:raises ValueError:
if the instance name does not match the expected format
or if the parsed project ID does not match the project ID
on the instance's client, or if the parsed instance ID does
not match the instance's ID.
"""
match = _DATABASE_NAME_RE.match(database_pb.name)
if match is None:
raise ValueError(
"Database protobuf name was not in the " "expected format.",
database_pb.name,
)
if match.group("project") != instance._client.project:
raise ValueError(
"Project ID on database does not match the "
"project ID on the instance's client"
)
instance_id = match.group("instance_id")
if instance_id != instance.instance_id:
raise ValueError(
"Instance ID on database does not match the "
"Instance ID on the instance"
)
database_id = match.group("database_id")
return cls(database_id, instance, pool=pool)
@property
def name(self):
"""Database name used in requests.
.. note::
This property will not change if ``database_id`` does not, but the
return value is not cached.
The database name is of the form
``"projects/../instances/../databases/{database_id}"``
:rtype: str
:returns: The database name.
"""
return self._instance.name + "/databases/" + self.database_id
@property
def ddl_statements(self):
"""DDL Statements used to define database schema.
See
cloud.google.com/spanner/docs/data-definition-language
:rtype: sequence of string
:returns: the statements
"""
return self._ddl_statements
@property
def spanner_api(self):
"""Helper for session-related API calls."""
if self._spanner_api is None:
credentials = self._instance._client.credentials
if isinstance(credentials, google.auth.credentials.Scoped):
credentials = credentials.with_scopes((SPANNER_DATA_SCOPE,))
self._spanner_api = SpannerClient(
credentials=credentials, client_info=_CLIENT_INFO
)
return self._spanner_api
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
other.database_id == self.database_id and other._instance == self._instance
)
def __ne__(self, other):
return not self == other
def create(self):
"""Create this database within its instance
Includes any configured schema assigned to :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase
:rtype: :class:`~google.api_core.operation.Operation`
:returns: a future used to poll the status of the create request
:raises Conflict: if the database already exists
:raises NotFound: if the instance owning the database does not exist
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
db_name = self.database_id
if "-" in db_name:
db_name = "`%s`" % (db_name,)
future = api.create_database(
parent=self._instance.name,
create_statement="CREATE DATABASE %s" % (db_name,),
extra_statements=list(self._ddl_statements),
metadata=metadata,
)
return future
def exists(self):
"""Test whether this database exists.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL
:rtype: bool
:returns: True if the database exists, else False.
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
try:
api.get_database_ddl(self.name, metadata=metadata)
except NotFound:
return False
return True
def reload(self):
"""Reload this database.
Refresh any configured schema into :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL
:raises NotFound: if the database does not exist
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
response = api.get_database_ddl(self.name, metadata=metadata)
self._ddl_statements = tuple(response.statements)
def update_ddl(self, ddl_statements, operation_id=''):
"""Update DDL for this database.
Apply any configured schema from :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase
:type ddl_statements: Sequence[str]
:param ddl_statements: a list of DDL statements to use on this database
:type operation_id: str
:param operation_id: (optional) a string ID for the long-running operation
:rtype: :class:`google.api_core.operation.Operation`
:returns: an operation instance
:raises NotFound: if the database does not exist
"""
client = self._instance._client
api = client.database_admin_api
metadata = _metadata_with_prefix(self.name)
future = api.update_database_ddl(
self.name, ddl_statements, operation_id=operation_id, metadata=metadata
)
return future
def drop(self):
"""Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
api.drop_database(self.name, metadata=metadata)
def execute_partitioned_dml(self, dml, params=None, param_types=None):
"""Execute a partitionable DML statement.
:type dml: str
:param dml: DML statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: int
:returns: Count of rows affected by the DML statement.
"""
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
params_pb = Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
params_pb = None
api = self.spanner_api
txn_options = TransactionOptions(
partitioned_dml=TransactionOptions.PartitionedDml()
)
metadata = _metadata_with_prefix(self.name)
with SessionCheckout(self._pool) as session:
txn = api.begin_transaction(session.name, txn_options, metadata=metadata)
txn_selector = TransactionSelector(id=txn.id)
restart = functools.partial(
api.execute_streaming_sql,
session.name,
dml,
transaction=txn_selector,
params=params_pb,
param_types=param_types,
metadata=metadata,
)
iterator = _restart_on_unavailable(restart)
result_set = StreamedResultSet(iterator)
list(result_set) # consume all partials
return result_set.stats.row_count_lower_bound
def session(self, labels=None):
"""Factory to create a session for this database.
:type labels: dict (str -> str) or None
:param labels: (Optional) user-assigned labels for the session.
:rtype: :class:`~google.cloud.spanner_v1.session.Session`
:returns: a session bound to this database.
"""
return Session(self, labels=labels)
def snapshot(self, **kw):
"""Return an object which wraps a snapshot.
The wrapper *must* be used as a context manager, with the snapshot
as the value returned by the wrapper.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly
:type kw: dict
:param kw:
Passed through to
:class:`~google.cloud.spanner_v1.snapshot.Snapshot` constructor.
:rtype: :class:`~google.cloud.spanner_v1.database.SnapshotCheckout`
:returns: new wrapper
"""
return SnapshotCheckout(self, **kw)
def batch(self):
"""Return an object which wraps a batch.
The wrapper *must* be used as a context manager, with the batch
as the value returned by the wrapper.
:rtype: :class:`~google.cloud.spanner_v1.database.BatchCheckout`
:returns: new wrapper
"""
return BatchCheckout(self)
def batch_snapshot(self, read_timestamp=None, exact_staleness=None):
"""Return an object which wraps a batch read / query.
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
:rtype: :class:`~google.cloud.spanner_v1.database.BatchSnapshot`
:returns: new wrapper
"""
return BatchSnapshot(
self, read_timestamp=read_timestamp, exact_staleness=exact_staleness
)
def run_in_transaction(self, func, *args, **kw):
"""Perform a unit of work in a transaction, retrying on abort.
:type func: callable
:param func: takes a required positional argument, the transaction,
and additional positional / keyword arguments as supplied
by the caller.
:type args: tuple
:param args: additional positional arguments to be passed to ``func``.
:type kw: dict
:param kw: optional keyword arguments to be passed to ``func``.
If passed, "timeout_secs" will be removed and used to
override the default timeout.
:rtype: :class:`datetime.datetime`
:returns: timestamp of committed transaction
"""
# Sanity check: Is there a transaction already running?
# If there is, then raise a red flag. Otherwise, mark that this one
# is running.
if getattr(self._local, "transaction_running", False):
raise RuntimeError("Spanner does not support nested transactions.")
self._local.transaction_running = True
# Check out a session and run the function in a transaction; once
# done, flip the sanity check bit back.
try:
with SessionCheckout(self._pool) as session:
return session.run_in_transaction(func, *args, **kw)
finally:
self._local.transaction_running = False
class BatchCheckout(object):
"""Context manager for using a batch from a database.
Inside the context manager, checks out a session from the database,
creates a batch from it, making the batch available.
Caller must *not* use the batch to perform API requests outside the scope
of the context manager.
:type database: :class:`~google.cloud.spanner.database.Database`
:param database: database to use
"""
def __init__(self, database):
self._database = database
self._session = self._batch = None
def __enter__(self):
"""Begin ``with`` block."""
session = self._session = self._database._pool.get()
batch = self._batch = Batch(session)
return batch
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
try:
if exc_type is None:
self._batch.commit()
finally:
self._database._pool.put(self._session)
class SnapshotCheckout(object):
"""Context manager for using a snapshot from a database.
Inside the context manager, checks out a session from the database,
creates a snapshot from it, making the snapshot available.
Caller must *not* use the snapshot to perform API requests outside the
scope of the context manager.
:type database: :class:`~google.cloud.spanner.database.Database`
:param database: database to use
:type kw: dict
:param kw:
Passed through to
:class:`~google.cloud.spanner_v1.snapshot.Snapshot` constructor.
"""
def __init__(self, database, **kw):
self._database = database
self._session = None
self._kw = kw
def __enter__(self):
"""Begin ``with`` block."""
session = self._session = self._database._pool.get()
return Snapshot(session, **self._kw)
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
self._database._pool.put(self._session)
class BatchSnapshot(object):
"""Wrapper for generating and processing read / query batches.
:type database: :class:`~google.cloud.spanner.database.Database`
:param database: database to use
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
"""
def __init__(self, database, read_timestamp=None, exact_staleness=None):
self._database = database
self._session = None
self._snapshot = None
self._read_timestamp = read_timestamp
self._exact_staleness = exact_staleness
@classmethod
def from_dict(cls, database, mapping):
"""Reconstruct an instance from a mapping.
:type database: :class:`~google.cloud.spanner.database.Database`
:param database: database to use
:type mapping: mapping
:param mapping: serialized state of the instance
:rtype: :class:`BatchSnapshot`
"""
instance = cls(database)
session = instance._session = database.session()
session._session_id = mapping["session_id"]
snapshot = instance._snapshot = session.snapshot()
snapshot._transaction_id = mapping["transaction_id"]
return instance
def to_dict(self):
"""Return state as a dictionary.
Result can be used to serialize the instance and reconstitute
it later using :meth:`from_dict`.
:rtype: dict
"""
session = self._get_session()
snapshot = self._get_snapshot()
return {
"session_id": session._session_id,
"transaction_id": snapshot._transaction_id,
}
def _get_session(self):
"""Create session as needed.
.. note::
Caller is responsible for cleaning up the session after
all partitions have been processed.
"""
if self._session is None:
session = self._session = self._database.session()
session.create()
return self._session
def _get_snapshot(self):
"""Create snapshot if needed."""
if self._snapshot is None:
self._snapshot = self._get_session().snapshot(
read_timestamp=self._read_timestamp,
exact_staleness=self._exact_staleness,
multi_use=True,
)
self._snapshot.begin()
return self._snapshot
def read(self, *args, **kw):
"""Convenience method: perform read operation via snapshot.
See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.read`.
"""
return self._get_snapshot().read(*args, **kw)
def execute_sql(self, *args, **kw):
"""Convenience method: perform query operation via snapshot.
See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.execute_sql`.
"""
return self._get_snapshot().execute_sql(*args, **kw)
def generate_read_batches(
self,
table,
columns,
keyset,
index="",
partition_size_bytes=None,
max_partitions=None,
):
"""Start a partitioned batch read operation.
Uses the ``PartitionRead`` API request to initiate the partitioned
read. Returns a list of batch information needed to perform the
actual reads.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of dict
:returns:
mappings of information used to perform actual partitioned reads via
:meth:`process_read_batch`.
"""
partitions = self._get_snapshot().partition_read(
table=table,
columns=columns,
keyset=keyset,
index=index,
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
)
read_info = {
"table": table,
"columns": columns,
"keyset": keyset._to_dict(),
"index": index,
}
for partition in partitions:
yield {"partition": partition, "read": read_info.copy()}
def process_read_batch(self, batch):
"""Process a single, partitioned read.
:type batch: mapping
:param batch:
one of the mappings returned from an earlier call to
:meth:`generate_read_batches`.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
kwargs = copy.deepcopy(batch["read"])
keyset_dict = kwargs.pop("keyset")
kwargs["keyset"] = KeySet._from_dict(keyset_dict)
return self._get_snapshot().read(partition=batch["partition"], **kwargs)
def generate_query_batches(
self,
sql,
params=None,
param_types=None,
partition_size_bytes=None,
max_partitions=None,
):
"""Start a partitioned query operation.
Uses the ``PartitionQuery`` API request to start a partitioned
query operation. Returns a list of batch information needed to
perform the actual queries.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of dict
:returns:
mappings of information used to perform actual partitioned queries via
:meth:`process_query_batch`.
"""
partitions = self._get_snapshot().partition_query(
sql=sql,
params=params,
param_types=param_types,
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
)
query_info = {"sql": sql}
if params:
query_info["params"] = params
query_info["param_types"] = param_types
for partition in partitions:
yield {"partition": partition, "query": query_info}
def process_query_batch(self, batch):
"""Process a single, partitioned query.
:type batch: mapping
:param batch:
one of the mappings returned from an earlier call to
:meth:`generate_query_batches`.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
return self._get_snapshot().execute_sql(
partition=batch["partition"], **batch["query"]
)
def process(self, batch):
"""Process a single, partitioned query or read.
:type batch: mapping
:param batch:
one of the mappings returned from an earlier call to
:meth:`generate_query_batches` or :meth:`generate_read_batches`.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
:raises ValueError: if batch does not contain either 'read' or 'query'
"""
if "query" in batch:
return self.process_query_batch(batch)
if "read" in batch:
return self.process_read_batch(batch)
raise ValueError("Invalid batch")
def close(self):
"""Clean up underlying session.
.. note::
If the transaction has been shared across multiple machines,
calling this on any machine would invalidate the transaction
everywhere. Ideally this would be called when data has been read
from all the partitions.
"""
if self._session is not None:
self._session.delete()
def _check_ddl_statements(value):
"""Validate DDL Statements used to define database schema.
See
https://cloud.google.com/spanner/docs/data-definition-language
:type value: list of string
:param value: DDL statements, excluding the 'CREATE DATABASE' statement
:rtype: tuple
:returns: tuple of validated DDL statement strings.
:raises ValueError:
if elements in ``value`` are not strings, or if ``value`` contains
a ``CREATE DATABASE`` statement.
"""
if not all(isinstance(line, six.string_types) for line in value):
raise ValueError("Pass a list of strings")
if any("create database" in line.lower() for line in value):
raise ValueError("Do not pass a 'CREATE DATABASE' statement")
return tuple(value)
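# --- Illustrative usage sketch (not part of the library module) --------------
# How the wrappers above are typically combined: mutate rows inside a retried
# transaction, then read them back through a snapshot, and distribute a
# partitioned read via BatchSnapshot. `instance` is assumed to be an
# already-configured spanner_v1 Instance; the table and column names are
# placeholders.
def _example_usage(instance):
    database = Database('example-db', instance)

    def _work(transaction):
        # the callable receives the transaction as its first positional argument
        transaction.insert(
            'players',                      # hypothetical table
            columns=('id', 'score'),
            values=[(1, 100)],
        )

    database.run_in_transaction(_work)

    with database.snapshot() as snapshot:
        rows = list(snapshot.execute_sql('SELECT id, score FROM players'))
    return rows


def _example_partitioned_read(database):
    # Distribute a partitioned read: serialise the snapshot state, hand the
    # dict and the generated batches to workers, then clean up the session.
    snapshot = database.batch_snapshot()
    state = snapshot.to_dict()
    for batch in snapshot.generate_read_batches(
            'players', ('id', 'score'), KeySet(all_=True)):
        # on a worker: BatchSnapshot.from_dict(database, state).process(batch)
        for row in snapshot.process(batch):
            pass
    snapshot.close()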
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from drawsettings import DrawSettings
class ROIDialog(QDialog, DrawSettings):
"""
ROI toolset.
"""
last_target_name = "New Volume"
voxel_edit_enabled = pyqtSignal()
roi_edit_enabled = pyqtSignal()
roi_batch_enabled = pyqtSignal()
def __init__(self, model, label_config_center, parent=None):
super(ROIDialog, self).__init__(parent)
self.setWindowModality(Qt.NonModal)
self.setWindowFlags(Qt.Tool | \
Qt.CustomizeWindowHint | \
Qt.WindowTitleHint)
self._model = model
self.selected_rois = []
self._last_target_update_enable = True
self._label_config_center = label_config_center
self._init_gui()
self._create_actions()
def _init_gui(self):
self.setWindowModality(Qt.NonModal)
self.setWindowTitle("Edit")
self.voxel_btn = QPushButton("Voxel")
self.ROI_btn = QPushButton("ROI")
self.ROI_batch_btn = QPushButton("ROI Batch")
hlayout = QHBoxLayout()
hlayout.addWidget(self.voxel_btn)
hlayout.addWidget(self.ROI_btn)
hlayout.addWidget(self.ROI_batch_btn)
self.voxel_btn.setEnabled(False)
self.ROI_btn.setFocusPolicy(Qt.NoFocus)
self.voxel_btn.setFocusPolicy(Qt.NoFocus)
self.ROI_batch_btn.setFocusPolicy(Qt.NoFocus)
self.vlayout = QVBoxLayout()
self.vlayout.addLayout(hlayout)
self.vlayout.addWidget(self._label_config_center)
roi_label = QLabel("Selected ROIs")
self.roi_edit = QLineEdit()
self.roi_edit.setReadOnly(True)
self.add_button = QRadioButton("Select")
self.add_button.setChecked(True)
self.del_button = QRadioButton("Deselect")
self.roi_button_group = QButtonGroup()
self.roi_button_group.addButton(self.add_button)
self.roi_button_group.addButton(self.del_button)
action_label = QLabel("Action")
self.action_box = QComboBox()
self.action_box.addItems(['Labeling', 'Copy', 'Split'])
target_label = QLabel("Target")
self.target_box = QComboBox()
self._fill_target_box()
grid_layout = QGridLayout()
grid_layout.addWidget(target_label, 0, 0)
grid_layout.addWidget(self.target_box, 0, 1)
grid_layout.addWidget(roi_label, 1, 0)
grid_layout.addWidget(self.roi_edit, 1, 1)
grid_layout.addWidget(self.add_button, 2, 0)
grid_layout.addWidget(self.del_button, 2, 1)
grid_layout.addWidget(action_label, 3, 0)
grid_layout.addWidget(self.action_box, 3, 1)
hbox_layout = QHBoxLayout()
self.run_button = QPushButton("Run")
hbox_layout.addWidget(self.run_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.ROI_tool_widget = QWidget()
self.ROI_tool_widget.setVisible(False)
self.ROI_tool_widget.setLayout(vbox_layout)
self._label_config_center.size_label.setVisible(True)
self._label_config_center.size_edit.setVisible(True)
self.vlayout.addWidget(self.ROI_tool_widget)
self.setLayout(self.vlayout)
def _create_actions(self):
self.voxel_btn.clicked.connect(self._voxel_clicked)
self.ROI_btn.clicked.connect(self._ROI_clicked)
self.ROI_batch_btn.clicked.connect(self._ROI_batch_clicked)
self._model.rowsInserted.connect(self._fill_target_box)
self._model.rowsMoved.connect(self._fill_target_box)
self._model.rowsRemoved.connect(self._fill_target_box)
self.target_box.currentIndexChanged.connect(
self._update_last_target_name)
self.action_box.currentIndexChanged[QString].connect(
self._update_target_box)
self.run_button.pressed.connect(self._run)
def _voxel_clicked(self):
self._label_config_center.size_label.setVisible(True)
self._label_config_center.size_edit.setVisible(True)
self.ROI_tool_widget.setVisible(False)
self.voxel_edit_enabled.emit()
self.voxel_btn.setEnabled(False)
self.ROI_btn.setEnabled(True)
self.ROI_batch_btn.setEnabled(True)
def _ROI_clicked(self):
self._label_config_center.size_label.setVisible(False)
self._label_config_center.size_edit.setVisible(False)
self.ROI_tool_widget.setVisible(False)
self.roi_edit_enabled.emit()
self.voxel_btn.setEnabled(True)
self.ROI_btn.setEnabled(False)
self.ROI_batch_btn.setEnabled(True)
def _ROI_batch_clicked(self):
self.selected_rois = []
self.roi_edit.setText("")
self._label_config_center.size_label.setVisible(False)
self._label_config_center.size_edit.setVisible(False)
self.ROI_tool_widget.setVisible(True)
self.roi_batch_enabled.emit()
self.voxel_btn.setEnabled(True)
self.ROI_btn.setEnabled(True)
self.ROI_batch_btn.setEnabled(False)
def _fill_target_box(self, a=None, b=None, c=None, d=None, e=None):
self._last_target_update_enable = False
self.target_box.clear()
vol_list = self._model.getItemList()
self.target_box.addItem("New Volume")
self.target_box.addItems(QStringList(vol_list))
last_target_idx = 0
for idx, name in enumerate(vol_list):
if name == ROIDialog.last_target_name:
last_target_idx = idx+1
break
self.target_box.setCurrentIndex(last_target_idx)
self._last_target_update_enable = True
def _update_last_target_name(self):
if self._last_target_update_enable:
ROIDialog.last_target_name = str(self.target_box.currentText())
def _done(self):
self.done(0)
def _update_roi(self, id):
if self.roi_button_group.checkedButton() == self.add_button:
self._add_roi(id)
else:
self._del_roi(id)
roi_txt = ','.join(map(str, self.selected_rois))
self.roi_edit.setText(roi_txt)
def _add_roi(self, id):
if id not in self.selected_rois:
self.selected_rois.append(id)
def _del_roi(self, id):
if id in self.selected_rois:
idx = self.selected_rois.index(id)
del self.selected_rois[idx]
def _update_target_box(self, s):
if str(s) == 'Split':
self.target_box.setDisabled(True)
else:
self.target_box.setEnabled(True)
def is_roi_selection(self):
return True
def _run(self):
if self.target_box.isEnabled() and str(self.target_box.currentText()) == 'New Volume':
self._model.new_image(None, None, 'rainbow')
vol_index = self._model.currentIndex()
vol_data = self._model.data(vol_index, Qt.UserRole + 6)
run_text = str(self.action_box.currentText())
if run_text == 'Labeling':
target_row = self.target_box.currentIndex()
if target_row != 0:
target_row -= 1
if not self._model.get_label_config_center().is_drawing_valid():
QMessageBox.critical(self, "Invalid ROI Drawing Value",
"Please specify an valid drawing value")
else:
coordinates = []
for roi in self.selected_rois:
value = self._model.get_label_config_center().get_drawing_value()
self._model.modify_voxels(None, value, roi, target_row, False)
elif run_text == 'Copy':
target_row = self.target_box.currentIndex()
if target_row != 0:
target_row -= 1
for roi in self.selected_rois:
self._model.modify_voxels(None, roi, roi, target_row, False)
elif run_text == 'Split':
for roi in self.selected_rois:
self._model.new_image(None, None, 'rainbow')
self._model.modify_voxels(None, roi, roi, 0, False)
self.selected_rois = []
self.roi_edit.clear()
def clear_rois(self):
self.selected_rois = []
self.roi_edit.clear()
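# Hedged usage sketch (illustrative only): the dialog expects an application
# model and a label-config widget; 'model' and 'label_config_center' below are
# hypothetical objects supplied by the host application, not defined here.
#
#     app = QApplication(sys.argv)
#     dialog = ROIDialog(model, label_config_center)
#     dialog.roi_edit_enabled.connect(lambda: None)  # react to mode changes
#     dialog.show()
#     app.exec_()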
|
|
import urllib2
import os.path
import sys
import re
default_encoding = sys.getfilesystemencoding()
if default_encoding.lower() == 'ascii':
default_encoding = 'utf-8'
def to_native_string(s):
if type(s) == unicode:
return s.encode(default_encoding)
else:
return s
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def unescape_html(html):
import xml.sax.saxutils
html = xml.sax.saxutils.unescape(html) # Unescape &, <, and > in a string of data.
html = re.sub(r'&#(\d+);', lambda x: unichr(int(x.group(1))), html) # convert '艾' to '\u827e'
return html
def ungzip(s): # process Content-Encoding: gzip
from StringIO import StringIO
import gzip
buff = StringIO(s)
f = gzip.GzipFile(fileobj=buff)
return f.read()
def undeflate(s):
import zlib
return zlib.decompress(s, -zlib.MAX_WBITS)
def get_response(url):
response = urllib2.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
def get_html(url, encoding=None):
content = get_response(url).data
if encoding:
content = content.decode(encoding)
return content
def get_decoded_html(url):
response = get_response(url)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset)
else:
return data
def url_save(url, filepath, bar, refer=None):
headers = {}
if refer:
headers['Referer'] = refer
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
file_size = int(response.headers['content-length'])
assert file_size
if os.path.exists(filepath):
if file_size == os.path.getsize(filepath):
if bar:
bar.done()
print 'Skip %s: file already exists' % os.path.basename(filepath)
return
else:
if bar:
bar.done()
print 'Overwriting', os.path.basename(filepath), '...'
with open(filepath, 'wb') as output:
received = 0
while True:
buffer = response.read(1024*256)
if not buffer:
break
received += len(buffer)
output.write(buffer)
if bar:
bar.update_received(len(buffer))
assert received == file_size == os.path.getsize(filepath), '%s == %s == %s' % (received, file_size, os.path.getsize(filepath))
def url_size(url):
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
response = urllib2.urlopen(request)
size = int(response.headers['content-length'])
return size
def url_size2(url):
size = int(urllib2.urlopen(url).headers['content-length'])
return size
def urls_size(urls):
return sum(map(url_size, urls))
class SimpleProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar_size = 40
percent = self.received*100.0/self.total_size
if percent > 100:
percent = 100.0
bar_rate = 100.0 / bar_size
dots = percent / bar_rate
dots = int(dots)
plus = percent / bar_rate - dots
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '=' * dots + plus
bar = '{0:>3.0f}% [{1:<40}] {2}/{3}'.format(percent, bar, self.current_piece, self.total_pieces)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>3}%[{1:<40}] {2}/{3}'.format('?', '?'*40, self.current_piece, self.total_pieces)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
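# Hedged example (illustrative): characters that are unsafe in file names are
# replaced by '-'.
#
#     >>> escape_file_path('a/b*c?.mp4')
#     'a-b-c-.mp4'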
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True):
assert urls
assert ext in ('flv', 'mp4')
if not total_size:
try:
total_size = urls_size(urls)
except:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
pass
title = to_native_string(title)
title = escape_file_path(title)
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
if total_size:
if os.path.exists(filepath) and os.path.getsize(filepath) >= total_size * 0.9:
print 'Skip %s: file already exists' % filepath
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print 'Downloading %s ...' % filename
url_save(url, filepath, bar, refer=refer)
bar.done()
else:
flvs = []
print 'Downloading %s.%s ...' % (title, ext)
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
flvs.append(filepath)
#print 'Downloading %s [%s/%s]...' % (filename, i+1, len(urls))
bar.update_piece(i+1)
url_save(url, filepath, bar, refer=refer)
bar.done()
if not merge:
return
if ext == 'flv':
from flv_join import concat_flvs
concat_flvs(flvs, os.path.join(output_dir, title+'.flv'))
for flv in flvs:
os.remove(flv)
elif ext == 'mp4':
from mp4_join import concat_mp4s
concat_mp4s(flvs, os.path.join(output_dir, title+'.mp4'))
for flv in flvs:
os.remove(flv)
else:
print "Can't join %s files" % ext
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Play list is not supported for '+name)
return f
def script_main(script_name, download, download_playlist=None):
if download_playlist:
help = 'python %s.py [--playlist] [-c|--create-dir] [--no-merge] url ...' % script_name
short_opts = 'hc'
opts = ['help', 'playlist', 'create-dir', 'no-merge']
else:
help = 'python %s.py [--no-merge] url ...' % script_name
short_opts = 'h'
opts = ['help', 'no-merge']
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError, err:
print help
sys.exit(1)
playlist = False
create_dir = False
merge = True
for o, a in opts:
if o in ('-h', '--help'):
print help
sys.exit()
elif o in ('--playlist',):
playlist = True
elif o in ('-c', '--create-dir'):
create_dir = True
elif o in ('--no-merge',):
merge = False
else:
print help
sys.exit(1)
if not args:
print help
sys.exit(1)
for url in args:
if playlist:
download_playlist(url, create_dir=create_dir, merge=merge)
else:
download(url, merge=merge)
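# Hedged usage sketch: a minimal site-specific script built on the helpers
# above. 'example_download' and its URL handling are hypothetical placeholders;
# a real extractor would parse the page for the actual media URLs.
def example_download(url, merge=True):
    html = get_html(url)
    title = r1(r'<title>(.+?)</title>', html) or 'video'
    video_urls = [url]  # placeholder: pretend the page URL is the media URL
    download_urls(video_urls, title, 'mp4', total_size=None, merge=merge)
if __name__ == '__main__':
    script_main('example', example_download, playlist_not_supported('example'))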
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parent client for calling the Google Cloud Bigtable API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`Client` owns a :class:`.Instance`
* a :class:`.Instance` owns a :class:`Table <gcloud.bigtable.table.Table>`
* a :class:`Table <gcloud.bigtable.table.Table>` owns a
:class:`ColumnFamily <.column_family.ColumnFamily>`
* a :class:`Table <gcloud.bigtable.table.Table>` owns a :class:`Row <.row.Row>`
(and all the cells in the row)
"""
from pkg_resources import get_distribution
from grpc.beta import implementations
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as instance_admin_v2_pb2)
# V1 table admin service
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
# V1 data service
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
operations_grpc_pb2 as operations_grpc_v2_pb2)
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
from gcloud.bigtable.instance import Instance
from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID
from gcloud.client import _ClientFactoryMixin
from gcloud.client import _ClientProjectMixin
from gcloud.credentials import get_credentials
TABLE_STUB_FACTORY_V2 = (
table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub)
TABLE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
"""Table Admin API request host."""
TABLE_ADMIN_PORT_V2 = 443
"""Table Admin API request port."""
INSTANCE_STUB_FACTORY_V2 = (
instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub)
INSTANCE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
"""Cluster Admin API request host."""
INSTANCE_ADMIN_PORT_V2 = 443
"""Cluster Admin API request port."""
DATA_STUB_FACTORY_V2 = data_v2_pb2.beta_create_Bigtable_stub
DATA_API_HOST_V2 = 'bigtable.googleapis.com'
"""Data API request host."""
DATA_API_PORT_V2 = 443
"""Data API request port."""
OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v2_pb2.beta_create_Operations_stub
OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2
OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2
ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin'
"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data'
"""Scope for reading and writing table data."""
READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly'
"""Scope for reading table data."""
DEFAULT_TIMEOUT_SECONDS = 10
"""The default timeout to use for API requests."""
DEFAULT_USER_AGENT = 'gcloud-python/{0}'.format(
get_distribution('gcloud').version)
"""The default user agent for API requests."""
class Client(_ClientFactoryMixin, _ClientProjectMixin):
"""Client for interacting with Google Cloud Bigtable API.
.. note::
Since the Cloud Bigtable API requires the gRPC transport, no
``http`` argument is accepted by this class.
:type project: :class:`str` or :func:`unicode <unicode>`
:param project: (Optional) The ID of the project which owns the
instances, tables and data. If not provided, will
attempt to determine from the environment.
:type credentials:
:class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>` or
:data:`NoneType <types.NoneType>`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not provided, defaults to the Google
Application Default Credentials.
:type read_only: bool
:param read_only: (Optional) Boolean indicating if the data scope should be
for reading only (or for writing as well). Defaults to
:data:`False`.
:type admin: bool
:param admin: (Optional) Boolean indicating if the client will be used to
interact with the Instance Admin or Table Admin APIs. This
requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`.
:type user_agent: str
:param user_agent: (Optional) The user agent to be used with API requests.
Defaults to :const:`DEFAULT_USER_AGENT`.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out. If not
passed, defaults to
:const:`DEFAULT_TIMEOUT_SECONDS`.
:raises: :class:`ValueError <exceptions.ValueError>` if both ``read_only``
and ``admin`` are :data:`True`
"""
def __init__(self, project=None, credentials=None,
read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT,
timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
_ClientProjectMixin.__init__(self, project=project)
if credentials is None:
credentials = get_credentials()
if read_only and admin:
raise ValueError('A read-only client cannot also perform '
'administrative actions.')
scopes = []
if read_only:
scopes.append(READ_ONLY_SCOPE)
else:
scopes.append(DATA_SCOPE)
if admin:
scopes.append(ADMIN_SCOPE)
self._admin = bool(admin)
try:
credentials = credentials.create_scoped(scopes)
except AttributeError:
pass
self._credentials = credentials
self.user_agent = user_agent
self.timeout_seconds = timeout_seconds
# These will be set in start().
self._data_stub_internal = None
self._instance_stub_internal = None
self._operations_stub_internal = None
self._table_stub_internal = None
def copy(self):
"""Make a copy of this client.
Copies the local data stored as simple types but does not copy the
current state of any open connections with the Cloud Bigtable API.
:rtype: :class:`.Client`
:returns: A copy of the current client.
"""
credentials = self._credentials
copied_creds = credentials.create_scoped(credentials.scopes)
return self.__class__(
self.project,
copied_creds,
READ_ONLY_SCOPE in copied_creds.scopes,
self._admin,
self.user_agent,
self.timeout_seconds,
)
@property
def credentials(self):
"""Getter for client's credentials.
:rtype:
:class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>`
:returns: The credentials stored on the client.
"""
return self._credentials
@property
def project_name(self):
"""Project name to be used with Instance Admin API.
.. note::
This property will not change if ``project`` does not, but the
return value is not cached.
The project name is of the form
``"projects/{project}"``
:rtype: str
:returns: The project name to be used with the Cloud Bigtable Admin
API RPC service.
"""
return 'projects/' + self.project
@property
def _data_stub(self):
"""Getter for the gRPC stub used for the Data API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client has not been :meth:`start`-ed.
"""
if self._data_stub_internal is None:
raise ValueError('Client has not been started.')
return self._data_stub_internal
@property
def _instance_stub(self):
"""Getter for the gRPC stub used for the Instance Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed.
"""
if not self._admin:
raise ValueError('Client is not an admin client.')
if self._instance_stub_internal is None:
raise ValueError('Client has not been started.')
return self._instance_stub_internal
@property
def _operations_stub(self):
"""Getter for the gRPC stub used for the Operations API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed.
"""
if not self._admin:
raise ValueError('Client is not an admin client.')
if self._operations_stub_internal is None:
raise ValueError('Client has not been started.')
return self._operations_stub_internal
@property
def _table_stub(self):
"""Getter for the gRPC stub used for the Table Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed.
"""
if not self._admin:
raise ValueError('Client is not an admin client.')
if self._table_stub_internal is None:
raise ValueError('Client has not been started.')
return self._table_stub_internal
def _make_data_stub(self):
"""Creates gRPC stub to make requests to the Data API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, DATA_STUB_FACTORY_V2,
DATA_API_HOST_V2, DATA_API_PORT_V2)
def _make_instance_stub(self):
"""Creates gRPC stub to make requests to the Instance Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, INSTANCE_STUB_FACTORY_V2,
INSTANCE_ADMIN_HOST_V2, INSTANCE_ADMIN_PORT_V2)
def _make_operations_stub(self):
"""Creates gRPC stub to make requests to the Operations API.
These are for long-running operations of the Instance Admin API,
hence the host and port matching.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, OPERATIONS_STUB_FACTORY_V2,
OPERATIONS_API_HOST_V2, OPERATIONS_API_PORT_V2)
def _make_table_stub(self):
"""Creates gRPC stub to make requests to the Table Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, TABLE_STUB_FACTORY_V2,
TABLE_ADMIN_HOST_V2, TABLE_ADMIN_PORT_V2)
def is_started(self):
"""Check if the client has been started.
:rtype: bool
:returns: Boolean indicating if the client has been started.
"""
return self._data_stub_internal is not None
def start(self):
"""Prepare the client to make requests.
Activates gRPC contexts for making requests to the Bigtable
Service(s).
"""
if self.is_started():
return
# NOTE: We __enter__ the stubs more-or-less permanently. This is
# because only after entering the context managers is the
# connection created. We don't want to immediately close
# those connections since the client will make many
# requests with it over HTTP/2.
self._data_stub_internal = self._make_data_stub()
self._data_stub_internal.__enter__()
if self._admin:
self._instance_stub_internal = self._make_instance_stub()
self._operations_stub_internal = self._make_operations_stub()
self._table_stub_internal = self._make_table_stub()
self._instance_stub_internal.__enter__()
self._operations_stub_internal.__enter__()
self._table_stub_internal.__enter__()
def __enter__(self):
"""Starts the client as a context manager."""
self.start()
return self
def stop(self):
"""Closes all the open gRPC clients."""
if not self.is_started():
return
# When exit-ing, we pass None as the exception type, value and
# traceback to __exit__.
self._data_stub_internal.__exit__(None, None, None)
if self._admin:
self._instance_stub_internal.__exit__(None, None, None)
self._operations_stub_internal.__exit__(None, None, None)
self._table_stub_internal.__exit__(None, None, None)
self._data_stub_internal = None
self._instance_stub_internal = None
self._operations_stub_internal = None
self._table_stub_internal = None
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stops the client as a context manager."""
self.stop()
def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID,
display_name=None, serve_nodes=DEFAULT_SERVE_NODES):
"""Factory to create a instance associated with this client.
:type instance_id: str
:param instance_id: The ID of the instance.
:type location: string
:param location: location name, in form
``projects/<project>/locations/<location>``; used to
set up the instance's cluster.
:type display_name: str
:param display_name: (Optional) The display name for the instance in
the Cloud Console UI. (Must be between 4 and 30
characters.) If this value is not set in the
constructor, will fall back to the instance ID.
:type serve_nodes: int
:param serve_nodes: (Optional) The number of nodes in the instance's
cluster; used to set up the instance's cluster.
:rtype: :class:`.Instance`
:returns: an instance owned by this client.
"""
return Instance(instance_id, self, location,
display_name=display_name, serve_nodes=serve_nodes)
def list_instances(self):
"""List instances owned by the project.
:rtype: tuple
:returns: A pair of results, the first is a list of
:class:`.Instance` objects returned and the second is a
list of strings (the failed locations in the request).
"""
request_pb = instance_admin_v2_pb2.ListInstancesRequest(
parent=self.project_name)
response = self._instance_stub.ListInstances(
request_pb, self.timeout_seconds)
instances = [Instance.from_pb(instance_pb, self)
for instance_pb in response.instances]
return instances, response.failed_locations
class _MetadataPlugin(object):
"""Callable class to transform metadata for gRPC requests.
:type client: :class:`.client.Client`
:param client: The client that owns the instance.
Provides authorization and user agent.
"""
def __init__(self, client):
self._credentials = client.credentials
self._user_agent = client.user_agent
def __call__(self, unused_context, callback):
"""Adds authorization header to request metadata."""
access_token = self._credentials.get_access_token().access_token
headers = [
('Authorization', 'Bearer ' + access_token),
('User-agent', self._user_agent),
]
callback(headers, None)
def _make_stub(client, stub_factory, host, port):
"""Makes a stub for an RPC service.
Uses / depends on the beta implementation of gRPC.
:type client: :class:`.client.Client`
:param client: The client that owns the instance.
Provides authorization and user agent.
:type stub_factory: callable
:param stub_factory: A factory which will create a gRPC stub for
a given service.
:type host: str
:param host: The host for the service.
:type port: int
:param port: The port for the service.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: The stub object used to make gRPC requests to a given API.
"""
# Leaving the first argument to ssl_channel_credentials() as None
# loads root certificates from `grpc/_adapter/credentials/roots.pem`.
transport_creds = implementations.ssl_channel_credentials(None, None, None)
custom_metadata_plugin = _MetadataPlugin(client)
auth_creds = implementations.metadata_call_credentials(
custom_metadata_plugin, name='google_creds')
channel_creds = implementations.composite_channel_credentials(
transport_creds, auth_creds)
channel = implementations.secure_channel(host, port, channel_creds)
return stub_factory(channel)
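# Hedged usage sketch (illustrative): requires Application Default Credentials
# and access to a real project; 'my-project' is a placeholder.
if __name__ == '__main__':
    with Client(project='my-project', admin=True) as client:
        instances, failed_locations = client.list_instances()
        for instance in instances:
            print(instance.display_name)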
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "table"
_path_str = "table.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within the
hover label box. Has an effect only if the hover label text
spans two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.table.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the
hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.Hoverlabel`
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the
hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.table.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
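# Hedged usage sketch (illustrative): this class is normally reached as
# plotly.graph_objects.table.Hoverlabel, e.g. via go.Table(hoverlabel=...);
# here it is constructed directly from this module.
if __name__ == '__main__':
    label = Hoverlabel(bgcolor='lightyellow', bordercolor='gray',
                       font=dict(family='Arial', size=12), namelength=-1)
    print(label.to_plotly_json())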
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Converters for Quantity."""
import numpy as np
from astropy.units.core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled)
__all__ = ['can_have_arbitrary_unit', 'converters_and_unit',
'check_output', 'UFUNC_HELPERS', 'UNSUPPORTED_UFUNCS']
class UfuncHelpers(dict):
"""Registry of unit conversion functions to help ufunc evaluation.
Based on dict for quick access, but with a missing method to load
helpers for additional modules such as scipy.special and erfa.
Such modules should be registered using ``register_module``.
"""
UNSUPPORTED = set()
def register_module(self, module, names, importer):
"""Register (but do not import) a set of ufunc helpers.
Parameters
----------
module : str
Name of the module with the ufuncs (e.g., 'scipy.special').
names : iterable of str
Names of the module ufuncs for which helpers are available.
importer : callable
Function that imports the ufuncs and returns a dict of helpers
keyed by those ufuncs. If the value is `None`, the ufunc is
explicitly *not* supported.
"""
self.modules[module] = {'names': names,
'importer': importer}
@property
def modules(self):
"""Modules for which helpers are available (but not yet loaded)."""
if not hasattr(self, '_modules'):
self._modules = {}
return self._modules
def import_module(self, module):
"""Import the helpers from the given module using its helper function.
Parameters
----------
module : str
Name of the module. Has to have been registered beforehand.
"""
module_info = self.modules.pop(module)
self.update(module_info['importer']())
def __missing__(self, ufunc):
"""Called if a ufunc is not found.
Check if the ufunc is in any of the available modules, and, if so,
import the helpers for that module.
"""
if ufunc in self.UNSUPPORTED:
raise TypeError("Cannot use ufunc '{}' with quantities"
.format(ufunc.__name__))
for module, module_info in list(self.modules.items()):
if ufunc.__name__ in module_info['names']:
# A ufunc with the same name is supported by this module.
# Of course, this doesn't necessarily mean it is the
# right module. So, we try let the importer do its work.
# If it fails (e.g., for `scipy.special`), then that's
# fine, just raise the TypeError. If it succeeds, but
# the ufunc is not found, that is also fine: we will
# enter __missing__ again and either find another
# module or get the TypeError there.
try:
self.import_module(module)
except ImportError:
pass
else:
return self[ufunc]
raise TypeError("unknown ufunc {}. If you believe this ufunc "
"should be supported, please raise an issue on "
"https://github.com/astropy/astropy"
.format(ufunc.__name__))
def __setitem__(self, key, value):
# Implementation note: in principle, we could just let `None`
# mean that something is not implemented, but this means an
# extra if clause for the output, slowing down the common
# path where a ufunc is supported.
if value is None:
self.UNSUPPORTED |= {key}
self.pop(key, None)
else:
super().__setitem__(key, value)
self.UNSUPPORTED -= {key}
UFUNC_HELPERS = UfuncHelpers()
UNSUPPORTED_UFUNCS = UFUNC_HELPERS.UNSUPPORTED
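# Hedged sketch of how a helper module is registered (names below are
# illustrative placeholders, not actual registrations performed here):
#
#     def _import_extra_helpers():
#         from mypackage import ufunc_helpers  # hypothetical module
#         return ufunc_helpers.get_helpers()   # dict keyed by ufunc
#
#     UFUNC_HELPERS.register_module('mypackage.ufuncs', ['myfunc'],
#                                   _import_extra_helpers)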
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
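# Hedged example (illustrative):
#
#     >>> can_have_arbitrary_unit(np.array([0., np.inf, np.nan]))
#     True
#     >>> can_have_arbitrary_unit(np.array([0., 1.]))
#     False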
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : Quantity or other ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined in helpers) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
ufunc_helper = UFUNC_HELPERS[function]
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for multi-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
try:
# Don't fold this loop in the test above: this rare case
# should not make the common case slower.
for i, converter in enumerate(converters):
if converter is not False:
continue
if can_have_arbitrary_unit(args[i]):
converters[i] = None
else:
raise UnitConversionError(
"Can only apply '{}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
# _can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {}: "
"'{}'".format(function.__name__,
','.join([arg.__class__.__name__
for arg in args])))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], 'unit', None)
if method == 'at' and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], 'unit', None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {'reduce', 'accumulate',
'reduceat', 'outer'} and nin != 2:
raise ValueError("{} only supported for binary functions"
.format(method))
raise TypeError("Unexpected ufunc method {}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if (converters[0] is not None or
(unit is not None and unit is not result_unit and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.))):
# NOTE: this cannot be the more logical UnitTypeError, since
# then things like np.cumprod will no longer fail (they check
# for TypeError).
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
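# Hedged example (illustrative; the converter callables themselves are
# internal helper functions):
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> _, unit = converters_and_unit(np.add, '__call__', 1. * u.m, 1. * u.km)
#     >>> unit
#     Unit("m")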
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : `~numpy.ndarray` view of ``output`` (or tuple of such views).
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{} in {} "
"instance".format(
(f" from {function.__name__} function"
if function is not None else ""),
type(output)))
if output.__quantity_subclass__(unit)[0] is not type(output):
raise UnitTypeError(
"Cannot store output with unit '{}'{} "
"in {} instance. Use {} instance instead."
.format(unit, (f" from {function.__name__} function"
if function is not None else ""), type(output),
output.__quantity_subclass__(unit)[0]))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
output = output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{}in a non-Quantity instance."
.format("" if function is None else
"resulting from {} function "
.format(function.__name__)))
# check we can handle the dtype (e.g., that we are not int
# when float is required).
if not np.can_cast(np.result_type(*inputs), output.dtype,
casting='same_kind'):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={}".format(output.dtype))
return output
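# Hedged example (illustrative): a Quantity output in metres is accepted and
# returned as a plain ndarray view for the ufunc machinery to fill.
#
#     >>> import astropy.units as u
#     >>> out = np.empty(3) * u.m
#     >>> view = check_output(out, u.m, (np.arange(3.) * u.km,))
#     >>> type(view) is np.ndarray
#     True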
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.continue_button import ContinueButton
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.WonderlandGetPersonStat import WonderlandGetPersonStat
from sara_flexbe_states.sara_say import SaraSay
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from flexbe_states.calculation_state import CalculationState
from sara_flexbe_states.SetRosParamKey import SetRosParamKey
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from sara_flexbe_states.get_speech import GetSpeech
from sara_flexbe_states.sara_nlu_spr import SaraNLUspr
from sara_flexbe_states.set_a_step import Set_a_step
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_states.KeepLookingAt import KeepLookingAt
from sara_flexbe_behaviors.action_turn_sm import action_turnSM
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_behaviors.actionwrapper_move_sm import ActionWrapper_MoveSM
from sara_flexbe_states.story import Set_Story
from sara_flexbe_states.WonderlandClearPeoples import WonderlandClearPeoples
from sara_flexbe_states.moveit_move import MoveitMove
from sara_flexbe_states.SetKey import SetKey
from sara_flexbe_states.WonderlandAddUpdatePeople import WonderlandAddUpdatePeople
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Jun 07 2018
@author: Lucas Maurice
'''
class Scenario_SPRSM(Behavior):
'''
Contains the SPR scenario.
'''
def __init__(self):
super(Scenario_SPRSM, self).__init__()
self.name = 'Scenario_SPR'
# parameters of this behavior
# references to used behaviors
self.add_behavior(action_turnSM, 'Waiting And Turn/action_turn')
self.add_behavior(ActionWrapper_MoveSM, 'Leave Arena')
self.add_behavior(ActionWrapper_MoveSM, 'Join Area/Join Arena')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
# O 100 629
# 0 - Waiting Begining|n1 - Join Game Room|n2 - Waiting Crowd Placement|n3 - Analysing Crowd|n4 - Begin Game|n5 - Find Operator|n6 - Question 1|n7 - Question 2|n8 - Question 3|n9 - Question 4|n10 - Question 5|n11 - Go out
def create(self):
# x:1474 y:331, x:56 y:575
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
_state_machine.userdata.half_turn = 3.1416
_state_machine.userdata.person = "person"
_state_machine.userdata.operator_param = "behavior/Operaror/Id"
_state_machine.userdata.join = ["Move", "spr/initialpose"]
_state_machine.userdata.leave = ["Move", "door2/exit"]
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:1182 y:163
_sm_rotate_0 = OperatableStateMachine(outcomes=['finished'])
with _sm_rotate_0:
# x:103 y:61
OperatableStateMachine.add('Look Left',
SaraSetHeadAngle(pitch=-0.35, yaw=0.5),
transitions={'done': 'Rotate Left'},
autonomy={'done': Autonomy.Off})
# x:794 y:54
OperatableStateMachine.add('Look Right',
SaraSetHeadAngle(pitch=-0.35, yaw=-0.5),
transitions={'done': 'Rotate Right'},
autonomy={'done': Autonomy.Off})
# x:325 y:61
OperatableStateMachine.add('Rotate Left',
WaitState(wait_time=8),
transitions={'done': 'Look Center'},
autonomy={'done': Autonomy.Off})
# x:961 y:65
OperatableStateMachine.add('Rotate Right',
WaitState(wait_time=4),
transitions={'done': 'Look Center 2'},
autonomy={'done': Autonomy.Off})
# x:1115 y:62
OperatableStateMachine.add('Look Center 2',
SaraSetHeadAngle(pitch=0.1, yaw=0),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
# x:484 y:54
OperatableStateMachine.add('Look Center',
SaraSetHeadAngle(pitch=-0.35, yaw=0),
transitions={'done': 'Rotate Center'},
autonomy={'done': Autonomy.Off})
# x:657 y:49
OperatableStateMachine.add('Rotate Center',
WaitState(wait_time=4),
transitions={'done': 'Look Right'},
autonomy={'done': Autonomy.Off})
# x:30 y:458
_sm_follow_head_1 = OperatableStateMachine(outcomes=['end'], input_keys=['person'])
with _sm_follow_head_1:
# x:214 y:48
OperatableStateMachine.add('list all entities',
list_entities_by_name(frontality_level=0.5, distance_max=10),
transitions={'found': 'Get Nearest', 'none_found': 'list all entities'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'person', 'entity_list': 'entity_list', 'number': 'number'})
# x:456 y:51
OperatableStateMachine.add('Get Nearest',
CalculationState(calculation=lambda x: x[0].ID),
transitions={'done': 'look'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'entity_list', 'output_value': 'ID'})
# x:291 y:177
OperatableStateMachine.add('look',
KeepLookingAt(),
transitions={'failed': 'list all entities'},
autonomy={'failed': Autonomy.Off},
remapping={'ID': 'ID'})
# x:12 y:125, x:1130 y:515
_sm_nlu_2 = OperatableStateMachine(outcomes=['finished', 'failed'])
with _sm_nlu_2:
# x:156 y:37
OperatableStateMachine.add('say ask',
SaraSay(sentence="You can ask me your questions.", input_keys=[], emotion=1, block=True),
transitions={'done': 'Loop Questions'},
autonomy={'done': Autonomy.Off})
# x:1091 y:84
OperatableStateMachine.add('Listen',
GetSpeech(watchdog=10),
transitions={'done': 'Engine', 'nothing': 'Listen', 'fail': 'Listen'},
autonomy={'done': Autonomy.Off, 'nothing': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'words': 'sentence'})
# x:1324 y:110
OperatableStateMachine.add('Engine',
SaraNLUspr(),
transitions={'understood': 'Say_Answer', 'not_understood': 'Listen', 'fail': 'Listen'},
autonomy={'understood': Autonomy.Off, 'not_understood': Autonomy.Off, 'fail': Autonomy.Off},
remapping={'sentence': 'sentence', 'answer': 'answer'})
# x:632 y:77
OperatableStateMachine.add('Select Story',
CalculationState(calculation=lambda x: x+6),
transitions={'done': 'Set_a_step'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'index', 'output_value': 'story'})
# x:874 y:78
OperatableStateMachine.add('Set_a_step',
Set_a_step(step=1),
transitions={'done': 'Listen'},
autonomy={'done': Autonomy.Off})
# x:398 y:81
OperatableStateMachine.add('Loop Questions',
ForLoop(repeat=5),
transitions={'do': 'Select Story', 'end': 'loop step'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:439 y:252
OperatableStateMachine.add('Say Blind Game',
SaraSay(sentence="Let's play the blind game !", input_keys=[], emotion=1, block=True),
transitions={'done': 'Loop Questions'},
autonomy={'done': Autonomy.Off})
# x:189 y:193
OperatableStateMachine.add('loop step',
ForLoop(repeat=1),
transitions={'do': 'Say Blind Game', 'end': 'finished'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:721 y:205
OperatableStateMachine.add('Say_Answer',
SaraSay(sentence=lambda x: str(x), input_keys=[], emotion=0, block=True),
transitions={'done': 'Loop Questions'},
autonomy={'done': Autonomy.Off})
# x:817 y:123, x:130 y:458
_sm_analyse_crowd_3 = OperatableStateMachine(outcomes=['finished', 'error'])
with _sm_analyse_crowd_3:
# x:87 y:97
OperatableStateMachine.add('clear database',
WonderlandClearPeoples(),
transitions={'done': 'Rotate', 'error': 'error'},
autonomy={'done': Autonomy.Off, 'error': Autonomy.Off})
# x:531 y:112
OperatableStateMachine.add('Add Update Persons',
WonderlandAddUpdatePeople(),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
# x:311 y:97
OperatableStateMachine.add('Rotate',
_sm_rotate_0,
transitions={'finished': 'Add Update Persons'},
autonomy={'finished': Autonomy.Inherit})
# x:1203 y:11, x:1006 y:366
_sm_init_scenario_4 = OperatableStateMachine(outcomes=['done', 'error'])
with _sm_init_scenario_4:
# x:30 y:42
OperatableStateMachine.add('Generate Vizbox Story',
Set_Story(titre="Speech and Person Recognition", storyline=["Waiting Beginning","Join Game Room","Waiting Crowd Placement","Analysing Crowd","Begin Game","Find Operator","Question 1","Question 2","Question 3","Question 4","Question 5", "Leave Arena"]),
transitions={'done': 'Set Story Step'},
autonomy={'done': Autonomy.Off})
# x:559 y:44
OperatableStateMachine.add('WaitForBegining',
ContinueButton(),
transitions={'true': 'Reset Persons', 'false': 'Reset Persons'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off})
# x:807 y:61
OperatableStateMachine.add('Reset Persons',
WonderlandClearPeoples(),
transitions={'done': 'done', 'error': 'error'},
autonomy={'done': Autonomy.Off, 'error': Autonomy.Off})
# x:247 y:49
OperatableStateMachine.add('Set Story Step',
Set_a_step(step=0),
transitions={'done': 'setIDLE'},
autonomy={'done': Autonomy.Off})
# x:388 y:208
OperatableStateMachine.add('Reset Arm',
MoveitMove(move=True, waitForExecution=True, group="RightArm", watchdog=15),
transitions={'done': 'Reset Persons', 'failed': 'error'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'target': 'target'})
# x:427 y:40
OperatableStateMachine.add('setIDLE',
SetKey(Value="IdlePose"),
transitions={'done': 'Reset Arm'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'target'})
# x:30 y:458, x:230 y:458
_sm_join_area_5 = OperatableStateMachine(outcomes=['failed', 'finished'], input_keys=['join'])
with _sm_join_area_5:
# x:95 y:40
OperatableStateMachine.add('Say Join Area',
SaraSay(sentence="I will join the playing room !", input_keys=[], emotion=1, block=True),
transitions={'done': 'Join Arena'},
autonomy={'done': Autonomy.Off})
# x:92 y:134
OperatableStateMachine.add('Join Arena',
self.use_behavior(ActionWrapper_MoveSM, 'Join Area/Join Arena'),
transitions={'finished': 'finished', 'failed': 'failed', 'critical_fail': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'critical_fail': Autonomy.Inherit},
remapping={'Action': 'join'})
# x:489 y:56, x:604 y:278
_sm_waiting_and_turn_6 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['half_turn'])
with _sm_waiting_and_turn_6:
# x:50 y:51
OperatableStateMachine.add('Want Play',
SaraSay(sentence="Hum, I want to play riddles !", input_keys=[], emotion=1, block=True),
transitions={'done': 'Wait 10s'},
autonomy={'done': Autonomy.Off})
# x:272 y:121
OperatableStateMachine.add('action_turn',
self.use_behavior(action_turnSM, 'Waiting And Turn/action_turn'),
transitions={'finished': 'finished', 'failed': 'Cant turn'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'rotation': 'half_turn'})
# x:437 y:240
OperatableStateMachine.add('Cant turn',
SaraSay(sentence="I can't turn !", input_keys=[], emotion=1, block=True),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Off})
# x:63 y:178
OperatableStateMachine.add('Wait 10s',
WaitState(wait_time=10),
transitions={'done': 'Look In Front Of'},
autonomy={'done': Autonomy.Off})
# x:61 y:260
OperatableStateMachine.add('Look In Front Of',
SaraSetHeadAngle(pitch=0, yaw=0),
transitions={'done': 'action_turn'},
autonomy={'done': Autonomy.Off})
# x:472 y:69, x:476 y:113, x:470 y:196, x:330 y:458, x:430 y:458
_sm_questions_7 = ConcurrencyContainer(outcomes=['finished', 'failed'], input_keys=['person'], conditions=[
('finished', [('NLU', 'finished')]),
('failed', [('NLU', 'failed')]),
('finished', [('Follow Head', 'end')])
])
with _sm_questions_7:
# x:85 y:58
OperatableStateMachine.add('NLU',
_sm_nlu_2,
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:84 y:185
OperatableStateMachine.add('Follow Head',
_sm_follow_head_1,
transitions={'end': 'finished'},
autonomy={'end': Autonomy.Inherit},
remapping={'person': 'person'})
# x:283 y:294, x:60 y:571
_sm_find_operator_8 = OperatableStateMachine(outcomes=['not_found', 'done'], input_keys=['person', 'operator_param'])
with _sm_find_operator_8:
# x:51 y:40
OperatableStateMachine.add('Ask Player',
SaraSay(sentence="Who wan't to play with me ?", input_keys=[], emotion=1, block=True),
transitions={'done': 'Wait Operator'},
autonomy={'done': Autonomy.Off})
# x:39 y:297
OperatableStateMachine.add('Get operator id',
CalculationState(calculation=lambda x: x.entities[0].face.id),
transitions={'done': 'Set Operator Id'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'entity_list', 'output_value': 'operator'})
# x:33 y:384
OperatableStateMachine.add('Set Operator Id',
SetRosParamKey(),
transitions={'done': 'Operator Id'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'operator_param', 'ParamName': 'operator'})
# x:46 y:117
OperatableStateMachine.add('Wait Operator',
WaitState(wait_time=6),
transitions={'done': 'Find Operator'},
autonomy={'done': Autonomy.Off})
# x:35 y:495
OperatableStateMachine.add('Operator Id',
LogKeyState(text="Operator find. Id: {}", severity=Logger.REPORT_HINT),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'operator'})
# x:30 y:205
OperatableStateMachine.add('Find Operator',
list_entities_by_name(frontality_level=0.5, distance_max=10),
transitions={'found': 'Get operator id', 'none_found': 'Find Operator'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'person', 'entity_list': 'entity_list', 'number': 'number'})
# x:703 y:198, x:88 y:199
_sm_tell_basic_stats_9 = OperatableStateMachine(outcomes=['finished', 'failed'])
with _sm_tell_basic_stats_9:
# x:50 y:40
OperatableStateMachine.add('wait',
WaitState(wait_time=0),
transitions={'done': 'GetPeopleStats'},
autonomy={'done': Autonomy.Off})
# x:218 y:43
OperatableStateMachine.add('GetPeopleStats',
WonderlandGetPersonStat(),
transitions={'done': 'GenerateSentence', 'none': 'Nobody', 'error': 'failed'},
autonomy={'done': Autonomy.Off, 'none': Autonomy.Off, 'error': Autonomy.Off},
remapping={'women': 'women', 'men': 'men', 'others': 'others'})
# x:466 y:46
OperatableStateMachine.add('Nobody',
SaraSay(sentence="There is nobody here !", input_keys=[], emotion=1, block=True),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
# x:162 y:243
OperatableStateMachine.add('GenerateSentence',
FlexibleCalculationState(calculation=lambda x: "There is " + str(x[0]+x[1]+x[2]) + " persons.", input_keys=['men','women','others']),
transitions={'done': 'Tell_Stats'},
autonomy={'done': Autonomy.Off},
remapping={'men': 'men', 'women': 'women', 'others': 'others', 'output_value': 'sentence'})
# x:151 y:345
OperatableStateMachine.add('Generate Sentence 2',
FlexibleCalculationState(calculation=lambda x: "I recognize " + str(x[1]) + " women and " + str(x[0]) + " men.", input_keys=['men','women','others']),
transitions={'done': 'Tell_Stats 2'},
autonomy={'done': Autonomy.Off},
remapping={'men': 'men', 'women': 'women', 'others': 'others', 'output_value': 'sentence'})
# x:380 y:172
OperatableStateMachine.add('Tell_Stats',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'Generate Sentence 2'},
autonomy={'done': Autonomy.Off})
# x:409 y:276
OperatableStateMachine.add('Tell_Stats 2',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
with _state_machine:
# x:30 y:40
OperatableStateMachine.add('continue',
ContinueButton(),
transitions={'true': 'Init Scenario', 'false': 'Init Scenario'},
autonomy={'true': Autonomy.Off, 'false': Autonomy.Off})
# x:715 y:127
OperatableStateMachine.add('Tell basic stats',
_sm_tell_basic_stats_9,
transitions={'finished': 'Set Find Operator', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:899 y:126
OperatableStateMachine.add('Find Operator',
_sm_find_operator_8,
transitions={'not_found': 'Find Operator', 'done': 'Questions'},
autonomy={'not_found': Autonomy.Inherit, 'done': Autonomy.Inherit},
remapping={'person': 'person', 'operator_param': 'operator_param'})
# x:1120 y:126
OperatableStateMachine.add('Questions',
_sm_questions_7,
transitions={'finished': 'set head', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'person': 'person'})
# x:343 y:49
OperatableStateMachine.add('Set Story Waiting',
Set_a_step(step=2),
transitions={'done': 'Waiting And Turn'},
autonomy={'done': Autonomy.Off})
# x:532 y:48
OperatableStateMachine.add('Set Analyse',
Set_a_step(step=3),
transitions={'done': 'Analyse Crowd'},
autonomy={'done': Autonomy.Off})
# x:696 y:47
OperatableStateMachine.add('Set Begin Game',
Set_a_step(step=4),
transitions={'done': 'Tell basic stats'},
autonomy={'done': Autonomy.Off})
# x:899 y:49
OperatableStateMachine.add('Set Find Operator',
Set_a_step(step=5),
transitions={'done': 'Find Operator'},
autonomy={'done': Autonomy.Off})
# x:332 y:127
OperatableStateMachine.add('Waiting And Turn',
_sm_waiting_and_turn_6,
transitions={'finished': 'Set Analyse', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'half_turn': 'half_turn'})
# x:1333 y:232
OperatableStateMachine.add('Set Go Out',
Set_a_step(step=11),
transitions={'done': 'Say End Of Game'},
autonomy={'done': Autonomy.Off})
# x:1517 y:148
OperatableStateMachine.add('Leave Arena',
self.use_behavior(ActionWrapper_MoveSM, 'Leave Arena'),
transitions={'finished': 'finished', 'failed': 'finished', 'critical_fail': 'finished'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'critical_fail': Autonomy.Inherit},
remapping={'Action': 'leave'})
# x:176 y:42
OperatableStateMachine.add('Set Join',
Set_a_step(step=1),
transitions={'done': 'Set Story Waiting'},
autonomy={'done': Autonomy.Off})
# x:177 y:120
OperatableStateMachine.add('Join Area',
_sm_join_area_5,
transitions={'failed': 'failed', 'finished': 'Set Story Waiting'},
autonomy={'failed': Autonomy.Inherit, 'finished': Autonomy.Inherit},
remapping={'join': 'join'})
# x:1302 y:140
OperatableStateMachine.add('Say End Of Game',
SaraSay(sentence="The game is finished. I will leave the arena. Thank you for playing with me.", input_keys=[], emotion=1, block=True),
transitions={'done': 'Leave Arena'},
autonomy={'done': Autonomy.Off})
# x:1120 y:237
OperatableStateMachine.add('set head',
SaraSetHeadAngle(pitch=-0.2, yaw=0),
transitions={'done': 'Set Go Out'},
autonomy={'done': Autonomy.Off})
# x:34 y:130
OperatableStateMachine.add('Init Scenario',
_sm_init_scenario_4,
transitions={'done': 'Set Join', 'error': 'failed'},
autonomy={'done': Autonomy.Inherit, 'error': Autonomy.Inherit})
# x:517 y:124
OperatableStateMachine.add('Analyse Crowd',
_sm_analyse_crowd_3,
transitions={'finished': 'Set Begin Game', 'error': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'error': Autonomy.Inherit})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
|
from __future__ import absolute_import
import re
import posixpath
from sentry.grouping.component import GroupingComponent
from sentry.grouping.strategies.base import strategy
from sentry.grouping.strategies.utils import remove_non_stacktrace_variants, has_url_origin
from sentry.grouping.strategies.similarity_encoders import text_shingle_encoder, ident_encoder
_ruby_anon_func = re.compile(r"_\d{2,}")
_filename_version_re = re.compile(
r"""(?:
v?(?:\d+\.)*\d+| # version numbers, v1, 1.0.0
[a-f0-9]{7,8}| # short sha
[a-f0-9]{32}| # md5
[a-f0-9]{40} # sha1
)/""",
re.X | re.I,
)
# OpenJDK auto-generated classes for reflection access:
# sun.reflect.GeneratedSerializationConstructorAccessor123
# sun.reflect.GeneratedConstructorAccessor456
# Note that this doesn't cover the following pattern for the sake of
# backward compatibility (to not change the existing grouping):
# sun.reflect.GeneratedMethodAccessor789
_java_reflect_enhancer_re = re.compile(
r"""(sun\.reflect\.Generated(?:Serialization)?ConstructorAccessor)\d+""", re.X
)
# Java Spring specific anonymous classes.
# see: http://mydailyjava.blogspot.co.at/2013/11/cglib-missing-manual.html
_java_cglib_enhancer_re = re.compile(r"""(\$\$[\w_]+?CGLIB\$\$)[a-fA-F0-9]+(_[0-9]+)?""", re.X)
# Handle Javassist auto-generated classes and filenames:
# com.example.api.entry.EntriesResource_$$_javassist_74
# com.example.api.entry.EntriesResource_$$_javassist_seam_74
# EntriesResource_$$_javassist_seam_74.java
_java_assist_enhancer_re = re.compile(r"""(\$\$_javassist)(?:_seam)?(?:_[0-9]+)?""", re.X)
# Clojure anon functions are compiled down to myapp.mymodule$fn__12345
_clojure_enhancer_re = re.compile(r"""(\$fn__)\d+""", re.X)
# fields that need to be the same between frames for them to be considered
# recursive calls
RECURSION_COMPARISON_FIELDS = [
"abs_path",
"package",
"module",
"filename",
"function",
"lineno",
"colno",
]
def is_unhashable_module_legacy(frame, platform):
# Fix for the case where module is a partial copy of the URL
# and should not be hashed
if (
platform == "javascript"
and "/" in frame.module
and frame.abs_path
and frame.abs_path.endswith(frame.module)
):
return True
elif platform == "java" and "$$Lambda$" in frame.module:
return True
return False
def is_unhashable_function_legacy(func):
# TODO(dcramer): lambda$ is Java specific
# TODO(dcramer): [Anonymous is PHP specific (used for things like SQL
# queries and JSON data)
return func.startswith(("lambda$", "[Anonymous"))
def is_recursion_legacy(frame1, frame2):
"Returns a boolean indicating whether frames are recursive calls."
for field in RECURSION_COMPARISON_FIELDS:
if getattr(frame1, field, None) != getattr(frame2, field, None):
return False
return True
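# Illustrative note (not from the original source): two consecutive frames
# that agree on all of the fields listed in RECURSION_COMPARISON_FIELDS are
# treated as a recursive call here, even if they differ in data outside that
# list (e.g. local variables), and stacktrace_legacy() below then marks the
# later frame with the hint "ignored due to recursion".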
def remove_module_outliers_legacy(module, platform):
"""Remove things that augment the module but really should not."""
if platform == "java":
if module[:35] == "sun.reflect.GeneratedMethodAccessor":
return "sun.reflect.GeneratedMethodAccessor", "removed reflection marker"
if module[:44] == "jdk.internal.reflect.GeneratedMethodAccessor":
return "jdk.internal.reflect.GeneratedMethodAccessor", "removed reflection marker"
old_module = module
module = _java_reflect_enhancer_re.sub(r"\1<auto>", module)
module = _java_cglib_enhancer_re.sub(r"\1<auto>", module)
module = _java_assist_enhancer_re.sub(r"\1<auto>", module)
module = _clojure_enhancer_re.sub(r"\1<auto>", module)
if old_module != module:
return module, "removed codegen marker"
return module, None
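# Hedged examples of the normalization above (hypothetical module names for
# platform="java"; outputs follow from the prefix checks and regexes defined
# earlier in this file):
#   "sun.reflect.GeneratedMethodAccessor12"
#       -> ("sun.reflect.GeneratedMethodAccessor", "removed reflection marker")
#   "sun.reflect.GeneratedConstructorAccessor456"
#       -> ("sun.reflect.GeneratedConstructorAccessor<auto>", "removed codegen marker")
#   "com.example.EntriesResource_$$_javassist_seam_74"
#       -> ("com.example.EntriesResource_$$_javassist<auto>", "removed codegen marker")
#   "myapp.mymodule$fn__12345"
#       -> ("myapp.mymodule$fn__<auto>", "removed codegen marker")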
def remove_filename_outliers_legacy(filename, platform):
"""
Attempt to normalize filenames by removing common platform outliers.
- Sometimes filename paths contain build numbers
"""
# On cocoa we generally only want to use the last path component as
# the filename. The reason for this is that the chances are very high
# that full filenames contain information we do want to strip but
# currently can't (for instance because the information we get from
# the dwarf files does not contain prefix information) and that might
# contain things like /Users/foo/Dropbox/...
if platform == "cocoa":
return posixpath.basename(filename), "stripped to basename"
removed = []
if platform == "java":
new_filename = _java_assist_enhancer_re.sub(r"\1<auto>", filename)
if new_filename != filename:
removed.append("javassist parts")
filename = new_filename
new_filename = _filename_version_re.sub("<version>/", filename)
if new_filename != filename:
removed.append("version")
filename = new_filename
if removed:
return filename, "removed %s" % " and ".join(removed)
return filename, None
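# Hedged examples (hypothetical filename/platform pairs; results follow from
# the code above):
#   ("/Users/foo/project/Thing.m", "cocoa")  -> ("Thing.m", "stripped to basename")
#   ("static/v1.2.3/app.js", "javascript")   -> ("static/<version>/app.js", "removed version")
#   ("EntriesResource_$$_javassist_74.java", "java")
#       -> ("EntriesResource_$$_javassist<auto>.java", "removed javassist parts")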
def remove_function_outliers_legacy(function):
"""
Attempt to normalize functions by removing common platform outliers.
- Ruby generates (random?) integers for various anonymous style functions
such as in erb and the active_support library.
- Block functions have metadata that we don't care about.
"""
if function.startswith("block "):
return "block", "ruby block"
new_function = _ruby_anon_func.sub("_<anon>", function)
if new_function != function:
return new_function, "trimmed integer suffix"
return new_function, None
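# Hedged examples (hypothetical function names):
#   "block in render"        -> ("block", "ruby block")
#   "_app_views_home_12345"  -> ("_app_views_home_<anon>", "trimmed integer suffix")
#   "main"                   -> ("main", None)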
@strategy(id="single-exception:legacy", interfaces=["singleexception"], variants=["!system", "app"])
def single_exception_legacy(exception, config, **meta):
type_component = GroupingComponent(
id="type",
values=[exception.type] if exception.type else [],
similarity_encoder=ident_encoder,
contributes=False,
)
value_component = GroupingComponent(
id="value",
values=[exception.value] if exception.value else [],
similarity_encoder=text_shingle_encoder(5),
contributes=False,
)
stacktrace_component = GroupingComponent(id="stacktrace")
if exception.stacktrace is not None:
stacktrace_component = config.get_grouping_component(exception.stacktrace, **meta)
if stacktrace_component.contributes:
if exception.type:
type_component.update(contributes=True)
if exception.value:
value_component.update(hint="stacktrace and type take precedence")
elif exception.value:
value_component.update(hint="stacktrace takes precedence")
if not stacktrace_component.contributes:
if exception.type:
type_component.update(contributes=True)
if exception.value:
value_component.update(contributes=True)
return GroupingComponent(
id="exception", values=[stacktrace_component, type_component, value_component]
)
@strategy(
id="chained-exception:legacy", interfaces=["exception"], variants=["!system", "app"], score=2000
)
def chained_exception_legacy(chained_exception, config, **meta):
# Case 1: we have a single exception, use the single exception
# component directly
exceptions = chained_exception.exceptions()
if len(exceptions) == 1:
return config.get_grouping_component(exceptions[0], **meta)
# Case 2: try to build a new component out of the individual
# errors however with a trick. In case any exception has a
# stacktrace we want to ignore all other exceptions.
any_stacktraces = False
values = []
for exception in exceptions:
exception_component = config.get_grouping_component(exception, **meta)
stacktrace_component = exception_component.get_subcomponent("stacktrace")
if stacktrace_component is not None and stacktrace_component.contributes:
any_stacktraces = True
values.append(exception_component)
if any_stacktraces:
for value in values:
stacktrace_component = value.get_subcomponent("stacktrace")
if stacktrace_component is None or not stacktrace_component.contributes:
value.update(contributes=False, hint="exception has no stacktrace")
return GroupingComponent(id="chained-exception", values=values)
@chained_exception_legacy.variant_processor
def chained_exception_legacy_variant_processor(variants, config, **meta):
return remove_non_stacktrace_variants(variants)
@strategy(id="frame:legacy", interfaces=["frame"], variants=["!system", "app"])
def frame_legacy(frame, event, **meta):
platform = frame.platform or event.platform
# In certain situations we want to disregard the entire frame.
contributes = None
hint = None
# this requires some explanation: older sentry versions did not have
# raw_function but only function. For some platforms like native
# we now instead store a trimmed function name in frame.function
# and the original value is moved to raw_function. This requires us to
# prioritize raw_function over function in the legacy grouping code to
# avoid creating new groups.
func = frame.raw_function or frame.function
# Safari throws [native code] frames in for calls like ``forEach``
# whereas Chrome ignores these. Let's remove it from the hashing algo
# so that they're more likely to group together
filename_component = GroupingComponent(id="filename", similarity_encoder=ident_encoder)
if frame.filename == "<anonymous>":
filename_component.update(
contributes=False, values=[frame.filename], hint="anonymous filename discarded"
)
elif frame.filename == "[native code]":
contributes = False
hint = "native code indicated by filename"
elif frame.filename:
if has_url_origin(frame.abs_path):
filename_component.update(
contributes=False, values=[frame.filename], hint="ignored because filename is a URL"
)
# XXX(dcramer): don't compute hash using frames containing the 'Caused by'
# text as it contains an exception value which may contain dynamic
# values (see raven-java#125)
elif frame.filename.startswith("Caused by: "):
filename_component.update(
values=[frame.filename], contributes=False, hint="ignored because invalid"
)
else:
hashable_filename, hashable_filename_hint = remove_filename_outliers_legacy(
frame.filename, platform
)
filename_component.update(values=[hashable_filename], hint=hashable_filename_hint)
# if we have a module we use that for grouping. This will always
# take precedence over the filename, even if the module is
# considered unhashable.
module_component = GroupingComponent(id="module", similarity_encoder=ident_encoder)
if frame.module:
if is_unhashable_module_legacy(frame, platform):
module_component.update(
values=[
GroupingComponent(
id="salt", values=["<module>"], hint="normalized generated module name"
)
],
hint="ignored module",
)
# <module> still contributes, though it should not contribute to
# similarity
module_component.similarity_encoder = None
else:
module_name, module_hint = remove_module_outliers_legacy(frame.module, platform)
module_component.update(values=[module_name], hint=module_hint)
if frame.filename:
filename_component.update(
values=[frame.filename], contributes=False, hint="module takes precedence"
)
# Context line when available is the primary contributor
context_line_component = GroupingComponent(id="context-line", similarity_encoder=ident_encoder)
if frame.context_line is not None:
if len(frame.context_line) > 120:
context_line_component.update(hint="discarded because line too long")
elif has_url_origin(frame.abs_path) and not func:
context_line_component.update(hint="discarded because from URL origin")
else:
context_line_component.update(values=[frame.context_line])
symbol_component = GroupingComponent(id="symbol", similarity_encoder=ident_encoder)
function_component = GroupingComponent(id="function", similarity_encoder=ident_encoder)
lineno_component = GroupingComponent(id="lineno", similarity_encoder=ident_encoder)
# The context line grouping information is the most reliable one.
# If we did not manage to find some information there, we want to
# see if we can come up with some extra information. We only want
# to do that if we managed to get a module or filename.
if not context_line_component.contributes and (
module_component.contributes or filename_component.contributes
):
if frame.symbol:
symbol_component.update(values=[frame.symbol])
if func:
function_component.update(
contributes=False, values=[func], hint="symbol takes precedence"
)
if frame.lineno:
lineno_component.update(
contributes=False, values=[frame.lineno], hint="symbol takes precedence"
)
elif func:
if is_unhashable_function_legacy(func):
function_component.update(
values=[
GroupingComponent(
id="salt", values=["<function>"], hint="normalized lambda function name"
)
]
)
# <function> still contributes, though it should not contribute to
# similarity
function_component.similarity_encoder = None
else:
function, function_hint = remove_function_outliers_legacy(func)
function_component.update(values=[function], hint=function_hint)
if frame.lineno:
lineno_component.update(
contributes=False, values=[frame.lineno], hint="function takes precedence"
)
elif frame.lineno:
lineno_component.update(values=[frame.lineno])
else:
if context_line_component.contributes:
fallback_hint = "is not used if context-line is available"
else:
fallback_hint = "is not used if module or filename are available"
if frame.symbol:
symbol_component.update(
contributes=False, values=[frame.symbol], hint="symbol " + fallback_hint
)
if func:
function_component.update(
contributes=False, values=[func], hint="function name " + fallback_hint
)
if frame.lineno:
lineno_component.update(
contributes=False, values=[frame.lineno], hint="line number " + fallback_hint
)
return GroupingComponent(
id="frame",
values=[
module_component,
filename_component,
context_line_component,
symbol_component,
function_component,
lineno_component,
],
contributes=contributes,
hint=hint,
)
@strategy(
id="stacktrace:legacy", interfaces=["stacktrace"], variants=["!system", "app"], score=1800
)
def stacktrace_legacy(stacktrace, config, variant, **meta):
frames = stacktrace.frames
contributes = None
hint = None
all_frames_considered_in_app = False
# TODO(dcramer): this should apply only to platform=javascript
# Browser JS will often throw errors (from inlined code in an HTML page)
# which contain only a single frame, no function name, and have the HTML
# document as the filename. In this case the hash is often not usable as
# the context cannot be trusted and the URL is dynamic (this also means
# the line number cannot be trusted).
if len(frames) == 1 and not frames[0].function and frames[0].is_url():
contributes = False
hint = "ignored single frame stack"
elif variant == "app":
total_frames = len(frames)
in_app_count = sum(1 if f.in_app else 0 for f in frames)
if in_app_count == 0:
in_app_count = total_frames
all_frames_considered_in_app = True
# if app frames make up less than 10% of the stacktrace discard
# the hash as invalid
if total_frames > 0 and in_app_count / float(total_frames) < 0.10:
contributes = False
hint = "less than 10% of frames are in-app"
values = []
prev_frame = None
frames_for_filtering = []
for frame in frames:
frame_component = config.get_grouping_component(frame, variant=variant, **meta)
if variant == "app" and not frame.in_app and not all_frames_considered_in_app:
frame_component.update(contributes=False, hint="non app frame")
elif prev_frame is not None and is_recursion_legacy(frame, prev_frame):
frame_component.update(contributes=False, hint="ignored due to recursion")
elif variant == "app" and not frame.in_app and all_frames_considered_in_app:
frame_component.update(hint="frame considered in-app because no frame is in-app")
values.append(frame_component)
frames_for_filtering.append(frame.get_raw_data())
prev_frame = frame
rv = config.enhancements.assemble_stacktrace_component(
values, frames_for_filtering, meta["event"].platform
)
rv.update(contributes=contributes, hint=hint)
return rv
@strategy(id="threads:legacy", interfaces=["threads"], variants=["!system", "app"], score=1900)
def threads_legacy(threads_interface, config, **meta):
thread_count = len(threads_interface.values)
if thread_count != 1:
return GroupingComponent(
id="threads",
contributes=False,
hint="ignored because contains %d threads" % thread_count,
)
stacktrace = threads_interface.values[0].get("stacktrace")
if not stacktrace:
return GroupingComponent(id="threads", contributes=False, hint="thread has no stacktrace")
return GroupingComponent(
id="threads", values=[config.get_grouping_component(stacktrace, **meta)]
)
|
|
from setuptools import setup, find_packages
from mfr import __version__
def parse_requirements(requirements):
with open(requirements) as f:
return [l.strip('\n') for l in f if l.strip('\n') and not l.startswith('#')]
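# Sketch of the parsing above for a hypothetical requirements.txt containing
# "# pinned deps", "aiohttp==3.8.1" and a blank line:
#   parse_requirements('requirements.txt') == ['aiohttp==3.8.1']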
requirements = parse_requirements('requirements.txt')
setup(
name='mfr',
version=__version__,
namespace_packages=['mfr', 'mfr.extensions', 'mfr.providers'],
description='Modular File Renderer',
author='Center for Open Science',
author_email='[email protected]',
url='https://github.com/CenterForOpenScience/modular-file-renderer',
packages=find_packages(exclude=("tests*", )),
package_dir={'mfr': 'mfr'},
include_package_data=True,
# install_requires=requirements,
zip_safe=False,
classifiers=[
'Natural Language :: English',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
],
provides=[
'mfr.extensions',
'mfr.providers',
],
entry_points={
'mfr.providers': [
'http = mfr.providers.http:HttpProvider',
'osf = mfr.providers.osf:OsfProvider',
],
'mfr.exporters': [
# google docs
'.gdraw = mfr.extensions.unoconv:UnoconvExporter',
'.gdoc = mfr.extensions.unoconv:UnoconvExporter',
'.gsheet = mfr.extensions.unoconv:UnoconvExporter',
'.gslides = mfr.extensions.unoconv:UnoconvExporter',
# image
'.jpg = mfr.extensions.image:ImageExporter',
'.jpeg = mfr.extensions.image:ImageExporter',
'.png = mfr.extensions.image:ImageExporter',
'.bmp = mfr.extensions.image:ImageExporter',
'.gif = mfr.extensions.image:ImageExporter',
'.tif = mfr.extensions.image:ImageExporter',
'.tiff = mfr.extensions.image:ImageExporter',
# unoconv
# '.bib = mfr.extensions.unoconv:UnoconvExporter',
# '.bmp = mfr.extensions.unoconv:UnoconvExporter',
# '.csv = mfr.extensions.unoconv:UnoconvExporter',
'.dbf = mfr.extensions.unoconv:UnoconvExporter',
'.dif = mfr.extensions.unoconv:UnoconvExporter',
'.doc = mfr.extensions.unoconv:UnoconvExporter',
'.docx = mfr.extensions.unoconv:UnoconvExporter',
'.emf = mfr.extensions.unoconv:UnoconvExporter',
'.eps = mfr.extensions.unoconv:UnoconvExporter',
'.fodg = mfr.extensions.unoconv:UnoconvExporter',
'.fodp = mfr.extensions.unoconv:UnoconvExporter',
'.fods = mfr.extensions.unoconv:UnoconvExporter',
'.fodt = mfr.extensions.unoconv:UnoconvExporter',
# '.gif = mfr.extensions.unoconv:UnoconvExporter',
# '.html = mfr.extensions.unoconv:UnoconvExporter',
# '.jpg = mfr.extensions.unoconv:UnoconvExporter',
# '.ltx = mfr.extensions.unoconv:UnoconvExporter',
'.met = mfr.extensions.unoconv:UnoconvExporter',
'.odd = mfr.extensions.unoconv:UnoconvExporter',
'.odg = mfr.extensions.unoconv:UnoconvExporter',
'.odp = mfr.extensions.unoconv:UnoconvExporter',
'.ods = mfr.extensions.unoconv:UnoconvExporter',
'.odt = mfr.extensions.unoconv:UnoconvExporter',
'.otg = mfr.extensions.unoconv:UnoconvExporter',
'.otp = mfr.extensions.unoconv:UnoconvExporter',
'.ots = mfr.extensions.unoconv:UnoconvExporter',
'.ott = mfr.extensions.unoconv:UnoconvExporter',
'.pbm = mfr.extensions.unoconv:UnoconvExporter',
'.pct = mfr.extensions.unoconv:UnoconvExporter',
# '.pdb = mfr.extensions.unoconv:UnoconvExporter',
# '.pdf = mfr.extensions.unoconv:UnoconvExporter',
'.pgm = mfr.extensions.unoconv:UnoconvExporter',
# '.png = mfr.extensions.unoconv:UnoconvExporter',
'.pot = mfr.extensions.unoconv:UnoconvExporter',
'.potm = mfr.extensions.unoconv:UnoconvExporter',
'.ppm = mfr.extensions.unoconv:UnoconvExporter',
'.pps = mfr.extensions.unoconv:UnoconvExporter',
'.ppt = mfr.extensions.unoconv:UnoconvExporter',
'.pptx = mfr.extensions.unoconv:UnoconvExporter',
'.psw = mfr.extensions.unoconv:UnoconvExporter',
'.pwp = mfr.extensions.unoconv:UnoconvExporter',
'.pxl = mfr.extensions.unoconv:UnoconvExporter',
'.ras = mfr.extensions.unoconv:UnoconvExporter',
'.rtf = mfr.extensions.unoconv:UnoconvExporter',
'.sda = mfr.extensions.unoconv:UnoconvExporter',
'.sdc = mfr.extensions.unoconv:UnoconvExporter',
'.sdd = mfr.extensions.unoconv:UnoconvExporter',
'.sdw = mfr.extensions.unoconv:UnoconvExporter',
'.slk = mfr.extensions.unoconv:UnoconvExporter',
'.stc = mfr.extensions.unoconv:UnoconvExporter',
'.std = mfr.extensions.unoconv:UnoconvExporter',
'.sti = mfr.extensions.unoconv:UnoconvExporter',
'.stw = mfr.extensions.unoconv:UnoconvExporter',
'.svg = mfr.extensions.unoconv:UnoconvExporter',
'.svm = mfr.extensions.unoconv:UnoconvExporter',
'.swf = mfr.extensions.unoconv:UnoconvExporter',
'.sxc = mfr.extensions.unoconv:UnoconvExporter',
'.sxd = mfr.extensions.unoconv:UnoconvExporter',
'.sxi = mfr.extensions.unoconv:UnoconvExporter',
'.sxw = mfr.extensions.unoconv:UnoconvExporter',
# '.tiff = mfr.extensions.unoconv:UnoconvExporter',
# '.txt = mfr.extensions.unoconv:UnoconvExporter',
'.uop = mfr.extensions.unoconv:UnoconvExporter',
'.uos = mfr.extensions.unoconv:UnoconvExporter',
'.uot = mfr.extensions.unoconv:UnoconvExporter',
'.vor = mfr.extensions.unoconv:UnoconvExporter',
'.wmf = mfr.extensions.unoconv:UnoconvExporter',
'.wps = mfr.extensions.unoconv:UnoconvExporter',
# '.xhtml = mfr.extensions.unoconv:UnoconvExporter',
# '.xls = mfr.extensions.unoconv:UnoconvExporter',
# '.xlsx = mfr.extensions.unoconv:UnoconvExporter',
'.xlt = mfr.extensions.unoconv:UnoconvExporter',
# '.xml = mfr.extensions.unoconv:UnoconvExporter',
'.xpm = mfr.extensions.unoconv:UnoconvExporter',
],
'mfr.renderers': [
# audio
'.mp3 = mfr.extensions.audio:AudioRenderer',
'.ogg = mfr.extensions.audio:AudioRenderer',
'.wav = mfr.extensions.audio:AudioRenderer',
# code pygments
'none = mfr.extensions.codepygments:CodePygmentsRenderer',
'.1 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.2 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.3 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.4 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.5 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.6 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.6pm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.6pl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.7 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.abap = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ada = mfr.extensions.codepygments:CodePygmentsRenderer',
'.adb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ads = mfr.extensions.codepygments:CodePygmentsRenderer',
'.agda = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ahk = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ahkl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.aj = mfr.extensions.codepygments:CodePygmentsRenderer',
'.als = mfr.extensions.codepygments:CodePygmentsRenderer',
'.apl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.applescript = mfr.extensions.codepygments:CodePygmentsRenderer',
'.arexx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.as = mfr.extensions.codepygments:CodePygmentsRenderer',
'.asax = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ascx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ashx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.asm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.asmx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.aspx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.asy = mfr.extensions.codepygments:CodePygmentsRenderer',
'.at = mfr.extensions.codepygments:CodePygmentsRenderer',
'.au3 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.aux = mfr.extensions.codepygments:CodePygmentsRenderer',
'.awk = mfr.extensions.codepygments:CodePygmentsRenderer',
'.axd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.b = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bas = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bash = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bat = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.befunge = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bib = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bmx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.boo = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bro = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bug = mfr.extensions.codepygments:CodePygmentsRenderer',
'.c = mfr.extensions.codepygments:CodePygmentsRenderer',
'.c++ = mfr.extensions.codepygments:CodePygmentsRenderer',
'.c++-objdump = mfr.extensions.codepygments:CodePygmentsRenderer',
'.c-objdump = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cbl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cdf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ceylon = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cfc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cfg = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cfm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cfml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.chai = mfr.extensions.codepygments:CodePygmentsRenderer',
'.chpl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cirru = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.clay = mfr.extensions.codepygments:CodePygmentsRenderer',
'.clj = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cljs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cls = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cmake = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cmd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cob = mfr.extensions.codepygments:CodePygmentsRenderer',
'.coffee = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cpp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cpp-objdump = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cpy = mfr.extensions.codepygments:CodePygmentsRenderer',
'.croc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cry = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.csh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.css = mfr.extensions.codepygments:CodePygmentsRenderer',
'.css.in = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cu = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cuh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cw = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cxx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cxx-objdump = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cyp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.cypher = mfr.extensions.codepygments:CodePygmentsRenderer',
'.d = mfr.extensions.codepygments:CodePygmentsRenderer',
'.d-objdump = mfr.extensions.codepygments:CodePygmentsRenderer',
'.darcspatch = mfr.extensions.codepygments:CodePygmentsRenderer',
'.dart = mfr.extensions.codepygments:CodePygmentsRenderer',
'.decls = mfr.extensions.codepygments:CodePygmentsRenderer',
'.def = mfr.extensions.codepygments:CodePygmentsRenderer',
'.dg = mfr.extensions.codepygments:CodePygmentsRenderer',
'.di = mfr.extensions.codepygments:CodePygmentsRenderer',
'.diff = mfr.extensions.codepygments:CodePygmentsRenderer',
'.docker = mfr.extensions.codepygments:CodePygmentsRenderer',
'.dpatch = mfr.extensions.codepygments:CodePygmentsRenderer',
'.dtd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.duby = mfr.extensions.codepygments:CodePygmentsRenderer',
'.duel = mfr.extensions.codepygments:CodePygmentsRenderer',
'.dyl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.dylan = mfr.extensions.codepygments:CodePygmentsRenderer',
'.dylan-console = mfr.extensions.codepygments:CodePygmentsRenderer',
'.e = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ebnf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ebuild = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ec = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ecl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.eclass = mfr.extensions.codepygments:CodePygmentsRenderer',
'.eh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.el = mfr.extensions.codepygments:CodePygmentsRenderer',
# '.eps = mfr.extensions.codepygments:CodePygmentsRenderer',
'.erl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.erl-sh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.es = mfr.extensions.codepygments:CodePygmentsRenderer',
'.escript = mfr.extensions.codepygments:CodePygmentsRenderer',
'.evoque = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ex = mfr.extensions.codepygments:CodePygmentsRenderer',
'.exs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.f = mfr.extensions.codepygments:CodePygmentsRenderer',
'.f90 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.factor = mfr.extensions.codepygments:CodePygmentsRenderer',
'.fan = mfr.extensions.codepygments:CodePygmentsRenderer',
'.fancypack = mfr.extensions.codepygments:CodePygmentsRenderer',
'.feature = mfr.extensions.codepygments:CodePygmentsRenderer',
'.fhtml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.flx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.flxh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.frag = mfr.extensions.codepygments:CodePygmentsRenderer',
'.fs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.fsi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.fun = mfr.extensions.codepygments:CodePygmentsRenderer',
'.fy = mfr.extensions.codepygments:CodePygmentsRenderer',
'.g = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gap = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gdc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gemspec = mfr.extensions.codepygments:CodePygmentsRenderer',
'.geo = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.go = mfr.extensions.codepygments:CodePygmentsRenderer',
'.golo = mfr.extensions.codepygments:CodePygmentsRenderer',
'.groovy = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gsp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gst = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gsx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.h = mfr.extensions.codepygments:CodePygmentsRenderer',
'.h++ = mfr.extensions.codepygments:CodePygmentsRenderer',
'.haml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.handlebars = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hbs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hdp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hpp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hrl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.htm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.html = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hxml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hxsl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hxx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hy = mfr.extensions.codepygments:CodePygmentsRenderer',
'.hyb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.i = mfr.extensions.codepygments:CodePygmentsRenderer',
'.i6t = mfr.extensions.codepygments:CodePygmentsRenderer',
'.i7x = mfr.extensions.codepygments:CodePygmentsRenderer',
'.idc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.idr = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ik = mfr.extensions.codepygments:CodePygmentsRenderer',
'.inc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.inf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ini = mfr.extensions.codepygments:CodePygmentsRenderer',
'.intr = mfr.extensions.codepygments:CodePygmentsRenderer',
'.io = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ipf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.j = mfr.extensions.codepygments:CodePygmentsRenderer',
'.jade = mfr.extensions.codepygments:CodePygmentsRenderer',
'.jag = mfr.extensions.codepygments:CodePygmentsRenderer',
'.java = mfr.extensions.codepygments:CodePygmentsRenderer',
'.jbst = mfr.extensions.codepygments:CodePygmentsRenderer',
'.jl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.js = mfr.extensions.codepygments:CodePygmentsRenderer',
'.js.in = mfr.extensions.codepygments:CodePygmentsRenderer',
'.json = mfr.extensions.codepygments:CodePygmentsRenderer',
'.jsonld = mfr.extensions.codepygments:CodePygmentsRenderer',
'.jsp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.jsx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.kal = mfr.extensions.codepygments:CodePygmentsRenderer',
'.kid = mfr.extensions.codepygments:CodePygmentsRenderer',
'.kk = mfr.extensions.codepygments:CodePygmentsRenderer',
'.kki = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ksh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.kt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lagda = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lasso = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lasso[89] = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lcry = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lean = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lgt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lhs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lid = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lidr = mfr.extensions.codepygments:CodePygmentsRenderer',
'.liquid = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lisp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ll = mfr.extensions.codepygments:CodePygmentsRenderer',
'.log = mfr.extensions.codepygments:CodePygmentsRenderer',
'.logtalk = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ls = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lsl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lsp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ltx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.lua = mfr.extensions.codepygments:CodePygmentsRenderer',
'.m = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ma = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mak = mfr.extensions.codepygments:CodePygmentsRenderer',
'.man = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mao = mfr.extensions.codepygments:CodePygmentsRenderer',
'.maql = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mask = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mhtml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mk = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mli = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mll = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mly = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mo = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mod = mfr.extensions.codepygments:CodePygmentsRenderer',
'.monkey = mfr.extensions.codepygments:CodePygmentsRenderer',
'.moo = mfr.extensions.codepygments:CodePygmentsRenderer',
'.moon = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mq4 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mq5 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mqh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.msc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mu = mfr.extensions.codepygments:CodePygmentsRenderer',
'.mxml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.myt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.n = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nbp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ni = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nim = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nimrod = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nit = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nix = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nqp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ns2 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nsh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.nsi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.objdump = mfr.extensions.codepygments:CodePygmentsRenderer',
'.objdump-intel = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ooc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.opa = mfr.extensions.codepygments:CodePygmentsRenderer',
'.p = mfr.extensions.codepygments:CodePygmentsRenderer',
'.p6 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.p6l = mfr.extensions.codepygments:CodePygmentsRenderer',
'.p6m = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pan = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pas = mfr.extensions.codepygments:CodePygmentsRenderer',
'.patch = mfr.extensions.codepygments:CodePygmentsRenderer',
'.php = mfr.extensions.codepygments:CodePygmentsRenderer',
'.php[345] = mfr.extensions.codepygments:CodePygmentsRenderer',
'.phtml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pig = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pike = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pl6 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.plot = mfr.extensions.codepygments:CodePygmentsRenderer',
'.plt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pm6 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pmod = mfr.extensions.codepygments:CodePygmentsRenderer',
'.po = mfr.extensions.codepygments:CodePygmentsRenderer',
# '.pot = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pov = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.prg = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pro = mfr.extensions.codepygments:CodePygmentsRenderer',
'.prolog = mfr.extensions.codepygments:CodePygmentsRenderer',
'.properties = mfr.extensions.codepygments:CodePygmentsRenderer',
'.proto = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ps = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ps1 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.psm1 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pwn = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pxd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pxi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.py = mfr.extensions.codepygments:CodePygmentsRenderer',
'.py3tb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pypylog = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pytb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pyw = mfr.extensions.codepygments:CodePygmentsRenderer',
'.pyx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.qml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.r = mfr.extensions.codepygments:CodePygmentsRenderer',
'.r3 = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rake = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rbw = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rbx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rdf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.reb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.red = mfr.extensions.codepygments:CodePygmentsRenderer',
'.reds = mfr.extensions.codepygments:CodePygmentsRenderer',
'.reg = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rest = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rex = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rexx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rhtml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ris = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rkt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rktd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rktl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.robot = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rout = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rpf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rq = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rql = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rs = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rsl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rss = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rvt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rx = mfr.extensions.codepygments:CodePygmentsRenderer',
'.s = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sage = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sass = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.scad = mfr.extensions.codepygments:CodePygmentsRenderer',
'.scala = mfr.extensions.codepygments:CodePygmentsRenderer',
'.scaml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sce = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sci = mfr.extensions.codepygments:CodePygmentsRenderer',
'.scm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.scss = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sh-session = mfr.extensions.codepygments:CodePygmentsRenderer',
'.shell-session = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sig = mfr.extensions.codepygments:CodePygmentsRenderer',
'.slim = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sls = mfr.extensions.codepygments:CodePygmentsRenderer',
'.smali = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.snobol = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sparql = mfr.extensions.codepygments:CodePygmentsRenderer',
'.spec = mfr.extensions.codepygments:CodePygmentsRenderer',
'.spt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sql = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sqlite3-console = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ss = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ssp = mfr.extensions.codepygments:CodePygmentsRenderer',
'.st = mfr.extensions.codepygments:CodePygmentsRenderer',
'.stan = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sv = mfr.extensions.codepygments:CodePygmentsRenderer',
'.svh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.swg = mfr.extensions.codepygments:CodePygmentsRenderer',
'.swift = mfr.extensions.codepygments:CodePygmentsRenderer',
'.t = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tac = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tcl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tcsh = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tea = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tex = mfr.extensions.codepygments:CodePygmentsRenderer',
'.thy = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tmpl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.toc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.todotxt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tpl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.treetop = mfr.extensions.codepygments:CodePygmentsRenderer',
'.ts = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tst = mfr.extensions.codepygments:CodePygmentsRenderer',
'.tt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.twig = mfr.extensions.codepygments:CodePygmentsRenderer',
'.txt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rmd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.do = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sas = mfr.extensions.codepygments:CodePygmentsRenderer',
'.sps = mfr.extensions.codepygments:CodePygmentsRenderer',
'.u = mfr.extensions.codepygments:CodePygmentsRenderer',
'.v = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vala = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vapi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vark = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vb = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vert = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vhd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vhdl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vim = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.weechatlog = mfr.extensions.codepygments:CodePygmentsRenderer',
'.wlua = mfr.extensions.codepygments:CodePygmentsRenderer',
'.wsdl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.wsf = mfr.extensions.codepygments:CodePygmentsRenderer',
'.x = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xhtml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xmi = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xpl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xq = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xql = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xqm = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xquery = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xqy = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xsd = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xsl = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xslt = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xtend = mfr.extensions.codepygments:CodePygmentsRenderer',
'.xul.in = mfr.extensions.codepygments:CodePygmentsRenderer',
'.yaml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.yml = mfr.extensions.codepygments:CodePygmentsRenderer',
'.zep = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.*config.in* = mfr.extensions.codepygments:CodePygmentsRenderer',
'.renviron = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rhistory = mfr.extensions.codepygments:CodePygmentsRenderer',
'.rprofile = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.bash_* = mfr.extensions.codepygments:CodePygmentsRenderer',
'.bashrc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.exrc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.gvimrc = mfr.extensions.codepygments:CodePygmentsRenderer',
'.htaccess = mfr.extensions.codepygments:CodePygmentsRenderer',
'.vimrc = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.cmakelists.txt = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.dockerfile = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.gnumakefile = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.kconfig = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.makefile = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.makefile.* = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.pkgbuild = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.rakefile = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.sconscript = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.sconstruct = mfr.extensions.codepygments:CodePygmentsRenderer',
#'._exrc = mfr.extensions.codepygments:CodePygmentsRenderer',
#'._gvimrc = mfr.extensions.codepygments:CodePygmentsRenderer',
#'._vimrc = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.apache.conf = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.apache2.conf = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.autodelegate = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.autohandler = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.bash_* = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.bashrc = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.control = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.dhandler = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.external.in* = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.gvimrc = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.makefile = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.sources.list = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.squid.conf = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.standard-modules.in = mfr.extensions.codepygments:CodePygmentsRenderer',
#'.todo.txt = mfr.extensions.codepygments:CodePygmentsRenderer',
            #'.vimrc = mfr.extensions.codepygments:CodePygmentsRenderer',
# docx
# '.docx = mfr.extensions.docx:DocxRenderer',
# google docs
'.gdraw = mfr.extensions.image:ImageRenderer',
'.gdoc = mfr.extensions.unoconv:UnoconvRenderer',
'.gsheet = mfr.extensions.tabular:TabularRenderer',
'.gslides = mfr.extensions.unoconv:UnoconvRenderer',
# image
'.bmp = mfr.extensions.image:ImageRenderer',
'.jpg = mfr.extensions.image:ImageRenderer',
'.jpeg = mfr.extensions.image:ImageRenderer',
'.gif = mfr.extensions.image:ImageRenderer',
'.ico = mfr.extensions.image:ImageRenderer',
'.png = mfr.extensions.image:ImageRenderer',
'.tif = mfr.extensions.image:ImageRenderer',
'.tiff = mfr.extensions.image:ImageRenderer',
# ipynb
'.ipynb = mfr.extensions.ipynb:IpynbRenderer',
# md
'.md = mfr.extensions.md:MdRenderer',
'.markdown = mfr.extensions.md:MdRenderer',
# jsc3d
'.3ds = mfr.extensions.jsc3d:JSC3DRenderer',
'.ctm = mfr.extensions.jsc3d:JSC3DRenderer',
'.obj = mfr.extensions.jsc3d:JSC3DRenderer',
'.stl = mfr.extensions.jsc3d:JSC3DRenderer',
# pdb
'.pdb = mfr.extensions.pdb:PdbRenderer',
# pdf
'.pdf = mfr.extensions.pdf:PdfRenderer',
# rst
'.rst = mfr.extensions.rst:RstRenderer',
# svg
# '.svg = mfr.extensions.svg:SvgRenderer',
# tabular
'.csv = mfr.extensions.tabular:TabularRenderer',
'.tsv = mfr.extensions.tabular:TabularRenderer',
'.xls = mfr.extensions.tabular:TabularRenderer',
'.xlsx = mfr.extensions.tabular:TabularRenderer',
#'.dta = mfr.extensions.tabular:TabularRenderer',
'.sav = mfr.extensions.tabular:TabularRenderer',
#'.ods = mfr.extensions.tabular:TabularRenderer',
# unoconv
# '.bib = mfr.extensions.unoconv:UnoconvRenderer',
# '.bmp = mfr.extensions.unoconv:UnoconvRenderer',
# '.csv = mfr.extensions.unoconv:UnoconvRenderer',
'.dbf = mfr.extensions.unoconv:UnoconvRenderer',
'.dif = mfr.extensions.unoconv:UnoconvRenderer',
'.doc = mfr.extensions.unoconv:UnoconvRenderer',
'.docx = mfr.extensions.unoconv:UnoconvRenderer',
'.emf = mfr.extensions.unoconv:UnoconvRenderer',
'.eps = mfr.extensions.unoconv:UnoconvRenderer',
'.fodg = mfr.extensions.unoconv:UnoconvRenderer',
'.fodp = mfr.extensions.unoconv:UnoconvRenderer',
'.fods = mfr.extensions.unoconv:UnoconvRenderer',
'.fodt = mfr.extensions.unoconv:UnoconvRenderer',
# '.gif = mfr.extensions.unoconv:UnoconvRenderer',
# '.html = mfr.extensions.unoconv:UnoconvRenderer',
# '.jpg = mfr.extensions.unoconv:UnoconvRenderer',
# '.ltx = mfr.extensions.unoconv:UnoconvRenderer',
'.met = mfr.extensions.unoconv:UnoconvRenderer',
'.odd = mfr.extensions.unoconv:UnoconvRenderer',
'.odg = mfr.extensions.unoconv:UnoconvRenderer',
'.odp = mfr.extensions.unoconv:UnoconvRenderer',
'.ods = mfr.extensions.unoconv:UnoconvRenderer',
'.odt = mfr.extensions.unoconv:UnoconvRenderer',
'.otg = mfr.extensions.unoconv:UnoconvRenderer',
'.otp = mfr.extensions.unoconv:UnoconvRenderer',
'.ots = mfr.extensions.unoconv:UnoconvRenderer',
'.ott = mfr.extensions.unoconv:UnoconvRenderer',
'.pbm = mfr.extensions.unoconv:UnoconvRenderer',
'.pct = mfr.extensions.unoconv:UnoconvRenderer',
# '.pdb = mfr.extensions.unoconv:UnoconvRenderer',
# '.pdf = mfr.extensions.unoconv:UnoconvRenderer',
'.pgm = mfr.extensions.unoconv:UnoconvRenderer',
# '.png = mfr.extensions.unoconv:UnoconvRenderer',
'.pot = mfr.extensions.unoconv:UnoconvRenderer',
'.potm = mfr.extensions.unoconv:UnoconvRenderer',
'.ppm = mfr.extensions.unoconv:UnoconvRenderer',
'.pps = mfr.extensions.unoconv:UnoconvRenderer',
'.ppt = mfr.extensions.unoconv:UnoconvRenderer',
'.pptx = mfr.extensions.unoconv:UnoconvRenderer',
'.psw = mfr.extensions.unoconv:UnoconvRenderer',
'.pwp = mfr.extensions.unoconv:UnoconvRenderer',
'.pxl = mfr.extensions.unoconv:UnoconvRenderer',
'.ras = mfr.extensions.unoconv:UnoconvRenderer',
'.rtf = mfr.extensions.unoconv:UnoconvRenderer',
'.sda = mfr.extensions.unoconv:UnoconvRenderer',
'.sdc = mfr.extensions.unoconv:UnoconvRenderer',
'.sdd = mfr.extensions.unoconv:UnoconvRenderer',
'.sdw = mfr.extensions.unoconv:UnoconvRenderer',
'.slk = mfr.extensions.unoconv:UnoconvRenderer',
'.stc = mfr.extensions.unoconv:UnoconvRenderer',
'.std = mfr.extensions.unoconv:UnoconvRenderer',
'.sti = mfr.extensions.unoconv:UnoconvRenderer',
'.stw = mfr.extensions.unoconv:UnoconvRenderer',
'.svg = mfr.extensions.unoconv:UnoconvRenderer',
'.svm = mfr.extensions.unoconv:UnoconvRenderer',
'.swf = mfr.extensions.unoconv:UnoconvRenderer',
'.sxc = mfr.extensions.unoconv:UnoconvRenderer',
'.sxd = mfr.extensions.unoconv:UnoconvRenderer',
'.sxi = mfr.extensions.unoconv:UnoconvRenderer',
'.sxw = mfr.extensions.unoconv:UnoconvRenderer',
# '.tiff = mfr.extensions.unoconv:UnoconvRenderer',
# '.txt = mfr.extensions.unoconv:UnoconvRenderer',
'.uop = mfr.extensions.unoconv:UnoconvRenderer',
'.uos = mfr.extensions.unoconv:UnoconvRenderer',
'.uot = mfr.extensions.unoconv:UnoconvRenderer',
'.vor = mfr.extensions.unoconv:UnoconvRenderer',
'.wmf = mfr.extensions.unoconv:UnoconvRenderer',
'.wps = mfr.extensions.unoconv:UnoconvRenderer',
# '.xhtml = mfr.extensions.unoconv:UnoconvRenderer',
# '.xls = mfr.extensions.unoconv:UnoconvRenderer',
# '.xlsx = mfr.extensions.unoconv:UnoconvRenderer',
'.xlt = mfr.extensions.unoconv:UnoconvRenderer',
# '.xml = mfr.extensions.unoconv:UnoconvRenderer',
'.xpm = mfr.extensions.unoconv:UnoconvRenderer',
# video
'.mp4 = mfr.extensions.video:VideoRenderer',
'.m4v = mfr.extensions.video:VideoRenderer',
#'.avi = mfr.extensions.video:VideoRenderer',
'.ogv = mfr.extensions.video:VideoRenderer',
#'.wmv = mfr.extensions.video:VideoRenderer',
'.webm = mfr.extensions.video:VideoRenderer',
# JASP
'.jasp = mfr.extensions.jasp:JASPRenderer'
]
},
)
|
|
#!/usr/bin/env python
"""Doxygen XML to SWIG docstring converter.
Converts Doxygen generated XML files into a file containing docstrings
that can be used by SWIG-1.3.x. Note that you need to get SWIG
version > 1.3.23 or use Robin Dunn's docstring patch to be able to use
the resulting output.
Usage:
doxy2swig.py input.xml output.i
input.xml is your doxygen generated XML file and output.i is where the
output will be written (the file will be clobbered).
"""
# This code is implemented using Mark Pilgrim's code as a guideline:
# http://www.faqs.org/docs/diveintopython/kgp_divein.html
#
# Author: Prabhu Ramachandran
# License: BSD style
# This version of the script originated from ITK/Wrapping/Generators/Doc/doxy2swig.py.
# My mods:
# self.multi is always 1 (0 was cutting lines improperly)
# added self.java to enable output for JavaDocs
#
# Dave Chen
from xml.dom import minidom
import re
import textwrap
import sys
import types
import os.path
def my_open_read(source):
if hasattr(source, "read"):
return source
else:
return open(source)
def my_open_write(dest, mode='w'):
if hasattr(dest, "write"):
return dest
else:
return open(dest, mode)
class Doxy2SWIG:
"""Converts Doxygen generated XML files into a file containing
    docstrings that can be used by SWIG-1.3.x releases that support
    %feature("docstring"). Once the data is parsed it is stored in
self.pieces.
"""
def __init__(self, src, javaFlag=0):
"""Initialize the instance given a source object (file or
filename).
"""
f = my_open_read(src)
self.my_dir = os.path.dirname(f.name)
self.xmldoc = minidom.parse(f).documentElement
f.close()
self.pieces = []
# self.pieces.append('\n// File: %s\n'%os.path.basename(f.name))
self.space_re = re.compile(r'\s+')
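        # lead_spc matches stray whitespace that textwrap.fill() can leave just
        # after the opening quote of a '%feature("docstring") ... "' line;
        # clean_pieces() uses it to reattach the quote to the first word.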
self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
self.java = javaFlag
# self.multi was 0, but it was cutting lines improperly -Dave Chen
self.multi = 1
self.ignores = ('inheritancegraph', 'param', 'listofallmembers',
'innerclass', 'name', 'declname', 'incdepgraph',
'invincdepgraph', 'programlisting', 'type',
'references', 'referencedby', 'location',
'collaborationgraph', 'reimplements',
'reimplementedby', 'derivedcompoundref',
'basecompoundref')
#self.generics = []
def generate(self):
"""Parses the file set in the initialization. The resulting
data is stored in `self.pieces`.
"""
self.parse(self.xmldoc)
def parse(self, node):
"""Parse a given node. This function in turn calls the
`parse_<nodeType>` functions which handle the respective
nodes.
"""
pm = getattr(self, "parse_%s"%node.__class__.__name__)
pm(node)
def parse_Document(self, node):
self.parse(node.documentElement)
def parse_Text(self, node):
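        # Escape backslashes and double quotes so the emitted text remains a
        # valid SWIG string literal, and skip nodes that are pure whitespace.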
txt = node.data
txt = txt.replace('\\', r'\\\\')
txt = txt.replace('"', r'\"')
# ignore pure whitespace
m = self.space_re.match(txt)
if m and len(m.group()) == len(txt):
pass
else:
self.add_text(textwrap.fill(txt))
def parse_Element(self, node):
"""Parse an `ELEMENT_NODE`. This calls specific
        `do_<tagName>` handlers for different elements. If no handler
is available the `generic_parse` method is called. All
tagNames specified in `self.ignores` are simply ignored.
"""
name = node.tagName
ignores = self.ignores
if name in ignores:
return
attr = "do_%s" % name
if hasattr(self, attr):
handlerMethod = getattr(self, attr)
handlerMethod(node)
else:
self.generic_parse(node)
#if name not in self.generics: self.generics.append(name)
def add_text(self, value):
"""Adds text corresponding to `value` into `self.pieces`."""
if type(value) in (list, tuple):
self.pieces.extend(value)
else:
self.pieces.append(value)
def get_specific_nodes(self, node, names):
"""Given a node and a sequence of strings in `names`, return a
        dictionary mapping those names to the child `ELEMENT_NODE`s
        whose `tagName` equals the name.
"""
nodes = [(x.tagName, x) for x in node.childNodes \
if x.nodeType == x.ELEMENT_NODE and \
x.tagName in names]
return dict(nodes)
def generic_parse(self, node, pad=0):
"""A Generic parser for arbitrary tags in a node.
Parameters:
- node: A node in the DOM.
- pad: `int` (default: 0)
If 0 the node data is not padded with newlines. If 1 it
appends a newline after parsing the childNodes. If 2 it
pads before and after the nodes are processed. Defaults to
0.
"""
npiece = 0
if pad:
npiece = len(self.pieces)
if pad == 2:
self.add_text('\n')
for n in node.childNodes:
self.parse(n)
if pad:
if len(self.pieces) > npiece:
self.add_text('\n')
def space_parse(self, node):
self.add_text(' ')
self.generic_parse(node)
do_ref = space_parse
do_emphasis = space_parse
do_bold = space_parse
do_computeroutput = space_parse
do_formula = space_parse
def do_compoundname(self, node):
self.add_text('\n\n')
data = node.firstChild.data
if self.java:
self.add_text('%%typemap(javaimports) %s "/**\n'%data)
else:
self.add_text('%%feature("docstring") %s "\n'%data)
def do_compounddef(self, node):
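        # Only public classes and structs get a class-level docstring; for
        # files and namespaces only their <sectiondef> children are parsed.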
kind = node.attributes['kind'].value
if kind in ('class', 'struct'):
prot = node.attributes['prot'].value
if prot != 'public':
return
names = ('compoundname', 'briefdescription',
'detaileddescription', 'includes')
first = self.get_specific_nodes(node, names)
for n in names:
if n in first:
self.parse(first[n])
if self.java:
self.add_text(['*/"','\n'])
else:
self.add_text(['";','\n'])
for n in node.childNodes:
if n not in list(first.values()):
self.parse(n)
elif kind in ('file', 'namespace'):
nodes = node.getElementsByTagName('sectiondef')
for n in nodes:
self.parse(n)
def do_includes(self, node):
self.add_text('C++ includes: ')
self.generic_parse(node, pad=1)
def do_parameterlist(self, node):
self.add_text(['\n', '\n', 'Parameters:', '\n'])
self.generic_parse(node, pad=1)
def do_para(self, node):
self.add_text('\n')
self.generic_parse(node, pad=1)
def do_parametername(self, node):
self.add_text('\n')
self.add_text("%s: "%node.firstChild.data)
def do_parameterdefinition(self, node):
self.generic_parse(node, pad=1)
def do_detaileddescription(self, node):
self.generic_parse(node, pad=1)
def do_briefdescription(self, node):
self.generic_parse(node, pad=1)
def do_memberdef(self, node):
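        # Emit a %feature("docstring") (or %javamethodmodifiers in Java mode)
        # entry for every public, non-operator member, qualified by its
        # enclosing namespace or class.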
prot = node.attributes['prot'].value
id = node.attributes['id'].value
kind = node.attributes['kind'].value
tmp = node.parentNode.parentNode.parentNode
compdef = tmp.getElementsByTagName('compounddef')[0]
cdef_kind = compdef.attributes['kind'].value
if prot == 'public':
first = self.get_specific_nodes(node, ('definition', 'name'))
name = first['name'].firstChild.data
if name[:8] == 'operator': # Don't handle operators yet.
return
defn = first['definition'].firstChild.data
self.add_text('\n')
if self.java:
self.add_text('%javamethodmodifiers ')
else:
self.add_text('%feature("docstring") ')
anc = node.parentNode.parentNode
if cdef_kind in ('file', 'namespace'):
ns_node = anc.getElementsByTagName('innernamespace')
if not ns_node and cdef_kind == 'namespace':
ns_node = anc.getElementsByTagName('compoundname')
if ns_node:
ns = ns_node[0].firstChild.data
if self.java:
self.add_text(' %s::%s "/**\n%s'%(ns, name, defn))
else:
self.add_text(' %s::%s "\n'%(ns, name))
else:
if self.java:
self.add_text(' %s "/**\n%s'%(name, defn))
else:
self.add_text(' %s "\n'%(name))
elif cdef_kind in ('class', 'struct'):
# Get the full function name.
anc_node = anc.getElementsByTagName('compoundname')
cname = anc_node[0].firstChild.data
if self.java:
self.add_text(' %s::%s "/**\n%s'%(cname, name, defn))
else:
self.add_text(' %s::%s "\n'%(cname, name))
for n in node.childNodes:
if n not in list(first.values()):
self.parse(n)
if self.java:
self.add_text(['*/\npublic ";', '\n'])
else:
self.add_text(['";', '\n'])
def do_definition(self, node):
data = node.firstChild.data
self.add_text('%s "\n%s'%(data, data))
def do_sectiondef(self, node):
kind = node.attributes['kind'].value
if kind in ('public-func', 'func'):
self.generic_parse(node)
def do_simplesect(self, node):
kind = node.attributes['kind'].value
if kind in ('date', 'rcs', 'version'):
pass
elif kind == 'warning':
self.add_text(['\n', 'WARNING:'])
self.generic_parse(node)
elif kind == 'see':
self.add_text('\n')
self.add_text('See:')
self.generic_parse(node)
else:
self.generic_parse(node)
def do_argsstring(self, node):
if self.java:
self.generic_parse(node, pad=1)
def do_member(self, node):
kind = node.attributes['kind'].value
refid = node.attributes['refid'].value
if kind == 'function' and refid[:9] == 'namespace':
self.generic_parse(node)
def do_doxygenindex(self, node):
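        # The Doxygen index lists every documented compound; each compound's
        # own <refid>.xml file is parsed recursively and its cleaned-up pieces
        # are appended to this document.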
self.multi = 1
comps = node.getElementsByTagName('compound')
for c in comps:
refid = c.attributes['refid'].value
fname = refid + '.xml'
if not os.path.exists(fname):
fname = os.path.join(self.my_dir, fname)
# print "parsing file: %s"%fname
p = Doxy2SWIG(fname)
p.generate()
self.pieces.extend(self.clean_pieces(p.pieces))
def write(self, fname, mode='w'):
o = my_open_write(fname, mode)
if self.multi:
o.write("".join(self.pieces))
else:
o.write("".join(self.clean_pieces(self.pieces)))
o.close()
def clean_pieces(self, pieces):
"""Cleans the list of strings given as `pieces`. It replaces
multiple newlines by a maximum of 2 and returns a new list.
It also wraps the paragraphs nicely.
"""
ret = []
count = 0
for i in pieces:
if i == '\n':
count = count + 1
else:
if i == '";':
if count:
ret.append('\n')
elif count > 2:
ret.append('\n\n')
elif count:
ret.append('\n'*count)
count = 0
ret.append(i)
_data = "".join(ret)
ret = []
for i in _data.split('\n\n'):
if i == 'Parameters:':
ret.extend(['Parameters:\n-----------', '\n\n'])
elif i.find('// File:') > -1: # leave comments alone.
ret.extend([i, '\n'])
else:
_tmp = textwrap.fill(i.strip())
_tmp = self.lead_spc.sub(r'\1"\2', _tmp)
ret.extend([_tmp, '\n\n'])
return ret
def main(input, output):
p = Doxy2SWIG(input)
p.generate()
p.write(output)
if __name__ == '__main__':
if len(sys.argv) != 3:
print (__doc__)
sys.exit(1)
main(sys.argv[1], sys.argv[2])
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as provider
from neutron_lib.api import validators
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as exc
from neutron_lib.exceptions import multiprovidernet as mpnet_exc
from neutron_lib.exceptions import placement as place_exc
from neutron_lib.exceptions import vlantransparent as vlan_exc
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
import stevedore
from neutron._i18n import _
from neutron.conf.plugins.ml2 import config
from neutron.db import segments_db
from neutron.objects import ports
from neutron.plugins.ml2.common import exceptions as ml2_exc
LOG = log.getLogger(__name__)
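# Hard limit on hierarchical port binding depth; _bind_port_level() aborts the
# binding attempt once this many levels have been tried.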
MAX_BINDING_LEVELS = 10
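# Register the [ml2] configuration options (type_drivers, tenant_network_types,
# mechanism_drivers, extension_drivers, ...) that the managers below read from
# cfg.CONF.ml2.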
config.register_ml2_plugin_opts()
class TypeManager(stevedore.named.NamedExtensionManager):
"""Manage network segment types using drivers."""
def __init__(self):
# Mapping from type name to DriverManager
self.drivers = {}
LOG.info("Configured type driver names: %s",
cfg.CONF.ml2.type_drivers)
super(TypeManager, self).__init__('neutron.ml2.type_drivers',
cfg.CONF.ml2.type_drivers,
invoke_on_load=True)
LOG.info("Loaded type driver names: %s", self.names())
self._register_types()
self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types)
self._check_external_network_type(cfg.CONF.ml2.external_network_type)
def _register_types(self):
for ext in self:
network_type = ext.obj.get_type()
if network_type in self.drivers:
LOG.error("Type driver '%(new_driver)s' ignored because"
" type driver '%(old_driver)s' is already"
" registered for type '%(type)s'",
{'new_driver': ext.name,
'old_driver': self.drivers[network_type].name,
'type': network_type})
else:
self.drivers[network_type] = ext
LOG.info("Registered types: %s", self.drivers.keys())
def _check_tenant_network_types(self, types):
self.tenant_network_types = []
for network_type in types:
if network_type in self.drivers:
self.tenant_network_types.append(network_type)
else:
LOG.error("No type driver for tenant network_type: %s. "
"Service terminated!", network_type)
raise SystemExit(1)
LOG.info("Tenant network_types: %s", self.tenant_network_types)
def _check_external_network_type(self, ext_network_type):
if ext_network_type and ext_network_type not in self.drivers:
LOG.error("No type driver for external network_type: %s. "
"Service terminated!", ext_network_type)
raise SystemExit(1)
def _process_provider_segment(self, segment):
(network_type, physical_network,
segmentation_id) = (self._get_attribute(segment, attr)
for attr in provider.ATTRIBUTES)
if validators.is_attr_set(network_type):
segment = {api.NETWORK_TYPE: network_type,
api.PHYSICAL_NETWORK: physical_network,
api.SEGMENTATION_ID: segmentation_id}
self.validate_provider_segment(segment)
return segment
msg = _("network_type required")
raise exc.InvalidInput(error_message=msg)
def _process_provider_create(self, network):
if any(validators.is_attr_set(network.get(attr))
for attr in provider.ATTRIBUTES):
# Verify that multiprovider and provider attributes are not set
# at the same time.
if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
raise mpnet_exc.SegmentsSetInConjunctionWithProviders()
segment = self._get_provider_segment(network)
return [self._process_provider_segment(segment)]
elif validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
segments = [self._process_provider_segment(s)
for s in network[mpnet_apidef.SEGMENTS]]
mpnet_apidef.check_duplicate_segments(
segments, self.is_partial_segment)
return segments
def _match_segment(self, segment, filters):
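        # The segment matches when, for every provider attribute present in the
        # filters, its value is one of the allowed values; attributes without a
        # filter match anything.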
return all(not filters.get(attr) or segment.get(attr) in filters[attr]
for attr in provider.ATTRIBUTES)
def _get_provider_segment(self, network):
# TODO(manishg): Placeholder method
# Code intended for operating on a provider segment should use
# this method to extract the segment, even though currently the
# segment attributes are part of the network dictionary. In the
# future, network and segment information will be decoupled and
# here we will do the job of extracting the segment information.
return network
def network_matches_filters(self, network, filters):
if not filters:
return True
if any(validators.is_attr_set(network.get(attr))
for attr in provider.ATTRIBUTES):
segments = [self._get_provider_segment(network)]
elif validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
segments = self._get_attribute(network, mpnet_apidef.SEGMENTS)
else:
return True
return any(self._match_segment(s, filters) for s in segments)
def _get_attribute(self, attrs, key):
value = attrs.get(key)
if value is constants.ATTR_NOT_SPECIFIED:
value = None
return value
def extend_network_dict_provider(self, context, network):
# this method is left for backward compat even though it would be
# easy to change the callers in tree to use the bulk function
return self.extend_networks_dict_provider(context, [network])
def extend_networks_dict_provider(self, context, networks):
ids = [network['id'] for network in networks]
net_segments = segments_db.get_networks_segments(context, ids)
for network in networks:
segments = net_segments[network['id']]
self._extend_network_dict_provider(network, segments)
def _extend_network_dict_provider(self, network, segments):
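        # A single segment is exposed through the flat provider:* attributes,
        # while multi-segment networks are reported via the multi-provider
        # 'segments' attribute.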
if not segments:
LOG.debug("Network %s has no segments", network['id'])
for attr in provider.ATTRIBUTES:
network[attr] = None
elif len(segments) > 1:
network[mpnet_apidef.SEGMENTS] = [
{provider.NETWORK_TYPE: segment[api.NETWORK_TYPE],
provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK],
provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]}
for segment in segments]
else:
segment = segments[0]
network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE]
network[provider.PHYSICAL_NETWORK] = segment[
api.PHYSICAL_NETWORK]
network[provider.SEGMENTATION_ID] = segment[
api.SEGMENTATION_ID]
def initialize(self):
for network_type, driver in self.drivers.items():
LOG.info("Initializing driver for type '%s'", network_type)
driver.obj.initialize()
def initialize_network_segment_range_support(self):
for network_type, driver in self.drivers.items():
if network_type in constants.NETWORK_SEGMENT_RANGE_TYPES:
LOG.info("Initializing driver network segment range support "
"for type '%s'", network_type)
driver.obj.initialize_network_segment_range_support()
def _add_network_segment(self, context, network_id, segment,
segment_index=0):
segments_db.add_network_segment(
context, network_id, segment, segment_index)
def _update_network_segment(self, context, network_id, segmentation_id):
segments_db.update_network_segment(
context, network_id, segmentation_id)
def create_network_segments(self, context, network, tenant_id):
"""Call type drivers to create network segments."""
segments = self._process_provider_create(network)
filters = {'project_id': tenant_id}
with db_api.CONTEXT_WRITER.using(context):
network_id = network['id']
if segments:
for segment_index, segment in enumerate(segments):
segment = self.reserve_provider_segment(
context, segment, filters=filters)
self._add_network_segment(context, network_id, segment,
segment_index)
elif (cfg.CONF.ml2.external_network_type and
self._get_attribute(network, extnet_apidef.EXTERNAL)):
segment = self._allocate_ext_net_segment(
context, filters=filters)
self._add_network_segment(context, network_id, segment)
else:
segment = self._allocate_tenant_net_segment(
context, filters=filters)
self._add_network_segment(context, network_id, segment)
def update_network_segment(self, context, network, net_data, segment):
"""Call type drivers to update a network segment.
Update operation is currently only supported for VLAN type segments,
and only the SEGMENTATION_ID field can be changed.
"""
project_id = network['project_id']
segmentation_id = net_data.get(provider.SEGMENTATION_ID)
network_type = segment[api.NETWORK_TYPE]
if network_type != constants.TYPE_VLAN:
msg = (_('Only VLAN type networks can be updated.'))
raise exc.InvalidInput(error_message=msg)
if not segmentation_id:
msg = (_('Only %s field can be updated in VLAN type networks') %
api.SEGMENTATION_ID)
raise exc.InvalidInput(error_message=msg)
new_segment = {api.NETWORK_TYPE: segment[api.NETWORK_TYPE],
api.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK],
api.SEGMENTATION_ID: segmentation_id}
self.validate_provider_segment(new_segment)
self.reserve_provider_segment(context, new_segment,
filters={'project_id': project_id})
self._update_network_segment(context, segment['id'], segmentation_id)
self.release_network_segment(context, segment)
def reserve_network_segment(self, context, segment_data):
"""Call type drivers to reserve a network segment."""
# Validate the data of segment
if not validators.is_attr_set(segment_data[api.NETWORK_TYPE]):
msg = _("network_type required")
raise exc.InvalidInput(error_message=msg)
net_type = self._get_attribute(segment_data, api.NETWORK_TYPE)
phys_net = self._get_attribute(segment_data, api.PHYSICAL_NETWORK)
seg_id = self._get_attribute(segment_data, api.SEGMENTATION_ID)
segment = {api.NETWORK_TYPE: net_type,
api.PHYSICAL_NETWORK: phys_net,
api.SEGMENTATION_ID: seg_id}
self.validate_provider_segment(segment)
# Reserve segment in type driver
with db_api.CONTEXT_WRITER.using(context):
return self.reserve_provider_segment(context, segment)
def is_partial_segment(self, segment):
network_type = segment[api.NETWORK_TYPE]
driver = self.drivers.get(network_type)
if driver:
return driver.obj.is_partial_segment(segment)
else:
msg = _("network_type value '%s' not supported") % network_type
raise exc.InvalidInput(error_message=msg)
def validate_provider_segment(self, segment):
network_type = segment[api.NETWORK_TYPE]
driver = self.drivers.get(network_type)
if driver:
driver.obj.validate_provider_segment(segment)
else:
msg = _("network_type value '%s' not supported") % network_type
raise exc.InvalidInput(error_message=msg)
def reserve_provider_segment(self, context, segment, filters=None):
network_type = segment.get(api.NETWORK_TYPE)
driver = self.drivers.get(network_type)
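        # Legacy api.TypeDriver implementations work on a DB session, while
        # newer drivers receive the full request context.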
if isinstance(driver.obj, api.TypeDriver):
return driver.obj.reserve_provider_segment(context.session,
segment, filters)
else:
return driver.obj.reserve_provider_segment(context,
segment, filters)
def _allocate_segment(self, context, network_type, filters=None):
driver = self.drivers.get(network_type)
if isinstance(driver.obj, api.TypeDriver):
return driver.obj.allocate_tenant_segment(context.session, filters)
else:
return driver.obj.allocate_tenant_segment(context, filters)
def _allocate_tenant_net_segment(self, context, filters=None):
for network_type in self.tenant_network_types:
segment = self._allocate_segment(context, network_type, filters)
if segment:
return segment
raise exc.NoNetworkAvailable()
def _allocate_ext_net_segment(self, context, filters=None):
network_type = cfg.CONF.ml2.external_network_type
segment = self._allocate_segment(context, network_type, filters)
if segment:
return segment
raise exc.NoNetworkAvailable()
def release_network_segments(self, context, network_id):
segments = segments_db.get_network_segments(context, network_id,
filter_dynamic=None)
for segment in segments:
self.release_network_segment(context, segment)
def release_network_segment(self, context, segment):
network_type = segment.get(api.NETWORK_TYPE)
driver = self.drivers.get(network_type)
if driver:
if isinstance(driver.obj, api.TypeDriver):
driver.obj.release_segment(context.session, segment)
else:
driver.obj.release_segment(context, segment)
else:
LOG.error("Failed to release segment '%s' because "
"network type is not supported.", segment)
@db_api.retry_if_session_inactive()
def allocate_dynamic_segment(self, context, network_id, segment):
"""Allocate a dynamic segment using a partial or full segment dict."""
dynamic_segment = segments_db.get_dynamic_segment(
context, network_id, segment.get(api.PHYSICAL_NETWORK),
segment.get(api.SEGMENTATION_ID))
if dynamic_segment:
return dynamic_segment
with db_api.CONTEXT_WRITER.using(context):
driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
if isinstance(driver.obj, api.TypeDriver):
dynamic_segment = driver.obj.reserve_provider_segment(
context.session, segment)
else:
dynamic_segment = driver.obj.reserve_provider_segment(
context, segment)
segments_db.add_network_segment(context, network_id,
dynamic_segment,
is_dynamic=True)
return dynamic_segment
@db_api.retry_if_session_inactive()
def release_dynamic_segment(self, context, segment_id):
"""Delete a dynamic segment."""
segment = segments_db.get_segment_by_id(context, segment_id)
if segment:
with db_api.CONTEXT_WRITER.using(context):
driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
if driver:
if isinstance(driver.obj, api.TypeDriver):
driver.obj.release_segment(context.session, segment)
else:
driver.obj.release_segment(context, segment)
segments_db.delete_network_segment(context, segment_id)
else:
LOG.error("Failed to release segment '%s' because "
"network type is not supported.", segment)
else:
LOG.debug("No segment found with id %(segment_id)s", segment_id)
def update_network_segment_range_allocations(self, network_type):
driver = self.drivers.get(network_type)
driver.obj.update_network_segment_range_allocations()
def network_type_supported(self, network_type):
return bool(network_type in self.drivers)
class MechanismManager(stevedore.named.NamedExtensionManager):
"""Manage networking mechanisms using drivers."""
def __init__(self):
# Registered mechanism drivers, keyed by name.
self.mech_drivers = {}
# Ordered list of mechanism drivers, defining
# the order in which the drivers are called.
self.ordered_mech_drivers = []
LOG.info("Configured mechanism driver names: %s",
cfg.CONF.ml2.mechanism_drivers)
super(MechanismManager, self).__init__(
'neutron.ml2.mechanism_drivers',
cfg.CONF.ml2.mechanism_drivers,
invoke_on_load=True,
name_order=True,
on_missing_entrypoints_callback=self._driver_not_found,
on_load_failure_callback=self._driver_not_loaded
)
LOG.info("Loaded mechanism driver names: %s", self.names())
self._register_mechanisms()
self.host_filtering_supported = self.is_host_filtering_supported()
if not self.host_filtering_supported:
LOG.info("No mechanism drivers provide segment reachability "
"information for agent scheduling.")
def _driver_not_found(self, names):
msg = (_("The following mechanism drivers were not found: %s")
% names)
LOG.critical(msg)
raise SystemExit(msg)
def _driver_not_loaded(self, manager, entrypoint, exception):
LOG.critical("The '%(entrypoint)s' entrypoint could not be"
" loaded for the following reason: '%(reason)s'.",
{'entrypoint': entrypoint,
'reason': exception})
raise SystemExit(str(exception))
def _register_mechanisms(self):
"""Register all mechanism drivers.
This method should only be called once in the MechanismManager
constructor.
"""
for ext in self:
self.mech_drivers[ext.name] = ext
self.ordered_mech_drivers.append(ext)
LOG.info("Registered mechanism drivers: %s",
[driver.name for driver in self.ordered_mech_drivers])
def initialize(self):
for driver in self.ordered_mech_drivers:
LOG.info("Initializing mechanism driver '%s'", driver.name)
driver.obj.initialize()
def _check_vlan_transparency(self, context):
"""Helper method for checking vlan transparecncy support.
:param context: context parameter to pass to each method call
:raises: neutron_lib.exceptions.vlantransparent.
VlanTransparencyDriverError if any mechanism driver doesn't
support vlan transparency.
"""
if context.current.get('vlan_transparent'):
for driver in self.ordered_mech_drivers:
if not driver.obj.check_vlan_transparency(context):
raise vlan_exc.VlanTransparencyDriverError()
def _call_on_drivers(self, method_name, context,
continue_on_failure=False, raise_db_retriable=False):
"""Helper method for calling a method across all mechanism drivers.
:param method_name: name of the method to call
:param context: context parameter to pass to each method call
:param continue_on_failure: whether or not to continue to call
all mechanism drivers once one has raised an exception
        :param raise_db_retriable: whether retriable DB exceptions raised by
            mechanism drivers should be propagated up to the caller so it can
            handle them, rather than being wrapped as an ML2 plugin error
        :raises: neutron.plugins.ml2.common.MechanismDriverError
            if any mechanism driver call fails, or a retriable DB error when
            raise_db_retriable=True. See neutron_lib.db.api.is_retriable for
            which DB exceptions are retriable.
"""
errors = []
for driver in self.ordered_mech_drivers:
try:
getattr(driver.obj, method_name)(context)
except Exception as e:
if raise_db_retriable and db_api.is_retriable(e):
with excutils.save_and_reraise_exception():
LOG.debug("DB exception raised by Mechanism driver "
"'%(name)s' in %(method)s",
{'name': driver.name, 'method': method_name},
exc_info=e)
LOG.exception(
"Mechanism driver '%(name)s' failed in %(method)s",
{'name': driver.name, 'method': method_name}
)
errors.append(e)
if not continue_on_failure:
break
if errors:
raise ml2_exc.MechanismDriverError(
method=method_name,
errors=errors
)
def create_network_precommit(self, context):
"""Notify all mechanism drivers during network creation.
:raises: DB retriable error if create_network_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver create_network_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._check_vlan_transparency(context)
self._call_on_drivers("create_network_precommit", context,
raise_db_retriable=True)
def create_network_postcommit(self, context):
"""Notify all mechanism drivers after network creation.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver create_network_postcommit call fails.
Called after the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, where the network will be deleted, triggering
any required cleanup. There is no guarantee that all mechanism
drivers are called in this case.
"""
self._call_on_drivers("create_network_postcommit", context)
def update_network_precommit(self, context):
"""Notify all mechanism drivers during network update.
:raises: DB retriable error if update_network_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver update_network_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("update_network_precommit", context,
raise_db_retriable=True)
def update_network_postcommit(self, context):
"""Notify all mechanism drivers after network update.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver update_network_postcommit call fails.
Called after the database transaction. If any mechanism driver
raises an error, then the error is logged but we continue to
call every other mechanism driver. A MechanismDriverError is
then reraised at the end to notify the caller of a failure.
"""
self._call_on_drivers("update_network_postcommit", context,
continue_on_failure=True)
def delete_network_precommit(self, context):
"""Notify all mechanism drivers during network deletion.
:raises: DB retriable error if delete_network_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver delete_network_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("delete_network_precommit", context,
raise_db_retriable=True)
def delete_network_postcommit(self, context):
"""Notify all mechanism drivers after network deletion.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver delete_network_postcommit call fails.
Called after the database transaction. If any mechanism driver
raises an error, then the error is logged but we continue to
call every other mechanism driver. A MechanismDriverError is
then reraised at the end to notify the caller of a failure. In
general we expect the caller to ignore the error, as the
network resource has already been deleted from the database
and it doesn't make sense to undo the action by recreating the
network.
"""
self._call_on_drivers("delete_network_postcommit", context,
continue_on_failure=True)
def create_subnet_precommit(self, context):
"""Notify all mechanism drivers during subnet creation.
:raises: DB retriable error if create_subnet_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver create_subnet_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("create_subnet_precommit", context,
raise_db_retriable=True)
def create_subnet_postcommit(self, context):
"""Notify all mechanism drivers after subnet creation.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver create_subnet_postcommit call fails.
Called after the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, where the subnet will be deleted, triggering
any required cleanup. There is no guarantee that all mechanism
drivers are called in this case.
"""
self._call_on_drivers("create_subnet_postcommit", context)
def update_subnet_precommit(self, context):
"""Notify all mechanism drivers during subnet update.
:raises: DB retriable error if update_subnet_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver update_subnet_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("update_subnet_precommit", context,
raise_db_retriable=True)
def update_subnet_postcommit(self, context):
"""Notify all mechanism drivers after subnet update.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver update_subnet_postcommit call fails.
Called after the database transaction. If any mechanism driver
raises an error, then the error is logged but we continue to
call every other mechanism driver. A MechanismDriverError is
then reraised at the end to notify the caller of a failure.
"""
self._call_on_drivers("update_subnet_postcommit", context,
continue_on_failure=True)
def delete_subnet_precommit(self, context):
"""Notify all mechanism drivers during subnet deletion.
:raises: DB retriable error if delete_subnet_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver delete_subnet_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("delete_subnet_precommit", context,
raise_db_retriable=True)
def delete_subnet_postcommit(self, context):
"""Notify all mechanism drivers after subnet deletion.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver delete_subnet_postcommit call fails.
Called after the database transaction. If any mechanism driver
raises an error, then the error is logged but we continue to
call every other mechanism driver. A MechanismDriverError is
then reraised at the end to notify the caller of a failure. In
general we expect the caller to ignore the error, as the
subnet resource has already been deleted from the database
and it doesn't make sense to undo the action by recreating the
subnet.
"""
self._call_on_drivers("delete_subnet_postcommit", context,
continue_on_failure=True)
def create_port_precommit(self, context):
"""Notify all mechanism drivers during port creation.
:raises: DB retriable error if create_port_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver create_port_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("create_port_precommit", context,
raise_db_retriable=True)
def create_port_postcommit(self, context):
"""Notify all mechanism drivers of port creation.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver create_port_postcommit call fails.
Called after the database transaction. Errors raised by
mechanism drivers are left to propagate to the caller, where
the port will be deleted, triggering any required
cleanup. There is no guarantee that all mechanism drivers are
called in this case.
"""
self._call_on_drivers("create_port_postcommit", context)
def update_port_precommit(self, context):
"""Notify all mechanism drivers during port update.
:raises: DB retriable error if update_port_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver update_port_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("update_port_precommit", context,
raise_db_retriable=True)
def update_port_postcommit(self, context):
"""Notify all mechanism drivers after port update.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver update_port_postcommit call fails.
Called after the database transaction. If any mechanism driver
raises an error, then the error is logged but we continue to
call every other mechanism driver. A MechanismDriverError is
then reraised at the end to notify the caller of a failure.
"""
self._call_on_drivers("update_port_postcommit", context,
continue_on_failure=True)
def delete_port_precommit(self, context):
"""Notify all mechanism drivers during port deletion.
        :raises: DB retriable error if delete_port_precommit raises them
See neutron_lib.db.api.is_retriable for what db exception is retriable
or neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver delete_port_precommit call fails.
Called within the database transaction. If a mechanism driver
raises an exception, then a MechanismDriverError is propagated
to the caller, triggering a rollback. There is no guarantee
that all mechanism drivers are called in this case.
"""
self._call_on_drivers("delete_port_precommit", context,
raise_db_retriable=True)
def delete_port_postcommit(self, context):
"""Notify all mechanism drivers after port deletion.
:raises: neutron.plugins.ml2.common.MechanismDriverError
if any mechanism driver delete_port_postcommit call fails.
Called after the database transaction. If any mechanism driver
raises an error, then the error is logged but we continue to
call every other mechanism driver. A MechanismDriverError is
then reraised at the end to notify the caller of a failure. In
general we expect the caller to ignore the error, as the
port resource has already been deleted from the database
and it doesn't make sense to undo the action by recreating the
port.
"""
self._call_on_drivers("delete_port_postcommit", context,
continue_on_failure=True)
def bind_port(self, context):
"""Attempt to bind a port using registered mechanism drivers.
:param context: PortContext instance describing the port
Called outside any transaction to attempt to establish a port
binding.
"""
binding = context._binding
LOG.debug("Attempting to bind port %(port)s on host %(host)s "
"for vnic_type %(vnic_type)s with profile %(profile)s",
{'port': context.current['id'],
'host': context.host,
'vnic_type': binding.vnic_type,
'profile': binding.profile})
context._clear_binding_levels()
if not self._bind_port_level(context, 0,
context.network.network_segments):
binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED
LOG.error("Failed to bind port %(port)s on host %(host)s "
"for vnic_type %(vnic_type)s using segments "
"%(segments)s",
{'port': context.current['id'],
'host': context.host,
'vnic_type': binding.vnic_type,
'segments': context.network.network_segments})
def _bind_port_level(self, context, level, segments_to_bind,
drivers=None, redoing_bottom=False):
if drivers is None:
drivers = self.ordered_mech_drivers
binding = context._binding
port_id = context.current['id']
LOG.debug("Attempting to bind port %(port)s by drivers %(drivers)s "
"on host %(host)s at level %(level)s using "
"segments %(segments)s",
{'port': port_id,
'drivers': ','.join([driver.name for driver in drivers]),
'host': context.host,
'level': level,
'segments': segments_to_bind})
if level == MAX_BINDING_LEVELS:
LOG.error("Exceeded maximum binding levels attempting to bind "
"port %(port)s on host %(host)s",
{'port': context.current['id'],
'host': context.host})
return False
drivers = self._check_drivers_connectivity(drivers, context)
if not drivers:
LOG.error("Port %(port)s does not have an IP address assigned and "
"there are no driver with 'connectivity' = 'l2'. The "
"port cannot be bound.",
{'port': context.current['id']})
return False
for driver in drivers:
if not self._check_driver_to_bind(driver, segments_to_bind,
context._binding_levels):
continue
try:
context._prepare_to_bind(segments_to_bind)
driver.obj.bind_port(context)
segment = context._new_bound_segment
if segment:
pbl_obj = ports.PortBindingLevel(
context._plugin_context,
port_id=port_id,
host=context.host,
level=level,
driver=driver.name,
segment_id=segment
)
context._push_binding_level(pbl_obj)
next_segments = context._next_segments_to_bind
if next_segments:
# Continue binding another level.
if self._bind_port_level(context, level + 1,
next_segments):
return True
else:
LOG.warning("Failed to bind port %(port)s on "
"host %(host)s at level %(lvl)s",
{'port': context.current['id'],
'host': context.host,
'lvl': level + 1})
context._pop_binding_level()
else:
# NOTE(bence romsics): Consider: "In case of
# hierarchical port binding binding_profile.allocation
# [decided and sent by Placement and Nova]
# is meant to drive the binding only on the binding
# level that represents the closest physical interface
# to the nova server." Link to spec:
#
# https://review.opendev.org/#/c/508149/14/specs\
# /rocky/minimum-bandwidth-\
# allocation-placement-api.rst@582
#
# But we cannot tell if a binding level is
# the bottom binding level before set_binding()
# gets called, and that's already too late. So we
# must undo the last binding after set_binding()
# was called and redo the last level trying to
# bind only with one driver as inferred from
# the allocation. In order to undo the binding
# here we must also assume that each driver's
# bind_port() implementation is side effect free
# beyond calling set_binding().
#
# Also please note that technically we allow for
# a redo to call continue_binding() instead of
# set_binding() and by that turn what was supposed
# to be the bottom level into a non-bottom binding
# level. A thorough discussion is recommended if
# you think of taking advantage of this.
#
# Also if we find use cases requiring
# diamond-shaped selections of drivers on different
# levels (eg. driverA and driverB can be both
# a valid choice on level 0, but on level 1 both
# previous choice leads to driverC) then we need
# to restrict segment selection too based on
# traits of the allocated resource provider on
# the top binding_level (==0).
if (context.current['binding:profile'] is not None and
context.current[
'binding:profile'].get('allocation') and
not redoing_bottom):
LOG.debug(
"Undo bottom bound level and redo it "
"according to binding_profile.allocation: %s",
context.current['binding:profile'][
'allocation'])
context._pop_binding_level()
context._unset_binding()
return self._bind_port_level(
context, level, segments_to_bind,
drivers=[self._infer_driver_from_allocation(
context)],
redoing_bottom=True)
# Binding complete.
LOG.debug("Bound port: %(port)s, "
"host: %(host)s, "
"vif_type: %(vif_type)s, "
"vif_details: %(vif_details)s, "
"binding_levels: %(binding_levels)s",
{'port': port_id,
'host': context.host,
'vif_type': binding.vif_type,
'vif_details': binding.vif_details,
'binding_levels': context.binding_levels})
return True
except Exception:
LOG.exception("Mechanism driver %s failed in "
"bind_port",
driver.name)
def _infer_driver_from_allocation(self, context):
"""Choose mechanism driver as implied by allocation in placement.
:param context: PortContext instance describing the port
:returns: a single MechanismDriver instance
Ports allocated to a resource provider (ie. a physical network
interface) in Placement have the UUID of the provider in their
binding:profile.allocation. The choice of a physical network
interface (as recorded in the allocation) implies a choice of
mechanism driver too. When an allocation was received we expect
exactly one mechanism driver to be responsible for that physical
network interface resource provider.
"""
drivers = []
for driver in self.ordered_mech_drivers:
if driver.obj.responsible_for_ports_allocation(context):
drivers.append(driver)
allocation = context.current['binding:profile']['allocation']
if len(drivers) == 0:
LOG.error("Failed to bind port %(port)s on host "
"%(host)s allocated on resource providers: "
"%(rsc_providers)s, because no mechanism driver "
"reports being responsible",
{'port': context.current['id'],
'host': context.host,
'rsc_providers': ','.join(allocation.values())})
raise place_exc.UnknownResourceProvider(
rsc_provider=','.join(allocation.values()))
if len(drivers) >= 2:
raise place_exc.AmbiguousResponsibilityForResourceProvider(
rsc_provider=','.join(allocation.values()),
drivers=','.join([driver.name for driver in drivers]))
# NOTE(bence romsics): The error conditions for raising either
# UnknownResourceProvider or AmbiguousResponsibilityForResourceProvider
# are pretty static therefore the usual 10-times-retry of a binding
# failure could easily be unnecessary in those cases. However at this
# point special handling of these exceptions in the binding retry loop
# seems like premature optimization to me since these exceptions are
# always a sign of a misconfigured neutron deployment.
LOG.debug("Restricting possible bindings of port %(port)s "
"(as inferred from placement allocation) to "
"mechanism driver '%(driver)s'",
{'port': context.current['id'],
'driver': drivers[0].name})
return drivers[0]
def is_host_filtering_supported(self):
return all(driver.obj.is_host_filtering_supported()
for driver in self.ordered_mech_drivers)
def filter_hosts_with_segment_access(
self, context, segments, candidate_hosts, agent_getter):
"""Filter hosts with access to at least one segment.
:returns: a subset of candidate_hosts.
This method returns all hosts from candidate_hosts with access to a
segment according to at least one driver.
"""
candidate_hosts = set(candidate_hosts)
if not self.host_filtering_supported:
return candidate_hosts
hosts_with_access = set()
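        # Hosts already vouched for by an earlier driver are removed from the
        # candidate set, so each subsequent driver only checks the remainder
        # and the loop can stop as soon as every candidate has access.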
for driver in self.ordered_mech_drivers:
hosts = driver.obj.filter_hosts_with_segment_access(
context, segments, candidate_hosts, agent_getter)
hosts_with_access |= hosts
candidate_hosts -= hosts
if not candidate_hosts:
break
return hosts_with_access
def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels):
# To prevent a possible binding loop, don't try to bind with
# this driver if the same driver has already bound at a higher
# level to one of the segments we are currently trying to
# bind. Note that it is OK for the same driver to bind at
# multiple levels using different segments.
segment_ids_to_bind = {s[api.ID]
for s in segments_to_bind}
for level in binding_levels:
if (level.driver == driver.name and
level.segment_id in segment_ids_to_bind):
LOG.debug("segment %(segment)s is already bound "
"by driver %(driver)s",
{"segment": level.segment_id,
"driver": level.driver})
return False
return True
def _check_drivers_connectivity(self, drivers, port_context):
"""If port does not have an IP address, driver connectivity must be l2
A port without an IP address can be bound only to a mech driver with
"connectivity" = "l2". "legacy" or "l3" (e.g.: Calico) drivers cannot
have a port bound without an IP allocated.
"""
if port_context.current.get('fixed_ips'):
return drivers
return [d for d in drivers if
d.obj.connectivity == portbindings.CONNECTIVITY_L2]
def get_workers(self):
workers = []
for driver in self.ordered_mech_drivers:
workers += driver.obj.get_workers()
return workers
class ExtensionManager(stevedore.named.NamedExtensionManager):
"""Manage extension drivers using drivers."""
def __init__(self):
# Ordered list of extension drivers, defining
# the order in which the drivers are called.
self.ordered_ext_drivers = []
LOG.info("Configured extension driver names: %s",
cfg.CONF.ml2.extension_drivers)
super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers',
cfg.CONF.ml2.extension_drivers,
invoke_on_load=True,
name_order=True)
LOG.info("Loaded extension driver names: %s", self.names())
self._register_drivers()
def _register_drivers(self):
"""Register all extension drivers.
This method should only be called once in the ExtensionManager
constructor.
"""
for ext in self:
self.ordered_ext_drivers.append(ext)
LOG.info("Registered extension drivers: %s",
[driver.name for driver in self.ordered_ext_drivers])
def initialize(self):
# Initialize each driver in the list.
for driver in self.ordered_ext_drivers:
LOG.info("Initializing extension driver '%s'", driver.name)
driver.obj.initialize()
def extension_aliases(self):
exts = []
for driver in self.ordered_ext_drivers:
aliases = driver.obj.extension_aliases
for alias in aliases:
if not alias:
continue
exts.append(alias)
LOG.info("Got %(alias)s extension from driver '%(drv)s'",
{'alias': alias, 'drv': driver.name})
return exts
def _call_on_ext_drivers(self, method_name, plugin_context, data, result):
"""Helper method for calling a method across all extension drivers."""
for driver in self.ordered_ext_drivers:
try:
getattr(driver.obj, method_name)(plugin_context, data, result)
except Exception:
with excutils.save_and_reraise_exception():
LOG.info("Extension driver '%(name)s' failed in "
"%(method)s",
{'name': driver.name, 'method': method_name})
def process_create_network(self, plugin_context, data, result):
"""Notify all extension drivers during network creation."""
self._call_on_ext_drivers("process_create_network", plugin_context,
data, result)
def process_update_network(self, plugin_context, data, result):
"""Notify all extension drivers during network update."""
self._call_on_ext_drivers("process_update_network", plugin_context,
data, result)
def process_create_subnet(self, plugin_context, data, result):
"""Notify all extension drivers during subnet creation."""
self._call_on_ext_drivers("process_create_subnet", plugin_context,
data, result)
def process_update_subnet(self, plugin_context, data, result):
"""Notify all extension drivers during subnet update."""
self._call_on_ext_drivers("process_update_subnet", plugin_context,
data, result)
def process_create_port(self, plugin_context, data, result):
"""Notify all extension drivers during port creation."""
self._call_on_ext_drivers("process_create_port", plugin_context,
data, result)
def process_update_port(self, plugin_context, data, result):
"""Notify all extension drivers during port update."""
self._call_on_ext_drivers("process_update_port", plugin_context,
data, result)
def _call_on_dict_driver(self, method_name, session, base_model, result):
for driver in self.ordered_ext_drivers:
try:
getattr(driver.obj, method_name)(session, base_model, result)
except Exception:
LOG.exception("Extension driver '%(name)s' failed in "
"%(method)s",
{'name': driver.name, 'method': method_name})
raise ml2_exc.ExtensionDriverError(driver=driver.name)
def extend_network_dict(self, session, base_model, result):
"""Notify all extension drivers to extend network dictionary."""
self._call_on_dict_driver("extend_network_dict", session, base_model,
result)
def extend_subnet_dict(self, session, base_model, result):
"""Notify all extension drivers to extend subnet dictionary."""
self._call_on_dict_driver("extend_subnet_dict", session, base_model,
result)
def extend_port_dict(self, session, base_model, result):
"""Notify all extension drivers to extend port dictionary."""
self._call_on_dict_driver("extend_port_dict", session, base_model,
result)
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for cloud tpu client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from six.moves.urllib import request
from tensorflow.python.platform import test
from tensorflow.python.tpu.client import client
mock = test.mock
def mock_request_compute_metadata(path):
if path == 'project/project-id':
return 'test-project'
elif path == 'instance/zone':
return 'projects/test-project/locations/us-central1-c'
elif path == 'instance/network-interfaces/0/ip':
return '10.128.1.2'
return ''
class MockRequestClass(object):
def __init__(self, name, tpu_map):
self._name = name
self._tpu_map = tpu_map
def execute(self):
if self._name in self._tpu_map:
tpu_dict = self._tpu_map[self._name].copy()
if isinstance(tpu_dict.get('health'), list):
        # Extract a single health string from the health list, indexed by the
        # (mocked) current time.
time_now = time.time()
health_now = tpu_dict.get('health')[time_now]
tpu_dict['health'] = health_now
return tpu_dict
else:
raise KeyError('Resource %s was not found' % self._name)
class MockNodeClass(object):
def __init__(self, tpu_map):
self._tpu_map = tpu_map
def get(self, name):
return MockRequestClass(name, self._tpu_map)
class CloudTpuClientTest(test.TestCase):
def setUp(self):
super(CloudTpuClientTest, self).setUp()
if 'TPU_API_DISCOVERY_URL' in os.environ:
del os.environ['TPU_API_DISCOVERY_URL']
if 'TPU_NAME' in os.environ:
del os.environ['TPU_NAME']
self._time_now = 0
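  # The two helpers below form a virtual clock for the wait_for_healthy
  # tests: time.time() is patched to return self._time_now and time.sleep()
  # advances it, so those tests run instantly and deterministically.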
def _mock_time(self, *args, **kwargs):
return self._time_now
def _mock_sleep(self, secs):
self._time_now += secs
def mock_service_client(self, tpu_map=None):
if tpu_map is None:
tpu_map = {}
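    # Build a chain of mocks mirroring the call path used against the Cloud
    # TPU API: service.projects().locations().nodes().get(name).execute()
    # ultimately returns the dict stored in tpu_map[name].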
mock_locations = mock.MagicMock()
mock_locations.nodes.return_value = MockNodeClass(tpu_map)
mock_project = mock.MagicMock()
mock_project.locations.return_value = mock_locations
mock_client = mock.MagicMock()
mock_client.projects.return_value = mock_project
return mock_client
def testEnvironmentDiscoveryUrl(self):
os.environ['TPU_API_DISCOVERY_URL'] = 'https://{api}.internal/{apiVersion}'
self.assertEqual('https://{api}.internal/{apiVersion}',
(client._environment_discovery_url()))
def testEnvironmentVarToNetworkEndpointsSingleIp(self):
self.assertEqual(
[{'ipAddress': '1.2.3.4', 'port': '1234'}],
list(client._environment_var_to_network_endpoints(
'1.2.3.4:1234')))
def testEnvironmentVarToNetworkEndpointsSingleGrpcAddress(self):
self.assertEqual(
[{'ipAddress': '1.2.3.4', 'port': '2000'}],
list(
client._environment_var_to_network_endpoints(
'grpc://1.2.3.4:2000')))
def testEnvironmentVarToNetworkEndpointsMultipleIps(self):
self.assertEqual(
[{'ipAddress': '1.2.3.4', 'port': '2000'},
{'ipAddress': '5.6.7.8', 'port': '1234'}],
list(
client._environment_var_to_network_endpoints(
'1.2.3.4:2000,5.6.7.8:1234')))
def testEnvironmentVarToNetworkEndpointsMultipleGrpcAddresses(self):
self.assertEqual(
[{'ipAddress': '1.2.3.4', 'port': '2000'},
{'ipAddress': '5.6.7.8', 'port': '1234'}],
list(client._environment_var_to_network_endpoints(
'grpc://1.2.3.4:2000,grpc://5.6.7.8:1234')))
def testEnvironmentVarToNetworkEndpointsMissingPortAndMixed(self):
self.assertEqual(
[{'ipAddress': '1.2.3.4', 'port': '2000'},
{'ipAddress': '5.6.7.8', 'port': '8470'}],
list(client._environment_var_to_network_endpoints(
'1.2.3.4:2000,grpc://5.6.7.8')))
def testInitializeNoArguments(self):
with self.assertRaisesRegex(
ValueError, 'Please provide a TPU Name to connect to.'):
client.Client()
def testInitializeMultiElementTpuArray(self):
with self.assertRaisesRegex(
NotImplementedError,
'Using multiple TPUs in a single session is not yet implemented'):
client.Client(tpu=['multiple', 'elements'])
def assertClientContains(self, c):
self.assertEqual('tpu_name', c._tpu)
self.assertEqual(True, c._use_api)
self.assertEqual(None, c._credentials)
self.assertEqual('test-project', c._project)
self.assertEqual('us-central1-c', c._zone)
self.assertEqual(None, c._discovery_url)
self.assertEqual([{
'ipAddress': '10.1.2.3',
'port': '8470'
}], c.network_endpoints())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testNetworkEndpointsNotReadyWithApi(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertRaisesRegex(
RuntimeError, 'TPU .* is not yet ready; state: "None"',
c.network_endpoints)
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testInitializeNoArgumentsWithEnvironmentVariable(self):
os.environ['TPU_NAME'] = 'tpu_name'
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY',
}
}
c = client.Client(
service=self.mock_service_client(tpu_map=tpu_map))
self.assertClientContains(c)
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testInitializeTpuName(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': 'HEALTHY',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertClientContains(c)
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testInitializeIpAddress(self):
c = client.Client(tpu='grpc://1.2.3.4:8470')
self.assertEqual('grpc://1.2.3.4:8470', c._tpu)
self.assertEqual(False, c._use_api)
self.assertEqual(None, c._service)
self.assertEqual(None, c._credentials)
self.assertEqual(None, c._project)
self.assertEqual(None, c._zone)
self.assertEqual(None, c._discovery_url)
self.assertEqual([{
'ipAddress': '1.2.3.4',
'port': '8470'
}], c.network_endpoints())
def testInitializeWithoutMetadata(self):
c = client.Client(
tpu='tpu_name', project='project', zone='zone')
self.assertEqual('tpu_name', c._tpu)
self.assertEqual(True, c._use_api)
self.assertEqual(None, c._service)
self.assertEqual(None, c._credentials)
self.assertEqual('project', c._project)
self.assertEqual('zone', c._zone)
self.assertEqual(None, c._discovery_url)
def testRecoverableNoApiAccess(self):
c = client.Client(tpu='grpc://1.2.3.4:8470')
self.assertEqual(True, c.recoverable())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRecoverableNoState(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual(True, c.recoverable())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRecoverableReady(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual(True, c.recoverable())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRecoverablePreempted(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'PREEMPTED',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual(False, c.recoverable())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testHealthApi(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'PREEMPTED',
'health': 'HEALTHY',
'acceleratorType': 'v3-8',
'tensorflowVersion': 'nightly',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual('HEALTHY', c.health())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testRuntimeVersionApi(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'PREEMPTED',
'health': 'HEALTHY',
'acceleratorType': 'v3-8',
'tensorflowVersion': 'nightly',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual('nightly', c.runtime_version())
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testAcceleratorTypeApi(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'PREEMPTED',
'health': 'HEALTHY',
'acceleratorType': 'v3-8',
'tensorflowVersion': 'nightly',
}
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
self.assertEqual('v3-8', c.accelerator_type())
def testHandlesByteStrings(self):
self.assertEqual(
client.Client(
tpu='tpu_name', zone='zone', project='project')._full_name(),
client.Client(
tpu=b'tpu_name', zone=b'zone', project=b'project')._full_name(),
)
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testWaitForHealthy(self):
time_mock = mock.patch.object(time, 'time', autospec=True).start()
time_mock.side_effect = self._mock_time
sleep_mock = mock.patch.object(time, 'sleep', autospec=True).start()
sleep_mock.side_effect = self._mock_sleep
health_timeseries = (['UNHEALTHY_MAINTENANCE']*30 + ['TIMEOUT']*10
+ [None]*20 + ['HEALTHY']*30)
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': health_timeseries,
},
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
# Doesn't throw RuntimeError as TPU becomes HEALTHY before timeout
timeout = 80
interval = 5
return_time = 60
c.wait_for_healthy(timeout_s=timeout, interval=interval)
self.assertEqual(time.time(), return_time)
self.assertEqual(sleep_mock.call_count, return_time/interval)
@mock.patch.object(client, '_request_compute_metadata',
mock_request_compute_metadata)
def testWaitForHealthyRaisesError(self):
time_mock = mock.patch.object(time, 'time', autospec=True).start()
time_mock.side_effect = self._mock_time
sleep_mock = mock.patch.object(time, 'sleep', autospec=True).start()
sleep_mock.side_effect = self._mock_sleep
# Mock timeseries where takes longer than timeout.
health_timeseries = ['UNHEALTHY_MAINTENANCE']*50 + ['TIMEOUT']*50
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'ipAddress': '10.1.2.3',
'port': '8470',
'state': 'READY',
'health': health_timeseries,
},
}
c = client.Client(
tpu='tpu_name', service=self.mock_service_client(tpu_map=tpu_map))
    # Raises RuntimeError because the TPU never becomes HEALTHY before the
    # timeout.
with self.assertRaisesRegex(
RuntimeError,
'Timed out waiting for TPU .* to become healthy'):
c.wait_for_healthy(timeout_s=80, interval=5)
def baseConfigureTpuVersion(self):
tpu_map = {
'projects/test-project/locations/us-central1-c/nodes/tpu_name': {
'state':
'READY',
'networkEndpoints': [
{
'ipAddress': '1.2.3.4'
},
{
'ipAddress': '5.6.7.8'
},
]
}
}
return client.Client(
tpu='tpu_name',
project='test-project',
zone='us-central1-c',
service=self.mock_service_client(tpu_map=tpu_map))
@mock.patch.object(request, 'urlopen')
def testConfigureTpuVersion(self, urlopen):
c = self.baseConfigureTpuVersion()
c.configure_tpu_version('1.15')
paths = [call[0][0].full_url for call in urlopen.call_args_list]
self.assertEqual([
'http://1.2.3.4:8475/requestversion/1.15?restartType=always',
'http://5.6.7.8:8475/requestversion/1.15?restartType=always'
], sorted(paths))
@mock.patch.object(request, 'urlopen')
def testConfigureTpuVersionRestartIfneeded(self, urlopen):
c = self.baseConfigureTpuVersion()
c.configure_tpu_version('1.15', restart_type='ifNeeded')
paths = [call[0][0].full_url for call in urlopen.call_args_list]
self.assertEqual([
'http://1.2.3.4:8475/requestversion/1.15?restartType=ifNeeded',
'http://5.6.7.8:8475/requestversion/1.15?restartType=ifNeeded'
], sorted(paths))
if __name__ == '__main__':
test.main()
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-11 10:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import geotrek.authent.models
class Migration(migrations.Migration):
dependencies = [
('signage', '0009_auto_20191029_1110'),
]
operations = [
migrations.AlterModelOptions(
name='signagetype',
options={'ordering': ('label',), 'verbose_name': 'Signage Type', 'verbose_name_plural': 'Signage Types'},
),
migrations.AlterField(
model_name='blade',
name='color',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='signage.Color', verbose_name='Color'),
),
migrations.AlterField(
model_name='blade',
name='condition',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='infrastructure.InfrastructureCondition', verbose_name='Condition'),
),
migrations.AlterField(
model_name='blade',
name='direction',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='signage.Direction', verbose_name='Direction'),
),
migrations.AlterField(
model_name='blade',
name='number',
field=models.CharField(max_length=250, verbose_name='Number'),
),
migrations.AlterField(
model_name='blade',
name='signage',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='signage.Signage', verbose_name='Signage'),
),
migrations.AlterField(
model_name='blade',
name='structure',
field=models.ForeignKey(default=geotrek.authent.models.default_structure_pk, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
),
migrations.AlterField(
model_name='blade',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='signage.BladeType', verbose_name='Type'),
),
migrations.AlterField(
model_name='bladetype',
name='label',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='bladetype',
name='structure',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
),
migrations.AlterField(
model_name='color',
name='label',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='direction',
name='label',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='line',
name='blade',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='lines', to='signage.Blade', verbose_name='Blade'),
),
migrations.AlterField(
model_name='line',
name='distance',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=8, null=True, verbose_name='Distance'),
),
migrations.AlterField(
model_name='line',
name='number',
field=models.IntegerField(verbose_name='Number'),
),
migrations.AlterField(
model_name='line',
name='pictogram_name',
field=models.CharField(blank=True, max_length=250, null=True, verbose_name='Pictogramm name'),
),
migrations.AlterField(
model_name='line',
name='structure',
field=models.ForeignKey(default=geotrek.authent.models.default_structure_pk, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
),
migrations.AlterField(
model_name='line',
name='text',
field=models.CharField(max_length=1000, verbose_name='Text'),
),
migrations.AlterField(
model_name='line',
name='time',
field=models.DurationField(blank=True, help_text='Hours:Minutes:Seconds', null=True, verbose_name='Time'),
),
migrations.AlterField(
model_name='sealing',
name='label',
field=models.CharField(max_length=250, verbose_name='Name'),
),
migrations.AlterField(
model_name='sealing',
name='structure',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
),
migrations.AlterField(
model_name='signage',
name='code',
field=models.CharField(blank=True, max_length=250, null=True, verbose_name='Code'),
),
migrations.AlterField(
model_name='signage',
name='condition',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='infrastructure.InfrastructureCondition', verbose_name='Condition'),
),
migrations.AlterField(
model_name='signage',
name='description',
field=models.TextField(blank=True, help_text='Specificites', verbose_name='Description'),
),
migrations.AlterField(
model_name='signage',
name='eid',
field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='External id'),
),
migrations.AlterField(
model_name='signage',
name='implantation_year',
field=models.PositiveSmallIntegerField(null=True, verbose_name='Implantation year'),
),
migrations.AlterField(
model_name='signage',
name='manager',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='common.Organism', verbose_name='Manager'),
),
migrations.AlterField(
model_name='signage',
name='name',
field=models.CharField(help_text='Reference, code, ...', max_length=128, verbose_name='Name'),
),
migrations.AlterField(
model_name='signage',
name='printed_elevation',
field=models.IntegerField(blank=True, null=True, verbose_name='Printed elevation'),
),
migrations.AlterField(
model_name='signage',
name='sealing',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='signage.Sealing', verbose_name='Sealing'),
),
migrations.AlterField(
model_name='signage',
name='structure',
field=models.ForeignKey(default=geotrek.authent.models.default_structure_pk, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
),
migrations.AlterField(
model_name='signage',
name='topo_object',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.Topology'),
),
migrations.AlterField(
model_name='signage',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='signage.SignageType', verbose_name='Type'),
),
migrations.AlterField(
model_name='signagetype',
name='label',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='signagetype',
name='structure',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='authent.Structure', verbose_name='Related structure'),
),
migrations.AlterField(
model_name='blade',
name='deleted',
field=models.BooleanField(default=False, editable=False, verbose_name='Deleted'),
),
migrations.AlterModelTable(
name='blade',
table=None,
),
migrations.AlterModelTable(
name='bladetype',
table=None,
),
migrations.AlterModelTable(
name='color',
table=None,
),
migrations.AlterModelTable(
name='direction',
table=None,
),
migrations.AlterModelTable(
name='line',
table=None,
),
migrations.AlterModelTable(
name='sealing',
table=None,
),
migrations.AlterModelTable(
name='signage',
table=None,
),
migrations.AlterModelTable(
name='signagetype',
table=None,
),
]
|
|
import cupy
import cupyx.scipy.fft
from cupy import _core
from cupy._core import _routines_math as _math
from cupy._core import fusion
from cupy.lib import stride_tricks
import numpy
_dot_kernel = _core.ReductionKernel(
'T x1, T x2',
'T y',
'x1 * x2',
'a + b',
'y = a',
'0',
'dot_product'
)
def _choose_conv_method(in1, in2, mode):
if in1.ndim != 1 or in2.ndim != 1:
raise NotImplementedError('Only 1d inputs are supported currently')
if in1.dtype.kind in 'bui' or in2.dtype.kind in 'bui':
return 'direct'
if _fftconv_faster(in1, in2, mode):
return 'fft'
return 'direct'
def _fftconv_faster(x, h, mode):
"""
    .. seealso:: :func:`scipy.signal._signaltools._fftconv_faster`
"""
# TODO(Dahlia-Chehata): replace with GPU-based constants.
return True
def convolve(a, v, mode='full'):
"""Returns the discrete, linear convolution of two one-dimensional sequences.
Args:
a (cupy.ndarray): first 1-dimensional input.
v (cupy.ndarray): second 1-dimensional input.
mode (str, optional): `valid`, `same`, `full`
Returns:
cupy.ndarray: Discrete, linear convolution of a and v.
.. seealso:: :func:`numpy.convolve`
"""
if a.size == 0:
raise ValueError('a cannot be empty')
if v.size == 0:
raise ValueError('v cannot be empty')
if v.ndim > 1:
raise ValueError('v cannot be multidimensional array')
if v.size > a.size:
a, v = v, a
a = a.ravel()
v = v.ravel()
method = _choose_conv_method(a, v, mode)
if method == 'direct':
out = _dot_convolve(a, v, mode)
elif method == 'fft':
out = _fft_convolve(a, v, mode)
else:
raise ValueError('Unsupported method')
return out
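# A minimal usage sketch (requires a CUDA device; values follow the NumPy
# convolve example):
#
#   a = cupy.array([1.0, 2.0, 3.0])
#   v = cupy.array([0.0, 1.0, 0.5])
#   convolve(a, v, mode='full')   # -> array([0. , 1. , 2.5, 4. , 1.5])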
def _fft_convolve(a1, a2, mode):
offset = 0
if a1.size < a2.size:
a1, a2 = a2, a1
offset = 1 - a2.size % 2
# if either of them is complex, the dtype after multiplication will also be
if a1.dtype.kind == 'c' or a2.dtype.kind == 'c':
fft, ifft = cupy.fft.fft, cupy.fft.ifft
else:
fft, ifft = cupy.fft.rfft, cupy.fft.irfft
dtype = cupy.result_type(a1, a2)
n1, n2 = a1.size, a2.size
out_size = cupyx.scipy.fft.next_fast_len(n1 + n2 - 1)
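    # Linear convolution via FFT: transform both inputs at a fast FFT length,
    # multiply the spectra pointwise, invert, then slice according to `mode`.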
fa1 = fft(a1, out_size)
fa2 = fft(a2, out_size)
out = ifft(fa1 * fa2, out_size)
if mode == 'full':
start, end = 0, n1 + n2 - 1
elif mode == 'same':
start = (n2 - 1) // 2 + offset
end = start + n1
elif mode == 'valid':
start, end = n2 - 1, n1
else:
raise ValueError(
'acceptable mode flags are `valid`, `same`, or `full`.')
out = out[start:end]
if dtype.kind in 'iu':
out = cupy.around(out)
return out.astype(dtype, copy=False)
def _dot_convolve(a1, a2, mode):
offset = 0
if a1.size < a2.size:
a1, a2 = a2, a1
offset = 1 - a2.size % 2
dtype = cupy.result_type(a1, a2)
n1, n2 = a1.size, a2.size
a1 = a1.astype(dtype, copy=False)
a2 = a2.astype(dtype, copy=False)
if mode == 'full':
out_size = n1 + n2 - 1
a1 = cupy.pad(a1, n2 - 1)
elif mode == 'same':
out_size = n1
pad_size = (n2 - 1) // 2 + offset
a1 = cupy.pad(a1, (n2 - 1 - pad_size, pad_size))
elif mode == 'valid':
out_size = n1 - n2 + 1
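    # Build an (out_size, n2) sliding-window view of a1 without copying, then
    # reduce each window against the reversed kernel with _dot_kernel.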
    stride = a1.strides[0]
    a1 = stride_tricks.as_strided(a1, (out_size, n2), (stride, stride))
output = _dot_kernel(a1, a2[::-1], axis=1)
return output
def clip(a, a_min=None, a_max=None, out=None):
"""Clips the values of an array to a given interval.
This is equivalent to ``maximum(minimum(a, a_max), a_min)``, while this
function is more efficient.
Args:
a (cupy.ndarray): The source array.
a_min (scalar, cupy.ndarray or None): The left side of the interval.
When it is ``None``, it is ignored.
a_max (scalar, cupy.ndarray or None): The right side of the interval.
When it is ``None``, it is ignored.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: Clipped array.
.. seealso:: :func:`numpy.clip`
"""
if fusion._is_fusing():
return fusion._call_ufunc(_math.clip,
a, a_min, a_max, out=out)
# TODO(okuta): check type
return a.clip(a_min, a_max, out=out)
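# A quick sketch of the equivalence described above (requires a CUDA device):
#
#   clip(cupy.arange(5), 1, 3)   # -> array([1, 1, 2, 3, 3])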
# sqrt_fixed is deprecated.
# numpy.sqrt is fixed in numpy 1.11.2.
sqrt = sqrt_fixed = _core.sqrt
cbrt = _core.create_ufunc(
'cupy_cbrt',
('e->e', 'f->f', 'd->d'),
'out0 = cbrt(in0)',
doc='''Elementwise cube root function.
.. seealso:: :data:`numpy.cbrt`
''')
square = _core.create_ufunc(
'cupy_square',
('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',
'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),
'out0 = in0 * in0',
doc='''Elementwise square function.
.. seealso:: :data:`numpy.square`
''')
absolute = _core.absolute
fabs = _core.create_ufunc(
'cupy_fabs',
('e->e', 'f->f', 'd->d'),
'out0 = abs(in0)',
doc='''Calculates absolute values element-wise.
Only real values are handled.
.. seealso:: :data:`numpy.fabs`
''')
_unsigned_sign = 'out0 = in0 > 0'
_complex_sign = '''
if (in0.real() == 0) {
out0 = (in0.imag() > 0) - (in0.imag() < 0);
} else {
out0 = (in0.real() > 0) - (in0.real() < 0);
}
'''
sign = _core.create_ufunc(
'cupy_sign',
('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),
'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),
'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d',
('F->F', _complex_sign), ('D->D', _complex_sign)),
'out0 = (in0 > 0) - (in0 < 0)',
doc='''Elementwise sign function.
It returns -1, 0, or 1 depending on the sign of the input.
.. seealso:: :data:`numpy.sign`
''')
_float_preamble = '''
#ifndef NAN
#define NAN __int_as_float(0x7fffffff)
#endif
'''
_float_maximum = ('out0 = (isnan(in0) | isnan(in1)) ? out0_type(NAN) : '
'out0_type(max(in0, in1))')
maximum = _core.create_ufunc(
'cupy_maximum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_maximum),
('ff->f', _float_maximum),
('dd->d', _float_maximum),
('FF->F', _float_maximum),
('DD->D', _float_maximum)),
'out0 = max(in0, in1)',
preamble=_float_preamble,
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.maximum`
''',
cutensor_op=('OP_MAX', 1, 1))
_float_minimum = ('out0 = (isnan(in0) | isnan(in1)) ? out0_type(NAN) : '
'out0_type(min(in0, in1))')
minimum = _core.create_ufunc(
'cupy_minimum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_minimum),
('ff->f', _float_minimum),
('dd->d', _float_minimum),
('FF->F', _float_minimum),
('DD->D', _float_minimum)),
'out0 = min(in0, in1)',
preamble=_float_preamble,
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.minimum`
''',
cutensor_op=('OP_MIN', 1, 1))
fmax = _core.create_ufunc(
'cupy_fmax',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', 'out0 = fmax(in0, in1)'),
('ff->f', 'out0 = fmax(in0, in1)'),
('dd->d', 'out0 = fmax(in0, in1)'),
'FF->F', 'DD->D'),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmax`
''')
fmin = _core.create_ufunc(
'cupy_fmin',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', 'out0 = fmin(in0, in1)'),
('ff->f', 'out0 = fmin(in0, in1)'),
('dd->d', 'out0 = fmin(in0, in1)'),
'FF->F', 'DD->D'),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmin`
''')
_nan_to_num_preamble = '''
template <class T>
__device__ T nan_to_num(T x, T nan, T posinf, T neginf) {
if (isnan(x))
return nan;
if (isinf(x))
return x > 0 ? posinf : neginf;
return x;
}
template <class T>
__device__ complex<T> nan_to_num(complex<T> x, T nan, T posinf, T neginf) {
T re = nan_to_num(x.real(), nan, posinf, neginf);
T im = nan_to_num(x.imag(), nan, posinf, neginf);
return complex<T>(re, im);
}
'''
_nan_to_num = _core.create_ufunc(
'cupy_nan_to_num_',
('????->?', 'bbbb->b', 'BBBB->B', 'hhhh->h', 'HHHH->H',
'iiii->i', 'IIII->I', 'llll->l', 'LLLL->L', 'qqqq->q', 'QQQQ->Q',
('eeee->e',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('ffff->f',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('dddd->d',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('Ffff->F',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('Dddd->D',
'out0 = nan_to_num(in0, in1, in2, in3)')),
'out0 = in0',
preamble=_nan_to_num_preamble,
doc='''Elementwise nan_to_num function.
.. seealso:: :func:`numpy.nan_to_num`
''')
def _check_nan_inf(x, dtype, neg=None):
if dtype.char in 'FD':
dtype = cupy.dtype(dtype.char.lower())
if dtype.char not in 'efd':
x = 0
elif x is None and neg is not None:
x = cupy.finfo(dtype).min if neg else cupy.finfo(dtype).max
elif cupy.isnan(x):
x = cupy.nan
elif cupy.isinf(x):
x = cupy.inf * (-1)**(x < 0)
return cupy.asanyarray(x, dtype)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
"""Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
.. seealso:: :func:`numpy.nan_to_num`
"""
if not isinstance(x, cupy.ndarray):
out = cupy.full((), x)
else:
out = cupy.empty_like(x) if copy else x
dtype = out.dtype
nan = _check_nan_inf(nan, dtype)
posinf = _check_nan_inf(posinf, dtype, False)
neginf = _check_nan_inf(neginf, dtype, True)
return _nan_to_num(x, nan, posinf, neginf, out=out)
def real_if_close(a, tol=100):
"""If input is complex with all imaginary parts close to zero, return real
parts.
"Close to zero" is defined as `tol` * (machine epsilon of the type for
`a`).
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.real_if_close`
"""
if not issubclass(a.dtype.type, cupy.complexfloating):
return a
if tol > 1:
f = numpy.finfo(a.dtype.type)
tol = f.eps * tol
if cupy.all(cupy.absolute(a.imag) < tol):
a = a.real
return a
@cupy._util.memoize(for_each_device=True)
def _get_interp_kernel(is_complex):
in_params = 'raw V x, raw U idx, '
in_params += 'raw W fx, raw Y fy, U len, raw Y left, raw Y right'
out_params = 'Z y' # output dtype follows NumPy's
if is_complex:
preamble = 'typedef double real_t;\n'
else:
preamble = 'typedef Z real_t;\n'
preamble += 'typedef Z value_t;\n'
preamble += cupy._sorting.search._preamble # for _isnan
code = r'''
U x_idx = idx[i] - 1;
if ( _isnan<V>(x[i]) ) { y = x[i]; }
else if (x_idx < 0) { y = left[0]; }
else if (x[i] == fx[len - 1]) {
// searchsorted cannot handle both of the boundary points,
// so we must detect and correct ourselves...
y = fy[len - 1];
}
else if (x_idx >= len - 1) { y = right[0]; }
else {
const Z slope = (value_t)(fy[x_idx+1] - fy[x_idx]) / \
((real_t)fx[x_idx+1] - (real_t)fx[x_idx]);
Z out = slope * ((real_t)x[i] - (real_t)fx[x_idx]) \
+ (value_t)fy[x_idx];
if (_isnan<Z>(out)) {
out = slope * ((real_t)x[i] - (real_t)fx[x_idx+1]) \
+ (value_t)fy[x_idx+1];
if (_isnan<Z>(out) && (fy[x_idx] == fy[x_idx+1])) {
out = fy[x_idx];
}
}
y = out;
}
'''
return cupy.ElementwiseKernel(
in_params, out_params, code, 'cupy_interp', preamble=preamble)
def interp(x, xp, fp, left=None, right=None, period=None):
""" One-dimensional linear interpolation.
Args:
x (cupy.ndarray): a 1D array of points on which the interpolation
is performed.
xp (cupy.ndarray): a 1D array of points on which the function values
(``fp``) are known.
        fp (cupy.ndarray): a 1D array containing the function values at
            the points ``xp``.
left (float or complex): value to return if ``x < xp[0]``. Default is
``fp[0]``.
right (float or complex): value to return if ``x > xp[-1]``. Default is
``fp[-1]``.
period (None or float): a period for the x-coordinates. Parameters
``left`` and ``right`` are ignored if ``period`` is specified.
Default is ``None``.
Returns:
cupy.ndarray: The interpolated values, same shape as ``x``.
.. note::
This function may synchronize if ``left`` or ``right`` is not already
on the device.
.. seealso:: :func:`numpy.interp`
"""
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError('xp and fp must be 1D arrays')
if xp.size != fp.size:
raise ValueError('fp and xp are not of the same length')
if xp.size == 0:
raise ValueError('array of sample points is empty')
if not x.flags.c_contiguous:
raise NotImplementedError('Non-C-contiguous x is currently not '
'supported')
x_dtype = cupy.common_type(x, xp)
if not cupy.can_cast(x_dtype, cupy.float64):
raise TypeError('Cannot cast array data from'
' {} to {} according to the rule \'safe\''
.format(x_dtype, cupy.float64))
if period is not None:
# The handling of "period" below is modified from NumPy's
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
x = x.astype(cupy.float64)
xp = xp.astype(cupy.float64)
# normalizing periodic boundaries
x %= period
xp %= period
asort_xp = cupy.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = cupy.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = cupy.concatenate((fp[-1:], fp, fp[0:1]))
assert xp.flags.c_contiguous
assert fp.flags.c_contiguous
# NumPy always returns float64 or complex128, so we upcast all values
# on the fly in the kernel
out_dtype = 'D' if fp.dtype.kind == 'c' else 'd'
output = cupy.empty(x.shape, dtype=out_dtype)
idx = cupy.searchsorted(xp, x, side='right')
left = fp[0] if left is None else cupy.array(left, fp.dtype)
right = fp[-1] if right is None else cupy.array(right, fp.dtype)
kern = _get_interp_kernel(out_dtype == 'D')
kern(x, idx, xp, fp, xp.size, left, right, output)
return output
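# A minimal usage sketch (requires a CUDA device; sample values are
# hypothetical):
#
#   xp = cupy.array([0.0, 1.0, 2.0])
#   fp = cupy.array([0.0, 10.0, 20.0])
#   interp(cupy.array([0.5, 1.5]), xp, fp)   # -> array([ 5., 15.])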
|
|
# Copyright (c) 2011, Enthought, Ltd.
# Authors: Pietro Berkes <[email protected]>, Andrey Rzhetsky
# License: Modified BSD license (2-clause)
"""This module defines model B-with-theta, optimized for a loop design.
The implementation assumes that there are a total of 8 annotators. Each item is
annotated by a triplet of annotators, according to the loop design described
in Rzhetsky et al., 2009.
E.g., for 16 items the loop design looks like this (`A` indicates a label,
`*` indicates a missing value): ::
A A A * * * * *
A A A * * * * *
* A A A * * * *
* A A A * * * *
* * A A A * * *
* * A A A * * *
* * * A A A * *
* * * A A A * *
* * * * A A A *
* * * * A A A *
* * * * * A A A
* * * * * A A A
A * * * * * A A
A * * * * * A A
A A * * * * * A
A A * * * * * A
"""
import numpy as np
import scipy.optimize
import scipy.stats
from traits.api import Int, Array
from pyanno.abstract_model import AbstractModel
from pyanno.sampling import optimize_step_size, sample_distribution
from pyanno.util import (random_categorical, compute_counts,
SMALLEST_FLOAT, MISSING_VALUE, labels_frequency,
is_valid, ninf_to_num)
import logging
logger = logging.getLogger(__name__)
# map of `n` to list of all possible triplets of `n` elements
_triplet_combinations = {}
def _get_triplet_combinations(n):
"""Return array of all possible combinations of n elements in triplets.
"""
    if n not in _triplet_combinations:
_triplet_combinations[n] = (
np.array([i for i in np.ndindex(n,n,n)]) )
return _triplet_combinations[n]
class ModelBtLoopDesign(AbstractModel):
"""Implementation of Model B-with-theta from (Rzhetsky et al., 2009).
The model assumes the existence of "true" underlying labels for each item,
which are drawn from a categorical distribution, gamma. Annotators report
these labels with some noise.
This model is closely related to :class:`~ModelB`, but, crucially,
the noise distribution is described by a small number of parameters (one
per annotator), which makes their estimation efficient and less sensitive
to local optima.
These are the model parameters:
- gamma[k] is the probability of label k
- theta[j] parametrizes the probability that annotator `j` reports label
`k'` given ground truth, `k`. More specifically,
`P( annotator j chooses k' | real label = k)` is
      `theta[j]` for k' = k, or `(1 - theta[j]) / (nclasses - 1)` if `k' != k`.
This implementation is optimized for the loop design introduced in
(Rzhetsky et al., 2009), which assumes that each item is annotated by 3
out of 8 annotators. For a more general implementation, see
:class:`~ModelBt`
See the documentation for a more detailed description of the model.
**Reference**
* Rzhetsky A., Shatkay, H., and Wilbur, W.J. (2009). "How to get the most
from your curation effort", PLoS Computational Biology, 5(5).
"""
nclasses = Int
nannotators = Int(8)
# number of annotators rating each item in the loop design
nannotators_per_item = Int(3)
gamma = Array(dtype=float, shape=(None,))
theta = Array(dtype=float, shape=(None,))
def __init__(self, nclasses, gamma, theta, **traits):
"""Create an instance of ModelB.
Arguments
----------
nclasses : int
Number of possible annotation classes
nannotators : int
Number of annotators
gamma : ndarray, shape = (n_classes, )
gamma[k] is the prior probability of label class k
theta : ndarray, shape = (n_annotators, )
theta[j] parametrizes the accuracy of annotator j. Specifically,
`P( annotator j chooses k' | real label = k)` is
`theta[j]` for k' = k, or `(1 - theta[j]) / sum(theta)`
if `k' != k `.
"""
self.nclasses = nclasses
self.gamma = gamma
self.theta = theta
super(ModelBtLoopDesign, self).__init__(**traits)
##### Model and data generation methods ###################################
@staticmethod
def create_initial_state(nclasses, gamma=None, theta=None):
"""Factory method returning a model with random initial parameters.
It is often more convenient to use this factory method over the
constructor, as one does not need to specify the initial model
parameters.
The parameters theta and gamma, controlling accuracy and prevalence,
are initialized at random as follows:
:math:`\\theta_j \sim \mathrm{Uniform}(0.6, 0.95)`
:math:`\gamma \sim \mathrm{Dirichlet}(2.0)`
Arguments
---------
nclasses : int
number of categories
        gamma : nparray
            An array of floats holding the probability of each
            annotation value. Default is None
        theta : nparray
            An array of floats holding the parameters of P( v_i | psi )
            (one for each annotator). Default is None
Returns
-------
model : :class:`~ModelBtLoopDesign`
Instance of ModelBtLoopDesign
"""
if gamma is None:
gamma = ModelBtLoopDesign._random_gamma(nclasses)
if theta is None:
nannotators = 8
theta = ModelBtLoopDesign._random_theta(nannotators)
model = ModelBtLoopDesign(nclasses, gamma, theta)
return model
@staticmethod
def _random_gamma(nclasses):
beta = 2.*np.ones((nclasses,))
return np.random.dirichlet(beta)
@staticmethod
def _random_theta(nannotators):
return np.random.uniform(low=0.6, high=0.95,
size=(nannotators,))
def generate_labels(self, nitems):
"""Generate random labels from the model."""
return random_categorical(self.gamma, nitems)
def generate_annotations_from_labels(self, labels):
"""Generate random annotations from the model, given labels
The method samples random annotations from the conditional probability
distribution of annotations, :math:`x_i^j`
given labels, :math:`y_i`.
Arguments
----------
labels : ndarray, shape = (n_items,), dtype = int
Set of "true" labels
Returns
-------
annotations : ndarray, shape = (n_items, n_annotators)
annotations[i,j] is the annotation of annotator j for item i
"""
theta = self.theta
nannotators = self.nannotators
nitems = labels.shape[0]
        nitems_per_loop = int(np.ceil(float(nitems) / nannotators))
annotations = np.empty((nitems, nannotators), dtype=int)
for j in xrange(nannotators):
for i in xrange(nitems):
distr = self._theta_to_categorical(theta[j], labels[i])
annotations[i,j] = random_categorical(distr, 1)
# mask annotation value according to loop design
for l in xrange(nannotators):
label_idx = np.arange(l+self.nannotators_per_item, l+nannotators) % 8
annotations[l*nitems_per_loop:(l+1)*nitems_per_loop,
label_idx] = MISSING_VALUE
return annotations
def generate_annotations(self, nitems):
"""Generate a random annotation set from the model.
Sample a random set of annotations from the probability distribution
        defined by the current model parameters:
1) Label classes are generated from the prior distribution, pi
2) Annotations are generated from the conditional distribution of
annotations given classes, parametrized by theta
Arguments
---------
nitems : int
Number of items to sample
Returns
-------
annotations : ndarray, shape = (n_items, n_annotators)
annotations[i,j] is the annotation of annotator j for item i
"""
labels = self.generate_labels(nitems)
return self.generate_annotations_from_labels(labels)
def _theta_to_categorical(self, theta, psi):
"""Returns P( v_i = psi | theta_i ) as a distribution."""
distr = np.empty((self.nclasses,))
distr.fill((1.-theta)/(self.nclasses-1.))
distr[psi] = theta
assert np.allclose(distr.sum(), 1.)
return distr
##### Parameters estimation methods #######################################
def mle(self, annotations, estimate_gamma=True):
"""Computes maximum likelihood estimate (MLE) of parameters.
Estimate the parameters :attr:`theta` and :attr:`gamma` from a set of
observed annotations using maximum likelihood estimation.
Arguments
----------
annotations : ndarray, shape = (n_items, n_annotators)
annotations[i,j] is the annotation of annotator j for item i
estimate_gamma : bool
If True, the parameters :attr:`gamma` are estimated by the empirical
class frequency. If False, :attr:`gamma` is left unchanged.
"""
self._raise_if_incompatible(annotations)
# wrap log likelihood function to give it to optimize.fmin
_llhood_counts = self._log_likelihood_counts
def _wrap_llhood(params, counts):
self.gamma, self.theta = self._vector_to_params(params)
# minimize *negative* likelihood
return - _llhood_counts(counts)
self._parameter_estimation(_wrap_llhood, annotations,
estimate_gamma=estimate_gamma)
def map(self, annotations, estimate_gamma=True):
"""Computes maximum a posteriori (MAP) estimate of parameters.
Estimate the parameters :attr:`theta` and :attr:`gamma` from a set of
observed annotations using maximum a posteriori estimation.
Arguments
----------
annotations : ndarray, shape = (n_items, n_annotators)
annotations[i,j] is the annotation of annotator j for item i
estimate_gamma : bool
If True, the parameters :attr:`gamma` are estimated by the empirical
class frequency. If False, :attr:`gamma` is left unchanged.
"""
self._raise_if_incompatible(annotations)
# wrap log likelihood function to give it to optimize.fmin
_llhood_counts = self._log_likelihood_counts
_log_prior = self._log_prior
def _wrap_llhood(params, counts):
self.gamma, self.theta = self._vector_to_params(params)
# minimize *negative* posterior probability of parameters
return - (_llhood_counts(counts) + _log_prior())
self._parameter_estimation(_wrap_llhood, annotations,
estimate_gamma=estimate_gamma)
def _parameter_estimation(self, objective, annotations,
estimate_gamma=True):
counts = compute_counts(annotations, self.nclasses)
params_start = self._random_initial_parameters(annotations,
estimate_gamma)
logger.info('Start parameters optimization...')
# TODO: use gradient, constrained optimization
params_best = scipy.optimize.fmin(objective,
params_start,
args=(counts,),
xtol=1e-4, ftol=1e-4,
disp=False, maxiter=10000)
logger.info('Parameters optimization finished')
# parse arguments and update
self.gamma, self.theta = self._vector_to_params(params_best)
def _random_initial_parameters(self, annotations, estimate_gamma):
if estimate_gamma:
# estimate gamma from observed annotations
gamma = labels_frequency(annotations, self.nclasses)
else:
gamma = ModelBtLoopDesign._random_gamma(self.nclasses)
theta = ModelBtLoopDesign._random_theta(self.nannotators)
return self._params_to_vector(gamma, theta)
def _params_to_vector(self, gamma, theta):
"""Convert the tuple (gamma, theta) to a parameters vector.
Used to interface with the optimization routines.
"""
return np.r_[gamma[:-1], theta]
def _vector_to_params(self, params):
"""Convert a parameters vector to (gamma, theta) tuple.
Used to interface with the optimization routines.
"""
nclasses = self.nclasses
gamma = np.zeros((nclasses,))
gamma[:nclasses-1] = params[:nclasses-1]
gamma[-1] = 1. - gamma[:nclasses-1].sum()
theta = params[nclasses-1:]
return gamma, theta
##### Model likelihood methods ############################################
def log_likelihood(self, annotations):
"""Compute the log likelihood of a set of annotations given the model.
Returns :math:`\log P(\mathbf{x} | \gamma, \\theta)`,
where :math:`\mathbf{x}` is the array of annotations.
Arguments
----------
annotations : ndarray, shape = (n_items, n_annotators)
annotations[i,j] is the annotation of annotator j for item i
Returns
-------
log_lhood : float
log likelihood of `annotations`
"""
self._raise_if_incompatible(annotations)
counts = compute_counts(annotations, self.nclasses)
return self._log_likelihood_counts(counts)
def _log_likelihood_counts(self, counts):
"""Compute the log likelihood of annotations given the model.
This method assumes the data is in counts format.
"""
# TODO: check if it's possible to replace these constraints with bounded optimization
# check boundary conditions
if (min(min(self.gamma), min(self.theta)) < 0.
or max(max(self.gamma), max(self.theta)) > 1.):
#return np.inf
return SMALLEST_FLOAT
llhood = 0.
# loop over the 8 combinations of annotators
for i in range(8):
# extract the theta parameters for this triplet
triplet_indices = np.arange(i, i+3) % self.nannotators
triplet_indices.sort()
theta_triplet = self.theta[triplet_indices]
# compute the likelihood for the triplet
llhood += self._log_likelihood_triplet(counts[:,i],
theta_triplet)
return llhood
def _log_likelihood_triplet(self, counts_triplet, theta_triplet):
"""Compute the log likelihood of data for one triplet of annotators.
Input:
counts_triplet -- count data for one combination of annotators
theta_triplet -- theta parameters of the current triplet
"""
# log \prod_n P(v_{ijk}^{n} | params)
# = \sum_n log P(v_{ijk}^{n} | params)
# = \sum_v_{ijk} count(v_{ijk}) log P( v_{ijk} | params )
#
# where n is n-th annotation of triplet {ijk}]
# compute P( v_{ijk} | params )
pf = self._pattern_frequencies(theta_triplet)
log_pf = ninf_to_num(np.log(pf))
l = (counts_triplet * log_pf).sum()
return l
def _pattern_frequencies(self, theta_triplet):
"""Compute vector of P(v_{ijk}|params) for each combination of v_{ijk}.
"""
gamma = self.gamma
nclasses = self.nclasses
# list of all possible combinations of v_i, v_j, v_k elements
v_ijk_combinations = _get_triplet_combinations(nclasses)
# P( v_{ijk} | params ) = \sum_psi P( v_{ijk} | psi, params ) P( psi )
pf = 0.
not_theta = (1.-theta_triplet) / (nclasses-1.)
p_v_ijk_given_psi = np.empty_like(v_ijk_combinations, dtype=float)
for psi in range(nclasses):
for j in range(3):
p_v_ijk_given_psi[:,j] = np.where(v_ijk_combinations[:,j]==psi,
theta_triplet[j],
not_theta[j])
pf += p_v_ijk_given_psi.prod(1) * gamma[psi]
return pf
def _log_prior(self):
"""Compute log probability of prior on the theta parameters."""
log_prob = scipy.stats.beta._logpdf(self.theta, 2., 1.).sum()
return log_prob
##### Sampling posterior over parameters ##################################
def sample_posterior_over_accuracy(self, annotations, nsamples,
burn_in_samples = 100,
thin_samples = 5,
target_rejection_rate = 0.3,
rejection_rate_tolerance = 0.2,
step_optimization_nsamples = 500,
adjust_step_every = 100):
"""Return samples from posterior distribution over theta given data.
Samples are drawn using a variant of a Metropolis-Hasting Markov Chain
Monte Carlo (MCMC) algorithm. Sampling proceeds in two phases:
1) *step size estimation phase*: first, the step size in the
MCMC algorithm is adjusted to achieve a given rejection rate.
2) *sampling phase*: second, samples are collected using the
step size from phase 1.
Arguments
----------
annotations : ndarray, shape = (n_items, n_annotators)
annotations[i,j] is the annotation of annotator j for item i
nsamples : int
Number of samples to return (i.e., burn-in and thinning samples
are not included)
burn_in_samples : int
Discard the first `burn_in_samples` during the initial burn-in
phase, where the Monte Carlo chain converges to the posterior
thin_samples : int
Only return one every `thin_samples` samples in order to reduce
the auto-correlation in the sampling chain. This is called
"thinning" in MCMC parlance.
target_rejection_rate : float
target rejection rate for the step size estimation phase
rejection_rate_tolerance : float
the step size estimation phase is ended when the rejection rate for
all parameters is within `rejection_rate_tolerance` from
`target_rejection_rate`
step_optimization_nsamples : int
number of samples to draw in the step size estimation phase
adjust_step_every : int
number of samples after which the step size is adjusted during
            the step size estimation phase
Returns
-------
samples : ndarray, shape = (n_samples, n_annotators)
samples[i,:] is one sample from the posterior distribution over the
parameters `theta`
"""
self._raise_if_incompatible(annotations)
nsamples = self._compute_total_nsamples(nsamples,
burn_in_samples,
thin_samples)
# optimize step size
counts = compute_counts(annotations, self.nclasses)
# wrap log likelihood function to give it to optimize_step_size and
# sample_distribution
_llhood_counts = self._log_likelihood_counts
_log_prior = self._log_prior
def _wrap_llhood(params, counts):
self.theta = params
return _llhood_counts(counts) + _log_prior()
# TODO this save-reset is rather ugly, refactor: create copy of
# model and sample over it
# save internal parameters to reset at the end of sampling
save_params = (self.gamma, self.theta)
try:
# compute optimal step size for given target rejection rate
params_start = self.theta.copy()
params_upper = np.ones((self.nannotators,))
params_lower = np.zeros((self.nannotators,))
step = optimize_step_size(_wrap_llhood, params_start, counts,
params_lower, params_upper,
step_optimization_nsamples,
adjust_step_every,
target_rejection_rate,
rejection_rate_tolerance)
# draw samples from posterior distribution over theta
samples = sample_distribution(_wrap_llhood, params_start, counts,
step, nsamples,
params_lower, params_upper)
return self._post_process_samples(samples, burn_in_samples,
thin_samples)
finally:
# reset parameters
self.gamma, self.theta = save_params
##### Posterior distributions #############################################
def infer_labels(self, annotations):
"""Infer posterior distribution over label classes.
Compute the posterior distribution over label classes given observed
annotations, :math:`P( \mathbf{y} | \mathbf{x}, \\theta, \omega)`.
Arguments
----------
annotations : ndarray, shape = (n_items, n_annotators)
annotations[i,j] is the annotation of annotator j for item i
Returns
-------
posterior : ndarray, shape = (n_items, n_classes)
posterior[i,k] is the posterior probability of class k given the
annotation observed in item i.
"""
self._raise_if_incompatible(annotations)
nitems = annotations.shape[0]
gamma = self.gamma
nclasses = self.nclasses
# get indices of annotators active in each row
valid_entries = is_valid(annotations).nonzero()
annotator_indices = np.reshape(valid_entries[1],
(nitems, self.nannotators_per_item))
valid_annotations = annotations[valid_entries]
valid_annotations = np.reshape(valid_annotations,
(nitems, self.nannotators_per_item))
# thetas of active annotators
theta_equal = self.theta[annotator_indices]
theta_not_equal = (1. - theta_equal) / (nclasses - 1.)
# compute posterior over psi
psi_distr = np.zeros((nitems, nclasses))
for psi in xrange(nclasses):
tmp = np.where(valid_annotations == psi,
theta_equal, theta_not_equal)
psi_distr[:,psi] = gamma[psi] * tmp.prod(1)
# normalize distribution
psi_distr /= psi_distr.sum(1)[:,np.newaxis]
return psi_distr
##### Verify input ########################################################
def are_annotations_compatible(self, annotations):
"""Check if the annotations are compatible with the models' parameters.
"""
if not super(ModelBtLoopDesign, self).are_annotations_compatible(
annotations):
return False
masked_annotations = np.ma.masked_equal(annotations, MISSING_VALUE)
# exactly 3 annotations per row
nvalid = (~masked_annotations.mask).sum(1)
if not np.all(nvalid == self.nannotators_per_item):
return False
return True
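# --- Illustrative usage sketch (not part of the original module) -----------
# Shows how the posterior methods above fit together.  `model` is assumed to
# be an already-initialized ModelBtLoopDesign instance and `annotations` an
# (n_items, n_annotators) integer array using MISSING_VALUE for absent
# entries; both are placeholders supplied by the caller, not defined here.
def _example_infer_most_likely_labels(model, annotations):
    """Hedged sketch: return the most probable class for every item."""
    if not model.are_annotations_compatible(annotations):
        raise ValueError('annotations are not compatible with this model')
    # posterior[i, k] = P(class k | annotations of item i), as documented
    # in infer_labels above
    posterior = model.infer_labels(annotations)
    return posterior.argmax(axis=1)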
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import sys
from collections import OrderedDict, defaultdict, namedtuple
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.core.targets.resources import Resources
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.tasks.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.base.build_environment import get_buildroot
from pants.build_graph.target import Target
from pants.util.dirutil import fast_relpath
from pants.util.fileutil import create_size_estimators
class JvmDependencyUsage(JvmDependencyAnalyzer):
"""Determines the dependency usage ratios of targets.
Analyzes the relationship between the products a target T produces and the products
that T's dependents actually require (this is done by observing analysis files).
If the ratio of required products to available products is low, this is a sign
that target T is not factored well.
A graph is formed from these results, where each node of the graph is a target, and
each edge is a product usage ratio between a target and its dependency. The nodes
also contain additional information to guide refactoring -- for example, the estimated
job size of each target, which indicates the impact a poorly factored target has on
the build times. (see DependencyUsageGraph->to_json)
The graph is either summarized for local analysis or outputted as a JSON file for
aggregation and analysis on a larger scale.
"""
size_estimators = create_size_estimators()
@classmethod
def register_options(cls, register):
super(JvmDependencyUsage, cls).register_options(register)
register('--internal-only', default=True, action='store_true',
help='Specifies that only internal dependencies should be included in the graph '
'output (no external jars).')
register('--summary', default=True, action='store_true',
help='When set, outputs a summary of the "worst" dependencies; otherwise, '
'outputs a JSON report.')
register('--size-estimator',
choices=list(cls.size_estimators.keys()), default='filesize',
help='The method of target size estimation.')
register('--transitive', default=True, action='store_true',
help='Score all targets in the build graph transitively.')
register('--output-file', type=str,
help='Output destination. When unset, outputs to <stdout>.')
@classmethod
def prepare(cls, options, round_manager):
super(JvmDependencyUsage, cls).prepare(options, round_manager)
if not options.skip:
round_manager.require_data('classes_by_source')
round_manager.require_data('classes_by_target')
round_manager.require_data('product_deps_by_src')
def execute(self):
if self.get_options().skip:
return
targets = (self.context.targets() if self.get_options().transitive
else self.context.target_roots)
graph = self.create_dep_usage_graph(targets, get_buildroot())
output_file = self.get_options().output_file
if output_file:
self.context.log.info('Writing dependency usage to {}'.format(output_file))
with open(output_file, 'w') as fh:
self._render(graph, fh)
else:
sys.stdout.write('\n')
self._render(graph, sys.stdout)
def _render(self, graph, fh):
chunks = graph.to_summary() if self.get_options().summary else graph.to_json()
for chunk in chunks:
fh.write(chunk)
fh.flush()
def _resolve_aliases(self, target):
"""Recursively resolve `target` aliases."""
for declared in target.dependencies:
if isinstance(declared, Dependencies) or type(declared) == Target:
for r in self._resolve_aliases(declared):
yield r
else:
yield declared
def _is_declared_dep(self, target, dep):
"""Returns true if the given dep target should be considered a declared dep of target."""
return dep in self._resolve_aliases(target)
def _select(self, target):
if self.get_options().internal_only and isinstance(target, JarLibrary):
return False
elif isinstance(target, (Dependencies, Resources)) or type(target) == Target:
# ignore aliases and resources
return False
else:
return True
def _normalize_product_dep(self, buildroot, classes_by_source, dep):
"""Normalizes the given product dep from the given dep into a set of classfiles.
Product deps arrive as sources, jars, and classfiles: this method normalizes them to classfiles.
TODO: This normalization should happen in the super class.
"""
if dep.endswith(".jar"):
# TODO: post sbt/zinc jar output patch, binary deps will be reported directly as classfiles
return set()
elif dep.endswith(".class"):
return set([dep])
else:
# assume a source file and convert to classfiles
rel_src = fast_relpath(dep, buildroot)
return set(p for _, paths in classes_by_source[rel_src].rel_paths() for p in paths)
def create_dep_usage_graph(self, targets, buildroot):
"""Creates a graph of concrete targets, with their sum of products and dependencies.
Synthetic targets contribute products and dependencies to their concrete target.
"""
# Initialize all Nodes.
classes_by_source = self.context.products.get_data('classes_by_source')
classes_by_target = self.context.products.get_data('classes_by_target')
product_deps_by_src = self.context.products.get_data('product_deps_by_src')
nodes = dict()
for target in targets:
if not self._select(target):
continue
# Create or extend a Node for the concrete version of this target.
concrete_target = target.concrete_derived_from
products_total = sum(len(paths) for _, paths in classes_by_target[target].rel_paths())
node = nodes.get(concrete_target)
if not node:
node = nodes.setdefault(concrete_target, Node(concrete_target))
node.add_derivation(target, products_total)
# Record declared Edges.
for dep_tgt in self._resolve_aliases(target):
derived_from = dep_tgt.concrete_derived_from
if self._select(derived_from):
node.add_edge(Edge(is_declared=True, products_used=set()), derived_from)
# Record the used products and undeclared Edges for this target. Note that some of
# these may be self edges, which are considered later.
target_product_deps_by_src = product_deps_by_src.get(target, dict())
for src in target.sources_relative_to_buildroot():
for product_dep in target_product_deps_by_src.get(os.path.join(buildroot, src), []):
for dep_tgt in self.targets_by_file.get(product_dep, []):
derived_from = dep_tgt.concrete_derived_from
if not self._select(derived_from):
continue
is_declared = self._is_declared_dep(target, dep_tgt)
normalized_deps = self._normalize_product_dep(buildroot, classes_by_source, product_dep)
node.add_edge(Edge(is_declared=is_declared, products_used=normalized_deps), derived_from)
# Prune any Nodes with 0 products.
for concrete_target, node in nodes.items()[:]:
if node.products_total == 0:
nodes.pop(concrete_target)
return DependencyUsageGraph(nodes, self.size_estimators[self.get_options().size_estimator])
class Node(object):
def __init__(self, concrete_target):
self.concrete_target = concrete_target
self.products_total = 0
self.derivations = set()
# Dict mapping concrete dependency targets to an Edge object.
self.dep_edges = defaultdict(Edge)
def add_derivation(self, derived_target, derived_products):
self.derivations.add(derived_target)
self.products_total += derived_products
def add_edge(self, edge, dest):
self.dep_edges[dest] += edge
class Edge(object):
"""Record a set of used products, and a boolean indicating that a depedency edge was declared."""
def __init__(self, is_declared=False, products_used=None):
self.products_used = products_used or set()
self.is_declared = is_declared
def __iadd__(self, that):
self.products_used |= that.products_used
self.is_declared |= that.is_declared
return self
class DependencyUsageGraph(object):
def __init__(self, nodes, size_estimator):
self._nodes = nodes
self._size_estimator = size_estimator
self._cost_cache = {}
self._trans_cost_cache = {}
def _cost(self, target):
if target not in self._cost_cache:
self._cost_cache[target] = self._size_estimator(target.sources_relative_to_buildroot())
return self._cost_cache[target]
def _trans_cost(self, target):
if target not in self._trans_cost_cache:
dep_sum = sum(self._trans_cost(dep) for dep in target.dependencies)
self._trans_cost_cache[target] = self._cost(target) + dep_sum
return self._trans_cost_cache[target]
def _edge_type(self, target, edge, dep):
if target == dep:
return 'self'
elif edge.is_declared:
return 'declared'
else:
return 'undeclared'
def _used_ratio(self, dep_tgt, edge):
dep_tgt_products_total = max(self._nodes[dep_tgt].products_total if dep_tgt in self._nodes else 1, 1)
return len(edge.products_used) / dep_tgt_products_total
def to_summary(self):
"""Outputs summarized dependencies ordered by a combination of max usage and cost."""
# Aggregate inbound edges by their maximum product usage ratio.
max_target_usage = defaultdict(lambda: 0.0)
for target, node in self._nodes.items():
for dep_target, edge in node.dep_edges.items():
if target == dep_target:
continue
used_ratio = self._used_ratio(dep_target, edge)
max_target_usage[dep_target] = max(max_target_usage[dep_target], used_ratio)
# Calculate a score for each.
Score = namedtuple('Score', ('badness', 'max_usage', 'cost_transitive', 'target'))
scores = []
for target, max_usage in max_target_usage.items():
cost_transitive = self._trans_cost(target)
score = int(cost_transitive / (max_usage if max_usage > 0.0 else 1.0))
scores.append(Score(score, max_usage, cost_transitive, target.address.spec))
# Output in order by score.
yield '[\n'
first = True
for score in sorted(scores, key=lambda s: s.badness):
yield '{} {}'.format('' if first else ',\n', json.dumps(score._asdict()))
first = False
yield '\n]\n'
def to_json(self):
"""Outputs the entire graph."""
res_dict = {}
def gen_dep_edge(node, edge, dep_tgt):
return {
'target': dep_tgt.address.spec,
'dependency_type': self._edge_type(node.concrete_target, edge, dep_tgt),
'products_used': len(edge.products_used),
'products_used_ratio': self._used_ratio(dep_tgt, edge),
}
for node in self._nodes.values():
res_dict[node.concrete_target.address.spec] = {
'cost': self._cost(node.concrete_target),
'cost_transitive': self._trans_cost(node.concrete_target),
'products_total': node.products_total,
'dependencies': [gen_dep_edge(node, edge, dep_tgt) for dep_tgt, edge in node.dep_edges.items()]
}
yield json.dumps(res_dict, indent=2, sort_keys=True)
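# --- Illustrative sketch (not part of the original task) -------------------
# Reproduces the scoring arithmetic from DependencyUsageGraph.to_summary with
# made-up numbers: a dependency with a large transitive cost whose consumers
# use only a small fraction of its products receives a high "badness" score.
def _example_badness(cost_transitive=120000, max_usage=0.05):
  """Hedged sketch of the badness formula used in to_summary above."""
  return int(cost_transitive / (max_usage if max_usage > 0.0 else 1.0))
# _example_badness() == 2400000, i.e. an expensive, poorly used dependency.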
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import copy
import functools
from oslo.config import cfg
from oslo.utils import units
from oslo.vmware import exceptions as vexc
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
'VirtualPCNet32', 'VirtualSriovEthernetCard',
'VirtualVmxnet']
# A cache for VM references. The key is the VM name and the value is the
# VM reference. The VM name is unique: it is either the UUID of the
# instance, or UUID-rescue in the case of a rescue VM. The cache prevents
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
def vm_refs_cache_reset():
global _VM_REFS_CACHE
_VM_REFS_CACHE = {}
def vm_ref_cache_delete(id):
_VM_REFS_CACHE.pop(id, None)
def vm_ref_cache_update(id, vm_ref):
_VM_REFS_CACHE[id] = vm_ref
def vm_ref_cache_get(id):
return _VM_REFS_CACHE.get(id)
def _vm_ref_cache(id, func, session, data):
vm_ref = vm_ref_cache_get(id)
if not vm_ref:
vm_ref = func(session, data)
vm_ref_cache_update(id, vm_ref)
return vm_ref
def vm_ref_cache_from_instance(func):
@functools.wraps(func)
def wrapper(session, instance):
id = instance['uuid']
return _vm_ref_cache(id, func, session, instance)
return wrapper
def vm_ref_cache_from_name(func):
@functools.wraps(func)
def wrapper(session, name):
id = name
return _vm_ref_cache(id, func, session, name)
return wrapper
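# --- Illustrative sketch (not part of the original module) -----------------
# Shows how the caching decorators above short-circuit repeated lookups.  The
# lookup body below is a stand-in; real callers decorate functions that query
# the backend through a vSphere session (see get_vm_ref_from_name further
# down).  Calling _example_cached_lookup(session, 'uuid-1') stores the result
# in _VM_REFS_CACHE under 'uuid-1', so a second call with the same name
# returns the cached value without re-running the body.
@vm_ref_cache_from_name
def _example_cached_lookup(session, name):
    # pretend this is an expensive backend round-trip
    return 'vm-ref-for-%s' % name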
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
def _iface_id_option_value(client_factory, iface_id, port_index):
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % port_index
opt.value = iface_id
return opt
def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_infos, os_type=constants.DEFAULT_OS_TYPE,
allocations=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = os_type
# The name is the unique identifier for the VM. This will either be the
# instance UUID or the instance UUID with suffix '-rescue' for VMs that
# are in rescue mode.
config_spec.instanceUuid = name
# Allow nested ESX instances to host 64 bit VMs.
if os_type == "vmkernel5Guest":
config_spec.nestedHVEnabled = "True"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = int(instance['vcpus'])
config_spec.memoryMB = int(instance['memory_mb'])
# Configure cpu information
if (allocations is not None and
('cpu_limit' in allocations or
'cpu_reservation' in allocations or
'cpu_shares_level' in allocations)):
allocation = client_factory.create('ns0:ResourceAllocationInfo')
if 'cpu_limit' in allocations:
allocation.limit = allocations['cpu_limit']
if 'cpu_reservation' in allocations:
allocation.reservation = allocations['cpu_reservation']
if 'cpu_shares_level' in allocations:
shares = client_factory.create('ns0:SharesInfo')
shares.level = allocations['cpu_shares_level']
if (shares.level == 'custom' and
'cpu_shares_share' in allocations):
shares.shares = allocations['cpu_shares_share']
else:
shares.shares = 0
allocation.shares = shares
config_spec.cpuAllocation = allocation
vif_spec_list = []
for vif_info in vif_infos:
vif_spec = _create_vif_spec(client_factory, vif_info)
vif_spec_list.append(vif_spec)
device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
# add vm-uuid and iface-id.x values for Neutron
extra_config = []
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.vm-uuid"
opt.value = instance['uuid']
extra_config.append(opt)
port_index = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
extra_config.append(_iface_id_option_value(client_factory,
vif_info['iface_id'],
port_index))
port_index += 1
config_spec.extraConfig = extra_config
# Set the VM to be 'managed' by 'OpenStack'
managed_by = client_factory.create('ns0:ManagedByInfo')
managed_by.extensionKey = constants.EXTENSION_KEY
managed_by.type = constants.EXTENSION_TYPE_INSTANCE
config_spec.managedBy = managed_by
return config_spec
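# --- Illustrative sketch (not part of the original module) -----------------
# Shape of the `allocations` dict consumed by the cpu-allocation branch of
# get_vm_create_spec above.  All values are invented; only the keys that are
# present are copied onto the ResourceAllocationInfo object, and
# 'cpu_shares_share' is read only when the shares level is 'custom'.
_EXAMPLE_CPU_ALLOCATIONS = {
    'cpu_limit': 2000,
    'cpu_reservation': 1000,
    'cpu_shares_level': 'custom',
    'cpu_shares_share': 1500,
}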
def get_vm_resize_spec(client_factory, instance):
"""Provides updates for a VM spec."""
resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
resize_spec.numCPUs = int(instance['vcpus'])
resize_spec.memoryMB = int(instance['memory_mb'])
return resize_spec
def create_controller_spec(client_factory, key,
adapter_type=constants.DEFAULT_ADAPTER_TYPE):
"""Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
# Create a controller for the Virtual Hard Disk
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if adapter_type == constants.ADAPTER_TYPE_BUSLOGIC:
virtual_controller = client_factory.create(
'ns0:VirtualBusLogicController')
elif adapter_type == constants.ADAPTER_TYPE_LSILOGICSAS:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicSASController')
elif adapter_type == constants.ADAPTER_TYPE_PARAVIRTUAL:
virtual_controller = client_factory.create(
'ns0:ParaVirtualSCSIController')
else:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicController')
virtual_controller.key = key
virtual_controller.busNumber = 0
virtual_controller.sharedBus = "noSharing"
virtual_device_config.device = virtual_controller
return virtual_device_config
def convert_vif_model(name):
"""Converts standard VIF_MODEL types to the internal VMware ones."""
if name == network_model.VIF_MODEL_E1000:
return 'VirtualE1000'
if name == network_model.VIF_MODEL_E1000E:
return 'VirtualE1000e'
if name not in ALL_SUPPORTED_NETWORK_DEVICES:
msg = _('%s is not supported.') % name
raise exception.Invalid(msg)
return name
def _create_vif_spec(client_factory, vif_info):
"""Builds a config spec for the addition of a new network
adapter to the VM.
"""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = "add"
# Keep the vif model parameter compatible with the other hypervisor drivers.
vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. Invalid configuration if set to static and the NIC does
# not come up on boot if set to dynamic.
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing_name = ''.join(['ns0:VirtualEthernetCard',
'OpaqueNetworkBackingInfo'])
backing = client_factory.create(backing_name)
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
'VirtualPortBackingInfo'])
backing = client_factory.create(backing_name)
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
net_device.connectable = connectable_spec
net_device.backing = backing
# The server assigns a key to the device. Here we pass a negative temporary
# key: negative because actual keys are positive numbers, and we don't want
# a clash with the key that the server might associate with the device.
net_device.key = -47
net_device.addressType = "manual"
net_device.macAddress = mac_address
net_device.wakeOnLanEnabled = True
network_spec.device = net_device
return network_spec
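# --- Illustrative sketch (not part of the original module) -----------------
# Example of the `vif_info` dict that _create_vif_spec above expects; every
# value is invented.  'network_ref' selects the backing type: an entry shaped
# like the one below takes the DistributedVirtualPortgroup branch, while a
# None or plain network ref falls back to the device-name backing.
_EXAMPLE_VIF_INFO = {
    'vif_model': network_model.VIF_MODEL_E1000,  # converted to 'VirtualE1000'
    'network_name': 'br-int',
    'mac_address': 'fa:16:3e:00:00:01',
    'iface_id': 'aaaa-bbbb-cccc',                # Neutron port id
    'network_ref': {'type': 'DistributedVirtualPortgroup',
                    'dvsw': 'dvs-uuid',
                    'dvpg': 'dvportgroup-key'},
}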
def get_network_attach_config_spec(client_factory, vif_info, index):
"""Builds the vif attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
vif_spec = _create_vif_spec(client_factory, vif_info)
config_spec.deviceChange = [vif_spec]
if vif_info['iface_id'] is not None:
config_spec.extraConfig = [_iface_id_option_value(client_factory,
vif_info['iface_id'],
index)]
return config_spec
def get_network_detach_config_spec(client_factory, device, port_index):
"""Builds the vif detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
config_spec.deviceChange = [virtual_device_config]
# If a key is already present then it cannot be deleted, only updated.
# This enables us to reuse the key if there is an additional attachment.
# The keys must be preserved because logic on the ESX host wires the
# network according to these values; changing them would break networking
# to and from the interface.
config_spec.extraConfig = [_iface_id_option_value(client_factory,
'free',
port_index)]
return config_spec
def get_vmdk_attach_config_spec(client_factory,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_disk_spec(client_factory,
controller_key, disk_type, file_path,
disk_size, linked_clone,
unit_number, device_name)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_cdrom_attach_config_spec(client_factory,
datastore,
file_path,
controller_key,
cdrom_unit_number):
"""Builds and returns the cdrom attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
destroy_disk=False):
"""Builds the vmdk detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = detach_virtual_disk_spec(client_factory,
device,
destroy_disk)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
"""Builds extra spec fields from a dictionary."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# add the key value pairs
extra_config = []
for key, value in extra_opts.iteritems():
opt = client_factory.create('ns0:OptionValue')
opt.key = key
opt.value = value
extra_config.append(opt)
config_spec.extraConfig = extra_config
return config_spec
def get_vmdk_path(session, vm_ref, instance):
"""Gets the vmdk file path for specified instance."""
hardware_devices = session._call_method(vim_util,
"get_dynamic_property", vm_ref, "VirtualMachine",
"config.hardware.device")
(vmdk_path, adapter_type, disk_type) = get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance['uuid'])
return vmdk_path
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controller_key = None
disk_type = None
adapter_type_dict = {}
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
if uuid:
if uuid in device.backing.fileName:
vmdk_file_path = device.backing.fileName
else:
vmdk_file_path = device.backing.fileName
vmdk_controller_key = device.controllerKey
if getattr(device.backing, 'thinProvisioned', False):
disk_type = "thin"
else:
if getattr(device.backing, 'eagerlyScrub', False):
disk_type = "eagerZeroedThick"
else:
disk_type = constants.DEFAULT_DISK_TYPE
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = constants.DEFAULT_ADAPTER_TYPE
elif device.__class__.__name__ == "VirtualBusLogicController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_BUSLOGIC
elif device.__class__.__name__ == "VirtualIDEController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_IDE
elif device.__class__.__name__ == "VirtualLsiLogicSASController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_LSILOGICSAS
elif device.__class__.__name__ == "ParaVirtualSCSIController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_PARAVIRTUAL
adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
return (vmdk_file_path, adapter_type, disk_type)
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if unit_number not in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController',
'ParaVirtualSCSIController']
def _find_allocated_slots(devices):
"""Return dictionary which maps controller_key to list of allocated unit
numbers for that controller_key.
"""
taken = {}
for device in devices:
if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'):
unit_numbers = taken.setdefault(device.controllerKey, [])
unit_numbers.append(device.unitNumber)
if _is_scsi_controller(device):
# the SCSI controller sits on its own bus
unit_numbers = taken.setdefault(device.key, [])
unit_numbers.append(device.scsiCtlrUnitNumber)
return taken
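# --- Illustrative sketch (not part of the original module) -----------------
# Example of the mapping produced by _find_allocated_slots (values invented):
# controller key 1000 already has virtual disks in unit numbers 0 and 1, and
# the SCSI controller occupies its own scsiCtlrUnitNumber (7 here), so
# _find_controller_slot would hand out unit number 2 next.
_EXAMPLE_TAKEN_SLOTS = {1000: [0, 1, 7]}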
def allocate_controller_key_and_unit_number(client_factory, devices,
adapter_type):
"""This function inspects the current set of hardware devices and returns
controller_key and unit_number that can be used for attaching a new virtual
disk to adapter with the given adapter_type.
"""
if devices.__class__.__name__ == "ArrayOfVirtualDevice":
devices = devices.VirtualDevice
taken = _find_allocated_slots(devices)
ret = None
if adapter_type == constants.ADAPTER_TYPE_IDE:
ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
ret = _find_controller_slot(ide_keys, taken, 2)
elif adapter_type in [constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_PARAVIRTUAL]:
scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
ret = _find_controller_slot(scsi_keys, taken, 16)
if ret:
return ret[0], ret[1], None
# create new controller with the specified type and return its spec
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key,
adapter_type)
return controller_key, 0, controller_spec
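# --- Illustrative sketch (not part of the original module) -----------------
# Shows how the triple returned by allocate_controller_key_and_unit_number is
# meant to be consumed: when an existing controller has a free slot the third
# element is None, otherwise it is a spec (carrying the temporary key -101)
# that must be added to the same deviceChange list as the new disk.
# `client_factory` and `devices` are placeholders for real vSphere objects.
def _example_pick_disk_slot(client_factory, devices):
    controller_key, unit_number, controller_spec = (
        allocate_controller_key_and_unit_number(
            client_factory, devices, constants.DEFAULT_ADAPTER_TYPE))
    device_changes = []
    if controller_spec is not None:
        # a brand new controller is created in the same reconfigure call
        device_changes.append(controller_spec)
    return controller_key, unit_number, device_changes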
def get_rdm_disk(hardware_devices, uuid):
"""Gets the RDM disk key."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
return device
def get_vmdk_create_spec(client_factory, size_in_kb,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number):
"""Builds spec for the creation of a new Virtual CDROM to the VM."""
config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
config_spec.operation = "add"
cdrom = client_factory.create('ns0:VirtualCdrom')
cdrom_device_backing = client_factory.create(
'ns0:VirtualCdromIsoBackingInfo')
cdrom_device_backing.datastore = datastore
cdrom_device_backing.fileName = file_path
cdrom.backing = cdrom_device_backing
cdrom.controllerKey = controller_key
cdrom.unitNumber = cdrom_unit_number
cdrom.key = -1
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
cdrom.connectable = connectable_spec
config_spec.device = cdrom
return config_spec
def create_virtual_disk_spec(client_factory, controller_key,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None):
"""Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
if disk_type == "rdm" or disk_type == "rdmp":
disk_file_backing = client_factory.create(
'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
disk_file_backing.compatibilityMode = "virtualMode" \
if disk_type == "rdm" else "physicalMode"
disk_file_backing.diskMode = "independent_persistent"
disk_file_backing.deviceName = device_name or ""
else:
disk_file_backing = client_factory.create(
'ns0:VirtualDiskFlatVer2BackingInfo')
disk_file_backing.diskMode = "persistent"
if disk_type == "thin":
disk_file_backing.thinProvisioned = True
else:
if disk_type == "eagerZeroedThick":
disk_file_backing.eagerlyScrub = True
disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
if not linked_clone:
virtual_disk.backing = disk_file_backing
else:
virtual_disk.backing = copy.copy(disk_file_backing)
virtual_disk.backing.fileName = ""
virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
# The server assigns a key to the device. Here we pass a negative random
# key: negative because actual keys are positive numbers, and we don't want
# a clash with the key that the server might associate with the device.
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
virtual_disk.unitNumber = unit_number or 0
virtual_disk.capacityInKB = disk_size or 0
virtual_device_config.device = virtual_disk
return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config
def clone_vm_spec(client_factory, location,
power_on=False, snapshot=None, template=False, config=None):
"""Builds the VM clone spec."""
clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = location
clone_spec.powerOn = power_on
if snapshot:
clone_spec.snapshot = snapshot
if config is not None:
clone_spec.config = config
clone_spec.template = template
return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing"):
"""Builds the VM relocation spec."""
rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
rel_spec.datastore = datastore
rel_spec.diskMoveType = disk_move_type
if host:
rel_spec.host = host
return rel_spec
def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt = client_factory.create('ns0:OptionValue')
opt.key = "machine.id"
opt.value = machine_id_str
virtual_machine_config_spec.extraConfig = [opt]
return virtual_machine_config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
port_group_name, vlan_id):
"""Builds the virtual switch port group add spec."""
vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
vswitch_port_group_spec.name = port_group_name
vswitch_port_group_spec.vswitchName = vswitch_name
# VLAN ID of 0 means that VLAN tagging is not to be done for the network.
vswitch_port_group_spec.vlanId = int(vlan_id)
policy = client_factory.create('ns0:HostNetworkPolicy')
nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
nicteaming.notifySwitches = True
policy.nicTeaming = nicteaming
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
def get_vnc_config_spec(client_factory, port):
"""Builds the vnc config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt_enabled = client_factory.create('ns0:OptionValue')
opt_enabled.key = "RemoteDisplay.vnc.enabled"
opt_enabled.value = "true"
opt_port = client_factory.create('ns0:OptionValue')
opt_port.key = "RemoteDisplay.vnc.port"
opt_port.value = port
extras = [opt_enabled, opt_port]
virtual_machine_config_spec.extraConfig = extras
return virtual_machine_config_spec
def get_vnc_port(session):
"""Return VNC port for an VM or None if there is no available port."""
min_port = CONF.vmware.vnc_port
port_total = CONF.vmware.vnc_port_total
allocated_ports = _get_allocated_vnc_ports(session)
max_port = min_port + port_total
for port in range(min_port, max_port):
if port not in allocated_ports:
return port
raise exception.ConsolePortRangeExhausted(min_port=min_port,
max_port=max_port)
def _get_allocated_vnc_ports(session):
"""Return an integer set of all allocated VNC ports."""
# TODO(rgerganov): bug #1256944
# The VNC port should be unique per host, not per vCenter
vnc_ports = set()
result = session._call_method(vim_util, "get_objects",
"VirtualMachine", [VNC_CONFIG_KEY])
while result:
for obj in result.objects:
if not hasattr(obj, 'propSet'):
continue
dynamic_prop = obj.propSet[0]
option_value = dynamic_prop.val
vnc_port = option_value.value
vnc_ports.add(int(vnc_port))
token = _get_token(result)
if token:
result = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return vnc_ports
# NOTE(mdbooth): this convenience function is temporarily duplicated in
# ds_util. The correct fix is to handle paginated results as they are returned
# from the relevant vim_util function. However, vim_util is currently
# effectively deprecated as we migrate to oslo.vmware. This duplication will be
# removed when we fix it properly in oslo.vmware.
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
def _get_reference_for_value(results, value):
for object in results.objects:
if object.obj.value == value:
return object
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_for_optionvalue(results, value):
for object in results.objects:
if hasattr(object, "propSet") and object.propSet:
if object.propSet[0].val.value == value:
return object.obj
def _get_object_from_results(session, results, value, func):
while results:
token = _get_token(results)
object = func(results, value)
if object:
if token:
session._call_method(vim_util,
"cancel_retrieve",
token)
return object
if token:
results = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
return None
def _cancel_retrieve_if_necessary(session, results):
token = _get_token(results)
if token:
results = session._call_method(vim_util,
"cancel_retrieve",
token)
def _get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, vm_name,
_get_object_for_value)
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
return (_get_vm_ref_from_vm_uuid(session, vm_name) or
_get_vm_ref_from_name(session, vm_name))
def _get_vm_ref_from_uuid(session, instance_uuid):
"""Get reference to the VM with the uuid specified.
This method reads the names of all VMs running on the backend and
then filters locally for the matching instance_uuid. It is far more
efficient to use _get_vm_ref_from_vm_uuid.
"""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
"""Get reference to the VM.
The method will make use of FindAllByUuid to get the VM reference.
This method finds all VMs on the backend that match the
instance_uuid, more specifically all VMs on the backend that have
'config_spec.instanceUuid' set to 'instance_uuid'.
"""
vm_refs = session._call_method(
session.vim,
"FindAllByUuid",
session.vim.service_content.searchIndex,
uuid=instance_uuid,
vmSearch=True,
instanceUuid=True)
if vm_refs:
return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
"""Get reference to the VM with the uuid specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
"""Get reference to the VM through uuid or vm name."""
uuid = instance['uuid']
vm_ref = (search_vm_ref_by_identifier(session, uuid) or
_get_vm_ref_from_name(session, instance['name']))
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=uuid)
return vm_ref
def search_vm_ref_by_identifier(session, identifier):
"""Searches VM reference using the identifier.
This method is primarily meant to separate out the part of the vm_ref
search logic that can be used directly in the special case of
migrating the instance. For querying the VM linked to an instance,
always use get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
_get_vm_ref_from_extraconfig(session, identifier) or
_get_vm_ref_from_uuid(session, identifier))
return vm_ref
def get_host_ref_from_id(session, host_id, property_list=None):
"""Get a host reference object for a host_id string."""
if property_list is None:
property_list = ['name']
host_refs = session._call_method(
vim_util, "get_objects",
"HostSystem", property_list)
return _get_object_from_results(session, host_refs, host_id,
_get_reference_for_value)
def get_host_id_from_vm_ref(session, vm_ref):
"""This method allows you to find the managed object
ID of the host running a VM. Since vMotion can
change the value, you should not presume that this
is a value that you can cache for very long and
should be prepared to allow for it to change.
:param session: a vSphere API connection
:param vm_ref: a reference object to the running VM
:return: the host_id running the virtual machine
"""
# to prevent typographical errors below
property_name = 'runtime.host'
# a property collector in VMware vSphere Management API
# is a set of local representations of remote values.
# property_set here, is a local representation of the
# properties we are querying for.
property_set = session._call_method(
vim_util, "get_object_properties",
None, vm_ref, vm_ref._type, [property_name])
prop = property_from_property_set(
property_name, property_set)
if prop is not None:
prop = prop.val.value
else:
# reaching here represents an impossible state
raise RuntimeError(
"Virtual Machine %s exists without a runtime.host!"
% (vm_ref))
return prop
def property_from_property_set(property_name, property_set):
'''Use this method to filter property collector results.
Because network traffic is expensive, multiple
VMwareAPI calls will sometimes pile up properties
to be collected. That means results may contain
many different values for multiple purposes.
This helper will filter a list for a single result
and filter the properties of that result to find
the single value of whatever type resides in that
result. This could be a ManagedObjectReference ID
or a complex value.
:param property_name: name of property you want
:param property_set: all results from query
:return: the value of the property.
'''
for prop in property_set.objects:
p = _property_from_propSet(prop.propSet, property_name)
if p is not None:
return p
def _property_from_propSet(propSet, name='name'):
for p in propSet:
if p.name == name:
return p
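# --- Illustrative sketch (not part of the original module) -----------------
# Mimics the shape of a property collector result with plain namedtuples to
# show what property_from_property_set actually does: walk every object in
# the result set and return the first property whose name matches.  The real
# objects come from the vSphere API; these stand-ins are invented.
def _example_property_filtering():
    from collections import namedtuple
    Prop = namedtuple('Prop', ['name', 'val'])
    ObjectContent = namedtuple('ObjectContent', ['propSet'])
    ResultSet = namedtuple('ResultSet', ['objects'])
    results = ResultSet(objects=[
        ObjectContent(propSet=[Prop('name', 'esx-host-1'),
                               Prop('runtime.host', 'host-42')]),
    ])
    prop = property_from_property_set('runtime.host', results)
    return prop.val  # == 'host-42'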
def get_host_ref_for_vm(session, instance, props):
"""Get the ESXi host running a VM by its name."""
vm_ref = get_vm_ref(session, instance)
host_id = get_host_id_from_vm_ref(session, vm_ref)
return get_host_ref_from_id(session, host_id, props)
def get_host_name_for_vm(session, instance):
"""Get the ESXi host running a VM by its name."""
host_ref = get_host_ref_for_vm(session, instance, ['name'])
return get_host_name_from_host_ref(host_ref)
def get_host_name_from_host_ref(host_ref):
p = _property_from_propSet(host_ref.propSet)
if p is not None:
return p.val
def get_vm_state_from_name(session, vm_name):
vm_ref = get_vm_ref_from_name(session, vm_name)
vm_state = session._call_method(vim_util, "get_dynamic_property",
vm_ref, "VirtualMachine", "runtime.powerState")
return vm_state
def get_stats_from_cluster(session, cluster):
"""Get the aggregate resource stats of a cluster."""
cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
mem_info = {'total': 0, 'free': 0}
# Get the Host and Resource Pool Managed Object Refs
prop_dict = session._call_method(vim_util, "get_dynamic_properties",
cluster, "ClusterComputeResource",
["host", "resourcePool"])
if prop_dict:
host_ret = prop_dict.get('host')
if host_ret:
host_mors = host_ret.ManagedObjectReference
result = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"HostSystem", host_mors,
["summary.hardware", "summary.runtime"])
for obj in result.objects:
hardware_summary = obj.propSet[0].val
runtime_summary = obj.propSet[1].val
if (runtime_summary.inMaintenanceMode is False and
runtime_summary.connectionState == "connected"):
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
cpu_info['vcpus'] += hardware_summary.numCpuThreads
cpu_info['cores'] += hardware_summary.numCpuCores
cpu_info['vendor'].append(hardware_summary.vendor)
cpu_info['model'].append(hardware_summary.cpuModel)
res_mor = prop_dict.get('resourcePool')
if res_mor:
res_usage = session._call_method(vim_util, "get_dynamic_property",
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
# maxUsage is the memory limit of the cluster available to VMs
mem_info['total'] = int(res_usage.maxUsage / units.Mi)
# overallUsage is the hypervisor's view of memory usage by VMs
consumed = int(res_usage.overallUsage / units.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'cpu': cpu_info, 'mem': mem_info}
return stats
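# --- Illustrative sketch (not part of the original module) -----------------
# Shape of the dictionary returned by get_stats_from_cluster; the numbers are
# invented.  'vcpus' and 'cores' are summed over connected hosts that are not
# in maintenance mode, and the memory figures come from the cluster's root
# resource pool usage, expressed in MiB.
_EXAMPLE_CLUSTER_STATS = {
    'cpu': {'vcpus': 32, 'cores': 16,
            'vendor': ['GenuineIntel', 'GenuineIntel'],
            'model': ['Intel(R) Xeon(R) CPU', 'Intel(R) Xeon(R) CPU']},
    'mem': {'total': 65536, 'free': 40960},
}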
def get_host_ref(session, cluster=None):
"""Get reference to a host within the cluster specified."""
if cluster is None:
results = session._call_method(vim_util, "get_objects",
"HostSystem")
_cancel_retrieve_if_necessary(session, results)
host_mor = results.objects[0].obj
else:
host_ret = session._call_method(vim_util, "get_dynamic_property",
cluster, "ClusterComputeResource",
"host")
if not host_ret or not host_ret.ManagedObjectReference:
msg = _('No host available on cluster')
raise exception.NoValidHost(reason=msg)
host_mor = host_ret.ManagedObjectReference[0]
return host_mor
def propset_dict(propset):
"""Turn a propset list into a dictionary
PropSet is an optional attribute on ObjectContent objects
that are returned by the VMware API.
You can read more about these at:
| http://pubs.vmware.com/vsphere-51/index.jsp
| #com.vmware.wssdk.apiref.doc/
| vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
"""
if propset is None:
return {}
# TODO(hartsocks): once support for Python 2.6 is dropped
# change to {[(prop.name, prop.val) for prop in propset]}
return dict([(prop.name, prop.val) for prop in propset])
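# --- Illustrative sketch (not part of the original module) -----------------
# propset_dict simply flattens a propSet list into {name: val}; with invented
# stand-in objects the behaviour looks like this:
def _example_propset_dict():
    from collections import namedtuple
    Prop = namedtuple('Prop', ['name', 'val'])
    return propset_dict([Prop('name', 'vm-1'),
                         Prop('runtime.powerState', 'poweredOn')])
    # == {'name': 'vm-1', 'runtime.powerState': 'poweredOn'}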
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
volume_uuid in device.backing.fileName):
return device.backing.uuid
def get_vmdk_backed_disk_device(hardware_devices, uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
device.backing.uuid == uuid):
return device
def get_vmdk_volume_disk(hardware_devices, path=None):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
if not path or path == device.backing.fileName:
return device
def get_res_pool_ref(session, cluster, node_mo_id):
"""Get the resource pool."""
if cluster is None:
# With no cluster named, use the root resource pool.
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
# The 0th resource pool is always the root resource pool on both ESX
# and vCenter.
res_pool_ref = results.objects[0].obj
else:
if cluster.value == node_mo_id:
# Get the root resource pool of the cluster
res_pool_ref = session._call_method(vim_util,
"get_dynamic_property",
cluster,
"ClusterComputeResource",
"resourcePool")
return res_pool_ref
def get_all_cluster_mors(session):
"""Get all the clusters in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get cluster references %s") % excep)
def get_all_res_pool_mors(session):
"""Get all the resource pools in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get resource pool references " "%s") % excep)
def get_dynamic_property_mor(session, mor_ref, attribute):
"""Get the value of an attribute for a given managed object."""
return session._call_method(vim_util, "get_dynamic_property",
mor_ref, mor_ref._type, attribute)
def find_entity_mor(entity_list, entity_name):
"""Returns managed object ref for given cluster or resource pool name."""
return [mor for mor in entity_list if (hasattr(mor, 'propSet') and
mor.propSet[0].val == entity_name)]
def get_all_cluster_refs_by_name(session, path_list):
"""Get reference to the Cluster, ResourcePool with the path specified.
The path is the display name. This can be the full path as well.
The input will have the list of clusters and resource pool names
"""
cls = get_all_cluster_mors(session)
if not cls:
return {}
res = get_all_res_pool_mors(session)
if not res:
return {}
path_list = [path.strip() for path in path_list]
list_obj = []
for entity_path in path_list:
# entity_path could be unique cluster and/or resource-pool name
res_mor = find_entity_mor(res, entity_path)
cls_mor = find_entity_mor(cls, entity_path)
cls_mor.extend(res_mor)
for mor in cls_mor:
list_obj.append((mor.obj, mor.propSet[0].val))
return get_dict_mor(session, list_obj)
def get_dict_mor(session, list_obj):
"""The input is a list of objects in the form
(managed_object, display_name)
The managed object will be in the form
{ value = "domain-1002", _type = "ClusterComputeResource" }
Output data format:
| dict_mors = {
| 'respool-1001': { 'cluster_mor': clusterMor,
| 'res_pool_mor': resourcePoolMor,
| 'name': display_name },
| 'domain-1002': { 'cluster_mor': clusterMor,
| 'res_pool_mor': resourcePoolMor,
| 'name': display_name },
| }
"""
dict_mors = {}
for obj_ref, path in list_obj:
if obj_ref._type == "ResourcePool":
# Get owner cluster-ref mor
cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner")
dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref,
'res_pool_mor': obj_ref,
'name': path,
}
else:
# Get default resource pool of the cluster
res_pool_ref = get_dynamic_property_mor(session,
obj_ref, "resourcePool")
dict_mors[obj_ref.value] = {'cluster_mor': obj_ref,
'res_pool_mor': res_pool_ref,
'name': path,
}
return dict_mors
def get_mo_id_from_instance(instance):
"""Return the managed object ID from the instance.
The instance['node'] will have the hypervisor_hostname field of the
compute node on which the instance exists or will be provisioned.
This will be of the form
'respool-1001(MyResPoolName)' or
'domain-1001(MyClusterName)'.
"""
return instance['node'].partition('(')[0]
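# --- Illustrative sketch (not part of the original module) -----------------
# get_mo_id_from_instance keeps only the text before the first '(' of the
# node name; with invented node names the behaviour is:
def _example_mo_ids():
    return (get_mo_id_from_instance({'node': 'respool-1001(MyResPoolName)'}),
            get_mo_id_from_instance({'node': 'domain-1001(MyClusterName)'}))
    # == ('respool-1001', 'domain-1001')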
def get_vmdk_adapter_type(adapter_type):
"""Return the adapter type to be used in vmdk descriptor.
The adapter type in the vmdk descriptor is the same for LSI-SAS, LSILogic
and ParaVirtual because the Virtual Disk Manager API does not recognize
the newer controller types.
"""
if adapter_type in [constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL]:
vmdk_adapter_type = constants.DEFAULT_ADAPTER_TYPE
else:
vmdk_adapter_type = adapter_type
return vmdk_adapter_type
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
"""Create VM on ESX host."""
LOG.debug("Creating VM on the ESX host", instance=instance)
vm_create_task = session._call_method(
session.vim,
"CreateVM_Task", vm_folder,
config=config_spec, pool=res_pool_ref)
task_info = session._wait_for_task(vm_create_task)
LOG.debug("Created VM on the ESX host", instance=instance)
return task_info.result
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
virtual_disk_path, size_in_kb):
# Create a Virtual Disk of the size of the flat vmdk file. This is
# done just to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
LOG.debug("Creating Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the data store",
{"vmdk_file_size_in_kb": size_in_kb,
"adapter_type": adapter_type})
vmdk_create_spec = get_vmdk_create_spec(
session.vim.client.factory,
size_in_kb,
adapter_type,
disk_type)
vmdk_create_task = session._call_method(
session.vim,
"CreateVirtualDisk_Task",
session.vim.service_content.virtualDiskManager,
name=virtual_disk_path,
datacenter=dc_ref,
spec=vmdk_create_spec)
session._wait_for_task(vmdk_create_task)
LOG.debug("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB and type %(disk_type)s",
{"vmdk_file_size_in_kb": size_in_kb,
"disk_type": disk_type})
def copy_virtual_disk(session, dc_ref, source, dest):
"""Copy a sparse virtual disk to a thin virtual disk. This is also
done to generate the meta-data file whose specifics
depend on the size of the disk, thin/thick provisioning and the
storage adapter type.
:param session: - session for connection
:param dc_ref: - data center reference object
:param source: - source datastore path
:param dest: - destination datastore path
"""
LOG.debug("Copying Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
vim = session.vim
vmdk_copy_task = session._call_method(
vim,
"CopyVirtualDisk_Task",
vim.service_content.virtualDiskManager,
sourceName=source,
sourceDatacenter=dc_ref,
destName=dest)
session._wait_for_task(vmdk_copy_task)
LOG.debug("Copied Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
def reconfigure_vm(session, vm_ref, config_spec):
"""Reconfigure a VM according to the config spec."""
reconfig_task = session._call_method(session.vim,
"ReconfigVM_Task", vm_ref,
spec=config_spec)
session._wait_for_task(reconfig_task)
def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
vmfolder_ref):
"""Clone VM and link the cloned VM to the instance.
Clones the passed vm_ref into a new VM and links the cloned vm to
the passed instance.
"""
if vm_ref is None:
LOG.warn(_("vmwareapi:vm_util:clone_vmref_for_instance, called "
"with vm_ref=None"))
raise vexc.MissingParameter(param="vm_ref")
# Get the clone vm spec
client_factory = session.vim.client.factory
rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref,
disk_move_type='moveAllDiskBackingsAndDisallowSharing')
extra_opts = {'nvp.vm-uuid': instance['uuid']}
config_spec = get_vm_extra_config_spec(client_factory, extra_opts)
config_spec.instanceUuid = instance['uuid']
clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec)
# Clone VM on ESX host
LOG.debug("Cloning VM for instance %s", instance['uuid'],
instance=instance)
vm_clone_task = session._call_method(session.vim, "CloneVM_Task",
vm_ref, folder=vmfolder_ref,
name=instance['uuid'],
spec=clone_spec)
session._wait_for_task(vm_clone_task)
LOG.debug("Cloned VM for instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def disassociate_vmref_from_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Disassociates the VM linked to the instance.
Disassociates the VM linked to the instance by performing the following:
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]+suffix
2. Rename the VM to be instance[uuid]+suffix instead
3. Reset the instanceUUID of the VM to a newly generated value
"""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
extra_opts = {'nvp.vm-uuid': instance['uuid'] + suffix}
client_factory = session.vim.client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid'] + suffix
reconfig_spec.instanceUuid = ''
LOG.debug("Disassociating VM from instance %s", instance['uuid'],
instance=instance)
reconfigure_vm(session, vm_ref, reconfig_spec)
LOG.debug("Disassociated VM from instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def associate_vmref_for_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Associates the VM to the instance.
Associates the VM to the instance by performing the following
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]
2. Rename the VM to be instance[uuid]
3. Reset the instanceUUID of the VM to be instance[uuid]
"""
if vm_ref is None:
vm_ref = search_vm_ref_by_identifier(session,
instance['uuid'] + suffix)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['uuid']
+ suffix)
extra_opts = {'nvp.vm-uuid': instance['uuid']}
client_factory = session.vim.client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid']
reconfig_spec.instanceUuid = instance['uuid']
LOG.debug("Associating VM to instance %s", instance['uuid'],
instance=instance)
reconfigure_vm(session, vm_ref, reconfig_spec)
LOG.debug("Associated VM to instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def power_on_instance(session, instance, vm_ref=None):
"""Power on the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering on the VM", instance=instance)
try:
poweron_task = session._call_method(
session.vim,
"PowerOnVM_Task", vm_ref)
session._wait_for_task(poweron_task)
LOG.debug("Powered on the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered on", instance=instance)
def get_values_from_object_properties(session, props):
"""Get the specific values from a object list.
The object values will be returned as a dictionary.
"""
dictionary = {}
while props:
for elem in props.objects:
propdict = propset_dict(elem.propSet)
dictionary.update(propdict)
token = _get_token(props)
if not token:
break
props = session._call_method(vim_util,
"continue_to_get_objects",
token)
return dictionary
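# Note on the loop above: vSphere property collection is paged, so a result
# set may carry a continuation token; the loop keeps calling
# "continue_to_get_objects" until the token is exhausted and every propSet
# entry has been folded into a single dictionary.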
def _get_vm_port_indices(session, vm_ref):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
ports = []
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value != 'free'):
ports.append(int(option.key.split('.')[2]))
return ports
def get_attach_port_index(session, vm_ref):
"""Get the first free port index."""
ports = _get_vm_port_indices(session, vm_ref)
# No ports are configured on the VM
if not ports:
return 0
ports.sort()
configured_ports_len = len(ports)
# Find the first free port index
for port_index in range(configured_ports_len):
if port_index != ports[port_index]:
return port_index
return configured_ports_len
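# Worked example of the free-port search above (illustrative values): with
# ports == [0, 1, 3], indices 0 and 1 match their list entries, but index 2
# differs from ports[2] (== 3), so 2 is returned as the first free slot.
# For a dense list such as [0, 1, 2], len(ports) == 3 is returned instead.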
def get_vm_detach_port_index(session, vm_ref, iface_id):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value == iface_id):
return int(option.key.split('.')[2])
def power_off_instance(session, instance, vm_ref=None):
"""Power off the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering off the VM", instance=instance)
try:
poweroff_task = session._call_method(session.vim,
"PowerOffVM_Task", vm_ref)
session._wait_for_task(poweroff_task)
LOG.debug("Powered off the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered off", instance=instance)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import morphforge.stdimports as mf
import morphforgecontrib.stdimports as mfc
import pylab
from morphforgecontrib.simulation.synapse_templates.neurounit import *
from morphforgecontrib.simulation.synapse_templates.exponential_form.expsyn.core import *
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2syn.core import *
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2synnmda.core import *
from morphforge import units
def main():
# Define the formulae used in the model:
na_eqnset_txt = """
define_component sautois_hh_na {
i = gmax * (v-erev) * m**3*h
minf = m_alpha_rate / (m_alpha_rate + m_beta_rate)
mtau = 1.0 / (m_alpha_rate + m_beta_rate)
m' = (minf-m) / mtau
hinf = h_alpha_rate / (h_alpha_rate + h_beta_rate)
htau = 1.0 / (h_alpha_rate + h_beta_rate)
h' = (hinf-h) / htau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1+a2*V)/(a3+std.math.exp((V+a4)/a5))
m_alpha_rate = StdFormAB(V=v, a1=m_a1, a2=m_a2, a3=m_a3, a4=m_a4, a5=m_a5)
m_beta_rate = StdFormAB(V=v, a1=m_b1, a2=m_b2, a3=m_b3, a4=m_b4, a5=m_b5)
h_alpha_rate = StdFormAB(V=v, a1=h_a1, a2=h_a2, a3=h_a3, a4=h_a4, a5=h_a5)
h_beta_rate = StdFormAB(V=v, a1=h_b1, a2=h_b2, a3=h_b3, a4=h_b4, a5=h_b5)
<=> PARAMETER m_a1:{s-1}, m_a2:{V-1 s-1}, m_a3:{}, m_a4:{V}, m_a5:{V}
<=> PARAMETER m_b1:{s-1}, m_b2:{V-1 s-1}, m_b3:{}, m_b4:{V}, m_b5:{V}
<=> PARAMETER h_a1:{s-1}, h_a2:{V-1 s-1}, h_a3:{}, h_a4:{V}, h_a5:{V}
<=> PARAMETER h_b1:{s-1}, h_b2:{V-1 s-1}, h_b3:{}, h_b4:{V}, h_b5:{V}
<=> PARAMETER gmax:(S/m2), erev:(V)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
}"""
lk_eqnset_txt = """
define_component sautois_hh_lk {
i = gmax * (v-erev)
<=> PARAMETER gmax:(S/m2), erev:(V)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
}"""
k_eqnset_txt = """
define_component chlstd_hh_k {
i = gmax * (v-erev) * n
ninf = n_alpha_rate / (n_alpha_rate + n_beta_rate)
ntau = 1.0 / (n_alpha_rate + n_beta_rate)
n' = (ninf-n) / ntau
StdFormAB(V, a1, a2, a3, a4, a5) = (a1 + a2*V)/(a3+std.math.exp((V+a4)/a5))
n_alpha_rate = StdFormAB(V=v, a1=k_a1, a2=k_a2, a3=k_a3, a4=k_a4, a5=k_a5)
n_beta_rate = StdFormAB(V=v, a1=k_b1, a2=k_b2, a3=k_b3, a4=k_b4, a5=k_b5)
<=> PARAMETER k_a1:{s-1}, k_a2:{V-1 s-1}, k_a3:{}, k_a4:{V}, k_a5:{V}
<=> PARAMETER k_b1:{s-1}, k_b2:{V-1 s-1}, k_b3:{}, k_b4:{V}, k_b5:{V}
<=> PARAMETER gmax:(S/m2), erev:(V)
<=> OUTPUT i:(A/m2) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
<=> INPUT v: V METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
}"""
# Some Utility Functions:
def extract_params(p, prefix, replace_prefix=""):
return dict([(replace_prefix+k[len(prefix):], v) for (k, v) in p.iteritems() if k.startswith(prefix)])
def remap_keys(dct, remap_dct):
# Avoid collisions
for v in remap_dct.values():
assert not v in dct
return dict ([(remap_dct.get(k, k), v) for (k, v) in dct.iteritems()])
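    # Example of what the two helpers above do (illustrative values):
    #     extract_params({'na_gmax': 1, 'lk_gmax': 2}, prefix='na_')
    #         -> {'gmax': 1}
    #     remap_keys({'k_gmax': 1}, {'k_gmax': 'gmax'})
    #         -> {'gmax': 1}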
param_units = """
nS/um2; nS/um2; nS/um2; nS/um2;
mV; mV; mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
"""
param_names = """
lk_gmax na_gmax kf_gmax ks_gmax
lk_erev na_erev kf_erev ks_erev
na_m_a1 na_m_a2 na_m_a3 na_m_a4 na_m_a5
na_m_b1 na_m_b2 na_m_b3 na_m_b4 na_m_b5
na_h_a1 na_h_a2 na_h_a3 na_h_a4 na_h_a5
na_h_b1 na_h_b2 na_h_b3 na_h_b4 na_h_b5
kf_a1 kf_a2 kf_a3 kf_a4 kf_a5
kf_b1 kf_b2 kf_b3 kf_b4 kf_b5
ks_a1 ks_a2 ks_a3 ks_a4 ks_a5
ks_b1 ks_b2 ks_b3 ks_b4 ks_b5
"""
# Param Details:
params_aIN = """
1.35 150 15 2.5
-54 50 -80 -80
8.67 0.00 0.50 -13.01 -18.56
5.73 0.00 1.00 -2.99 9.69
0.04 0.00 0.00 15.8 26.00
4.08 0.00 0.001 -19.09 -10.21
3.10 0.00 1.00 -35.5 -9.30
1.10 0.00 1.00 0.98 16.19
0.20 0.00 1.00 -10.96 -7.74
0.05 0.00 1.00 -22.07 6.10
"""
params_MN = """
2.4691 110.0 8.0 1.0
-61 50 -80 -80
13.26 0.0 0.5 -5.01 -12.56
5.73 0.0 1.0 5.01 9.69
0.04 0.0 0.0 28.8 26.0
2.04 0.0 0.001 -9.09 -10.21
3.1 0.0 1.0 -27.5 -9.3
0.44 0.0 1.0 8.98 16.19
0.20 0.0 1.0 -2.96 -7.74
0.05 0.0 1.0 -14.07 6.1
"""
params_dIN = """
3.6765 210.0 0.5 3.0
-51 50 -80 -80
13.01 0.0 4.0 -1.01 -12.56
5.73 0.0 1.0 9.01 9.69
0.06 0.0 0.0 30.88 26.0
3.06 0.0 1.0 -7.09 -10.21
3.10 0.0 1.0 -31.50 -9.3
0.44 0.0 1.0 4.98 16.19
0.20 0.0 1.0 -6.96 -7.74
0.05 0.0 2.0 -18.07 6.1
"""
params_RB = """
4.3573 120 1.5 8.0
-70 50 -80 -80
13.01 0 1 -4.01 -12.56
5.73 0 1 6.01 9.69
0.04 0 0 29.88 26.00
2.04 0 1 -8.09 -10.21
3.1 0 1 -32.5 -9.30
0.44 0 1 3.98 16.19
0.2 0 1 -7.96 -7.74
0.05 0 2 -19.07 6.10
"""
params_dlc = """
2.3364 420 70 10
-66 50 -80 -80
13.26 0 3.00 -3.01 -12.56
5.73 0 1.00 6.01 9.69
0.06 0 0.00 19.88 26.0
4.08 0 0.001 -8.09 -10.21
3.10 0 1.00 -32.5 -9.3
1.10 0 2.00 3.98 16.19
4.00 0 1.00 -53.0 -7.74
0.01 0 1.00 47.0 6.1
"""
params_dla = """
0.6964 150 70 5
-63 50 -80 -80
13.26 0.0 1.20 -9.01 -12.56
5.73 0.0 1.00 1.01 9.69
0.04 0.0 0.00 14.88 26.00
2.04 0.0 0.001 -13.09 -10.21
3.10 0.0 1.00 -37.5 -9.3
1.10 0.0 0.60 -1.02 16.19
4.00 0.0 1.00 -58.0 -7.74
0.01 0.0 1.00 42.0 6.1
"""
params_cIN = """
4.8544 500 30 20
-60 50 -80 -80
13.26 0 0.1 -10.01 -12.56
5.73 0 1 0.01 9.69
0.06 0 0 23.8 26.0
3.06 0 0.001 -14.09 -10.21
3.10 0 1 -32.5 -9.3
1.10 0 1 3.98 16.19
0.20 0 1 -7.96 -7.74
0.05 0 0.5 -19.07 6.1
"""
params_dINr = """
4.807 190 20 3
-59 50 -80 -80
12.3 0 0.1 -10.01 -12.56
3.73 0 1.0 0.01 9.69
0.04 0 0.0 23.8 26.0
2.04 0 0.3 -18.09 -10.21
3.10 0 1.0 -32.5 -9.3
1.10 0 1.0 3.98 16.19
0.20 0 0.5 -7.96 -7.74
0.05 0 0.7 -19.07 6.1
"""
def load_std_channels(param_str):
nrn_params = dict([(p, mf.qty("%s:%s"%(v, u.strip()))) for (p, u, v) in zip(param_names.split(), param_units.split(';'), param_str.split())])
nrn_params_na = extract_params(nrn_params, prefix='na_')
nrn_params_lk = extract_params(nrn_params, prefix='lk_')
nrn_params_ks = extract_params(nrn_params, prefix='ks_', replace_prefix='k_')
nrn_params_kf = extract_params(nrn_params, prefix='kf_', replace_prefix='k_')
nrn_params_ks = remap_keys(nrn_params_ks, {'k_gmax':'gmax', 'k_erev':'erev'})
nrn_params_kf = remap_keys(nrn_params_kf, {'k_gmax':'gmax', 'k_erev':'erev'})
eqnsetna = mf.neurounits.NeuroUnitParser.Parse9MLFile(na_eqnset_txt).get_component()
eqnsetlk = mf.neurounits.NeuroUnitParser.Parse9MLFile(lk_eqnset_txt).get_component()
eqnsetk = mf.neurounits.NeuroUnitParser.Parse9MLFile(k_eqnset_txt).get_component()
na_chl = mfc.Neuron_NeuroUnitEqnsetMechanism(name="Chl1", eqnset=eqnsetna, default_parameters = nrn_params_na)
lk_chl = mfc.Neuron_NeuroUnitEqnsetMechanism(name="Chl2", eqnset=eqnsetlk, default_parameters = nrn_params_lk)
ksChls = mfc.Neuron_NeuroUnitEqnsetMechanism(name="Chl3", eqnset=eqnsetk, default_parameters = nrn_params_ks)
kfChls = mfc.Neuron_NeuroUnitEqnsetMechanism(name="Chl4", eqnset=eqnsetk, default_parameters = nrn_params_kf)
chls = [na_chl, lk_chl, ksChls, kfChls]
return chls
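    # How the parameter tables are read (an illustrative trace, not output):
    # the n-th token of a params_* block is paired with the n-th name in
    # param_names and the n-th unit in param_units, so for params_aIN the
    # first entry becomes 'lk_gmax' -> mf.qty('1.35:nS/um2') before being
    # split per channel by extract_params/remap_keys above.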
def load_ka_channel():
ka_param_names = """
ka_gmax ka_erev
ka_m_a1 ka_m_a2 ka_m_a3 ka_m_a4 ka_m_a5
ka_m_b1 ka_m_b2 ka_m_b3 ka_m_b4 ka_m_b5
ka_h_a1 ka_h_a2 ka_h_a3 ka_h_a4 ka_h_a5
ka_h_b1 ka_h_b2 ka_h_b3 ka_h_b4 ka_h_b5
"""
ka_param_units = """
nS/um2; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
ms-1; mV-1 ms-1; ;mV; mV;
"""
ka_param_str = """
30 -80
12.025 0 0.5 -10.01 -12.56
14.325 0 1 -8.01 9.69
0.0001 0 1 15.88 26.0
10.000 0 500 -22.09 -10.21
"""
nrn_params = dict([(p, mf.qty("%s:%s"%(v, u.strip()))) for (p, u, v) in zip(ka_param_names.split(), ka_param_units.split(';'), ka_param_str.split())])
nrn_params_ka = extract_params(nrn_params, prefix='ka_')
print nrn_params_ka
eqnsetka = mf.neurounits.NeuroUnitParser.Parse9MLFile(ka_eqnset_txt.replace("sautois", "saut2")).get_component()
kaChls = mfc.Neuron_NeuroUnitEqnsetMechanism(name="Chl5", eqnset=eqnsetka, default_parameters = nrn_params_ka)
return kaChls
@mf.cached_functor
def get_ain_chls():
return load_std_channels(params_aIN)
@mf.cached_functor
def get_mn_chls():
return load_std_channels(params_MN)
@mf.cached_functor
def get_dinr_chls():
return load_std_channels(params_dINr)
@mf.cached_functor
def get_din_chls():
return load_std_channels(params_dIN)
@mf.cached_functor
def get_rb_chls():
return load_std_channels(params_RB)
@mf.cached_functor
def get_dla_chls():
return load_std_channels(params_dla)
@mf.cached_functor
def get_dlc_chls():
return load_std_channels(params_dlc)
@mf.cached_functor
def get_cin_chls():
return load_std_channels(params_cIN) + [load_ka_channel()]
import random
def make_cell(sim, cell_name, cell_chl_functor):
m1 = mf.MorphologyBuilder.get_single_section_soma(area=mf.qty("1:um2"))
cell = sim.create_cell(name=cell_name, morphology=m1)
for chl in cell_chl_functor():
cell.apply_channel( chl, parameter_multipliers={'gmax':random.uniform(0.9, 1.1)})
cell.set_passive( mf.PassiveProperty.SpecificCapacitance, mf.qty('4:pF/um2'))
return cell
def make_cell_ain(sim, name=None, cell_tags=[]):
return make_cell(sim, cell_name=name, cell_chl_functor= get_ain_chls)
def make_cell_cin(sim, name=None, cell_tags=[]):
return make_cell(sim, cell_name=name, cell_chl_functor= get_cin_chls)
def make_cell_din(sim, name=None, cell_tags=[]):
return make_cell(sim, cell_name=name, cell_chl_functor= get_din_chls)
def make_cell_dinr(sim, name=None, cell_tags=[]):
return make_cell(sim, cell_name=name, cell_chl_functor= get_dinr_chls)
celltypes = [
('aIN', get_ain_chls),
('MN', get_mn_chls),
('dIN', get_din_chls),
('RB', get_rb_chls),
('dla', get_dla_chls),
('dlc', get_dlc_chls),
('cIN', get_cin_chls),
('dINr', get_dinr_chls),
]
# Test the effects of step-current injections:
# ############################################
def test_cell_current(cell_name, cell_chl_functor, current):
sim = mf.NEURONEnvironment().Simulation()
m1 = mf.MorphologyBuilder.get_single_section_soma(area=mf.qty("1:um2"))
cell = sim.create_cell(name=cell_name, morphology=m1)
cc = sim.create_currentclamp(name="CC1", delay=100*mf.ms, dur=400*mf.ms, amp=current * mf.pA, cell_location=cell.soma)
        for chl in cell_chl_functor():
            cell.apply_channel(chl)
        cell.set_passive(mf.PassiveProperty.SpecificCapacitance, mf.qty('4:pF/um2'))
sim.record(cell, what=mf.Cell.Recordables.MembraneVoltage)
sim.record(cc, what=mf.CurrentClamp.Recordables.Current)
res =sim.run()
return res
def test_cell(cell_name, cell_chl_functor):
current_levels = [0, 40, 80, 120, 160, 200, 240]
reses = [test_cell_current(cell_name=cell_name, cell_chl_functor=cell_chl_functor, current=c) for c in current_levels]
mf.TagViewer(reses, show=False)
def test_step_current_injections():
for cell_name, functor in celltypes:
test_cell(cell_name, functor)
# End of step current injections
################################
simple_syn = """
define_component syn_simple {
g' = - g/g_tau
i = gmax * (v-erev) * g
gmax = 300pS
erev = 0mV
g_tau = 20ms
<=> INPUT v: mV METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
<=> OUTPUT i:(mA) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
on on_event() {
g = g + 1.0
}
}
"""
syn_inhib = """
define_component syn_decaying_inhib {
o' = - o/{1.5 ms}
c' = - c/{4.0 ms}
i = (v- {-80mV}) *g
g = {0.435nS} * (c-o) * scale
x=10.0
f = scale
plas' = -plas / beta
<=> PARAMETER scale:()
<=> INPUT v: mV METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
<=> OUTPUT i:(mA) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
alpha = 0.75
beta = 60ms
update = x - (x * (1-alpha) * plas)
on on_event() {
#update = x - (x * (1-alpha) * plas)
o = o + [update]if[update>0]else[0]
c = c + [update]if[update>0]else[0]
plas = plas + 1.0
}
}
"""
syn_onto_driver= """
define_component syn_simple {
o' = - o/{1.5 ms}
c' = - c/{10.0 s}
i = {0.835nS} * (v- {0mV}) * (c-o) * scale
<=> PARAMETER scale:()
<=> INPUT v: mV METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
<=> OUTPUT i:(mA) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
on on_event() {
o = o + 10
c = c + 10
}
}
"""
syn_std_excite_AMPA= """
define_component syn_simple {
o' = - o/{1.5 ms}
c' = - c/{4.0 ms}
i = {0.435nS} * (v- {0mV}) * (c-o) * scale
<=> PARAMETER scale:()
<=> INPUT v: mV METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
<=> OUTPUT i:(mA) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
on on_event() {
o = o + 10
c = c + 10
}
}
"""
syn_std_excite_NMDA= """
define_component syn_simple {
o' = - o/{1.5 ms}
c' = - c/{80 ms}
i = {0.435nS} * (v- {0mV}) * (c-o) * scale * vdep
vdep = 1/(1+ 0.1*0.5*std.math.exp(-0.08*v/{1mV}))
<=> PARAMETER scale:()
<=> INPUT v: mV METADATA {"mf":{"role":"MEMBRANEVOLTAGE"} }
<=> OUTPUT i:(mA) METADATA {"mf":{"role":"TRANSMEMBRANECURRENT"} }
on on_event() {
o = [o + 10]if[o<15]else[25]
c = [c + 10]if[c<15]else[25]
}
}
"""
env = mf.NEURONEnvironment()
# Create the synaptic templates:
driver_syn_tmpl = env.PostSynapticMechTemplate(
NeuroUnitEqnsetPostSynaptic,
eqnset = mf.neurounits.NeuroUnitParser.Parse9MLFile(syn_onto_driver).get_component(),
template_name='driver_syn_templ',
default_parameters={'scale':1.0}
)
excite_ampa_syn_tmpl = env.PostSynapticMechTemplate(
NeuroUnitEqnsetPostSynaptic,
template_name='excite_ampa_syn_tmpl',
eqnset = mf.neurounits.NeuroUnitParser.Parse9MLFile(syn_std_excite_AMPA).get_component(),
default_parameters={'scale':1.0}
)
excite_nmda_syn_tmpl = env.PostSynapticMechTemplate(
NeuroUnitEqnsetPostSynaptic,
template_name='excite_nmda_syn_tmpl',
eqnset = mf.neurounits.NeuroUnitParser.Parse9MLFile(syn_std_excite_NMDA).get_component(),
default_parameters={'scale':1.0}
)
inhib_syn_tmpl = env.PostSynapticMechTemplate(
NeuroUnitEqnsetPostSynaptic,
template_name='inhib_syn_tmpl',
eqnset = mf.neurounits.NeuroUnitParser.Parse9MLFile(syn_inhib).get_component(),
default_parameters={'scale':1.0}
)
exptemplate = env.PostSynapticMechTemplate(
PostSynapticMech_ExpSyn_Base,
template_name='expsyn1tmpl',
tau = 5 * units.ms,
e_rev = 0 * units.mV,
peak_conductance = 300 * units.pS
)
exp2template = env.PostSynapticMechTemplate(
PostSynapticMech_Exp2Syn_Base,
template_name='expsyn2tmpl',
tau_open = 5 * units.ms, tau_close=20*units.ms, e_rev = 0 * units.mV, popening=1.0,
peak_conductance = 300 * units.pS
)
def build_trigger( env, cell):
return env.SynapticTrigger(
mfc.SynapticTriggerByVoltageThreshold,
cell_location = cell.soma,
voltage_threshold = mf.qty("0:mV"),
delay = mf.qty("1:ms"),
#,
)
def onto_driver(sim, postsynaptic, times):
return sim.create_synapse(
trigger = env.SynapticTrigger(
mfc.SynapticTriggerAtTimes,
time_list = times,
),
postsynaptic_mech = driver_syn_tmpl.instantiate(
cell_location = postsynaptic.soma,
parameter_multipliers = {'scale':1.0 },
#peak_conductance = mf.qty("1:nS")
)
)
def dual_driver(sim, presynaptic, postsynaptic, ampa_scale, nmda_scale):
ampa = sim.create_synapse(
trigger = build_trigger( env, presynaptic),
postsynaptic_mech = excite_ampa_syn_tmpl.instantiate(
cell_location = postsynaptic.soma,
parameter_multipliers = {'scale':ampa_scale * units.dimensionless },
#peak_conductance = mf.qty("1:nS")
)
)
nmda = sim.create_synapse(
trigger = build_trigger( env, presynaptic),
postsynaptic_mech = excite_nmda_syn_tmpl.instantiate(
cell_location = postsynaptic.soma,
parameter_multipliers = {'scale':nmda_scale * units.dimensionless },
#peak_conductance = mf.qty("1:nS")
)
)
return [ampa, nmda]
def inhib(sim, presynaptic, postsynaptic, scale):
inhib_syn = sim.create_synapse(
trigger = build_trigger( env, presynaptic),
postsynaptic_mech = inhib_syn_tmpl.instantiate(
cell_location = postsynaptic.soma,
parameter_multipliers = {'scale':scale * units.dimensionless },
)
)
return [inhib_syn]
def expbuiltin_syn(sim, presynaptic, postsynaptic, scale):
inhib_syn = sim.create_synapse(
trigger = build_trigger( env, presynaptic),
postsynaptic_mech = exptemplate.instantiate( cell_location = postsynaptic.soma,)
)
return [inhib_syn]
def exp2builtin_syn(sim, presynaptic, postsynaptic, scale):
inhib_syn = sim.create_synapse(
trigger = build_trigger( env, presynaptic),
postsynaptic_mech = exp2template.instantiate( cell_location = postsynaptic.soma,)
)
return [inhib_syn]
def driver_onto_dinr(sim, presynaptic, postsynaptic):
return dual_driver(sim=sim, presynaptic=presynaptic, postsynaptic=postsynaptic, ampa_scale=0.1, nmda_scale=1.0)
def driver_onto_cin(sim, presynaptic, postsynaptic):
return dual_driver(sim=sim, presynaptic=presynaptic, postsynaptic=postsynaptic, ampa_scale=0.1, nmda_scale=0.1)
def dinr_onto_cin(sim, presynaptic, postsynaptic):
return dual_driver(sim=sim, presynaptic=presynaptic, postsynaptic=postsynaptic, ampa_scale=0.0, nmda_scale=1.0)
def cin_onto_cin(sim, presynaptic, postsynaptic):
return inhib(sim=sim, presynaptic=presynaptic, postsynaptic=postsynaptic, scale=4.0)
def cin_onto_dinr(sim, presynaptic, postsynaptic):
return inhib(sim=sim, presynaptic=presynaptic, postsynaptic=postsynaptic, scale=4.0)
def expbuiltin(sim, presynaptic, postsynaptic):
return expbuiltin_syn(sim=sim, presynaptic=presynaptic, postsynaptic=postsynaptic, scale=4.0)
def exp2builtin(sim, presynaptic, postsynaptic):
return exp2builtin_syn(sim=sim, presynaptic=presynaptic, postsynaptic=postsynaptic, scale=4.0)
sim = env.Simulation()
nNeurons = 1
dINr_LHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_dinr, n=5, pop_name="dINR_LHS")
dINr_RHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_dinr, n=5, pop_name="dINR_RHS")
cIN_LHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_dinr, n=5, pop_name="cIN_LHS")
cIN_RHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_dinr, n=5, pop_name="cIN_RHS")
#
#aIN_LHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_ain, n=nNeurons, pop_name="aIN_LHS")
#aIN_RHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_ain, n=nNeurons, pop_name="aIN_RHS")
driver_LHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_dinr, n=nNeurons, pop_name="driver_LHS")
driver_RHS =mfc.NeuronPopulation(sim=sim, neuron_functor=make_cell_dinr, n=nNeurons, pop_name="driver_RHS")
# Connect the drivers:
mfc.Connectors.times_to_all(sim, syncronous_times=(100,)*mf.units.ms, postsynaptic_population= driver_LHS, connect_functor = onto_driver)
mfc.Connectors.times_to_all(sim, syncronous_times=(105,)*mf.units.ms, postsynaptic_population= driver_RHS, connect_functor = onto_driver)
#
# LHS
#######
    # Connect the drivers to everything:
mfc.Connectors.all_to_all(sim, presynaptic_population=driver_LHS, postsynaptic_population= cIN_LHS, connect_functor = driver_onto_cin)
mfc.Connectors.all_to_all(sim, presynaptic_population=driver_LHS, postsynaptic_population= dINr_LHS, connect_functor = driver_onto_dinr)
    # Connect the dINrs to everything:
mfc.Connectors.all_to_all(sim, presynaptic_population=dINr_LHS, postsynaptic_population= cIN_LHS, connect_functor = dinr_onto_cin)
    # Connect the cINs to everything contralaterally:
mfc.Connectors.all_to_all(sim, presynaptic_population=cIN_LHS, postsynaptic_population= cIN_RHS, connect_functor = cin_onto_cin)
syn_cin_dinr_lr = mfc.Connectors.all_to_all(sim, presynaptic_population=cIN_LHS, postsynaptic_population= dINr_RHS, connect_functor = cin_onto_dinr, synapse_pop_name='syn_cin_dinr_lr')
## RHS
########
mfc.Connectors.all_to_all(sim, presynaptic_population=driver_RHS, postsynaptic_population= cIN_RHS, connect_functor = driver_onto_cin)
mfc.Connectors.all_to_all(sim, presynaptic_population=driver_RHS, postsynaptic_population= dINr_RHS, connect_functor = driver_onto_dinr)
    # Connect the dINrs to everything:
mfc.Connectors.all_to_all(sim, presynaptic_population=dINr_RHS, postsynaptic_population= cIN_RHS, connect_functor = driver_onto_cin)
#mfc.Connectors.all_to_all(sim, presynaptic_population=dINr_RHS, postsynaptic_population= dINr_RHS, connect_functor = driver_onto_dinr)
    # Connect the cINs to everything contralaterally:
mfc.Connectors.all_to_all(sim, presynaptic_population=cIN_RHS, postsynaptic_population= cIN_LHS, connect_functor = cin_onto_cin)
#syn_cin_dinr_rl = mfc.Connectors.all_to_all(sim, presynaptic_population=cIN_RHS, postsynaptic_population= dINr_LHS, connect_functor = cin_onto_dinr, synapse_pop_name='syn_cin_dinr_lr')
driver_LHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
driver_RHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
dINr_LHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
dINr_RHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
cIN_LHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
cIN_RHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
#aIN_LHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
#aIN_RHS.record_from_all(what=mf.Cell.Recordables.MembraneVoltage)
#syn_cin_dinr_lr.record_from_all(what='g')
mfc.Connectors.all_to_all(sim, presynaptic_population=cIN_RHS, postsynaptic_population= cIN_LHS, connect_functor = expbuiltin)
mfc.Connectors.all_to_all(sim, presynaptic_population=cIN_RHS, postsynaptic_population= cIN_LHS, connect_functor = exp2builtin)
#mf.SimulationMRedoc.build(sim).to_pdf('~/Desktop/BartSim.pdf')
res =sim.run()
for tr in res.get_traces():
print tr.tags
mf.TagViewer(res,
plots=[
mf.TagPlot("ALL{Voltage}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,driver_LHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,dINR_LHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,cIN_LHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,aIN_LHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,driver_RHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,dINR_RHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,cIN_RHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot("ALL{Voltage,aIN_RHS}", yrange=(-80*mf.mV, 50*mf.mV) ),
mf.TagPlot('ALL{PREPOP:cIN_LHS,POSTPOP:dINR_RHS}'),
mf.TagPlot('ALL{PREPOP:cIN_RHS,POSTPOP:dINR_LHS}'),
])
# What to run:
#test_step_current_injections()
pylab.show()
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
"""ProgressBar class."""
import sys
import time
import string
import progress
import progress.eta
class ProgressBar(object):
"""A flexible progress bar for Python 2.6+."""
# Valid format strings
_PROGRESS = 'progress'
_PERCENTAGE = 'percentage'
_NOMINATOR = 'nominator'
_DENOMINATOR = 'denominator'
_VALID_ETA = ['hours', 'minutes', 'seconds']
_VALID_FMTS = ['progress', 'percentage', 'nominator', 'denominator']\
+ _VALID_ETA
def __init__(self, fmt, width=20, char='=', head='>', fill=' ',
min=0, max=100, target=sys.stderr, etaobj=None):
"""Initialize the progress bar with keyword arguments.
width The character width of the progress bar
char The character that fills progress bar
head The lead character
fill Character for the remaining space
min Minimum/starting value
max Maximum/end value
fmt The format used to print on each update
target The target stream, defaults to sys.stderr
etaobj A subclass of progress.eta.ETA
"""
# Do a ton of error checking to ensure a valid format and parameters
if int(width) <= 0:
raise ValueError("Width must be greater than zero")
c, h = len(char), len(head)
        if c != 1 or h != 1:
raise ValueError("char and head must be of length 1")
if c > width or h > width or c + h > width:
raise ValueError("Character lengths or combined length must be "
"less than progress bar width")
self._progchar = fill * width
self._fmtdict = dict(zip(ProgressBar._VALID_FMTS,
[self._progchar, 0.0, 0, max, 0, 0, 0]))
# Check the format is valid
self._check_format(fmt)
self._etaobj = None
if etaobj is None:
if self._has_eta:
self._etaobj = progress.eta.SimpleETA()
else:
if not self._has_eta:
raise ValueError("Specified etaobj, but missing eta format in "
"format string")
if not isinstance(etaobj, progress.eta.BaseETA):
raise TypeError("ETA object must derive from the "
"progress.eta.BaseETA class")
self._etaobj = etaobj
self._width = width
self._char = char
self._head = head
self._fill = fill
self._min = min
self._max = max
self._fmt = fmt
self._target = target
self._value = min
self._percentage = 0.0
self._bdels = 0
self._timer = time.clock if sys.platform.startswith('win')\
else time.time
self._lastlen = 0
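    # A minimal usage sketch (format keys as validated by _check_format;
    # the width, maximum and step values are illustrative):
    #
    #     bar = ProgressBar("[{progress}] {percentage:3.0f}%",
    #                       width=30, max=100)
    #     for _ in range(10):
    #         bar.autoupdate(10)   # clear + update + show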
def _update(self, value):
"""Internal method for updating the ProgressBar's state."""
if value < 0:
raise ValueError("Cannot update progress bar with"
"a negative value")
if value > 0:
self._value += value
# Clamp to [mn, mx]
self._value = max(self.min, min(self.value, self.max))
v = float(self.value - self.min) / float(self.max - self.min)
self._percentage = v
# Set progress string
if not self.done():
lh = len(self.head)
self._progchar = self.char * ((int(v * self.width) - lh) //
len(self.char)) + self.head
self._progchar += self.fill * (self.width - len(self._progchar))
else:
self._progchar = self.char * (self.width - 1) +\
(self.char if not self.head else self.head)
for e in ProgressBar._VALID_ETA:
if e in self._fmtdict:
self._fmtdict[e] = 0.
self._fmtdict.update(zip([ProgressBar._PROGRESS,
ProgressBar._PERCENTAGE,
ProgressBar._NOMINATOR],
[self._progchar,
self._percentage * 100.0,
self._value]))
def update(self, value):
"""Update the progress bar with value."""
# Update and format ETA if needed
if self._has_eta:
self._etaobj.update(self._timer(), self.value + value, self.max)
res = self._etaobj.get()
if res is not None:
if type(res) not in (tuple, list):
raise ValueError("Expected a tuple or list of three "
"elements from ETA object, not "
"'{0}'".format(type(res).__name__))
if len(res) != 3:
raise ValueError("Expected exactly three elements from "
"ETA object, not '{0}'"
.format(type(res).__name__))
self._fmtdict.update(zip(ProgressBar._VALID_ETA, res))
self._update(value)
def clear(self):
"""Remove the progress bar from the output stream."""
self._target.write('\r' + ' ' * self._lastlen + '\r')
def reset(self):
"""Reset the progress bar."""
self.value = self.min
if self._etaobj:
self._etaobj.reset()
def done(self):
"""Return True if the progress bar has completed."""
return self._value == self.max
def show(self, *args, **kwargs):
"""Print the progress bar.
args and kwargs can contain userdata
"""
tempdict = dict(**self._fmtdict)
if kwargs:
if any(kw in self._fmtdict for kw in kwargs):
raise ValueError("kwargs cannot override internal format keys")
tempdict.update(kwargs)
self.clear()
tmp = self._fmt.format(*args, **tempdict)
self._target.write(tmp)
self._target.flush() # Needed for Python 3.x
self._lastlen = len(tmp)
def autoupdate(self, value, *args, **kwargs):
"""Clear the progress bar, update it with value and show it.
Essentially, a short-hand way of doing:
bar.clear()
bar.update(value)
bar.show()
"""
self.clear()
self.update(value)
self.show(*args, **kwargs)
def _check_format(self, fmt):
"""Check that a given format is valid."""
if not fmt:
raise ValueError("Expected a non-empty format string")
fmt_count = dict.fromkeys(ProgressBar._VALID_FMTS, 0)
self._has_eta = False
for _, name, _, _ in string.Formatter().parse(fmt):
if name in ProgressBar._VALID_ETA:
self._fmtdict[name] = 0
self._has_eta = True
elif name in ProgressBar._VALID_FMTS:
fmt_count[name] += 1
if fmt_count[name] > 1:
raise ValueError("Format string '{0}' appears more "
"than once".format(name))
@property
def width(self):
return self._width
@width.setter
def width(self, width):
if width < 1:
raise ValueError("Width must be at least 1 character")
self._width = width
self._update(0)
@property
def char(self):
return self._char
@char.setter
def char(self, char):
if len(char) != 1:
raise ValueError("char must be one character")
self._char = char
self._update(0)
@property
def head(self):
return self._head
@head.setter
def head(self, head):
if len(head) != 1:
raise ValueError("head must be one character")
self._head = head
self._update(0)
@property
def fill(self):
return self._fill
@fill.setter
def fill(self, fill):
if len(fill) != 1:
raise ValueError("fill must be one character")
self._fill = fill
self._update(0)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
value = max(self.min, min(self.max, value))
if self._value < value:
self.update(value - self._value)
elif self._value >= value:
self._value = value
self.update(0)
@property
def percent(self):
return self._percentage
@percent.setter
def percent(self, percent):
# Percentage will be set in self._update
self._value = self.min + percent * (self.max - self.min)
self._update(0)
@property
def min(self):
return self._min
@min.setter
def min(self, min):
if min >= self.max:
raise ValueError("min must less than max ({0})".format(self.max))
self._min = min
self._update(0)
@property
def max(self):
return self._max
@max.setter
def max(self, max):
if max <= self.min:
raise ValueError("max must greater than min ({0})"
.format(self.min))
self._max = max
self._update(0)
@property
def format(self):
return self._fmt
@format.setter
def format(self, fmt):
self._check_format(fmt)
self._fmt = fmt
@property
def target(self):
return self._target
@target.setter
def target(self, t):
if t not in (sys.stdout, sys.stderr):
raise ValueError("Valid targets are either sys.stdout or "
"sys.stderr")
self._target = t
@property
def eta(self):
"""Return a tuple of the remaining hours, minutes and seconds."""
return tuple(self.eta_object.get()) if self.eta_object\
else (None, None, None)
@property
def eta_object(self):
"""Return the eta object used to calculate ETA."""
return self._etaobj
def __str__(self):
"""Return the string representation as used by show()."""
return self._fmt.format(**self._fmtdict)
def __repr__(self):
"""Return the same string representation as __str()__."""
return "ProgressBar(format={0!r}, value={1!r})".format(self.format,
self.value)
def __iadd__(self, value):
"""Update the progress bar with value."""
self.update(value)
return self
def __len__(self):
"""Return the current length of the progress in characters."""
return len(str(self))
|
|
import collections
import random
import aging
import lifetables
from agent import MakeAgents, MaleState, FemaleState
from dispersal import HamadryasDispersal
from seedgroups import HamadryasSeed
import relatedness
def main():
hamadryas = HamadryasSim()
hamadryas.run_simulation()
class Population(object):
def __init__(self):
self.dict = {}
self.all = self.dict.keys()
self.groupsdict = {}
self.topeverindex = 0
self.halfyear = 0
class HamaPopulation(Population):
def __init__(self):
self.avail_females = []
self.eligible_males = []
self.young_natal_females = []
self.count_opportunistic_takeovers = 0
self.count_inheritances = 0
self.count_challenge_takeovers = 0
self.count_initial_units = 0
super(HamaPopulation, self).__init__()
class Simulation(object):
# to hold generic functions pertaining to any/most sims.
def __init__(self):
self.interbirth_int = []
def mortality_check(self, population, halfyear):
ret = 0
for agentindex in list(population.dict.keys()):
if agentindex in population.dict.keys():
agent = population.dict[agentindex]
getdeathchance = lifetables.getdeathchance(agent)
if agent.taxon == "savannah":
getdeathchance *= 1.41
elif agent.taxon == "hamadryas":
getdeathchance *= 1.25
dieroll = random.uniform(0, 1)
if getdeathchance >= dieroll:
if agent.taxon == "savannah":
ret += self.kill_agent(agent, population, population.groupsdict[agent.troopID], halfyear)
elif agent.taxon == "hamadryas":
ret += self.kill_agent(agent, population, population.groupsdict[agent.bandID], halfyear)
return ret
def birth_check(self, population, halfyear):
births = 0
for agentindex in population.dict.keys():
agent = population.dict[agentindex]
if agent.sex == 'f':
if agent.femaleState == FemaleState.cycling:
if agent.taxon == "hamadryas":
birthchance = lifetables.getbirthchance(agent)
dieroll = random.uniform(0, 1)
if birthchance >= dieroll:
agent.femaleState = FemaleState.pregnant
agent.sire_of_fetus = agent.OMUID
elif agent.femaleState == FemaleState.pregnant:
self.birthagent(agent, population, halfyear)
agent.femaleState = FemaleState.nursing0
births += 1
return births
def promotions(self, population):
for agent in population.dict.keys():
agent = population.dict[agent]
aging.promote_agent(agent)
def kill_agent(self, agent, population, group, halfyear):
if agent.sex == 'f':
if agent.offspring and agent.offspring[-1] in population.dict.keys():
if population.dict[agent.offspring[-1]].age < 2:
self.kill_agent(population.dict[agent.offspring[-1]], population, group, halfyear)
if agent.taxon == "hamadryas" and agent.sex == 'm':
if agent.index in population.eligible_males:
population.eligible_males.remove(agent.index)
if agent.females: # if he is a hamadryas leader male
if agent.malefols: # malefols inherit first
HamadryasDispersal.inherit_females(agent, population, self)
# after inheritance, females are "up for grabs"
population.avail_females += agent.females
if agent.index in group.leadermales:
group.leadermales.remove(agent.index)
if agent.maleState == MaleState.fol:
if agent.OMUID in population.dict.keys():
population.dict[agent.OMUID].malefols.remove(agent.index)
elif agent.taxon == "hamadryas" and agent.sex == 'f':
if agent.dispersed and agent.OMUID in population.dict.keys():
population.dict[agent.OMUID].females.remove(agent.index)
if agent.index in population.avail_females:
population.avail_females.remove(agent.index)
if agent.age <= 1:
if agent.parents:
if agent.parents[0] in population.dict.keys():
population.dict[agent.parents[0]].femaleState = FemaleState.cycling
del population.dict[agent.index]
population.all.remove(agent.index)
group.agents.remove(agent.index)
assert agent.index not in population.all
assert agent.index not in population.dict.keys()
return 1
def birthagent(self, mother, population, halfyear):
sex = random.choice(['m', 'f'])
if mother.taxon == "hamadryas":
group = mother.bandID
sire = mother.sire_of_fetus
infant = MakeAgents.makenewhamadryas(group, sex, mother.index,
sire,
population, self)
infant.OMUID = mother.OMUID
infant.clanID = mother.clanID
mother.sire_of_fetus = None
if not mother.last_birth:
mother.last_birth = halfyear
else:
interval = halfyear - mother.last_birth
self.interbirth_int += [interval]
mother.last_birth = halfyear
infant.born = True
population.all.append(infant.index)
population.dict[infant.index] = infant
self.parent_dict[infant.index] = infant.parents
population.groupsdict[group].agents.append(infant.index)
def get_sex_age_ratios(self, population):
adult_females = 0.0
adult_males = 0.0
subadult_females = 0.0
subadult_males = 0.0
for agent in population.dict.values():
if agent.sex == 'f':
if agent.age >= 5:
adult_females += 1.0
else:
subadult_females += 1.0
elif agent.sex == 'm':
if agent.age >= 7:
adult_males += 1.0
else:
subadult_males += 1.0
return {"adult sex ratio": adult_females / adult_males,
"adult to nonadult ratio": (adult_females + adult_males) / (subadult_females + subadult_males),
"adult females: ": adult_females,
"adult males: ": adult_males}
# also add here specialized lists!!!
"""
TAXA SPECIFIC CLASSES BELOW
are designed to hold schedules.
Schedules can vary between species to allow for
completely different functions e.g. takeovers
in hamadryas baboons and male dispersal in savannah.
"""
class HamadryasSim(Simulation):
# loop with unique functions when needed
def __init__(self):
self.duration = 300
self.recog = False
self.attraction_strength = 2
self.parent_dict = {}
self.codispersal = False
super(HamadryasSim, self).__init__()
def run_simulation(self):
population = HamaPopulation()
for groupindex in range(0, 10):
population = HamadryasSeed.makeseed(groupindex, population, self)
for halfyear in range(0, self.duration):
population.halfyear = halfyear
for group in population.groupsdict.values():
group.leadermales = set()
self.mortality_check(population, halfyear)
self.male_eligibility(population)
self.get_young_natal_females(population)
if population.avail_females:
for female in population.avail_females:
female = population.dict[female]
HamadryasDispersal.opportun_takeover(female, population, self)
population.avail_females = []
males = [male for male in population.dict.values() if male.sex == 'm']
for male in males:
self.male_choices(male, population)
if population.avail_females:
for female in population.avail_females:
female = population.dict[female]
HamadryasDispersal.opportun_takeover(female, population, self)
population.avail_females = []
self.birth_check(population, halfyear)
self.promotions(population)
print "Population: " + str(len(population.dict.keys()))
print "Hamadryas half-year " + str(halfyear) + " done!"
if len(population.all) == 0:
break
ratios = self.get_sex_age_ratios(population)
related = relatedness.main(population, self.parent_dict)
return {"within_omu_relat_mean": related[0],
"within_omu_relat_var": related[1],
"within_dyads": related[2],
"across_omu_relat_mean": related[3],
"across_omu_relat_var": related[4],
"across_dyads": related[5],
"pop_size": len(population.all),
"adult_sex_ratio": ratios["adult sex ratio"],
"adult_to_nonadult_ratio": ratios["adult to nonadult ratio"],
"initial_units": population.count_initial_units,
"opportunistic_takeovers": population.count_opportunistic_takeovers,
"inheritances": population.count_inheritances,
"challenge_takeovers": population.count_challenge_takeovers}
def male_eligibility(self, population):
population.eligible_males = []
for agent in population.dict.values():
if agent.sex == 'm':
if agent.dispersed:
if (agent.maleState is not MaleState.juvsol) and (agent.maleState is not MaleState.fol):
population.eligible_males.append(agent.index)
if agent.maleState == MaleState.lea:
population.groupsdict[agent.bandID].leadermales.add(agent.index)
def get_young_natal_females(self, population):
population.young_natal_females = []
for agent in population.dict.values():
if agent.sex == 'f':
if 2 <= agent.age < 5:
population.young_natal_females.append(agent.index)
elif agent.age == 5 and not agent.dispersed:
population.avail_females.append(agent.index)
def male_choices(self, male, population):
if male.maleState == MaleState.fol:
HamadryasDispersal.fol_choices(male, population, self)
elif male.maleState == MaleState.sol:
HamadryasDispersal.sol_choices(male, population, self)
elif male.maleState == MaleState.lea:
if not male.females:
male.maleState = MaleState.sol
male.OMUID = None
if male.malefols:
for malefol in male.malefols:
malefol = population.dict[malefol]
malefol.maleState = MaleState.sol
malefol.OMUID = None
male.malefols = []
# leaders have no choices
|