import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import nose.tools as nt
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.testing.decorators import skipif
from nose import SkipTest
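# statsmodels is an optional dependency; tests that need it are skipped via @skipif(_no_statsmodels)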
try:
import statsmodels.regression.linear_model as smlm
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
from .. import linearmodels as lm
from .. import algorithms as algo
from .. import utils
from ..palettes import color_palette
rs = np.random.RandomState(0)
class TestLinearPlotter(object):
rs = np.random.RandomState(77)
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list("abcdefghij"), 6)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
df.loc[[10, 20, 30], "y_na"] = np.nan
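# y_na mirrors y with a few values set to NaN so the dropna handling can be exercised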
def test_establish_variables_from_frame(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y="y")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_series(self):
p = lm._LinearPlotter()
p.establish_variables(None, x=self.df.x, y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_array(self):
p = lm._LinearPlotter()
p.establish_variables(None,
x=self.df.x.values,
y=self.df.y.values)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_mix(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_bad(self):
p = lm._LinearPlotter()
with nt.assert_raises(ValueError):
p.establish_variables(None, x="x", y=self.df.y)
def test_dropna(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y_na="y_na")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y_na, self.df.y_na)
p.dropna("x", "y_na")
mask = self.df.y_na.notnull()
pdt.assert_series_equal(p.x, self.df.x[mask])
pdt.assert_series_equal(p.y_na, self.df.y_na[mask])
class TestRegressionPlotter(object):
rs = np.random.RandomState(49)
grid = np.linspace(-3, 3, 30)
n_boot = 100
bins_numeric = 3
bins_given = [-1, 0, 1]
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list(range(6)), 10)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
bw_err = rs.randn(6)[df.s.values] * 2
df.y += bw_err
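# bw_err adds noise shared within each level of s, giving the units-aware CI test real between-unit variance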
p = 1 / (1 + np.exp(-(df.x * 2 + rs.randn(60))))
df["c"] = [rs.binomial(1, p_i) for p_i in p]
df.loc[[10, 20, 30], "y_na"] = np.nan
def test_variables_from_frame(self):
p = lm._RegressionPlotter("x", "y", data=self.df, units="s")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_series_equal(p.units, self.df.s)
pdt.assert_frame_equal(p.data, self.df)
def test_variables_from_series(self):
p = lm._RegressionPlotter(self.df.x, self.df.y, units=self.df.s)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
npt.assert_array_equal(p.units, self.df.s)
nt.assert_is(p.data, None)
def test_variables_from_mix(self):
p = lm._RegressionPlotter("x", self.df.y + 1, data=self.df)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y + 1)
pdt.assert_frame_equal(p.data, self.df)
def test_dropna(self):
p = lm._RegressionPlotter("x", "y_na", data=self.df)
nt.assert_equal(len(p.x), pd.notnull(self.df.y_na).sum())
p = lm._RegressionPlotter("x", "y_na", data=self.df, dropna=False)
nt.assert_equal(len(p.x), len(self.df.y_na))
def test_ci(self):
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 95)
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95, x_ci=68)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 68)
@skipif(_no_statsmodels)
def test_fast_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit with the "fast" function, which just does linear algebra
yhat_fast, _ = p.fit_fast(self.grid)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_fast, yhat_smod)
@skipif(_no_statsmodels)
def test_regress_poly(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit a first-order polynomial
yhat_poly, _ = p.fit_poly(self.grid, 1)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_poly, yhat_smod)
def test_regress_logx(self):
x = np.arange(1, 10)
y = np.arange(1, 10)
grid = np.linspace(1, 10, 100)
p = lm._RegressionPlotter(x, y, n_boot=self.n_boot)
yhat_lin, _ = p.fit_fast(grid)
yhat_log, _ = p.fit_logx(grid)
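# a log(x) fit of y = x is concave: it lies below the straight line at both ends of the grid and above it in between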
nt.assert_greater(yhat_lin[0], yhat_log[0])
nt.assert_greater(yhat_log[20], yhat_lin[20])
nt.assert_greater(yhat_lin[90], yhat_log[90])
@skipif(_no_statsmodels)
def test_regress_n_boot(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
npt.assert_equal(boots_fast.shape, (self.n_boot, self.grid.size))
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
npt.assert_equal(boots_poly.shape, (self.n_boot, self.grid.size))
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
npt.assert_equal(boots_smod.shape, (self.n_boot, self.grid.size))
@skipif(_no_statsmodels)
def test_regress_without_bootstrap(self):
p = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot, ci=None)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
nt.assert_is(boots_fast, None)
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
nt.assert_is(boots_poly, None)
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
nt.assert_is(boots_smod, None)
def test_numeric_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_numeric)
npt.assert_equal(len(bins), self.bins_numeric)
npt.assert_array_equal(np.unique(x_binned), bins)
def test_provided_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
npt.assert_array_equal(np.unique(x_binned), self.bins_given)
def test_bin_results(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
nt.assert_greater(self.df.x[x_binned == 0].min(),
self.df.x[x_binned == -1].max())
nt.assert_greater(self.df.x[x_binned == 1].min(),
self.df.x[x_binned == 0].max())
def test_scatter_data(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, x_jitter=.1)
x, y = p.scatter_data
nt.assert_true((x != self.df.d).any())
npt.assert_array_less(np.abs(self.df.d - x), np.repeat(.1, len(x)))
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, y_jitter=.05)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_less(np.abs(self.df.y - y), np.repeat(.1, len(y)))
def test_estimate_data(self):
p = lm._RegressionPlotter(self.df.d, self.df.y, x_estimator=np.mean)
x, y, ci = p.estimate_data
npt.assert_array_equal(x, np.sort(np.unique(self.df.d)))
npt.assert_array_almost_equal(y, self.df.groupby("d").y.mean())
npt.assert_array_less(np.array(ci)[:, 0], y)
npt.assert_array_less(y, np.array(ci)[:, 1])
def test_estimate_cis(self):
# set known good seed to avoid the test stochastically failing
np.random.seed(123)
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=95)
_, _, ci_big = p.estimate_data
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=50)
_, _, ci_wee = p.estimate_data
npt.assert_array_less(np.diff(ci_wee), np.diff(ci_big))
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=None)
_, _, ci_nil = p.estimate_data
npt.assert_array_equal(ci_nil, [None] * len(ci_nil))
def test_estimate_units(self):
# Seed the RNG locally
np.random.seed(345)
p = lm._RegressionPlotter("x", "y", data=self.df,
units="s", x_bins=3)
_, _, ci_big = p.estimate_data
ci_big = np.diff(ci_big, axis=1)
p = lm._RegressionPlotter("x", "y", data=self.df, x_bins=3)
_, _, ci_wee = p.estimate_data
ci_wee = np.diff(ci_wee, axis=1)
npt.assert_array_less(ci_wee, ci_big)
def test_partial(self):
x = self.rs.randn(100)
y = x + self.rs.randn(100)
z = x + self.rs.randn(100)
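# y and z are correlated only through their shared dependence on x, so regressing x out should lower their correlation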
p = lm._RegressionPlotter(y, z)
_, r_orig = np.corrcoef(p.x, p.y)[0]
p = lm._RegressionPlotter(y, z, y_partial=x)
_, r_semipartial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_semipartial, r_orig)
p = lm._RegressionPlotter(y, z, x_partial=x, y_partial=x)
_, r_partial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_partial, r_orig)
@skipif(_no_statsmodels)
def test_logistic_regression(self):
p = lm._RegressionPlotter("x", "c", data=self.df,
logistic=True, n_boot=self.n_boot)
_, yhat, _ = p.fit_regression(x_range=(-3, 3))
npt.assert_array_less(yhat, 1)
npt.assert_array_less(0, yhat)
@skipif(_no_statsmodels)
def test_robust_regression(self):
p_ols = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot)
_, ols_yhat, _ = p_ols.fit_regression(x_range=(-3, 3))
p_robust = lm._RegressionPlotter("x", "y", data=self.df,
robust=True, n_boot=self.n_boot)
_, robust_yhat, _ = p_robust.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(ols_yhat), len(robust_yhat))
@skipif(_no_statsmodels)
def test_lowess_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, lowess=True)
grid, yhat, err_bands = p.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(grid), len(yhat))
nt.assert_is(err_bands, None)
def test_regression_options(self):
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, order=2)
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, logistic=True)
def test_regression_limits(self):
f, ax = plt.subplots()
ax.scatter(self.df.x, self.df.y)
p = lm._RegressionPlotter("x", "y", data=self.df)
grid, _, _ = p.fit_regression(ax)
xlim = ax.get_xlim()
nt.assert_equal(grid.min(), xlim[0])
nt.assert_equal(grid.max(), xlim[1])
p = lm._RegressionPlotter("x", "y", data=self.df, truncate=True)
grid, _, _ = p.fit_regression()
nt.assert_equal(grid.min(), self.df.x.min())
nt.assert_equal(grid.max(), self.df.x.max())
plt.close("all")
class TestRegressionPlots(object):
rs = np.random.RandomState(56)
df = pd.DataFrame(dict(x=rs.randn(90),
y=rs.randn(90) + 5,
z=rs.randint(0, 1, 90),
g=np.repeat(list("abc"), 30),
h=np.tile(list("xy"), 45),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
def test_regplot_basic(self):
f, ax = plt.subplots()
lm.regplot("x", "y", self.df)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
plt.close("all")
def test_regplot_selective(self):
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, scatter=False, ax=ax)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, fit_reg=False)
nt.assert_equal(len(ax.lines), 0)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, ci=None)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
plt.close("all")
def test_regplot_scatter_kws_alpha(self):
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5, 0.5]])
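# an RGBA colour already carries its own alpha, so the collection alpha should stay unset and the facecolor alpha preserved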
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_is(ax.collections[0]._alpha, None)
nt.assert_equal(ax.collections[0]._facecolors[0, 3], 0.5)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color,
'alpha': 0.4})
nt.assert_equal(ax.collections[0]._alpha, 0.4)
f, ax = plt.subplots()
color = 'r'
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
plt.close("all")
def test_regplot_binned(self):
ax = lm.regplot("x", "y", self.df, x_bins=5)
nt.assert_equal(len(ax.lines), 6)
nt.assert_equal(len(ax.collections), 2)
plt.close("all")
def test_lmplot_basic(self):
g = lm.lmplot("x", "y", self.df)
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
plt.close("all")
def test_lmplot_hue(self):
g = lm.lmplot("x", "y", data=self.df, hue="h")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 2)
nt.assert_equal(len(ax.collections), 4)
plt.close("all")
def test_lmplot_markers(self):
g1 = lm.lmplot("x", "y", data=self.df, hue="h", markers="s")
nt.assert_equal(g1.hue_kws, {"marker": ["s", "s"]})
g2 = lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s"])
nt.assert_equal(g2.hue_kws, {"marker": ["o", "s"]})
with nt.assert_raises(ValueError):
lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s", "d"])
plt.close("all")
def test_lmplot_marker_linewidths(self):
if mpl.__version__ == "1.4.2":
raise SkipTest
g = lm.lmplot("x", "y", data=self.df, hue="h",
fit_reg=False, markers=["o", "+"])
c = g.axes[0, 0].collections
nt.assert_equal(c[0].get_linewidths()[0], 0)
rclw = mpl.rcParams["lines.linewidth"]
nt.assert_equal(c[1].get_linewidths()[0], rclw)
plt.close("all")
def test_lmplot_facets(self):
g = lm.lmplot("x", "y", data=self.df, row="g", col="h")
nt.assert_equal(g.axes.shape, (3, 2))
g = lm.lmplot("x", "y", data=self.df, col="u", col_wrap=4)
nt.assert_equal(g.axes.shape, (6,))
g = lm.lmplot("x", "y", data=self.df, hue="h", col="u")
nt.assert_equal(g.axes.shape, (1, 6))
plt.close("all")
def test_lmplot_hue_col_nolegend(self):
g = lm.lmplot("x", "y", data=self.df, col="h", hue="h")
nt.assert_is(g._legend, None)
plt.close("all")
def test_lmplot_scatter_kws(self):
g = lm.lmplot("x", "y", hue="h", data=self.df, ci=None)
red_scatter, blue_scatter = g.axes[0, 0].collections
red, blue = color_palette(n_colors=2)
npt.assert_array_equal(red, red_scatter.get_facecolors()[0, :3])
npt.assert_array_equal(blue, blue_scatter.get_facecolors()[0, :3])
plt.close("all")
def test_residplot(self):
x, y = self.df.x, self.df.y
ax = lm.residplot(x, y)
resid = y - np.polyval(np.polyfit(x, y, 1), x)
x_plot, y_plot = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, x_plot)
npt.assert_array_almost_equal(resid, y_plot)
plt.close("all")
@skipif(_no_statsmodels)
def test_residplot_lowess(self):
ax = lm.residplot("x", "y", self.df, lowess=True)
nt.assert_equal(len(ax.lines), 2)
x, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, np.sort(self.df.x))
plt.close("all")
from django.contrib.auth.models import User, Group
from django.test import TestCase
from django.test.client import Client
from object_permissions_m2m import *
from object_permissions_m2m.registration import TestModel, TestModelChild, \
TestModelChildChild, UnknownPermissionException
from object_permissions_m2m.views.permissions import ObjectPermissionForm, \
ObjectPermissionFormNewUsers
# XXX set global vars to make test code a bit cleaner
user0 = None
user1 = None
superuser = None
obj = None
object0 = None
object1 = None
child = None
perms = None
group = None
c = None
perms = set(['Perm1', 'Perm2', 'Perm3', 'Perm4'])
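# the full set of object-level permissions registered for the test models, reused as the expected complete set throughout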
class TestModelPermissions(TestCase):
def setUp(self):
global user0, user1, object0, object1, perms, group
self.tearDown()
user0 = User(id=2, username='tester')
user0.save()
user1 = User(id=3, username='tester2')
user1.save()
object0 = TestModel.objects.create(name='test0')
object0.save()
object1 = TestModel.objects.create(name='test1')
object1.save()
group = Group(name='testers')
group.save()
group.user_set.add(user0)
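# user0 is a member of the group, so group-level grants should also be visible to user0 when groups are included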
def tearDown(self):
TestModel.objects.all().delete()
TestModelChild.objects.all().delete()
TestModelChildChild.objects.all().delete()
User.objects.all().delete()
Group.objects.all().delete()
global user0, user1, object0, object1, perms, group
user0 = None
user1 = None
object0 = None
object1 = None
group = None
def test_trivial(self):
pass
def test_registration(self):
"""
Test that permissions were registered correctly
"""
perms1 = get_model_perms(TestModel)
perms2 = get_model_perms(TestModelChild)
self.assertTrue('Perm1' in perms1)
self.assertTrue('Perm2' in perms1)
self.assertTrue('Perm3' in perms1)
self.assertTrue('Perm4' in perms1)
self.assertTrue(isinstance(perms2, (dict,)))
self.assertTrue('Perm1' in perms2)
self.assertTrue('Perm2' in perms2)
self.assertTrue('Perm3' in perms2)
self.assertTrue('Perm4' in perms2)
def test_grant_user_permissions(self):
"""
Grant a user permissions
Verifies:
* granted properties are available via backend (has_perm)
* granted properties are only granted to the specified user, object
combinations
* granting unknown permission raises error
"""
# grant single property
grant(user0, 'Perm1', object0)
self.assertTrue(user0.has_perm('Perm1', object0))
self.assertFalse(user0.has_perm('Perm1', object1))
self.assertFalse(user1.has_perm('Perm1', object0))
self.assertFalse(user1.has_perm('Perm1', object1))
# grant property again
grant(user0, 'Perm1', object0)
self.assertTrue(user0.has_perm('Perm1', object0))
self.assertFalse(user0.has_perm('Perm1', object1))
self.assertFalse(user1.has_perm('Perm1', object0))
self.assertFalse(user1.has_perm('Perm1', object1))
# grant second property
grant(user0, 'Perm2', object0)
self.assertTrue(user0.has_perm('Perm1', object0))
self.assertFalse(user0.has_perm('Perm1', object1))
self.assertFalse(user1.has_perm('Perm1', object0))
self.assertFalse(user1.has_perm('Perm1', object1))
self.assertTrue(user0.has_perm('Perm2', object0))
self.assertFalse(user0.has_perm('Perm2', object1))
self.assertFalse(user1.has_perm('Perm2', object0))
self.assertFalse(user1.has_perm('Perm2', object1))
# grant property to another object
grant(user0, 'Perm2', object1)
self.assertTrue(user0.has_perm('Perm1', object0))
self.assertFalse(user0.has_perm('Perm1', object1))
self.assertFalse(user1.has_perm('Perm1', object0))
self.assertFalse(user1.has_perm('Perm1', object1))
self.assertTrue(user0.has_perm('Perm2', object0))
self.assertTrue(user0.has_perm('Perm2', object1))
self.assertFalse(user1.has_perm('Perm2', object0))
self.assertFalse(user1.has_perm('Perm2', object1))
# grant perms to other user
grant(user1, 'Perm3', object0)
self.assertTrue(user0.has_perm('Perm1', object0))
self.assertFalse(user0.has_perm('Perm1', object1))
self.assertFalse(user1.has_perm('Perm1', object0))
self.assertFalse(user1.has_perm('Perm1', object1))
self.assertTrue(user0.has_perm('Perm2', object0))
self.assertTrue(user0.has_perm('Perm2', object1))
self.assertFalse(user1.has_perm('Perm2', object0))
self.assertFalse(user1.has_perm('Perm2', object1))
self.assertTrue(user1.has_perm('Perm3', object0))
def grant_unknown():
grant(user1, 'UnknownPerm', object0)
self.assertRaises(UnknownPermissionException, grant_unknown)
def test_superuser(self):
"""
Superuser tests
Verifies:
* superuser has access to everything
* asking for invalid permissions returns false
* granting valid permissions does not generate any errors
* granting unknown permission raises error
"""
# set up
superUser = User(id=4, username='supertester', is_superuser=True)
superUser.save()
# should have access
self.assertTrue(superUser.has_object_perm('Perm1', object0))
self.assertTrue(superUser.has_object_perm('Perm1', object1))
self.assertTrue(superUser.has_object_perm('Perm2', object0))
self.assertTrue(superUser.has_object_perm('Perm2', object1))
self.assertTrue(superUser.has_object_perm('Perm3', object0))
self.assertTrue(superUser.has_object_perm('Perm3', object1))
self.assertTrue(superUser.has_object_perm('Perm4', object0))
self.assertTrue(superUser.has_object_perm('Perm4', object1))
# other users should not have access
self.assertFalse(user1.has_object_perm('Perm1', object0))
# shouldn't raise an error
grant(superUser, 'Perm1', object0)
# nothing should have changed
self.assertTrue(superUser.has_object_perm('Perm1', object0))
# should not have any invalid perms
self.assertFalse(superUser.has_object_perm('InvalidPerm', object0))
# should return all objects of type
query = superUser.get_objects_all_perms(TestModel, ['Perm1', 'Perm2', 'Perm3', 'Perm4'])
self.assertEqual(2, query.count())
self.assertTrue(object0 in query)
self.assertTrue(object1 in query)
query = superUser.get_objects_any_perms(TestModel, ['Perm1', 'Perm2', 'Perm3', 'Perm4'])
self.assertEqual(2, query.count())
self.assertTrue(object0 in query)
self.assertTrue(object1 in query)
def grant_unknown():
grant(superUser, 'UnknownPerm', object0)
self.assertRaises(UnknownPermissionException, grant_unknown)
# tear down
superUser.revoke_all(object0)
superUser.revoke_all(object1)
superUser.delete()
def test_revoke_user_permissions(self):
"""
Test revoking permissions from users
Verifies:
* revoked properties are removed
* revoked properties are only removed from the correct user/obj combinations
* revoking a property the user does not have does not raise an error
* revoking unknown permission raises error
"""
# revoke perm when user has no perms
revoke(user0, 'Perm1', object0)
for perm in perms:
grant(user0, perm, object0)
grant(user0, perm, object1)
grant(user1, perm, object0)
grant(user1, perm, object1)
# revoke single perm
revoke(user0, 'Perm1', object0)
self.assertEqual(set(['Perm2', u'Perm3', 'Perm4']), set(get_user_perms(user0, object0)))
self.assertEqual(perms, set(get_user_perms(user0, object1)))
self.assertEqual(perms, set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
# revoke a second perm
revoke(user0, 'Perm3', object0)
self.assertEqual(set(['Perm2', 'Perm4']), set(get_user_perms(user0, object0)))
self.assertEqual(perms, set(get_user_perms(user0, object1)))
self.assertEqual(perms, set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
# revoke from another object
revoke(user0, 'Perm3', object1)
self.assertEqual(set(['Perm2', 'Perm4']), set(get_user_perms(user0, object0)))
self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(get_user_perms(user0, object1)))
self.assertEqual(perms, set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
# revoke from another user
revoke(user1, 'Perm4', object0)
self.assertEqual(set(['Perm2', 'Perm4']), set(get_user_perms(user0, object0)))
self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(get_user_perms(user0, object1)))
self.assertEqual(set(['Perm1', 'Perm2', u'Perm3']), set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
# revoke perm user does not have
revoke(user0, 'Perm1', object0)
self.assertEqual(set(['Perm2', 'Perm4']), set(get_user_perms(user0, object0)))
self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(get_user_perms(user0, object1)))
self.assertEqual(set(['Perm1', 'Perm2', u'Perm3']), set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
# revoke perm that does not exist
revoke(user0, 'DoesNotExist', object0)
self.assertEqual(set(['Perm2', 'Perm4']), set(get_user_perms(user0, object0)))
self.assertEqual(set(['Perm1', 'Perm2', 'Perm4']), set(get_user_perms(user0, object1)))
self.assertEqual(set(['Perm1', 'Perm2', u'Perm3']), set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
def test_revoke_all(self):
"""
Test revoking all permissions from a user
Verifies:
* revoked properties are only removed from the correct user/obj combinations
* revoking a property the user does not have does not raise an error
* revoking unknown permission raises error
"""
for perm in perms:
grant(user0, perm, object0)
grant(user0, perm, object1)
grant(user1, perm, object0)
grant(user1, perm, object1)
revoke_all(user0, object0)
self.assertEqual([], get_user_perms(user0, object0))
self.assertEqual(perms, set(get_user_perms(user0, object1)))
self.assertEqual(perms, set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
revoke_all(user0, object1)
self.assertEqual([], get_user_perms(user0, object0))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual(perms, set(get_user_perms(user1, object0)))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
revoke_all(user1, object0)
self.assertEqual([], get_user_perms(user0, object0))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
self.assertEqual(perms, set(get_user_perms(user1, object1)))
revoke_all(user1, object1)
self.assertEqual([], get_user_perms(user0, object0))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
self.assertEqual([], get_user_perms(user1, object1))
def test_set_perms(self):
"""
Test setting perms to an exact set
"""
perms1 = perms
perms2 = set(['Perm1', 'Perm2'])
perms3 = set(['Perm2', 'Perm3'])
perms4 = []
# grant single property
set_user_perms(user0, perms1, object0)
self.assertEqual(perms1, set(get_user_perms(user0, object0)))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
set_user_perms(user0, perms2, object0)
self.assertEqual(perms2, set(get_user_perms(user0, object0)))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
set_user_perms(user0, perms3, object0)
self.assertEqual(perms3, set(get_user_perms(user0, object0)))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
# remove perms
set_user_perms(user0, perms4, object0)
self.assertEqual(perms4, get_user_perms(user0, object0))
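# after setting an empty perm list, none of the per-permission m2m relations should reference user0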
self.assertFalse(User.objects.filter(pk=user0.pk,
perm_perm1_testmodel_set__isnull=False).exists())
self.assertFalse(User.objects.filter(pk=user0.pk,
perm_perm2_testmodel_set__isnull=False).exists())
self.assertFalse(User.objects.filter(pk=user0.pk,
perm_perm3_testmodel_set__isnull=False).exists())
self.assertFalse(User.objects.filter(pk=user0.pk,
perm_perm4_testmodel_set__isnull=False).exists())
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
set_user_perms(user0, perms2, object1)
self.assertEqual(perms4, get_user_perms(user0, object0))
self.assertEqual(perms2, set(get_user_perms(user0, object1)))
self.assertEqual([], get_user_perms(user1, object0))
set_user_perms(user1, perms1, object0)
self.assertEqual(perms4, get_user_perms(user0, object0))
self.assertEqual(perms2, set(get_user_perms(user0, object1)))
self.assertEqual(perms1, set(get_user_perms(user1, object0)))
def test_has_perm(self):
"""
Additional tests for has_perm
Verifies:
* None object always returns false
* Nonexistent perm returns false
* a perm the user does not possess returns false
"""
grant(user0, 'Perm1', object0)
self.assertTrue(user0.has_perm('Perm1', object0))
self.assertFalse(user0.has_perm('Perm1', None))
self.assertFalse(user0.has_perm('DoesNotExist', object0))
self.assertFalse(user0.has_perm('Perm2', object0))
def test_get_perms(self):
"""
Tests retrieving the list of perms a user has on a specific object
Verifies:
* no perms returns an empty list
* some perms returns just those perms
* all perms returns all perms
"""
self.assertEqual([], user0.get_perms(object0))
grant(user0, 'Perm1', object0)
grant(user0, 'Perm3', object1)
grant(user0, 'Perm4', object1)
grant(user1, 'Perm2', object0)
self.assertEqual(['Perm1'], user0.get_perms(object0))
perms = user0.get_perms(object1)
self.assertEqual(2, len(perms))
self.assertEqual(set(['Perm3','Perm4']), set(perms))
def test_get_perms_any(self):
"""
Tests retrieving the list of perms a user has across all instances of a model
Verifies:
* no perms returns an empty list
* some perms returns just those perms
* all perms returns all perms
"""
self.assertEqual([], user0.get_perms_any(TestModel))
grant(user0, 'Perm1', object0)
grant(user0, 'Perm3', object1)
grant(user0, 'Perm4', object1)
grant(user1, 'Perm2', object0)
perms = user0.get_perms_any(TestModel)
self.assertEqual(3, len(perms))
self.assertEqual(set(['Perm1', 'Perm3', 'Perm4']), set(perms))
def test_get_users(self):
"""
Tests retrieving list of users with perms on an object
"""
grant(user0, 'Perm1', object0)
grant(user0, 'Perm3', object1)
grant(user1, 'Perm2', object1)
self.assertTrue(user0 in get_users(object0))
self.assertFalse(user1 in get_users(object0))
self.assertTrue(user0 in get_users(object1))
self.assertTrue(user1 in get_users(object1))
self.assertTrue(len(get_users(object1))==2)
def test_get_users_any(self):
"""
Tests retrieving list of users with perms on an object
"""
user0.set_perms(['Perm1', 'Perm2'], object0)
user0.set_perms(['Perm1', 'Perm3'], object1)
user1.set_perms(['Perm2'], object1)
# no perms
self.assertFalse(user1 in get_users_any(object0, ['Perm1']))
# explicit any perms
self.assertTrue(user0 in get_users_any(object0))
self.assertTrue(user0 in get_users_any(object1))
self.assertFalse(user1 in get_users_any(object0))
self.assertTrue(user1 in get_users_any(object1))
# has perms, but not the right one
self.assertFalse(user0 in get_users_any(object0, ['Perm3']))
# has one perm, but not all
self.assertTrue(user0 in get_users_any(object0, ['Perm1','Perm3']))
self.assertTrue(user0 in get_users_any(object1, ['Perm1','Perm2']))
# has single perm
self.assertTrue(user0 in get_users_any(object0, ['Perm1']))
self.assertTrue(user0 in get_users_any(object0, ['Perm2']))
self.assertTrue(user1 in get_users_any(object1, ['Perm2']))
# has multiple perms
self.assertTrue(user0 in get_users_any(object0, ['Perm1','Perm2']))
self.assertTrue(user0 in get_users_any(object1, ['Perm1','Perm3']))
# retry all tests via groups
# reset perms for group test
user0.revoke_all(object1)
group.set_perms(['Perm1', 'Perm3'], object1)
# ---------------------------------------------------------------------
# retry tests including groups, should be same set of results since
# user0 now has same permissions except object1 perms are through a
# group
# ---------------------------------------------------------------------
# no perms
self.assertFalse(user1 in get_users_any(object0, ['Perm1']))
# explicit any perms
self.assertTrue(user0 in get_users_any(object0))
self.assertTrue(user0 in get_users_any(object1))
self.assertFalse(user1 in get_users_any(object0))
self.assertTrue(user1 in get_users_any(object1))
# has perms, but not the right one
self.assertFalse(user0 in get_users_any(object0, ['Perm3']))
# has one perm, but not all
self.assertTrue(user0 in get_users_any(object0, ['Perm1','Perm3']))
self.assertTrue(user0 in get_users_any(object1, ['Perm1','Perm2']))
# has single perm
self.assertTrue(user0 in get_users_any(object0, ['Perm1']))
self.assertTrue(user0 in get_users_any(object0, ['Perm2']))
self.assertTrue(user1 in get_users_any(object1, ['Perm2']))
# has multiple perms
self.assertTrue(user0 in get_users_any(object0, ['Perm1','Perm2']))
self.assertTrue(user0 in get_users_any(object1, ['Perm1','Perm3']))
# ----------------------------
# retry tests excluding groups
# ----------------------------
# no perms
self.assertFalse(user1 in get_users_any(object0, ['Perm1'], groups=False))
# explicit any perms
self.assertTrue(user0 in get_users_any(object0, groups=False))
self.assertFalse(user0 in get_users_any(object1, groups=False))
self.assertFalse(user1 in get_users_any(object0, groups=False))
self.assertTrue(user1 in get_users_any(object1, groups=False))
# has perms, but not the right one
self.assertFalse(user0 in get_users_any(object0, ['Perm3'], groups=False))
# has one perm, but not all
self.assertTrue(user0 in get_users_any(object0, ['Perm1','Perm3'], groups=False))
self.assertFalse(user0 in get_users_any(object1, ['Perm1','Perm2'], groups=False))
# has single perm
self.assertTrue(user0 in get_users_any(object0, ['Perm1'], groups=False))
self.assertTrue(user0 in get_users_any(object0, ['Perm2'], groups=False))
self.assertTrue(user1 in get_users_any(object1, ['Perm2'], groups=False))
# has multiple perms
self.assertTrue(user0 in get_users_any(object0, ['Perm1','Perm2'], groups=False))
self.assertFalse(user0 in get_users_any(object1, ['Perm1','Perm3'], groups=False))
def test_get_users_all(self):
"""
Tests retrieving list of users with perms on an object
"""
user0.set_perms(['Perm1', 'Perm2'], object0)
user0.set_perms(['Perm1', 'Perm3'], object1)
user1.set_perms(['Perm2'], object1)
# no perms
self.assertFalse(user1 in get_users_all(object0, ['Perm1']))
# has perms, but not the right one
self.assertFalse(user0 in get_users_all(object0, ['Perm3']))
# has one perm, but not all
self.assertFalse(user0 in get_users_all(object0, ['Perm1','Perm3']))
self.assertFalse(user0 in get_users_all(object1, ['Perm1','Perm2']))
# has single perm
self.assertTrue(user0 in get_users_all(object0, ['Perm1']))
self.assertTrue(user0 in get_users_all(object0, ['Perm2']))
self.assertTrue(user1 in get_users_all(object1, ['Perm2']))
# has multiple perms
self.assertTrue(user0 in get_users_all(object0, ['Perm1','Perm2']))
self.assertTrue(user0 in get_users_all(object1, ['Perm1','Perm3']))
# retry all tests via groups
# reset perms for group test
user0.revoke_all(object1)
group.set_perms(['Perm1', 'Perm3'], object1)
# ---------------------------------------------------------------------
# retry tests including groups, should be same set of results since
# user0 now has same permissions except object1 perms are through a
# group
# ---------------------------------------------------------------------
# no perms
self.assertFalse(user1 in get_users_all(object0, ['Perm1']))
# has perms, but not the right one
self.assertFalse(user0 in get_users_all(object0, ['Perm3']))
# has one perm, but not all
self.assertFalse(user0 in get_users_all(object0, ['Perm1','Perm3']))
self.assertFalse(user0 in get_users_all(object1, ['Perm1','Perm2']))
# has single perm
self.assertTrue(user0 in get_users_all(object0, ['Perm1']))
self.assertTrue(user0 in get_users_all(object0, ['Perm2']))
self.assertTrue(user1 in get_users_all(object1, ['Perm2']))
# has multiple perms
self.assertTrue(user0 in get_users_all(object0, ['Perm1','Perm2']))
self.assertTrue(user0 in get_users_all(object1, ['Perm1','Perm3']))
# ----------------------------
# retry tests excluding groups
# ----------------------------
# no perms
self.assertFalse(user1 in get_users_all(object0, ['Perm1'], groups=False))
# has perms, but not the right one
self.assertFalse(user0 in get_users_all(object0, ['Perm3'], groups=False))
# has one perm, but not all
self.assertFalse(user0 in get_users_all(object0, ['Perm1','Perm3'], groups=False))
self.assertFalse(user0 in get_users_all(object1, ['Perm1','Perm2'], groups=False))
# has single perm
self.assertTrue(user0 in get_users_all(object0, ['Perm1'], groups=False))
self.assertTrue(user0 in get_users_all(object0, ['Perm2'], groups=False))
self.assertTrue(user1 in get_users_all(object1, ['Perm2'], groups=False))
# has multiple perms
self.assertTrue(user0 in get_users_all(object0, ['Perm1','Perm2'], groups=False))
self.assertFalse(user0 in get_users_all(object1, ['Perm1','Perm3'], groups=False))
def test_get_user_permissions(self):
# grant single property
grant(user0, 'Perm1', object0)
self.assertEqual(['Perm1'], get_user_perms(user0, object0))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
self.assertEqual([], get_user_perms(user1, object1))
# grant property again
grant(user0, 'Perm1', object0)
self.assertEqual(['Perm1'], get_user_perms(user0, object0))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
self.assertEqual([], get_user_perms(user1, object1))
# grant second property
grant(user0, 'Perm2', object0)
self.assertEqual(set(['Perm1', 'Perm2']), set(get_user_perms(user0, object0)))
self.assertEqual([], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
self.assertEqual([], get_user_perms(user1, object1))
# grant property to another object
grant(user0, 'Perm2', object1)
self.assertEqual(set(['Perm1', 'Perm2']), set(get_user_perms(user0, object0)))
self.assertEqual(['Perm2'], get_user_perms(user0, object1))
self.assertEqual([], get_user_perms(user1, object0))
self.assertEqual([], get_user_perms(user1, object1))
# grant perms to other user
grant(user1, 'Perm3', object0)
self.assertEqual(set(['Perm1', 'Perm2']), set(get_user_perms(user0, object0)))
self.assertEqual(['Perm2'], get_user_perms(user0, object1))
self.assertEqual(['Perm3'], get_user_perms(user1, object0))
self.assertEqual([], get_user_perms(user1, object1))
def test_get_objects_any_perms(self):
"""
Test retrieving objects with any matching perms
"""
object2 = TestModel.objects.create(name='test2')
object2.save()
object3 = TestModel.objects.create(name='test3')
object3.save()
user0.grant('Perm1', object0)
user0.grant('Perm2', object1)
user1.grant('Perm3', object2)
user1.grant('Perm4', object3)
# implicit any
self.assertTrue(object0 in user0.get_objects_any_perms(TestModel))
self.assertTrue(object1 in user0.get_objects_any_perms(TestModel))
self.assertFalse(object2 in user0.get_objects_any_perms(TestModel))
self.assertTrue(object2 in user1.get_objects_any_perms(TestModel))
self.assertTrue(object3 in user1.get_objects_any_perms(TestModel))
# retrieve single perm
self.assertTrue(object0 in user0.get_objects_any_perms(TestModel, ['Perm1']))
self.assertTrue(object1 in user0.get_objects_any_perms(TestModel, ['Perm2']))
self.assertTrue(object2 in user1.get_objects_any_perms(TestModel, ['Perm3']))
self.assertTrue(object3 in user1.get_objects_any_perms(TestModel, ['Perm4']))
# retrieve multiple perms
query = user0.get_objects_any_perms(TestModel, ['Perm1', 'Perm2', 'Perm3'])
self.assertTrue(object0 in query)
self.assertTrue(object1 in query)
self.assertEqual(2, query.count())
query = user1.get_objects_any_perms(TestModel, ['Perm1','Perm3', 'Perm4'])
self.assertTrue(object2 in query)
self.assertTrue(object3 in query)
self.assertEqual(2, query.count())
# retrieve no results
query = user0.get_objects_any_perms(TestModel, ['Perm3'])
self.assertEqual(0, query.count())
query = user1.get_objects_any_perms(TestModel, ['Perm1'])
self.assertEqual(0, query.count())
# extra kwargs
query = user0.get_objects_any_perms(TestModel, ['Perm1', 'Perm2', 'Perm3']).filter(name='test0')
self.assertTrue(object0 in query)
self.assertEqual(1, query.count())
# exclude groups
self.assertTrue(object0 in user0.get_objects_any_perms(TestModel, ['Perm1'], groups=False))
query = user0.get_objects_any_perms(TestModel, ['Perm1', 'Perm2', 'Perm3'], groups=False)
self.assertTrue(object0 in query)
self.assertTrue(object1 in query)
self.assertEqual(2, query.count())
def test_get_objects_any_perms_related(self):
"""
Test retrieving objects with any matching perms and related model
options
"""
object2 = TestModel.objects.create(name='test2')
object2.save()
child0 = TestModelChild.objects.create(parent=object0)
child1 = TestModelChild.objects.create(parent=object1)
child2 = TestModelChild.objects.create(parent=object2)
child3 = TestModelChild.objects.create(parent=object2)
child0.save()
child1.save()
child2.save()
childchild = TestModelChildChild.objects.create(parent=child0)
childchild.save()
user0.grant('Perm1', object0) # perms on both
user0.grant('Perm2', child0) # perms on both
user0.grant('Perm3', object1) # perm on parent only (child 1)
user0.grant('Perm4', child2) # perm on child only
user0.grant('Perm1', childchild)
# # related field with implicit perms
# query = user0.get_objects_any_perms(TestModelChild, parent=None)
# self.assertEqual(3, len(query))
# self.assertTrue(child0 in query, 'user should have perms on parent and directly')
# self.assertTrue(child1 in query, 'user should have perms on parent')
# self.assertTrue(child2 in query, 'user should have perms on parent, and directly')
# self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')
# related field with single perms
query = user0.get_objects_any_perms(TestModelChild, parent=['Perm3'])
self.assertEqual(3, len(query))
self.assertTrue(child0 in query, 'user should have perms on parent and directly')
self.assertTrue(child1 in query, 'user should have perms on parent')
self.assertTrue(child2 in query, 'user should have perms on parent')
self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')
# related field with multiple perms
query = user0.get_objects_any_perms(TestModelChild, parent=['Perm1','Perm3'])
self.assertEqual(3, len(query))
self.assertTrue(child0 in query, 'user should have perms on parent and directly')
self.assertTrue(child1 in query, 'user should have perms on parent')
self.assertTrue(child2 in query, 'user should have perms on parent')
self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')
# mix of direct and related perms
query = user0.get_objects_any_perms(TestModelChild, perms=['Perm4'], parent=['Perm1'])
self.assertEqual(2, len(query))
self.assertTrue(child0 in query, 'user should have perms on parent and directly')
self.assertFalse(child1 in query, 'user should not have perms on parent')
self.assertTrue(child2 in query, 'user should have perms directly')
self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')
# multiple relations
query = user0.get_objects_any_perms(TestModelChildChild, parent=['Perm2'], parent__parent=['Perm1'])
self.assertEqual(1, len(query))
self.assertTrue(childchild in query)
def test_get_objects_all_perms(self):
"""
Test retrieving objects that have all matching perms
"""
object2 = TestModel.objects.create(name='test2')
object2.save()
object3 = TestModel.objects.create(name='test3')
object3.save()
user0.grant('Perm1', object0)
user0.grant('Perm2', object0)
user0.grant('Perm4', object1)
user1.grant('Perm3', object2)
user1.grant('Perm4', object2)
# retrieve single perm
self.assertTrue(object0 in user0.get_objects_all_perms(TestModel, ['Perm1']))
self.assertTrue(object1 in user0.get_objects_all_perms(TestModel, ['Perm4']))
self.assertTrue(object2 in user1.get_objects_all_perms(TestModel, ['Perm3']))
self.assertTrue(object2 in user1.get_objects_all_perms(TestModel, ['Perm4']))
# retrieve multiple perms
query = user0.get_objects_all_perms(TestModel, ['Perm1', 'Perm2'])
self.assertTrue(object0 in query)
self.assertFalse(object1 in query)
self.assertEqual(1, query.count())
query = user1.get_objects_all_perms(TestModel, ['Perm3', 'Perm4'])
self.assertTrue(object2 in query)
self.assertFalse(object3 in query)
self.assertEqual(1, query.count())
# retrieve no results
self.assertFalse(user0.get_objects_all_perms(TestModel, ['Perm3']).exists())
self.assertFalse(user0.get_objects_all_perms(TestModel, ['Perm1','Perm4']).exists())
self.assertFalse(user1.get_objects_all_perms(TestModel, ['Perm1']).exists())
# extra kwargs
query = user0.get_objects_all_perms(TestModel, ['Perm1', 'Perm2']).filter(name='test0')
self.assertTrue(object0 in query)
self.assertEqual(1, query.count())
# exclude groups
self.assertTrue(object0 in user0.get_objects_all_perms(TestModel, ['Perm1'], groups=False))
query = user0.get_objects_all_perms(TestModel, ['Perm1', 'Perm2'], groups=False)
self.assertTrue(object0 in query)
self.assertFalse(object1 in query)
self.assertEqual(1, query.count())
def test_get_objects_all_perms_related(self):
"""
Test retrieving objects with all matching perms and related model
options
"""
child0 = TestModelChild.objects.create(parent=object0)
child1 = TestModelChild.objects.create(parent=object0)
child2 = TestModelChild.objects.create(parent=object1)
child3 = TestModelChild.objects.create(parent=object1)
child0.save()
child1.save()
child2.save()
child3.save()
childchild = TestModelChildChild.objects.create(parent=child0)
childchild.save()
user0.grant('Perm1', object0)
user0.grant('Perm1', object1)
user0.grant('Perm2', object1)
user0.grant('Perm1', child0)
user0.grant('Perm1', child1)
user0.grant('Perm2', child1)
user0.grant('Perm1', child3)
user0.grant('Perm1', childchild)
# related field with single perms
query = user0.get_objects_all_perms(TestModelChild, perms=['Perm1'], parent=['Perm1'])
self.assertEqual(3, len(query))
self.assertTrue(child0 in query)
self.assertTrue(child1 in query)
self.assertFalse(child2 in query)
self.assertTrue(child3 in query)
# related field with single perms - has parent but not child
query = user0.get_objects_all_perms(TestModelChild, perms=['Perm4'], parent=['Perm1'])
self.assertEqual(0, len(query))
# related field with single perms - has child but not parent
query = user0.get_objects_all_perms(TestModelChild, perms=['Perm1'], parent=['Perm4'])
self.assertEqual(0, len(query))
# related field with multiple perms
query = user0.get_objects_all_perms(TestModelChild, perms=['Perm1'], parent=['Perm1','Perm2'])
self.assertEqual(1, len(query))
self.assertFalse(child0 in query)
self.assertFalse(child1 in query)
self.assertFalse(child2 in query)
self.assertTrue(child3 in query)
# multiple relations
query = user0.get_objects_all_perms(TestModelChildChild, perms=['Perm1'], parent=['Perm1'], parent__parent=['Perm1'])
self.assertEqual(1, len(query))
self.assertTrue(childchild in query)
def test_get_all_objects_any_perms(self):
"""
Test retrieving all objects from all models
"""
object2 = TestModel.objects.create(name='test2')
object2.save()
object3 = TestModel.objects.create(name='test3')
object3.save()
object4 = TestModel.objects.create(name='test4')
object4.save()
user0.grant('Perm1', object0)
user0.grant('Perm2', object1)
user0.grant('Perm4', object1)
perm_dict = user0.get_all_objects_any_perms()
self.assertTrue(isinstance(perm_dict, (dict,)))
self.assertTrue(TestModel in perm_dict, perm_dict.keys())
self.assertTrue(object0 in perm_dict[TestModel])
self.assertTrue(object1 in perm_dict[TestModel])
self.assertFalse(object2 in perm_dict[TestModel])
self.assertFalse(object3 in perm_dict[TestModel])
self.assertFalse(object4 in perm_dict[TestModel])
# no perms
perm_dict = user1.get_all_objects_any_perms()
self.assertTrue(isinstance(perm_dict, (dict,)))
self.assertTrue(TestModel in perm_dict, perm_dict.keys())
self.assertEqual(0, perm_dict[TestModel].count())
# ---------------------------------------------------------------------
# retry tests including groups, should be same set of results since
# user0 now has same permissions except object1 perms are through a
# group
# ---------------------------------------------------------------------
user0.revoke_all(object1)
group.set_perms(['Perm1', 'Perm3'], object1)
perm_dict = user0.get_all_objects_any_perms()
self.assertTrue(isinstance(perm_dict, (dict,)))
self.assertTrue(TestModel in perm_dict, perm_dict.keys())
self.assertTrue(object0 in perm_dict[TestModel])
self.assertTrue(object1 in perm_dict[TestModel])
self.assertFalse(object2 in perm_dict[TestModel])
self.assertFalse(object3 in perm_dict[TestModel])
self.assertFalse(object4 in perm_dict[TestModel])
# ----------------------------
# retry tests excluding groups
# ----------------------------
perm_dict = user0.get_all_objects_any_perms(groups=False)
self.assertTrue(isinstance(perm_dict, (dict,)))
self.assertTrue(TestModel in perm_dict, perm_dict.keys())
self.assertTrue(object0 in perm_dict[TestModel])
self.assertFalse(object1 in perm_dict[TestModel])
self.assertFalse(object2 in perm_dict[TestModel])
self.assertFalse(object3 in perm_dict[TestModel])
self.assertFalse(object4 in perm_dict[TestModel])
def test_has_any_on_model(self):
"""
Test checking if a user has perms on any instance of the model
"""
object2 = TestModel.objects.create(name='test2')
object2.save()
object3 = TestModel.objects.create(name='test3')
object3.save()
user0.grant('Perm1', object0)
user0.grant('Perm2', object1)
user1.grant('Perm3', object2)
# check single perm
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1']))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm2']))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3']))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1'], False))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm2'], False))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3'], False))
# check multiple perms
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm4']))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm2']))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3', 'Perm4']))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm4'], False))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm2'], False))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3', 'Perm4'], False))
# no results
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3']))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm4']))
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3', 'Perm4']))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm1', 'Perm4']))
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3'], False))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm4'], False))
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3', 'Perm4'], False))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm1', 'Perm4'], False))
# ---------------------------------------------------------------------
# retry tests including groups, should be same set of results since
# user0 now has same permissions except object1 perms are through a
# group
# ---------------------------------------------------------------------
user0.revoke_all(object1)
group.grant("Perm2", object1)
# check single perm
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1']))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm2']))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3']))
# check multiple perms
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm4']))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm2']))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3', 'Perm4']))
# no results
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3']))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm4']))
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3', 'Perm4']))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm1', 'Perm4']))
# ----------------------------
# retry tests excluding groups
# ----------------------------
# check single perm
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1'], False))
self.assertFalse(user0.has_any_perms(TestModel, ['Perm2'], False))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3'], False))
# check multiple perms
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm4'], False))
self.assertTrue(user0.has_any_perms(TestModel, ['Perm1', 'Perm2'], False))
self.assertTrue(user1.has_any_perms(TestModel, ['Perm3', 'Perm4'], False))
# no results
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3'], False))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm4'], False))
self.assertFalse(user0.has_any_perms(TestModel, ['Perm2', 'Perm4'], False))
self.assertFalse(user0.has_any_perms(TestModel, ['Perm3', 'Perm4'], False))
self.assertFalse(user1.has_any_perms(TestModel, ['Perm1', 'Perm4'], False))
def test_has_any_perm(self):
"""
Test the user_has_any_perms() function.
"""
# no perms
self.assertFalse(user_has_any_perms(user0, object0))
self.assertFalse(user_has_any_perms(user0, object0, ['Perm1', 'Perm2']))
self.assertFalse(user_has_any_perms(user0, object0, groups=True))
self.assertFalse(user_has_any_perms(user0, object0, ['Perm1', 'Perm2']))
# single perm
user0.grant("Perm1", object0)
user1.grant("Perm2", object0)
self.assertTrue(user_has_any_perms(user0, object0))
self.assertTrue(user_has_any_perms(user1, object0))
self.assertTrue(user_has_any_perms(user0, object0, ['Perm1', 'Perm2']))
self.assertTrue(user_has_any_perms(user1, object0, ['Perm1', 'Perm2']))
user0.revoke_all(object0)
user1.revoke_all(object0)
# perm on group, but not checking
group.grant("Perm3", object0)
self.assertFalse(user_has_any_perms(user0, object0, groups=False))
self.assertFalse(user_has_any_perms(user0, object0, ['Perm1', 'Perm3'], groups=False))
# perm on group, checking groups
self.assertTrue(user_has_any_perms(user0, object0, groups=True))
self.assertTrue(user_has_any_perms(user0, object0, ['Perm1', 'Perm3']))
def test_has_all_on_model(self):
"""
Test checking if a user has all of the given perms on any instance of the model
"""
object2 = TestModel.objects.create(name='test2')
object2.save()
object3 = TestModel.objects.create(name='test3')
object3.save()
user0.grant('Perm1', object0)
user0.grant('Perm2', object0)
user0.grant('Perm2', object1)
user1.grant('Perm3', object2)
# check single perm
self.assertTrue(user0.has_all_perms(TestModel, ['Perm1']))
self.assertTrue(user0.has_all_perms(TestModel, ['Perm2']))
self.assertTrue(user1.has_all_perms(TestModel, ['Perm3']))
self.assertTrue(user0.has_all_perms(TestModel, ['Perm1'], False))
self.assertTrue(user0.has_all_perms(TestModel, ['Perm2'], False))
self.assertTrue(user1.has_all_perms(TestModel, ['Perm3'], False))
# check multiple perms
self.assertFalse(user0.has_all_perms(TestModel, ['Perm1', 'Perm4']))
self.assertTrue(user0.has_all_perms(TestModel, ['Perm1', 'Perm2']))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm3', 'Perm4']))
self.assertFalse(user0.has_all_perms(TestModel, ['Perm1', 'Perm4'], False))
self.assertTrue(user0.has_all_perms(TestModel, ['Perm1', 'Perm2'], False))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm3', 'Perm4'], False))
# no results
self.assertFalse(user0.has_all_perms(TestModel, ['Perm3']))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm4']))
self.assertFalse(user0.has_all_perms(TestModel, ['Perm3', 'Perm4']))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm1', 'Perm4']))
self.assertFalse(user0.has_all_perms(TestModel, ['Perm3'], False))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm4'], False))
self.assertFalse(user0.has_all_perms(TestModel, ['Perm3', 'Perm4'], False))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm1', 'Perm4'], False))
# ---------------------------------------------------------------------
# retry tests including groups, should be same set of results since
# user0 now has same permissions except object1 perms are through a
# group
# ---------------------------------------------------------------------
user0.revoke_all(object1)
group.grant("Perm2", object1)
# check single perm
self.assertTrue(user0.has_all_perms(TestModel, ['Perm1']))
self.assertTrue(user0.has_all_perms(TestModel, ['Perm2']))
self.assertTrue(user1.has_all_perms(TestModel, ['Perm3']))
# check multiple perms
self.assertFalse(user0.has_all_perms(TestModel, ['Perm1', 'Perm4']))
self.assertTrue(user0.has_all_perms(TestModel, ['Perm1', 'Perm2']))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm3', 'Perm4']))
# no results
self.assertFalse(user0.has_all_perms(TestModel, ['Perm3']))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm4']))
self.assertFalse(user0.has_all_perms(TestModel, ['Perm3', 'Perm4']))
self.assertFalse(user1.has_all_perms(TestModel, ['Perm1', 'Perm4']))
def test_has_all_perm(self):
"""
Test the user_has_all_perms() function.
"""
# no perms
self.assertFalse(user_has_all_perms(user0, object0, ['Perm1', 'Perm2']))
self.assertFalse(user_has_all_perms(user0, object0, ['Perm1', 'Perm2']))
# single perm
user0.grant("Perm1", object0)
user1.grant("Perm2", object0)
self.assertFalse(user_has_all_perms(user0, object0, ['Perm1', 'Perm2']))
self.assertFalse(user_has_all_perms(user1, object0, ['Perm1', 'Perm2']))
self.assertTrue(user_has_all_perms(user0, object0, ['Perm1']))
self.assertTrue(user_has_all_perms(user1, object0, ['Perm2']))
user0.revoke_all(object0)
user1.revoke_all(object0)
# perm on group, but not checking
group.grant("Perm3", object0)
self.assertFalse(user_has_all_perms(user0, object0, ['Perm3'], groups=False))
# perm on group, checking groups
self.assertTrue(user_has_all_perms(user0, object0, ['Perm3']))
#class TestPermissionViews(TestCase):
# """ tests for user specific test views """
#
# def setUp(self):
# self.tearDown()
# global user0, user1, superuser, obj, c
#
# user0 = User(id=2, username='tester0')
# user0.set_password('secret')
# user0.save()
# user1 = User(id=3, username='tester1')
# user1.set_password('secret')
# user1.save()
# superuser = User(id=4, username='superuser', is_superuser=True)
# superuser.set_password('secret')
# superuser.save()
#
# obj = TestModel.objects.create(name='test')
#
# c = Client()
#
# def tearDown(self):
# TestModel.objects.all().delete()
# User.objects.all().delete()
#
# def test_permissions_all(self):
# """ tests view for returning all permissions across all objects """
# url = '/user/%s/permissions/all'
#
# # anonymous user
# response = c.get(url % user1.pk, follow=True)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'registration/login.html')
#
# # unauthorized user
# self.assertTrue(c.login(username=user0.username, password='secret'))
# response = c.get(url % user1.pk)
# self.assertEqual(403, response.status_code)
#
# # unknown user
# user0.is_superuser = True
# user0.save()
# response = c.get(url % 123456)
# self.assertEqual(404, response.status_code)
#
# # superuser
# response = c.get(url % user1.pk)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'object_permissions/permissions/objects.html')
# def test_permissions_generic_add(self):
# """
# Tests adding permissions to a new object using the generic perm view
# """
# url = '/user/%s/permissions/%s/'
# args = (user1.pk, 'TestModel')
#
# # anonymous user
# response = c.get(url % args, follow=True)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'registration/login.html')
#
# # unauthorized user
# self.assertTrue(c.login(username=user0.username, password='secret'))
# response = c.get(url % args)
# self.assertEqual(403, response.status_code)
#
# # invalid class
# self.assertTrue(c.login(username=superuser.username, password='secret'))
# response = c.get(url % (user1.pk, 'DoesNotExist'))
# self.assertEqual(404, response.status_code)
#
# # invalid user
# response = c.get(url % (-1, 'TestModel'))
# self.assertEqual(404, response.status_code)
#
# # GET - success
# response = c.get(url % args)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'object_permissions/permissions/form.html')
#
# # POST - no perms
# data = {'user':user1.pk, 'obj':obj.pk}
# response = c.post(url % args, data)
# self.assertEqual(200, response.status_code)
# self.assertEquals('application/json', response['content-type'])
# self.assertEqual([], user1.get_perms(obj))
#
# # POST - no object
# data = {'user':user1.pk, 'permissions':['Perm1']}
# response = c.post(url % args, data)
# self.assertEqual(200, response.status_code)
# self.assertEquals('application/json', response['content-type'])
# self.assertEqual([], user1.get_perms(obj))
#
# # POST - success
# data = {'user':user1.pk, 'permissions':['Perm1'], 'obj':obj.pk}
# response = c.post(url % args, data)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'object_permissions/permissions/object_row.html')
# self.assertEqual(['Perm1'], user1.get_perms(obj))
# def test_permissions_generic_edit(self):
# """
# Tests editing permissions on an existing object using the generic perm view
# """
# url = '/user/%s/permissions/%s/%s/'
# args = (user1.pk, 'TestModel',obj.pk)
# user1.grant('Perm1', obj)
#
# # anonymous user
# response = c.get(url % args, follow=True)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'registration/login.html')
#
# # unauthorized user
# self.assertTrue(c.login(username=user0.username, password='secret'))
# response = c.get(url % args)
# self.assertEqual(403, response.status_code)
#
# # invalid class
# self.assertTrue(c.login(username=superuser.username, password='secret'))
# response = c.get(url % (user1.pk, 'DoesNotExist',obj.pk))
# self.assertEqual(404, response.status_code)
#
# # invalid user
# response = c.get(url % (-1, 'TestModel',obj.pk))
# self.assertEqual(404, response.status_code)
#
# #invalid object
# response = c.get(url % (user1.pk, 'TestModel',-1))
# self.assertEqual(404, response.status_code)
#
# # GET - success
# response = c.get(url % args)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'object_permissions/permissions/form.html')
#
# # POST - no object
# data = {'user':user1.pk, 'permissions':['Perm2']}
# response = c.post(url % args, data)
# self.assertEqual(200, response.status_code)
# self.assertEquals('application/json', response['content-type'])
# self.assertEqual(['Perm1'], user1.get_perms(obj))
#
# # POST - success
# data = {'user':user1.pk, 'permissions':['Perm2','Perm3'], 'obj':obj.pk}
# response = c.post(url % args, data)
# self.assertEqual(200, response.status_code)
# self.assertTemplateUsed(response, 'object_permissions/permissions/object_row.html')
# self.assertEqual(set(['Perm2','Perm3']), set(user1.get_perms(obj)))
#
# # POST - no perms (removes all perms)
# data = {'user':user1.pk, 'obj':obj.pk}
# response = c.post(url % args, data)
# self.assertEqual(200, response.status_code)
# self.assertEquals('application/json', response['content-type'])
# self.assertEquals('"TestModel_%s"' % obj.pk, response.content)
# self.assertEqual([], user1.get_perms(obj))
class TestObjectPermissionForm(TestCase):
""" Tests for testing forms for editing permissions """
def setUp(self):
self.tearDown()
global obj, child, user, group
obj = TestModel.objects.create()
child = TestModelChild.objects.create()
user = User.objects.create(username='tester')
group = Group.objects.create(name='test_group')
def tearDown(self):
global user, child, obj, group
if obj:
if user:
user.revoke_all(obj)
if group:
group.revoke_all(obj)
TestModel.objects.all().delete()
TestModelChild.objects.all().delete()
User.objects.all().delete()
Group.objects.all().delete()
def test_trivial(self):
ObjectPermissionForm(TestModel)
def test_choices_generation(self):
""" tests that permissions choices lists are generated correctly """
choices = ObjectPermissionForm.get_choices(obj)
self.assertEqual(4, len(choices))
choice3, choice2, choice1, choice4 = choices
perm, display = choice1
self.assertEqual('Perm1', perm)
self.assertEqual(display, {'label':'Perm One','description':'The first permission'})
perm, display = choice2
self.assertEqual('Perm2', perm)
self.assertEqual(display, {'label':'Perm2','description':'The second permission'})
perm, display = choice3
self.assertEqual('Perm3', perm)
self.assertEqual(display, {'label':'Perm Three'})
perm, display = choice4
self.assertEqual('Perm4', perm)
self.assertEqual(display, {'label':'Perm4'})
def test_choices_cache(self):
""" tests that choices lists are cached """
choices = ObjectPermissionForm.get_choices(TestModel)
choices2 = ObjectPermissionForm.get_choices(TestModel)
choices3 = ObjectPermissionForm.get_choices(TestModel)
choices4 = ObjectPermissionForm.get_choices(TestModel)
self.assertEqual(id(choices), id(choices2))
self.assertEqual(id(choices3), id(choices4))
def test_invalid_grantee(self):
""" tests entering bad id for group or user """
data = {'user':1234, 'obj':obj.pk, 'permissions':['Perm1']}
form = ObjectPermissionForm(TestModel, data)
self.assertFalse(form.is_valid())
data = {'group':1234, 'obj':obj.pk, 'permissions':['Perm1']}
form = ObjectPermissionForm(TestModel, data)
self.assertFalse(form.is_valid())
def test_user_group_exclusivity(self):
""" tests that only a user or a group can be selected """
global user
data = {'user':user.pk, 'obj':obj.pk, 'group':group.pk, 'permissions':['Perm1']}
form = ObjectPermissionForm(TestModel, data)
self.assertFalse(form.is_valid())
data = {'user':user.pk, 'obj':obj.pk, 'permissions':['Perm1']}
form = ObjectPermissionForm(TestModel, data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(user, form.cleaned_data['grantee'])
data = {'group':group.pk, 'obj':obj.pk, 'permissions':['Perm1']}
form = ObjectPermissionForm(TestModel, data)
self.assertTrue(form.is_valid())
self.assertEqual(group, form.cleaned_data['grantee'])
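# Illustrative sketch (not part of the original test suite): how a validated
# ObjectPermissionForm might be applied. It relies only on behaviour exercised by the
# tests above (cleaned_data['grantee'], cleaned_data['permissions'], and grant()); the
# helper name and exact flow are assumptions, not the app's documented API.
def _example_apply_permission_form(data, target):
    form = ObjectPermissionForm(TestModel, data)
    if form.is_valid():
        grantee = form.cleaned_data['grantee']
        for perm in form.cleaned_data['permissions']:
            grantee.grant(perm, target)
    return form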
class TestObjectPermissionFormNewUsers(TestCase):
def setUp(self):
self.tearDown()
global obj, user
obj = TestModel.objects.create()
user = User.objects.create(username='tester')
def tearDown(self):
global obj, user
if user:
user.revoke_all(obj)
TestModel.objects.all().delete()
TestModelChild.objects.all().delete()
User.objects.all().delete()
obj = None
user = None
def test_trivial(self):
ObjectPermissionFormNewUsers(TestModel)
def test_new_user(self):
"""
Tests adding a new user
validates:
* perms must be included
"""
global user
data = {'user':user.pk, 'obj':obj.pk}
form = ObjectPermissionFormNewUsers(TestModel, data)
self.assertFalse(form.is_valid())
data = {'user':user.pk, 'obj':obj.pk, 'permissions':[]}
form = ObjectPermissionFormNewUsers(TestModel, data)
self.assertFalse(form.is_valid())
data = {'user':user.pk, 'obj':obj.pk, 'permissions':['Perm1']}
form = ObjectPermissionFormNewUsers(TestModel, data)
self.assertTrue(form.is_valid())
self.assertTrue(form.cleaned_data['new'])
def test_modify_user(self):
"""
Tests modifying a user's perms
"""
global user
user.grant('Perm1', obj)
data = {'user':user.pk, 'obj':obj.pk, 'permissions':['Perm1']}
form = ObjectPermissionFormNewUsers(TestModel, data)
self.assertTrue(form.is_valid())
self.assertFalse(form.cleaned_data['new'])
data = {'user':user.pk, 'obj':obj.pk}
form = ObjectPermissionFormNewUsers(TestModel, data)
self.assertTrue(form.is_valid())
self.assertFalse(form.cleaned_data['new'])
user.grant('Perm1', obj)
data = {'user':user.pk, 'obj':obj.pk, 'permissions':[]}
form = ObjectPermissionFormNewUsers(TestModel, data)
self.assertTrue(form.is_valid())
self.assertFalse(form.cleaned_data['new'])
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.utils import get_fetch_values
from frappe.model.mapper import get_mapped_doc
from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty
from frappe.desk.notifications import clear_doctype_notifications
from erpnext.controllers.recurring_document import month_map, get_next_date
from frappe.contacts.doctype.address.address import get_company_address
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class WarehouseRequired(frappe.ValidationError): pass
class SalesOrder(SellingController):
def __init__(self, arg1, arg2=None):
super(SalesOrder, self).__init__(arg1, arg2)
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.validate_for_items()
self.validate_warehouse()
self.validate_drop_ship()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.validate_with_previous_doc()
self.set_status()
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_po(self):
# validate PO date vs. delivery date
if self.po_date:
for d in self.get("items"):
if d.delivery_date and getdate(self.po_date) > getdate(d.delivery_date):
frappe.throw(_("Row #{0}: Expected Delivery Date cannot be before Purchase Order Date")
.format(d.idx))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0] and not cint(frappe.db.get_single_value("Selling Settings",
"allow_against_multiple_purchase_orders")):
frappe.msgprint(_("Warning: Sales Order {0} already exists against Customer's Purchase Order {1}").format(so[0][0], self.po_no))
def validate_for_items(self):
check_list = []
for d in self.get('items'):
check_list.append(cstr(d.item_code))
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code, d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
# check for same entry multiple times
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list) and \
not cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
frappe.msgprint(_("Same item has been entered multiple times"),
title=_("Warning"), indicator='orange')
def product_bundle_has_stock_item(self, product_bundle):
"""Returns true if product bundle has stock item"""
ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi
where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle))
return ret
def validate_sales_mntc_quotation(self):
for d in self.get('items'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s",
(d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}")
.format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales':
if not self.delivery_date:
self.delivery_date = max([d.delivery_date for d in self.get("items")])
if self.delivery_date:
for d in self.get("items"):
if not d.delivery_date:
d.delivery_date = self.delivery_date
if getdate(self.transaction_date) > getdate(d.delivery_date):
frappe.msgprint(_("Expected Delivery Date should be after Sales Order Date"),
indicator='orange', title=_('Warning'))
else:
frappe.throw(_("Please enter Delivery Date"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project))
def validate_warehouse(self):
super(SalesOrder, self).validate_warehouse()
for d in self.get("items"):
if (frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1 or
(self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \
and not d.warehouse and not cint(d.delivered_by_supplier):
frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code),
WarehouseRequired)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc({
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
doc.update_opportunity()
def validate_drop_ship(self):
for d in self.get('items'):
if d.delivered_by_supplier and not d.supplier:
frappe.throw(_("Row #{0}: Set Supplier for item {1}").format(d.idx, d.item_code))
def on_submit(self):
self.check_credit_limit()
self.update_reserved_qty()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
self.update_project()
self.update_prevdoc_status('submit')
def on_cancel(self):
# Cannot cancel closed SO
if self.status == 'Closed':
frappe.throw(_("Closed order cannot be cancelled. Unclose to cancel."))
self.check_nextdoc_docstatus()
self.update_reserved_qty()
self.update_project()
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def update_project(self):
project_list = []
if self.project:
project = frappe.get_doc("Project", self.project)
project.flags.dont_sync_tasks = True
project.update_sales_costing()
project.save()
project_list.append(self.project)
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
check_credit_limit(self.customer, self.company)
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def update_status(self, status):
self.check_modified_date()
self.set_status(update=True, status=status)
self.update_reserved_qty()
self.notify_update()
clear_doctype_notifications(self)
def update_reserved_qty(self, so_item_rows=None):
"""update requested qty (before ordered_qty is updated)"""
item_wh_list = []
def _valid_for_reserve(item_code, warehouse):
if item_code and warehouse and [item_code, warehouse] not in item_wh_list \
and frappe.db.get_value("Item", item_code, "is_stock_item"):
item_wh_list.append([item_code, warehouse])
for d in self.get("items"):
if (not so_item_rows or d.name in so_item_rows) and not d.delivered_by_supplier:
if self.has_product_bundle(d.item_code):
for p in self.get("packed_items"):
if p.parent_detail_docname == d.name and p.parent_item == d.item_code:
_valid_for_reserve(p.item_code, p.warehouse)
else:
_valid_for_reserve(d.item_code, d.warehouse)
for item_code, warehouse in item_wh_list:
update_bin_qty(item_code, warehouse, {
"reserved_qty": get_reserved_qty(item_code, warehouse)
})
def on_update(self):
pass
def before_update_after_submit(self):
self.validate_po()
self.validate_drop_ship()
self.validate_supplier_after_submit()
def validate_supplier_after_submit(self):
"""Check that supplier is the same after submit if PO is already made"""
exc_list = []
for item in self.items:
if item.supplier:
supplier = frappe.db.get_value("Sales Order Item", {"parent": self.name, "item_code": item.item_code},
"supplier")
if item.ordered_qty > 0.0 and item.supplier != supplier:
exc_list.append(_("Row #{0}: Not allowed to change Supplier as Purchase Order already exists").format(item.idx))
if exc_list:
frappe.throw('\n'.join(exc_list))
def update_delivery_status(self):
"""Update delivery status from Purchase Order for drop shipping"""
tot_qty, delivered_qty = 0.0, 0.0
for item in self.items:
if item.delivered_by_supplier:
item_delivered_qty = frappe.db.sql("""select sum(qty)
from `tabPurchase Order Item` poi, `tabPurchase Order` po
where poi.sales_order_item = %s
and poi.item_code = %s
and poi.parent = po.name
and po.docstatus = 1
and po.status = 'Delivered'""", (item.name, item.item_code))
item_delivered_qty = item_delivered_qty[0][0] if item_delivered_qty else 0
item.db_set("delivered_qty", flt(item_delivered_qty), update_modified=False)
delivered_qty += item.delivered_qty
tot_qty += item.qty
self.db_set("per_delivered", flt(delivered_qty/tot_qty) * 100,
update_modified=False)
def set_indicator(self):
"""Set indicator for portal"""
if self.per_billed < 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Not Paid and Not Delivered")
elif self.per_billed == 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Paid and Not Delivered")
else:
self.indicator_color = "green"
self.indicator_title = _("Paid")
def get_production_order_items(self):
'''Returns items with BOM that do not already have a linked Production Order'''
items = []
for i in self.packed_items or self.items:
bom = frappe.get_all('BOM', dict(item=i.item_code, is_active=True),
order_by='is_default desc')
bom = bom[0].name if bom else None
stock_qty = i.qty if self.packed_items else i.stock_qty
items.append(dict(
item_code= i.item_code,
bom = bom,
warehouse = i.warehouse,
pending_qty= stock_qty - flt(frappe.db.sql('''select sum(qty) from `tabProduction Order`
where production_item=%s and sales_order=%s''', (i.item_code, self.name))[0][0])
))
return items
def on_recurring(self, reference_doc):
mcount = month_map[reference_doc.recurring_type]
self.set("delivery_date", get_next_date(reference_doc.delivery_date, mcount,
cint(reference_doc.repeat_on_day_of_month)))
for d in self.get("items"):
reference_delivery_date = frappe.db.get_value("Sales Order Item",
{"parent": reference_doc.name, "item_code": d.item_code, "idx": d.idx}, "delivery_date")
d.set("delivery_date",
get_next_date(reference_delivery_date, mcount, cint(reference_doc.repeat_on_day_of_month)))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Orders'),
})
return list_context
@frappe.whitelist()
def close_or_unclose_sales_orders(names, status):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
names = json.loads(names)
for name in names:
so = frappe.get_doc("Sales Order", name)
if so.docstatus == 1:
if status == "Closed":
if so.status not in ("Cancelled", "Closed") and (so.per_delivered < 100 or so.per_billed < 100):
so.update_status(status)
else:
if so.status == "Closed":
so.update_status('Draft')
frappe.local.message_log = []
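# Illustrative call (not part of the original module): `names` is a JSON-encoded list of
# Sales Order names and `status` is the target status, e.g. from a list-view bulk action.
# "SO-0001"/"SO-0002" are placeholder names.
#   close_or_unclose_sales_orders(json.dumps(["SO-0001", "SO-0002"]), "Closed")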
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
def update_item(source, target, source_parent):
target.project = source_parent.project
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Packed Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order",
"stock_uom": "uom"
},
"postprocess": update_item
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order",
"stock_uom": "uom",
"stock_qty": "qty"
},
"condition": lambda doc: not frappe.db.exists('Product Bundle', doc.item_code),
"postprocess": update_item
}
}, target_doc, postprocess)
return doc
@frappe.whitelist()
def make_project(source_name, target_doc=None):
def postprocess(source, doc):
doc.project_type = "External"
doc.project_name = source.name
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Project",
"validation": {
"docstatus": ["=", 1]
},
"field_map":{
"name" : "sales_order",
"base_grand_total" : "estimated_costing",
}
},
"Sales Order Item": {
"doctype": "Project Task",
"field_map": {
"description": "title",
},
}
}, target_doc, postprocess)
return doc
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
if source.po_no:
if target.po_no:
target_po_no = target.po_no.split(", ")
target_po_no.append(source.po_no)
target.po_no = ", ".join(list(set(target_po_no))) if len(target_po_no) > 1 else target_po_no[0]
else:
target.po_no = source.po_no
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
item = frappe.db.get_value("Item", target.item_code, ["item_group", "selling_cost_center"], as_dict=1)
target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center") \
or item.selling_cost_center \
or frappe.db.get_value("Item Group", item.item_group, "default_cost_center")
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "so_detail",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: abs(doc.delivered_qty) < abs(doc.qty) and doc.delivered_by_supplier!=1
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
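# Illustrative usage (not part of the original module): mapped-document helpers such as
# make_delivery_note() return an unsaved target document built from a *submitted* Sales
# Order; the caller is expected to insert/submit it. "SO-0001" is a placeholder name.
#   dn = make_delivery_note("SO-0001")
#   dn.insert()
#   dn.submit()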
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None, ignore_permissions=False):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Entries in Sales Invoice Advance
target.set_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.flags.ignore_permissions = True
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
# set company address
target.update(get_company_address(target.company))
if target.company_address:
target.update(get_fetch_values("Sales Invoice", 'company_address', target.company_address))
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty
item = frappe.db.get_value("Item", target.item_code, ["item_group", "selling_cost_center"], as_dict=1)
target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center") \
or item.selling_cost_center \
or frappe.db.get_value("Item Group", item.item_group, "default_cost_center")
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"field_map": {
"party_account_currency": "party_account_currency"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.qty and (doc.base_amount==0 or abs(doc.billed_amt) < abs(doc.amount))
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess, ignore_permissions=ignore_permissions)
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "sales_order"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Sales Order", filters)
data = frappe.db.sql("""
select
`tabSales Order`.name, `tabSales Order`.customer_name, `tabSales Order`.status,
`tabSales Order`.delivery_status, `tabSales Order`.billing_status,
`tabSales Order Item`.delivery_date
from
`tabSales Order`, `tabSales Order Item`
where `tabSales Order`.name = `tabSales Order Item`.parent
and (ifnull(`tabSales Order Item`.delivery_date, '0000-00-00')!= '0000-00-00') \
and (`tabSales Order Item`.delivery_date between %(start)s and %(end)s)
and `tabSales Order`.docstatus < 2
{conditions}
""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
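# Illustrative call (not part of the original module): start/end are date strings and
# filters is an optional JSON-encoded filter payload passed through to
# get_event_conditions(); the filter shape below is an assumption for illustration only.
#   get_events("2016-01-01", "2016-01-31", filters='{"status": "To Deliver and Bill"}')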
@frappe.whitelist()
def make_purchase_order_for_drop_shipment(source_name, for_supplier, target_doc=None):
def set_missing_values(source, target):
target.supplier = for_supplier
target.apply_discount_on = ""
target.additional_discount_percentage = 0.0
target.discount_amount = 0.0
default_price_list = frappe.get_value("Supplier", for_supplier, "default_price_list")
if default_price_list:
target.buying_price_list = default_price_list
if any( item.delivered_by_supplier==1 for item in source.items):
if source.shipping_address_name:
target.shipping_address = source.shipping_address_name
target.shipping_address_display = source.shipping_address
else:
target.shipping_address = source.customer_address
target.shipping_address_display = source.address_display
target.customer_contact_person = source.contact_person
target.customer_contact_display = source.contact_display
target.customer_contact_mobile = source.contact_mobile
target.customer_contact_email = source.contact_email
else:
target.customer = ""
target.customer_name = ""
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.schedule_date = source.delivery_date
target.qty = flt(source.qty) - flt(source.ordered_qty)
target.stock_qty = (flt(source.qty) - flt(source.ordered_qty)) * flt(source.conversion_factor)
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Purchase Order",
"field_no_map": [
"address_display",
"contact_display",
"contact_mobile",
"contact_email",
"contact_person"
],
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "sales_order_item"],
["parent", "sales_order"],
["stock_uom", "stock_uom"],
["uom", "uom"],
["conversion_factor", "conversion_factor"],
["delivery_date", "schedule_date"]
],
"field_no_map": [
"rate",
"price_list_rate"
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty and doc.supplier == for_supplier
}
}, target_doc, set_missing_values)
return doclist
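# Illustrative call (not part of the original module): builds a drop-ship Purchase Order
# for one supplier from a submitted Sales Order; the names below are placeholders.
#   po = make_purchase_order_for_drop_shipment("SO-0001", for_supplier="Example Supplier")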
@frappe.whitelist()
def get_supplier(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_type"]
else:
fields = ["name", "supplier_name", "supplier_type"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s)
and name in (select supplier from `tabSales Order Item` where parent = %(parent)s)
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': frappe.db.escape(searchfield)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'parent': filters.get('parent')
})
@frappe.whitelist()
def make_production_orders(items, sales_order, company, project=None):
'''Make Production Orders against the given Sales Order for the given `items`'''
items = json.loads(items).get('items')
out = []
for i in items:
production_order = frappe.get_doc(dict(
doctype='Production Order',
production_item=i['item_code'],
bom_no=i['bom'],
qty=i['pending_qty'],
company=company,
sales_order=sales_order,
project=project,
fg_warehouse=i['warehouse']
)).insert()
production_order.set_production_order_operations()
production_order.save()
out.append(production_order)
return [p.name for p in out]
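# Illustrative payload (not part of the original module): `items` is a JSON string whose
# "items" key holds dicts shaped like get_production_order_items() output; the values
# below are placeholders.
#   items = json.dumps({"items": [{"item_code": "ITEM-001", "bom": "BOM-ITEM-001-001",
#                                  "pending_qty": 5, "warehouse": "Stores - C"}]})
#   make_production_orders(items, "SO-0001", "Example Company", project=None)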
@frappe.whitelist()
def update_status(status, name):
so = frappe.get_doc("Sales Order", name)
so.update_status(status)
|
|
# -*- coding: utf-8 -*-
import re
from django.conf import settings
from django.core import mail
from django.http import HttpRequest
from django.http import HttpResponse
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import CommonMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.test import TestCase
class CommonMiddlewareTest(TestCase):
def setUp(self):
self.append_slash = settings.APPEND_SLASH
self.prepend_www = settings.PREPEND_WWW
self.ignorable_404_urls = settings.IGNORABLE_404_URLS
self.send_broken_email_links = settings.SEND_BROKEN_LINK_EMAILS
def tearDown(self):
settings.APPEND_SLASH = self.append_slash
settings.PREPEND_WWW = self.prepend_www
settings.IGNORABLE_404_URLS = self.ignorable_404_urls
settings.SEND_BROKEN_LINK_EMAILS = self.send_broken_email_links
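# Builds a bare HttpRequest for "/middleware/<path>" so each middleware can be
# exercised directly, without going through the full test client.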
def _get_request(self, path):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.path = request.path_info = "/middleware/%s" % path
return request
def test_append_slash_have_slash(self):
"""
Tests that URLs with slashes go unmolested.
"""
settings.APPEND_SLASH = True
request = self._get_request('slash/')
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_slashless_resource(self):
"""
Tests that matches to explicit slashless URLs go unmolested.
"""
settings.APPEND_SLASH = True
request = self._get_request('noslash')
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_slashless_unknown(self):
"""
Tests that APPEND_SLASH doesn't redirect to unknown resources.
"""
settings.APPEND_SLASH = True
request = self._get_request('unknown')
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_redirect(self):
"""
Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
"""
settings.APPEND_SLASH = True
request = self._get_request('slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/middleware/slash/')
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
settings.APPEND_SLASH = True
settings.DEBUG = True
request = self._get_request('slash')
request.method = 'POST'
self.assertRaises(
RuntimeError,
CommonMiddleware().process_request,
request)
try:
CommonMiddleware().process_request(request)
except RuntimeError as e:
self.assertTrue('end in a slash' in str(e))
settings.DEBUG = False
def test_append_slash_disabled(self):
"""
Tests disabling append slash functionality.
"""
settings.APPEND_SLASH = False
request = self._get_request('slash')
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_quoted(self):
"""
Tests that URLs which require quoting are redirected to their slash
version ok.
"""
settings.APPEND_SLASH = True
request = self._get_request('needsquoting#')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r['Location'],
'http://testserver/middleware/needsquoting%23/')
def test_prepend_www(self):
settings.PREPEND_WWW = True
settings.APPEND_SLASH = False
request = self._get_request('path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r['Location'],
'http://www.testserver/middleware/path/')
def test_prepend_www_append_slash_have_slash(self):
settings.PREPEND_WWW = True
settings.APPEND_SLASH = True
request = self._get_request('slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'],
'http://www.testserver/middleware/slash/')
def test_prepend_www_append_slash_slashless(self):
settings.PREPEND_WWW = True
settings.APPEND_SLASH = True
request = self._get_request('slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'],
'http://www.testserver/middleware/slash/')
# The following tests examine expected behavior given a custom urlconf that
# overrides the default one through the request object.
def test_append_slash_have_slash_custom_urlconf(self):
"""
Tests that URLs with slashes go unmolested.
"""
settings.APPEND_SLASH = True
request = self._get_request('customurlconf/slash/')
request.urlconf = 'regressiontests.middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Tests that matches to explicit slashless URLs go unmolested.
"""
settings.APPEND_SLASH = True
request = self._get_request('customurlconf/noslash')
request.urlconf = 'regressiontests.middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
Tests that APPEND_SLASH doesn't redirect to unknown resources.
"""
settings.APPEND_SLASH = True
request = self._get_request('customurlconf/unknown')
request.urlconf = 'regressiontests.middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_redirect_custom_urlconf(self):
"""
Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
"""
settings.APPEND_SLASH = True
request = self._get_request('customurlconf/slash')
request.urlconf = 'regressiontests.middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertFalse(r is None,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/middleware/customurlconf/slash/')
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
Tests that while in debug mode, an exception is raised with a warning
when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
settings.APPEND_SLASH = True
settings.DEBUG = True
request = self._get_request('customurlconf/slash')
request.urlconf = 'regressiontests.middleware.extra_urls'
request.method = 'POST'
self.assertRaises(
RuntimeError,
CommonMiddleware().process_request,
request)
try:
CommonMiddleware().process_request(request)
except RuntimeError as e:
self.assertTrue('end in a slash' in str(e))
settings.DEBUG = False
def test_append_slash_disabled_custom_urlconf(self):
"""
Tests disabling append slash functionality.
"""
settings.APPEND_SLASH = False
request = self._get_request('customurlconf/slash')
request.urlconf = 'regressiontests.middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
def test_append_slash_quoted_custom_urlconf(self):
"""
Tests that URLs which require quoting are redirected to their slash
version ok.
"""
settings.APPEND_SLASH = True
request = self._get_request('customurlconf/needsquoting#')
request.urlconf = 'regressiontests.middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertFalse(r is None,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(
r['Location'],
'http://testserver/middleware/customurlconf/needsquoting%23/')
def test_prepend_www_custom_urlconf(self):
settings.PREPEND_WWW = True
settings.APPEND_SLASH = False
request = self._get_request('customurlconf/path/')
request.urlconf = 'regressiontests.middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r['Location'],
'http://www.testserver/middleware/customurlconf/path/')
def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
settings.PREPEND_WWW = True
settings.APPEND_SLASH = True
request = self._get_request('customurlconf/slash/')
request.urlconf = 'regressiontests.middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'],
'http://www.testserver/middleware/customurlconf/slash/')
def test_prepend_www_append_slash_slashless_custom_urlconf(self):
settings.PREPEND_WWW = True
settings.APPEND_SLASH = True
request = self._get_request('customurlconf/slash')
request.urlconf = 'regressiontests.middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'],
'http://www.testserver/middleware/customurlconf/slash/')
# Tests for the 404 error reporting via email
def test_404_error_reporting(self):
settings.IGNORABLE_404_URLS = (re.compile(r'foo'),)
settings.SEND_BROKEN_LINK_EMAILS = True
request = self._get_request('regular_url/that/does/not/exist')
request.META['HTTP_REFERER'] = '/another/url/'
response = self.client.get(request.path)
CommonMiddleware().process_response(request, response)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Broken', mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
settings.IGNORABLE_404_URLS = (re.compile(r'foo'),)
settings.SEND_BROKEN_LINK_EMAILS = True
request = self._get_request('regular_url/that/does/not/exist')
response = self.client.get(request.path)
CommonMiddleware().process_response(request, response)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
settings.IGNORABLE_404_URLS = (re.compile(r'foo'),)
settings.SEND_BROKEN_LINK_EMAILS = True
request = self._get_request('foo_url/that/does/not/exist/either')
request.META['HTTP_REFERER'] = '/another/url/'
response = self.client.get(request.path)
CommonMiddleware().process_response(request, response)
self.assertEqual(len(mail.outbox), 0)
class ConditionalGetMiddlewareTest(TestCase):
urls = 'regressiontests.middleware.cond_get_urls'
def setUp(self):
self.req = HttpRequest()
self.req.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
self.req.path = self.req.path_info = "/"
self.resp = self.client.get(self.req.path)
# Tests for the Date header
def test_date_header_added(self):
self.assertFalse('Date' in self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertTrue('Date' in self.resp)
# Tests for the Content-Length header
def test_content_length_header_added(self):
content_length = len(self.resp.content)
self.assertFalse('Content-Length' in self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertTrue('Content-Length' in self.resp)
self.assertEqual(int(self.resp['Content-Length']), content_length)
def test_content_length_header_not_changed(self):
bad_content_length = len(self.resp.content) + 10
self.resp['Content-Length'] = bad_content_length
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(int(self.resp['Content-Length']), bad_content_length)
# Tests for the ETag header
def test_if_none_match_and_no_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_none_match_and_etag(self):
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_same_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_different_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
# Tests for the Last-Modified header
def test_if_modified_since_and_no_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_modified_since_and_last_modified(self):
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_same_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_past(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_future(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
class XFrameOptionsMiddlewareTest(TestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def setUp(self):
self.x_frame_options = settings.X_FRAME_OPTIONS
def tearDown(self):
settings.X_FRAME_OPTIONS = self.x_frame_options
def test_same_origin(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to SAMEORIGIN to
have the middleware use that value for the HTTP header.
"""
settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
settings.X_FRAME_OPTIONS = 'sameorigin'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_deny(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to DENY to
have the middleware use that value for the HTTP header.
"""
settings.X_FRAME_OPTIONS = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
settings.X_FRAME_OPTIONS = 'deny'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_defaults_sameorigin(self):
"""
Tests that if the X_FRAME_OPTIONS setting is not set then it defaults
to SAMEORIGIN.
"""
del settings.X_FRAME_OPTIONS
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_dont_set_if_set(self):
"""
Tests that if the X-Frame-Options header is already set then the
middleware does not attempt to override it.
"""
settings.X_FRAME_OPTIONS = 'DENY'
response = HttpResponse()
response['X-Frame-Options'] = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
response = HttpResponse()
response['X-Frame-Options'] = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_response_exempt(self):
"""
Tests that if the response has a xframe_options_exempt attribute set
to False then it still sets the header, but if it's set to True then
it does not.
"""
settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
response = HttpResponse()
response.xframe_options_exempt = False
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
response = HttpResponse()
response.xframe_options_exempt = True
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r.get('X-Frame-Options', None), None)
def test_is_extendable(self):
"""
Tests that the XFrameOptionsMiddleware method that determines the
X-Frame-Options header value can be overridden based on something in
the request or response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, 'sameorigin', False):
return 'SAMEORIGIN'
if getattr(response, 'sameorigin', False):
return 'SAMEORIGIN'
return 'DENY'
settings.X_FRAME_OPTIONS = 'DENY'
response = HttpResponse()
response.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(request,
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
|
|
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_drive_firmware
version_added: "2.9"
short_description: NetApp E-Series manage drive firmware
description:
- Ensure drive firmware version is activated on specified drive model.
author:
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.eseries
options:
firmware:
description:
- list of drive firmware file paths.
- NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
type: list
required: True
wait_for_completion:
description:
- This flag will cause the module to wait for any upgrade actions to complete.
type: bool
default: false
ignore_inaccessible_drives:
description:
- This flag will determine whether drive firmware upgrade should fail if any affected drives are inaccessible.
type: bool
default: false
upgrade_drives_online:
description:
- This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
- When I(upgrade_drives_online==False), stop all I/O before running the task.
type: bool
default: true
"""
EXAMPLES = """
- name: Ensure correct firmware versions
netapp_e_drive_firmware:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
firmware: "path/to/drive_firmware"
wait_for_completion: true
ignore_inaccessible_drives: false
"""
RETURN = """
msg:
description: Whether any drive firmware was upgraded and whether it is in progress.
type: str
returned: always
sample:
{ changed: True, upgrade_in_process: True }
"""
import os
import re
from time import sleep
from ansible.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata
from ansible.module_utils._text import to_native, to_text, to_bytes
class NetAppESeriesDriveFirmware(NetAppESeriesModule):
WAIT_TIMEOUT_SEC = 60 * 15
def __init__(self):
ansible_options = dict(
firmware=dict(type="list", required=True),
wait_for_completion=dict(type="bool", default=False),
ignore_inaccessible_drives=dict(type="bool", default=False),
upgrade_drives_online=dict(type="bool", default=True))
super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
web_services_version="02.00.0000.0000",
supports_check_mode=True)
args = self.module.params
self.firmware_list = args["firmware"]
self.wait_for_completion = args["wait_for_completion"]
self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
self.upgrade_drives_online = args["upgrade_drives_online"]
self.upgrade_list_cache = None
self.upgrade_required_cache = None
self.upgrade_in_progress = False
self.drive_info_cache = None
def upload_firmware(self):
"""Ensure firmware has been upload prior to uploaded."""
for firmware in self.firmware_list:
firmware_name = os.path.basename(firmware)
files = [("file", firmware_name, firmware)]
headers, data = create_multipart_formdata(files)
try:
rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
except Exception as error:
self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))
def upgrade_list(self):
"""Determine whether firmware is compatible with the specified drives."""
if self.upgrade_list_cache is None:
self.upgrade_list_cache = list()
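# Each cache entry has the shape {"filename": <uploaded firmware file>,
# "driveRefList": [<drive reference ids that need this firmware>]}.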
try:
rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)
# Create upgrade list; this ensures only the uploaded firmware is applied
for firmware in self.firmware_list:
filename = os.path.basename(firmware)
for uploaded_firmware in response["compatibilities"]:
if uploaded_firmware["filename"] == filename:
# Determine whether upgrade is required
drive_reference_list = []
for drive in uploaded_firmware["compatibleDrives"]:
try:
rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))
# Add drive references that are supported and differ from current firmware
if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):
if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]):
drive_reference_list.append(drive["driveRef"])
if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
% (self.ssid, drive["driveRef"]))
except Exception as error:
self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
% (self.ssid, drive["driveRef"], to_native(error)))
if drive_reference_list:
self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])
except Exception as error:
self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
return self.upgrade_list_cache
def wait_for_upgrade_completion(self):
"""Wait for drive firmware upgrade to complete."""
drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
last_status = None
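# Poll the drive firmware state every 5 seconds until every targeted drive
# reports "okay" (the for/else below then clears upgrade_in_progress) or the
# WAIT_TIMEOUT_SEC window elapses.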
for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
try:
rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)
# Check drive status
for status in response["driveStatus"]:
last_status = status
if status["driveRef"] in drive_references:
if status["status"] == "okay":
continue
elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
break
else:
self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
% (self.ssid, status["driveRef"], status["status"]))
else:
self.upgrade_in_progress = False
break
except Exception as error:
self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
sleep(5)
else:
self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))
def upgrade(self):
"""Apply firmware to applicable drives."""
try:
rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
% (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
self.upgrade_in_progress = True
except Exception as error:
self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
if self.wait_for_completion:
self.wait_for_upgrade_completion()
def apply(self):
"""Apply firmware policy has been enforced on E-Series storage system."""
self.upload_firmware()
if self.upgrade_list() and not self.module.check_mode:
self.upgrade()
self.module.exit_json(changed=True if self.upgrade_list() else False,
upgrade_in_process=self.upgrade_in_progress)
def main():
drive_firmware = NetAppESeriesDriveFirmware()
drive_firmware.apply()
if __name__ == '__main__':
main()
|
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
"""given a database ID and GPLID, get the files"""
from Betsy import module_utils
from genomicode import filelib
#in_data = antecedents
GSEID = user_options['GSEID']
GPLID = None
if 'GPLID' in user_options:
GPLID = user_options['GPLID']
assert GSEID.startswith('GSE'), 'GSEID %s is not correct' % GSEID
if not GPLID:
download_geo_with_GSEID(GSEID, outfile)
else:
assert GPLID.startswith('GPL'), 'GPLID %s is not correct' % GPLID
download_geo_with_GPLID(GSEID, GPLID, outfile)
assert filelib.exists_nz(outfile), (
'the output file %s for download_geo_dataset_GPL fails' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(user_options['GSEID'])
filename = 'expression_files_' + original_file
return filename
def clean_cel_filename(cel_file):
"""clean the cel_file name"""
import string
if cel_file.upper().endswith('CEL'):
cel_file = cel_file[0:-4]
punc = string.punctuation
indicate = [x in cel_file for x in punc]
if True in indicate:
punct_index = []
start = -1
while start < len(indicate) - 1:
try:
start = indicate.index(True, start + 1)
punct_index.append(start)
except (SystemError, MemoryError, KeyError), x:
raise
except Exception:
break
break_point = [cel_file.index(punc[x]) for x in punct_index]
return cel_file[0:min(break_point)] + '.CEL'
else:
return cel_file + '.CEL'
else:
return cel_file
def download_geo_with_GSEID(GSEID, outfile):
import os
import shutil
from Betsy import module_utils
from genomicode import affyio
import gzip
#file_folder = os.path.join(".", GSEID)
file_folder = module_utils.download_dataset(GSEID)
#get chip name
cel_files = os.listdir(file_folder)
unknown_folder = os.path.join(".", 'unknown_folder')
chip_name_list = []
for cel_file in cel_files:
fileloc = os.path.join(file_folder, cel_file)
if fileloc.endswith('.gz'):
newcelfname = clean_cel_filename(os.path.splitext(cel_file)[0])
#unzip the gz data
unzipfile = os.path.splitext(fileloc)[0]
fileObj = gzip.GzipFile(fileloc, 'rb')
fileObjOut = file(unzipfile, 'wb')
while 1:
line = fileObj.readline()
if line == '':
break
fileObjOut.write(line)
fileObj.close()
fileObjOut.close()
assert os.path.exists(unzipfile), 'unzipping %s failed' % unzipfile
else:
unzipfile = fileloc
newcelfname = clean_cel_filename(cel_file)
#get chip_name and copy into different folder
chip_name = None
try:
chip_name = affyio.extract_chip_name(unzipfile)
except (SystemError, MemoryError, KeyError), x:
raise
except Exception, x:
if not os.path.exists(unknown_folder):
os.mkdir(unknown_folder)
shutil.copyfile(unzipfile,
os.path.join(unknown_folder, newcelfname))
if chip_name is not None:
if chip_name not in chip_name_list:
chip_name_list.append(chip_name)
os.mkdir(os.path.join(".", chip_name))
chip_folder = os.path.join(".", chip_name)
shutil.copyfile(unzipfile, os.path.join(chip_folder, newcelfname))
if fileloc.endswith('.gz'):
os.remove(unzipfile)
#determine the one to preprocess
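# Heuristic (as implemented below): prefer the chip folder that os.path.getsize
# reports as largest; if several tie, prefer human (HG-*) platforms, choosing
# HG-U133_Plus_2 over HG-U133A over HG-U95A.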
if len(chip_name_list) == 1:
out_filename = os.path.join(".", chip_name_list[0])
elif len(chip_name_list) > 1:
size_list = [os.path.getsize(os.path.join(".", x))
for x in chip_name_list]
#make sure no two folders share the same maximum size
maxsize = max(size_list)
new_size_list = size_list[:]
new_size_list.remove(maxsize)
#only one folder has the maximum size
if maxsize > max(new_size_list):
out_chip_name = chip_name_list[size_list.index(maxsize)]
out_filename = os.path.join(".", out_chip_name)
#multiple folders share the same maximum size
elif maxsize == max(new_size_list):
start = -1
folder_index = []
while start < len(size_list) - 1:
try:
start = size_list.index(maxsize, start + 1)
folder_index.append(start)
except (SystemError, MemoryError, KeyError), x:
raise
except Exception:
break
folder_names = [chip_name_list[x] for x in folder_index]
Is_HG = [x.startswith('HG') for x in folder_names]
a = []
for i in Is_HG:
if i:
a.append(1)
else:
a.append(0)
#choose the human platform
if sum(a) == 1:
out_chip_name = folder_names[a.index(1)]
out_filename = os.path.join(".", out_chip_name)
#multiple human platforms
elif sum(a) > 1:
if 'HG-U133_Plus_2' in folder_names:
out_filename = os.path.join(".", 'HG-U133_Plus_2')
elif 'HG-U133A' in folder_names:
out_filename = os.path.join(".", 'HG-U133A')
elif 'HG-U95A' in folder_names:
out_filename = os.path.join(".", 'HG-U95A')
else:
raise ValueError('does not recognize the platform')
os.rename(out_filename, outfile)
matrix_files = get_seriesmatrix_file(GSEID)
for matrix_file in matrix_files:
newmatrix_filename = os.path.split(matrix_file)[-1]
shutil.copyfile(matrix_file, os.path.join(outfile, newmatrix_filename))
def download_geo_with_GPLID(GSEID, GPLID, outfile):
import os
import shutil
from Betsy import module_utils
GSEID_path = module_utils.download_dataset(GSEID)
platform_txtfiles = get_seriesmatrix_file_for_GPL(GSEID, GPLID)
#get the cel file name for the GPL platform
if not os.path.exists(outfile):
os.mkdir(outfile)
if len(platform_txtfiles) > 0:
for platform_txtfile in platform_txtfiles:
cel_list = open(platform_txtfile, 'r').readlines()
cel_line = None
for linecontent in cel_list:
if linecontent.startswith('!Sample_geo_accession'):
cel_line = linecontent
break
assert cel_line, (
'the file %s does not contain "!Sample_geo_accession"' %
platform_txtfile)
filecontent = os.listdir(GSEID_path)
cel_names = []
for x in cel_line.split()[1:]:
x = x.strip()
assert x.startswith('\"') and x.endswith('\"')
x = x[1:-1]
cel_names.append(x)
#check whether any GSM ID cannot be found in the data set
file_name_string = ' '.join(filecontent)
for cel_name in cel_names:
if cel_name not in file_name_string:
raise ValueError(
'The GSM ID %s cannot be found in the data set' % cel_name)
else:
for cel_file in filecontent:
if cel_file.upper().startswith(cel_name.upper()):
if cel_file.lower().endswith('gz'):
cel_file = clean_cel_filename(
os.path.splitext(cel_file)[0]) + '.gz'
outfilename = os.path.join(outfile, cel_file)
shutil.copyfile(os.path.join(GSEID_path, cel_file),
outfilename)
else:
os.rename(GSEID_path, outfile)
for matrix_file in platform_txtfiles:
newmatrix_filename = os.path.split(matrix_file)[-1]
shutil.copyfile(matrix_file, os.path.join(outfile, newmatrix_filename))
def get_seriesmatrix_file_for_GPL(GSEID, GPLID):
'download series matrix and unzip'
import os
from ftplib import FTP, error_perm
#from genomicode import Matrix
try:
ftp = FTP('ftp.ncbi.nih.gov')
ftp.login()
except Exception, e:
raise ValueError(e)
try:
ftp.cwd('pub/geo/DATA/SeriesMatrix/' + GSEID)
except error_perm, x:
if str(x).find('No such file') >= 0:
raise AssertionError('cannot find the series matrix directory for %s' % GSEID)
entry = []
ftp.retrlines('NLST', entry.append)
platform_txtfiles = []
for platform_filename in entry:
if GPLID in platform_filename:
f = open(platform_filename, 'wb')
ftp.retrbinary('RETR ' + platform_filename, f.write)
f.close()
platform_txtfile = platform_filename[:-3]
assert not os.path.exists(platform_txtfile), (
'the seriesmatrix file %s already exists' % platform_txtfile
)
#unzip the gz data
import gzip
fileObj = gzip.GzipFile(platform_filename, 'rb')
fileObjOut = file(platform_txtfile, 'wb')
while 1:
line = fileObj.readline()
if line == '':
break
fileObjOut.write(line)
fileObj.close()
fileObjOut.close()
os.remove(platform_filename)
assert os.path.exists(platform_txtfile), (
'the unzip %s in download_geo_dataset_GPL fails' % platform_txtfile
)
platform_txtfiles.append(os.path.realpath(platform_txtfile))
ftp.close()
return platform_txtfiles
def get_seriesmatrix_file(GSEID):
'download series matrix and unzip'
import os
from ftplib import FTP
import gzip
try:
ftp = FTP('ftp.ncbi.nih.gov')
ftp.login()
except Exception, e:
raise ValueError(e)
ftp.cwd('pub/geo/DATA/SeriesMatrix/' + GSEID)
#try:
# ftp.cwd('pub/geo/DATA/SeriesMatrix/' + GSEID)
#except FTP.error_perm, x:
# raise
# #if str(x).find('No such file') >= 0:
# # raise AssertionError('cannot find the %s' % path)
entry = []
ftp.retrlines('NLST', entry.append)
platform_txtfiles = []
for platform_filename in entry:
f = open(platform_filename, 'wb')
ftp.retrbinary('RETR ' + platform_filename, f.write)
f.close()
platform_txtfile = platform_filename[:-3]
assert not os.path.exists(platform_txtfile), (
'the seriesmatrix file %s already exists' % platform_txtfile
)
#unzip the gz data
fileObj = gzip.GzipFile(platform_filename, 'rb')
fileObjOut = file(platform_txtfile, 'wb')
while 1:
line = fileObj.readline()
if line == '':
break
fileObjOut.write(line)
fileObj.close()
fileObjOut.close()
os.remove(platform_filename)
assert os.path.exists(platform_txtfile), (
'the unzip %s in download_geo_dataset_GPL fails' % platform_txtfile
)
platform_txtfiles.append(os.path.realpath(platform_txtfile))
ftp.close()
return platform_txtfiles
|
|
# util.py
# Copyright (C) 2006, 2007, 2008 Michael Bayer [email protected]
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
try:
Set = set
except NameError:
import sets
Set = sets.Set
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import codecs, re, weakref, os, time
try:
import threading
import thread
except ImportError:
import dummy_threading as threading
import dummy_thread as thread
if sys.platform.startswith('win') or sys.platform.startswith('java'):
time_func = time.clock
else:
time_func = time.time
def verify_directory(dir):
"""create and/or verify a filesystem directory."""
tries = 0
while not os.path.exists(dir):
try:
tries += 1
os.makedirs(dir, 0750)
except:
if tries > 5:
raise
class SetLikeDict(dict):
"""a dictionary that has some setlike methods on it"""
def union(self, other):
"""produce a 'union' of this dict and another (at the key level).
values in the second dict take precedence over that of the first"""
x = SetLikeDict(**self)
x.update(other)
return x
class FastEncodingBuffer(object):
"""a very rudimentary buffer that is faster than StringIO, but doesnt crash on unicode data like cStringIO."""
def __init__(self, encoding=None, errors='strict', unicode=False):
self.data = []
self.encoding = encoding
if unicode:
self.delim = u''
else:
self.delim = ''
self.unicode = unicode
self.errors = errors
self.write = self.data.append
def getvalue(self):
if self.encoding:
return self.delim.join(self.data).encode(self.encoding, self.errors)
else:
return self.delim.join(self.data)
class LRUCache(dict):
"""A dictionary-like object that stores a limited number of items, discarding
lesser used items periodically.
This is a rewrite of LRUCache from Myghty that uses a periodic, timestamp-based
paradigm so that synchronization is not really needed. The size management
is inexact.
"""
class _Item(object):
def __init__(self, key, value):
self.key = key
self.value = value
self.timestamp = time_func()
def __repr__(self):
return repr(self.value)
def __init__(self, capacity, threshold=.5):
self.capacity = capacity
self.threshold = threshold
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item.timestamp = time_func()
return item.value
def values(self):
return [i.value for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = self._Item(key, value)
dict.__setitem__(self, key, item)
else:
item.value = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
bytime = dict.values(self)
bytime.sort(lambda a, b: cmp(b.timestamp, a.timestamp))
for item in bytime[self.capacity:]:
try:
del self[item.key]
except KeyError:
# if we couldn't find a key, most likely some other thread broke in
# on us; loop around and try again
break
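# Usage sketch (illustrative only; the capacity, threshold and keys below are
# hypothetical). Eviction only kicks in once the size exceeds
# capacity * (1 + threshold), so brief overshoot is expected:
#
#     cache = LRUCache(capacity=2, threshold=.5)
#     cache['a'] = 1; cache['b'] = 2; cache['c'] = 3   # len == 3, still within capacity * (1 + threshold)
#     cache['d'] = 4                                   # threshold exceeded; only the most recently
#     sorted(cache.keys())                             # touched items survive -> ['c', 'd']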
# Regexp to match python magic encoding line
_PYTHON_MAGIC_COMMENT_re = re.compile(
r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)',
re.VERBOSE)
def parse_encoding(fp):
"""Deduce the encoding of a source file from magic comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object.
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = _PYTHON_MAGIC_COMMENT_re.match(line1)
if not m:
try:
import parser
parser.suite(line1)
except (ImportError, SyntaxError):
# Either it's a real syntax error, in which case the source
# is not valid python source, or line2 is a continuation of
# line1, in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = _PYTHON_MAGIC_COMMENT_re.match(line2)
if has_bom:
if m:
raise SyntaxError, \
"python refuses to compile code with both a UTF8" \
" byte-order-mark and a magic encoding comment"
return 'utf_8'
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos)
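# Usage sketch (the file name is hypothetical): because the file position is
# restored in the finally block above, the source can still be read afterwards.
#
#     fp = open('some_template.py', 'rb')
#     encoding = parse_encoding(fp) or 'ascii'
#     source = fp.read().decode(encoding)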
def sorted_dict_repr(d):
"""repr() a dictionary with the keys in order.
Used by the lexer unit test to compare parse trees based on strings.
"""
keys = d.keys()
keys.sort()
return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}"
def restore__ast(_ast):
"""Attempt to restore the required classes to the _ast module if it
appears to be missing them
"""
if hasattr(_ast, 'AST'):
return
_ast.PyCF_ONLY_AST = 2 << 9
m = compile("""\
def foo(): pass
class Bar(object): pass
if False: pass
baz = 'mako'
1 + 2 - 3 * 4 / 5
6 // 7 % 8 << 9 >> 10
11 & 12 ^ 13 | 14
15 and 16 or 17
-baz + (not +18) - ~17
baz and 'foo' or 'bar'
(mako is baz == baz) is not baz != mako
mako > baz < mako >= baz <= mako
mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST)
_ast.Module = type(m)
for cls in _ast.Module.__mro__:
if cls.__name__ == 'mod':
_ast.mod = cls
elif cls.__name__ == 'AST':
_ast.AST = cls
_ast.FunctionDef = type(m.body[0])
_ast.ClassDef = type(m.body[1])
_ast.If = type(m.body[2])
_ast.Name = type(m.body[3].targets[0])
_ast.Store = type(m.body[3].targets[0].ctx)
_ast.Str = type(m.body[3].value)
_ast.Sub = type(m.body[4].value.op)
_ast.Add = type(m.body[4].value.left.op)
_ast.Div = type(m.body[4].value.right.op)
_ast.Mult = type(m.body[4].value.right.left.op)
_ast.RShift = type(m.body[5].value.op)
_ast.LShift = type(m.body[5].value.left.op)
_ast.Mod = type(m.body[5].value.left.left.op)
_ast.FloorDiv = type(m.body[5].value.left.left.left.op)
_ast.BitOr = type(m.body[6].value.op)
_ast.BitXor = type(m.body[6].value.left.op)
_ast.BitAnd = type(m.body[6].value.left.left.op)
_ast.Or = type(m.body[7].value.op)
_ast.And = type(m.body[7].value.values[0].op)
_ast.Invert = type(m.body[8].value.right.op)
_ast.Not = type(m.body[8].value.left.right.op)
_ast.UAdd = type(m.body[8].value.left.right.operand.op)
_ast.USub = type(m.body[8].value.left.left.op)
_ast.Or = type(m.body[9].value.op)
_ast.And = type(m.body[9].value.values[0].op)
_ast.IsNot = type(m.body[10].value.ops[0])
_ast.NotEq = type(m.body[10].value.ops[1])
_ast.Is = type(m.body[10].value.left.ops[0])
_ast.Eq = type(m.body[10].value.left.ops[1])
_ast.Gt = type(m.body[11].value.ops[0])
_ast.Lt = type(m.body[11].value.ops[1])
_ast.GtE = type(m.body[11].value.ops[2])
_ast.LtE = type(m.body[11].value.ops[3])
_ast.In = type(m.body[12].value.ops[0])
_ast.NotIn = type(m.body[12].value.ops[1])
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
class TestUpdateLossScalingOp(OpTest):
def setUp(self):
self.op_type = "update_loss_scaling"
self.init()
found_inf = np.array([False], dtype=np.bool)
x = np.random.random((1024, 1024)).astype(self.dtype)
self.inputs = {
'X': [('x0', x)],
'FoundInfinite': found_inf,
'PrevLossScaling': self.prev_loss_scaling,
'InGoodSteps': self.num_good_steps,
'InBadSteps': self.num_bad_steps
}
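# found_inf is False and num_good_steps (999) reaches incr_every_n_steps after
# this update, so the loss scaling is expected to grow by incr_ratio and both
# step counters reset to zero.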
self.outputs = {
'Out': [('out0', x)],
'LossScaling': self.prev_loss_scaling * self.incr_ratio,
'OutGoodSteps': self.zero_steps,
'OutBadSteps': self.zero_steps
}
def init(self):
self.incr_ratio = 2.0
self.decr_ratio = 0.8
self.dtype = np.float32
self.prev_loss_scaling = np.array([2048]).astype(self.dtype)
self.num_good_steps = np.array([999], dtype=np.int32)
self.num_bad_steps = np.array([1], dtype=np.int32)
self.zero_steps = np.array([0], dtype=np.int32)
self.attrs = {
'incr_every_n_steps': 1000,
'decr_every_n_nan_or_inf': 2,
'incr_ratio': self.incr_ratio,
'decr_ratio': self.decr_ratio,
}
def test_check_output(self):
self.check_output(no_check_set=['Out'])
class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
def setUp(self):
self.op_type = "update_loss_scaling"
self.init()
found_inf = np.array([True], dtype=np.bool)
x = np.random.random((1024, 1024)).astype(self.dtype)
i = np.random.randint(0, 1024, 1)
j = np.random.randint(0, 1024, 1)
x[i[0]][j[0]] = np.inf
self.inputs = {
'X': [('x0', x)],
'FoundInfinite': found_inf,
'PrevLossScaling': self.prev_loss_scaling,
'InGoodSteps': self.num_good_steps,
'InBadSteps': self.num_bad_steps
}
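# found_inf is True, so the op is expected to zero the outputs; num_bad_steps
# (1) reaches decr_every_n_nan_or_inf after this update, so the loss scaling
# shrinks by decr_ratio and both step counters reset to zero.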
self.outputs = {
'Out': [('out0', np.zeros_like(x))],
'LossScaling': self.prev_loss_scaling * self.decr_ratio,
'OutGoodSteps': self.zero_steps,
'OutBadSteps': self.zero_steps
}
def test_check_output(self):
self.check_output()
class TestUpdateLossScalingLayer(unittest.TestCase):
def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()):
a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
b = fluid.data(name="b", shape=[512, 128], dtype='float32')
x = [a, b]
found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool')
prev_loss_scaling = fluid.data(
name="prev_loss_scaling", shape=[1], dtype='float32')
num_good_steps = fluid.data(
name="num_good_steps", shape=[1], dtype='int32')
num_bad_steps = fluid.data(
name="num_bad_steps", shape=[1], dtype='int32')
a_v = np.random.random([1024, 1024]).astype('float32')
b_v = np.random.random([512, 128]).astype('float32')
found_inf_v = np.array([False]).astype('bool')
prev_loss_scaling_v = np.array([2048]).astype('float32')
num_good_steps_v = np.array([999], dtype=np.int32)
num_bad_steps_v = np.array([1], dtype=np.int32)
incr_every_n_steps = 1000
decr_every_n_nan_or_inf = 2
incr_ratio = 2
decr_ratio = 0.8
result = amp_nn.update_loss_scaling(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
name="update_loss_scaling")
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(scope):
exe.run(fluid.default_startup_program())
result_v = exe.run(feed={
'a': a_v,
'b': b_v,
'found_inf': found_inf_v,
'prev_loss_scaling': prev_loss_scaling_v,
'num_good_steps': num_good_steps_v,
'num_bad_steps': num_bad_steps_v
},
fetch_list=[
result, x, found_inf, prev_loss_scaling,
num_good_steps, num_bad_steps
])
assert np.array_equal(result_v[0], a_v)
assert np.array_equal(result_v[1], b_v)
assert np.array_equal(result_v[0], result_v[2])
assert np.array_equal(result_v[1], result_v[3])
assert np.array_equal(result_v[4], found_inf_v)
assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio)
assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()):
a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
b = fluid.data(name="b", shape=[512, 128], dtype='float32')
x = [a, b]
found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool')
prev_loss_scaling = fluid.data(
name="prev_loss_scaling", shape=[1], dtype='float32')
num_good_steps = fluid.data(
name="num_good_steps", shape=[1], dtype='int32')
num_bad_steps = fluid.data(
name="num_bad_steps", shape=[1], dtype='int32')
a_v = np.random.random([1024, 1024]).astype('float32')
b_v = np.random.random([512, 128]).astype('float32')
i = np.random.randint(0, 1024, 1)
j = np.random.randint(0, 1024, 1)
a_v[i[0]][j[0]] = np.inf
found_inf_v = np.array([True]).astype('bool')
prev_loss_scaling_v = np.array([2048]).astype('float32')
num_good_steps_v = np.array([999], dtype=np.int32)
num_bad_steps_v = np.array([1], dtype=np.int32)
incr_every_n_steps = 1000
decr_every_n_nan_or_inf = 2
incr_ratio = 2
decr_ratio = 0.8
result = amp_nn.update_loss_scaling(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
name="update_loss_scaling")
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(scope):
exe.run(fluid.default_startup_program())
result_v = exe.run(feed={
'a': a_v,
'b': b_v,
'found_inf': found_inf_v,
'prev_loss_scaling': prev_loss_scaling_v,
'num_good_steps': num_good_steps_v,
'num_bad_steps': num_bad_steps_v
},
fetch_list=[
result, x, found_inf, prev_loss_scaling,
num_good_steps, num_bad_steps
])
assert np.array_equal(result_v[0], np.zeros_like(a_v))
assert np.array_equal(result_v[1], np.zeros_like(b_v))
assert np.array_equal(result_v[2], np.zeros_like(a_v))
assert np.array_equal(result_v[3], np.zeros_like(b_v))
assert np.array_equal(result_v[4], found_inf_v)
assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio)
assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
def test_loss_scaling_cpu(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check(use_cuda=False)
def test_loss_scaling_cpu_inf(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check_inf(use_cuda=False)
def test_loss_scaling_gpu(self):
if fluid.core.is_compiled_with_cuda():
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check(use_cuda=True)
def test_loss_scaling_gpu_inf(self):
if fluid.core.is_compiled_with_cuda():
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check_inf(use_cuda=True)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# https://github.com/brkt/brkt-cli/blob/master/LICENSE
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import errno
import logging
import os
import os.path
import shutil
import sys
import tempfile
import yaml
import brkt_cli
from brkt_cli import argutil
from brkt_cli.subcommand import Subcommand
from brkt_cli.util import parse_endpoint, render_table_rows
from brkt_cli.validation import ValidationError
log = logging.getLogger(__name__)
CONFIG_DIR = os.path.expanduser('~/.brkt')
CONFIG_PATH = os.path.join(CONFIG_DIR, 'config')
VERSION = 3
class InvalidOptionError(Exception):
def __init__(self, option):
self.option = option
class UnknownEnvironmentError(Exception):
def __init__(self, env):
self.env = env
class InvalidEnvironmentError(Exception):
def __init__(self, missing_keys):
self.missing_keys = missing_keys
BRKT_HOSTED_ENV_NAME = 'brkt-hosted'
def _bracket_environment_to_dict(benv):
"""Convert a BracketEnvironment object to a dictionary that can be stored
in a config.
:param benv a BracketEnvironment object
:return a dictionary
"""
return {
'api-host': benv.api_host,
'api-port': benv.api_port,
'keyserver-host': benv.hsmproxy_host,
'keyserver-port': benv.hsmproxy_port,
'public-api-host': benv.public_api_host,
'public-api-port': benv.public_api_port,
'network-host': benv.network_host,
'network-port': benv.network_port,
'public-api-ca-cert-path': benv.public_api_ca_cert_path
}
def _bracket_environment_from_dict(d):
"""Convert a bracket environment from the config into a BracketEnvironment
object
:param d a dictionary
:return a BracketEnvironment object
"""
benv = brkt_cli.BracketEnvironment()
benv.api_host = d.get('api-host')
benv.api_port = d.get('api-port')
benv.hsmproxy_host = d.get('keyserver-host')
benv.hsmproxy_port = d.get('keyserver-port')
benv.public_api_host = d.get('public-api-host')
benv.public_api_port = d.get('public-api-port')
benv.network_host = d.get('network-host')
benv.network_port = d.get('network-port')
benv.public_api_ca_cert_path = d.get('public-api-ca-cert-path')
return benv
def _validate_environment(benv):
"""Make sure all the necessary attributes of an environment are set.
:raises InvalidEnvironmentError
"""
attrs = ('api_host', 'hsmproxy_host', 'public_api_host', 'network_host')
missing = []
for attr in attrs:
if getattr(benv, attr) is None:
missing.append(attr)
if len(missing) > 0:
raise InvalidEnvironmentError(missing)
def _unlink_noraise(path):
try:
os.unlink(path)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
log.exception("Failed unlinking %s", path)
except:
log.exception("Failed unlinking %s", path)
class CLIConfig(object):
"""CLIConfig exposes an interface that subcommands can use to retrieve
persistent configuration options.
"""
def __init__(self):
self._config = {
'current-environment': None,
'environments': {},
'options': {},
'version': VERSION,
'internal': {}
}
self._add_prod_env()
self._registered_options = collections.defaultdict(dict)
def _get_env(self, env_name):
if env_name not in self._config['environments']:
raise UnknownEnvironmentError(env_name)
d = self._config['environments'][env_name]
return _bracket_environment_from_dict(d)
def set_env(self, name, env):
"""Update the named environment.
:param name the environment name (e.g. stage)
:param env a BracketEnvironment instance
"""
d = _bracket_environment_to_dict(env)
self._config['environments'][name] = d
def get_current_env(self):
"""Return the current environment.
:return a tuple of environment name, BracketEnvironment
"""
env_name = self._config['current-environment']
return env_name, self.get_env(env_name)
def set_current_env(self, env_name):
"""Change the current environment
:param env_name the named env
"""
env = self._get_env(env_name)
_validate_environment(env)
self._config['current-environment'] = env_name
def get_env_meta(self):
"""Return all defined environments"""
meta = {}
for env_name in self._config['environments'].iterkeys():
meta[env_name] = {
'is_current': self._config['current-environment'] == env_name
}
return meta
def get_env(self, env_name):
"""Return the named environment
:param env_name a string
:return a BracketEnvironment instance
:raises UnknownEnvironmentError
"""
return self._get_env(env_name)
def unset_env(self, env_name):
"""Delete the named environment
:param env_name a string
:raises UnknownEnvironmentError
"""
self._get_env(env_name)
del self._config['environments'][env_name]
if self._config['current-environment'] == env_name:
self._config['current-environment'] = BRKT_HOSTED_ENV_NAME
def _check_option(self, option):
if option not in self._registered_options:
raise InvalidOptionError(option)
def register_option(self, option, desc):
self._registered_options[option] = desc
def registered_options(self):
return self._registered_options
def set_option(self, option, value):
"""Set the value for the supplied option.
:param option a dot-delimited option string
:param value the option value
"""
self._check_option(option)
levels = option.split('.')
attr = levels.pop()
cur = self._config['options']
for level in levels:
if level not in cur:
cur[level] = {}
cur = cur[level]
cur[attr] = value
def get_option(self, option, default=None):
"""Fetch the value for the supplied option.
:param option a dot-delimited option string
:param default the value to be returned if option is not present
:return the option value
"""
self._check_option(option)
levels = option.split('.')
attr = levels.pop()
cur = self._config['options']
for level in levels:
if level not in cur:
return default
cur = cur[level]
return cur.get(attr, default)
def _remove_empty_dicts(self, h):
to_remove = []
for k in h:
if isinstance(h[k], dict):
self._remove_empty_dicts(h[k])
if len(h[k]) == 0:
to_remove.append(k)
for k in to_remove:
del h[k]
def unset_option(self, option):
"""Unset the value for the supplied option.
:param option A dot-delimited option string
"""
self._check_option(option)
levels = option.split('.')
attr = levels.pop()
cur = self._config['options']
for level in levels:
if level not in cur:
return
cur = cur[level]
if attr in cur:
del cur[attr]
# Clean up any empty sub-sections
self._remove_empty_dicts(self._config['options'])
def set_internal_option(self, option, value):
self._config['internal'][option] = value
def get_internal_option(self, option, default=None):
return self._config['internal'].get(option, default)
def _migrate_config(self, config):
"""Handle migrating between different config versions"""
if config['version'] == 1:
config['environments'] = {}
config['current-environment'] = None
config['version'] = 2
if config['version'] == 2:
config['internal'] = {}
config['version'] = VERSION
return config
def _add_prod_env(self):
prod_env = brkt_cli.get_prod_brkt_env()
prod_dict = _bracket_environment_to_dict(prod_env)
self._config['environments'][BRKT_HOSTED_ENV_NAME] = prod_dict
if self._config.get('current-environment') is None:
self._config['current-environment'] = BRKT_HOSTED_ENV_NAME
def read(self, f=None):
"""Read the config from disk"""
try:
if not f:
f = open(CONFIG_PATH)
config = yaml.safe_load(f)
self._config = self._migrate_config(config)
self._add_prod_env()
except IOError as e:
if e.errno != errno.ENOENT:
raise
finally:
if f:
f.close()
def write(self, f):
"""Write the config to disk.
:param f A file-like object
"""
yaml.dump(self._config, f)
def save_config(self):
"""Save the current config to disk.
"""
try:
os.mkdir(CONFIG_DIR, 0755)
except OSError as e:
if e.errno != errno.EEXIST:
raise
f = tempfile.NamedTemporaryFile(delete=False, prefix='brkt_cli')
try:
self.write(f)
f.close()
except:
_unlink_noraise(f.name)
raise
try:
shutil.move(f.name, CONFIG_PATH)
except:
_unlink_noraise(f.name)
raise
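# Usage sketch (illustrative only; the option name and value are hypothetical):
#
#     config = CLIConfig()
#     config.register_option('encrypt.one-option', 'An example option')
#     config.set_option('encrypt.one-option', 'some-value')
#     config.get_option('encrypt.one-option')   # -> 'some-value'
#     config.save_config()                      # persists to ~/.brkt/config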
class ConfigSubcommand(Subcommand):
def __init__(self, stdout=sys.stdout):
self.stdout = stdout
def name(self):
return 'config'
def register(self, subparsers, parsed_config):
self.parsed_config = parsed_config
config_parser = subparsers.add_parser(
self.name(),
description=(
'Display or update brkt-cli options stored in'
' ~/.brkt/config'),
help='Display or update brkt-cli options'
)
config_subparsers = config_parser.add_subparsers(
dest='config_subcommand',
# Hardcode the list, so that we don't expose subcommands that
# are still in development.
metavar='{list,set,get,unset,set-env,use-env,list-envs,get-env,'
'unset-env}'
)
# List all options
config_subparsers.add_parser(
'list',
help='Display the values of all options set in the config file',
description='Display the values of all options set in the config file')
# All the options available for retrieval/mutation
rows = []
descs = self.parsed_config.registered_options()
opts = sorted(descs.keys())
for opt in opts:
rows.append([opt, descs[opt]])
opts_table = render_table_rows(rows, row_prefix=' ')
epilog = "\n".join([
'supported options:',
'',
opts_table
])
# Set an option
set_parser = config_subparsers.add_parser(
'set',
help='Set the value for an option',
description='Set the value for an option',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
set_parser.add_argument(
'option',
help='The option name (e.g. encrypt-gcp-image.project)')
set_parser.add_argument(
'value',
help='The option value')
# Get the value for an option
get_parser = config_subparsers.add_parser(
'get',
help='Get the value for an option',
description='Get the value for an option',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
get_parser.add_argument(
'option',
help='The option name (e.g. encrypt-gcp-image.project)')
# Unset the value for an option
unset_parser = config_subparsers.add_parser(
'unset',
help='Unset the value for an option',
description='Unset the value for an option',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
unset_parser.add_argument(
'option',
help='The option name (e.g. encrypt-gcp-image.project)')
# Define or update an environment
set_env_parser = config_subparsers.add_parser(
'set-env',
help='Update the attributes of an environment',
description="""
Update the attributes of an environment
Environments are persisted in your configuration and can be activated via the
`use-env` config subcommand. This command is particularly helpful if you need
to work with multiple on-prem control-plane deployments. For example, we could
define stage and prod control planes hosted at stage.foo.com and prod.foo.com,
respectively, by executing:
> brkt config set-env stage --service-domain stage.foo.com
> brkt config set-env prod --service-domain prod.foo.com
We can switch between the environments using the `use-env` config subcommand
like so:
> brkt config use-env stage
We can determine the current environment using the `list-envs` config
subcommand:
> brkt config list-envs
brkt-hosted
prod
* stage
>
The leading `*' indicates that the `stage' environment is currently active.
""",
formatter_class=argparse.RawDescriptionHelpFormatter)
set_env_parser.add_argument(
'env_name',
help='The environment name (e.g. stage)')
set_env_parser.add_argument(
'--api-server',
help='The api server (host[:port]) the metavisor will connect to')
set_env_parser.add_argument(
'--key-server',
help='The key server (host[:port]) the metavisor will connect to')
set_env_parser.add_argument(
'--network-server',
help='The network server (host[:port]) the metavisor will connect to')
argutil.add_public_api_ca_cert(set_env_parser)
set_env_parser.add_argument(
'--public-api-server',
help='The public api (host[:port])')
set_env_parser.add_argument(
'--service-domain',
help=('Set server values from the service domain. This option '
'assumes that each server is resolvable via a hostname '
'rooted at service-domain. Specifically, api is expected '
'to live at yetiapi.<service-domain>, key-server at '
'hsmproxy.<service-domain>, network at '
'network.<service-domain>, and public-api-server at '
'api.<service-domain>.')
)
# Set the active environment
use_env_parser = config_subparsers.add_parser(
'use-env',
help='Set the active environment',
description='Set the active environment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
use_env_parser.add_argument(
'env_name',
help='The environment name (e.g. stage)')
# Display all defined environments
config_subparsers.add_parser(
'list-envs',
help='Display all environments',
description=(
"Display all environments. The leading `*' indicates"
" the currently active environment."))
# Get the details of a specific environment
get_env_parser = config_subparsers.add_parser(
'get-env',
help='Display the details of a specific environment',
description='Display the details of an environment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
get_env_parser.add_argument(
'env_name',
help='The environment name')
# Unset a specific environment
unset_env_parser = config_subparsers.add_parser(
'unset-env',
help='Delete an environment',
description='Delete an environment')
unset_env_parser.add_argument(
'env_name',
help='The environment name')
def _list_options(self):
"""Display the contents of the config"""
for opt in sorted(self.parsed_config.registered_options().keys()):
val = self.parsed_config.get_option(opt)
if val is not None:
line = "%s=%s\n" % (opt, val)
self.stdout.write(line)
return 0
def _get_option(self, opt):
try:
val = self.parsed_config.get_option(opt)
except InvalidOptionError:
raise ValidationError('Error: unknown option "%s".' % (opt,))
if val:
self.stdout.write("%s\n" % (val,))
return 0
def _set_option(self, opt, val):
"""Set the specified option"""
try:
self.parsed_config.set_option(opt, val)
except InvalidOptionError:
raise ValidationError('Error: unknown option "%s".' % (opt,))
return 0
def _unset_option(self, opt):
"""Unset the specified option"""
try:
self.parsed_config.unset_option(opt)
except InvalidOptionError:
raise ValidationError('Error: unknown option "%s".' % (opt,))
return 0
def _set_env(self, values):
"""Update attributes for the named environment"""
if values.env_name == BRKT_HOSTED_ENV_NAME:
raise ValidationError(
'Error: cannot modify environment ' + values.env_name)
try:
env = self.parsed_config.get_env(values.env_name)
except UnknownEnvironmentError:
env = brkt_cli.BracketEnvironment()
opt_attr = {
'api': 'api',
'key': 'hsmproxy',
'public_api': 'public_api',
'network': 'network',
}
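# Each supplied --<option>-server value is parsed into host/port and copied
# onto the matching <attr>_host/<attr>_port pair of the BracketEnvironment;
# options that were not supplied leave the existing values untouched.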
for k in opt_attr.iterkeys():
endpoint = k + '_server'
endpoint = getattr(values, endpoint)
if endpoint is None:
continue
try:
host, port = parse_endpoint(endpoint)
except ValueError:
raise ValidationError('Error: Invalid value for option --' + k + '-server')
port = port or 443
setattr(env, opt_attr[k] + '_host', host)
setattr(env, opt_attr[k] + '_port', port)
if values.service_domain is not None:
env = brkt_cli.brkt_env_from_domain(values.service_domain)
env.public_api_ca_cert_path = values.public_api_ca_cert
self.parsed_config.set_env(values.env_name, env)
return 0
def _use_env(self, values):
"""Set the active environment"""
try:
self.parsed_config.set_current_env(values.env_name)
except UnknownEnvironmentError:
raise ValidationError('Error: unknown environment ' + values.env_name)
except InvalidEnvironmentError, e:
attr_opt = {
'api_host': 'api-server',
'hsmproxy_host': 'key-server',
'public_api_host': 'public-api-server',
'network_host': 'network',
}
msg = ("Error: the environment %s is missing values for %s."
" Use `brkt config set-env` to set the appropriate values.")
opts = []
for attr in e.missing_keys:
opts.append(attr_opt[attr])
raise ValidationError(msg % (values.env_name, ', '.join(opts)))
def _list_envs(self):
"""Display all envs"""
meta = self.parsed_config.get_env_meta()
rows = []
for env_name in sorted(meta.keys()):
marker = ' '
if meta[env_name]['is_current']:
marker = '*'
rows.append((marker, env_name))
self.stdout.write(render_table_rows(rows) + "\n")
def _get_env(self, values):
"""Display the details of an environment"""
try:
env = self.parsed_config.get_env(values.env_name)
except UnknownEnvironmentError:
raise ValidationError('Error: unknown environment ' + values.env_name)
attr_opt = {
'api': 'api',
'hsmproxy': 'key',
'public_api': 'public-api',
'network': 'network',
}
for k in sorted(attr_opt.keys()):
host = getattr(env, k + '_host')
if host is None:
continue
port = getattr(env, k + '_port')
self.stdout.write("%s-server=%s:%d\n" % (attr_opt[k], host, port))
if env.public_api_ca_cert_path:
self.stdout.write(
'public-api-ca-cert=%s\n' % env.public_api_ca_cert_path)
def _unset_env(self, values):
"""Delete the named environment"""
if values.env_name == BRKT_HOSTED_ENV_NAME:
raise ValidationError(
'Error: cannot delete environment ' + values.env_name)
try:
self.parsed_config.unset_env(values.env_name)
except UnknownEnvironmentError:
raise ValidationError('Error: unknown environment ' + values.env_name)
def run(self, values):
subcommand = values.config_subcommand
if subcommand == 'list':
self._list_options()
elif subcommand == 'set':
self._set_option(values.option, values.value)
self.parsed_config.save_config()
elif subcommand == 'get':
self._get_option(values.option)
elif subcommand == 'unset':
self._unset_option(values.option)
self.parsed_config.save_config()
elif subcommand == 'set-env':
self._set_env(values)
self.parsed_config.save_config()
elif subcommand == 'use-env':
self._use_env(values)
self.parsed_config.save_config()
elif subcommand == 'list-envs':
self._list_envs()
elif subcommand == 'get-env':
self._get_env(values)
elif subcommand == 'unset-env':
self._unset_env(values)
self.parsed_config.save_config()
return 0
def get_subcommands():
return [ConfigSubcommand()]
|
|
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import shutil
import IECore
import IECoreGL
IECoreGL.init( False )
class CurvesPrimitiveTest( unittest.TestCase ) :
outputFileName = os.path.dirname( __file__ ) + "/output/testCurves.tif"
def showColorShader( self ) :
fs = """
in vec3 fragmentCs;
void main()
{
gl_FragColor = vec4( fragmentCs, 1 );
}
"""
s = IECore.Shader( "showColor", "surface" )
s.parameters["gl:fragmentSource"] = IECore.StringData( fs )
return s
def performTest( self, curvesPrimitive, attributes=[], testPixels=[], testImage=None, shader=None, diffImage=None ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( "./glsl" ) )
r.camera( "main", {
"projection" : IECore.StringData( "orthographic" ),
"resolution" : IECore.V2iData( IECore.V2i( 256 ) ),
"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( 0 ), IECore.V2f( 1 ) ) )
}
)
r.display( self.outputFileName, "tif", "rgba", {} )
with IECore.WorldBlock( r ) :
for a in attributes :
r.setAttribute( a[0], a[1] )
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
if shader :
shader.render( r )
else :
r.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 0, 0, 1 ) ) } )
curvesPrimitive.render( r )
i = IECore.Reader.create( self.outputFileName ).read()
e = IECore.PrimitiveEvaluator.create( i )
result = e.createResult()
a = e.A()
r = e.R()
g = e.G()
b = e.B()
for t in testPixels :
e.pointAtUV( t[0], result )
c = IECore.Color4f(
result.floatPrimVar( r ),
result.floatPrimVar( g ),
result.floatPrimVar( b ),
result.floatPrimVar( a )
)
self.assertEqual( c, t[1] )
if testImage :
# blue where there must be an object
# red where we don't mind
# black where there must be nothing
a = i["A"].data
i2 = IECore.Reader.create( testImage ).read()
r2 = i2["R"].data
b2 = i2["B"].data
# use a separate index so the image "i" is not clobbered when diffImage is also given
for idx in range( r2.size() ) :
if b2[idx] > 0.5 :
self.assertEqual( a[idx], 1 )
elif r2[idx] < 0.5 :
self.assertEqual( a[idx], 0 )
if diffImage :
expectedImage = IECore.Reader.create( diffImage ).read()
self.assertEqual( IECore.ImageDiffOp()( imageA = expectedImage, imageB = i, maxError = 0.05 ).value, False )
def testAttributes( self ) :
r = IECoreGL.Renderer()
r.setOption( "gl:mode", IECore.StringData( "deferred" ) )
r.worldBegin()
self.assertEqual( r.getAttribute( "gl:curvesPrimitive:useGLLines" ), IECore.BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:curvesPrimitive:glLineWidth" ), IECore.FloatData( 1 ) )
self.assertEqual( r.getAttribute( "gl:curvesPrimitive:ignoreBasis" ), IECore.BoolData( False ) )
r.setAttribute( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:curvesPrimitive:useGLLines" ), IECore.BoolData( True ) )
r.setAttribute( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 2 ) )
self.assertEqual( r.getAttribute( "gl:curvesPrimitive:glLineWidth" ), IECore.FloatData( 2 ) )
r.setAttribute( "gl:curvesPrimitive:ignoreBasis", IECore.BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:curvesPrimitive:ignoreBasis" ), IECore.BoolData( True ) )
r.worldEnd()
def testLinearNonPeriodicAsLines( self ) :
self.performTest(
IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4, 4 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 1, 0.5, 0 ),
IECore.V3f( 1, 1, 0 ),
IECore.V3f( 0, 1, 0 ),
]
)
),
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 4 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
],
[
( IECore.V2f( 0, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0.25 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0.5 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0, 0.25 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0, 0.5 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 0.5 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0, 0.75 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0.75 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 1, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
]
)
def testOverriddenLinearPeriodicAsLines( self ) :
self.performTest(
IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 1, 0 ),
IECore.V3f( 1, 1, 0 ),
]
)
),
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 4 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
( "gl:curvesPrimitive:ignoreBasis", IECore.BoolData( True ) ),
],
[
( IECore.V2f( 0, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0.5 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0, 0.5 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 0.5 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.1, 0.1 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.9, 0.1 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.9, 0.9 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.1, 0.9 ), IECore.Color4f( 0, 0, 0, 0 ) ),
]
)
def testLinearPeriodicAsLines( self ) :
self.performTest(
IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.linear(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 1, 0 ),
IECore.V3f( 1, 1, 0 ),
]
)
),
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 4 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
],
[
( IECore.V2f( 0, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 0.5 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 1, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0, 1 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0, 0.5 ), IECore.Color4f( 0, 0, 1, 1 ) ),
( IECore.V2f( 0.5, 0.5 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.1, 0.1 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.9, 0.1 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.9, 0.9 ), IECore.Color4f( 0, 0, 0, 0 ) ),
( IECore.V2f( 0.1, 0.9 ), IECore.Color4f( 0, 0, 0, 0 ) ),
]
)
def testBSplinePeriodicAsLines( self ) :
self.performTest(
IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 1, 0 ),
IECore.V3f( 1, 1, 0 ),
]
)
),
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 2 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/periodicBSpline.tif"
)
def testBSplinePeriodicAsRibbons( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 1, 0 ),
IECore.V3f( 1, 1, 0 ),
]
)
)
c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.05 ) )
self.performTest(
c,
[
( "gl:primitive:wireframe", IECore.BoolData( True ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/bSplineCircle.tif"
)
def testBezierAsRibbons( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.bezier(),
False,
IECore.V3fVectorData(
[
IECore.V3f( 0.8, 0.2, 0 ),
IECore.V3f( 0.2, 0.2, 0 ),
IECore.V3f( 0.2, 0.8, 0 ),
IECore.V3f( 0.8, 0.8, 0 ),
]
)
)
c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.05 ) )
self.performTest(
c,
[
( "gl:primitive:wireframe", IECore.BoolData( True ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/bezierHorseShoe.tif"
)
def testLinearRibbons( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
IECore.V3f( 0.8, 0.2, 0 ),
IECore.V3f( 0.2, 0.2, 0 ),
IECore.V3f( 0.2, 0.8, 0 ),
IECore.V3f( 0.8, 0.8, 0 ),
]
)
)
c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.035 ) )
self.performTest(
c,
[
( "gl:primitive:wireframe", IECore.BoolData( True ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/linearHorseShoeRibbon.tif"
)
def testLinearPeriodicRibbons( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.linear(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 0.8, 0.2, 0 ),
IECore.V3f( 0.2, 0.2, 0 ),
IECore.V3f( 0.2, 0.8, 0 ),
IECore.V3f( 0.8, 0.8, 0 ),
]
)
)
c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.05 ) )
self.performTest(
c,
[
( "gl:primitive:wireframe", IECore.BoolData( True ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/linearPeriodicRibbon.tif"
)
def testSeveralBSplineRibbons( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4, 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 0.4, 0.2, 0 ),
IECore.V3f( 0.2, 0.2, 0 ),
IECore.V3f( 0.2, 0.4, 0 ),
IECore.V3f( 0.4, 0.4, 0 ),
IECore.V3f( 0.8, 0.6, 0 ),
IECore.V3f( 0.6, 0.6, 0 ),
IECore.V3f( 0.6, 0.8, 0 ),
IECore.V3f( 0.8, 0.8, 0 ),
]
)
)
c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.035 ) )
self.performTest(
c,
[
( "gl:primitive:wireframe", IECore.BoolData( True ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/twoBSplineCircles.tif"
)
def testSeveralBSplineLines( self ) :
self.performTest(
IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4, 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 0.4, 0.2, 0 ),
IECore.V3f( 0.2, 0.2, 0 ),
IECore.V3f( 0.2, 0.4, 0 ),
IECore.V3f( 0.4, 0.4, 0 ),
IECore.V3f( 0.8, 0.6, 0 ),
IECore.V3f( 0.6, 0.6, 0 ),
IECore.V3f( 0.6, 0.8, 0 ),
IECore.V3f( 0.8, 0.8, 0 ),
]
)
),
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 1 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
],
[
],
diffImage = os.path.dirname( __file__ ) + "/expectedOutput/twoBSplineCirclesAsLines.tif"
)
def testRibbonWindingOrder( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 1, 0 ),
IECore.V3f( 1, 1, 0 ),
]
)
)
c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.05 ) )
self.performTest(
c,
[
( "doubleSided", IECore.BoolData( False ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/bSplineCircle.tif"
)
def testLinearRibbonWindingOrder( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.linear(),
True,
IECore.V3fVectorData(
[
IECore.V3f( 0.8, 0.2, 0 ),
IECore.V3f( 0.2, 0.2, 0 ),
IECore.V3f( 0.2, 0.8, 0 ),
IECore.V3f( 0.8, 0.8, 0 ),
]
)
)
c["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.05 ) )
self.performTest(
c,
[
( "doubleSided", IECore.BoolData( False ) ),
],
[
],
os.path.dirname( __file__ ) + "/images/linearPeriodicRibbon.tif"
)
def testLinearLinesWithVertexColor( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4, 4 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 1, 0.5, 0 ),
IECore.V3f( 1, 1, 0 ),
IECore.V3f( 0, 1, 0 ),
]
)
)
c["Cs"] = IECore.PrimitiveVariable(
IECore.PrimitiveVariable.Interpolation.Vertex,
IECore.Color3fVectorData(
[
IECore.Color3f( 1, 0, 0 ),
IECore.Color3f( 0, 1, 0 ),
IECore.Color3f( 0, 0, 1 ),
IECore.Color3f( 0, 1, 0 ),
IECore.Color3f( 1, 0, 0 ),
IECore.Color3f( 0, 1, 0 ),
IECore.Color3f( 0, 0, 1 ),
IECore.Color3f( 0, 1, 0 ),
]
)
)
self.performTest(
c,
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 4 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
],
diffImage = os.path.dirname( __file__ ) + "/expectedOutput/linearLinesWithVertexColor.tif",
shader = self.showColorShader(),
)
def testLinearLinesWithUniformColor( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4, 4 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 1, 0.5, 0 ),
IECore.V3f( 1, 1, 0 ),
IECore.V3f( 0, 1, 0 ),
]
)
)
c["Cs"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Uniform, IECore.Color3fVectorData( [ IECore.Color3f( 1, 0, 0 ), IECore.Color3f( 0, 1, 0 ) ] ) )
self.performTest(
c,
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 4 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
],
diffImage = os.path.dirname( __file__ ) + "/expectedOutput/linearLinesWithUniformColor.tif",
shader = self.showColorShader(),
)
def testLinearLinesWithConstantColor( self ) :
c = IECore.CurvesPrimitive(
IECore.IntVectorData( [ 4, 4 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
IECore.V3f( 1, 0, 0 ),
IECore.V3f( 0, 0, 0 ),
IECore.V3f( 0, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 0.5, 0.5, 0 ),
IECore.V3f( 1, 0.5, 0 ),
IECore.V3f( 1, 1, 0 ),
IECore.V3f( 0, 1, 0 ),
]
)
)
c["Cs"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) )
self.performTest(
c,
[
( "gl:curvesPrimitive:glLineWidth", IECore.FloatData( 4 ) ),
( "gl:curvesPrimitive:useGLLines", IECore.BoolData( True ) ),
],
diffImage = os.path.dirname( __file__ ) + "/expectedOutput/linearLinesWithConstantColor.tif",
shader = self.showColorShader(),
)
def setUp( self ) :
if not os.path.isdir( "test/IECoreGL/output" ) :
os.makedirs( "test/IECoreGL/output" )
def tearDown( self ) :
if os.path.isdir( "test/IECoreGL/output" ) :
shutil.rmtree( "test/IECoreGL/output" )
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import (
ARDIE,
ARDMediathekIE,
SportschauIE,
)
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbc import (
BBCCoUkIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import (
ImgurIE,
ImgurAlbumIE,
)
from .ina import InaIE
from .indavideo import (
IndavideoIE,
IndavideoEmbedIE,
)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lecture2go import Lecture2GoIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
MTVDEIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .muzu import MuzuTVIE
from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
MSNBCIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
VPROIE,
WNLIE
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .periscope import (
PeriscopeIE,
QuickscopeIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snagfilms import (
SnagFilmsIE,
SnagFilmsEmbedIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ultimedia import UltimediaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vlive import VLiveIE
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
# disabled because it can wipe the watch history (see #6893)
# remember to uncomment test in test/test_all_urls when it's fixed
#YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
_ALL_CLASSES = [
klass
for name, klass in globals().items()
if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
return [klass() for klass in _ALL_CLASSES]
def list_extractors(age_limit):
"""
Return a list of extractors that are suitable for the given age,
sorted by extractor ID.
"""
return sorted(
filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
key=lambda ie: ie.IE_NAME.lower())
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name + 'IE']
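# Illustrative usage of the helpers above (a minimal sketch, not part of the
# module itself):
#
#   get_info_extractor('Youtube')             # -> the YoutubeIE class
#   [ie.IE_NAME for ie in gen_extractors()]   # extractor names, in matching order
#   list_extractors(age_limit=18)             # extractors suitable for that age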
|
|
# Django settings for test_project project.
import os.path
import django
DEBUG = os.environ.get('DEBUG', False)
TEMPLATE_DEBUG = DEBUG
LOG_LEVEL = os.environ.get('DJANGO_LOG_LEVEL', 'INFO')
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.environ.get('OPENSHIFT_DATA_DIR', 'data')
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
LOG_DIR = os.environ.get('OPENSHIFT_LOG_DIR', 'log')
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
PUBLIC_DIR = os.path.join(os.environ.get('OPENSHIFT_REPO_DIR', ''), 'wsgi/static')
DATABASES = {
'default': {
'NAME': os.environ.get('DJANGO_DATABASE_DEFAULT_NAME', 'db.sqlite'),
'USER': os.environ.get('DJANGO_DATABASE_DEFAULT_USER', ''),
'PASSWORD': os.environ.get('DJANGO_DATABASE_DEFAULT_PASSWORD', ''),
'HOST': os.environ.get('DJANGO_DATABASE_DEFAULT_HOST', ''),
'PORT': os.environ.get('DJANGO_DATABASE_DEFAULT_PORT', ''),
'ENGINE': os.environ.get('DJANGO_DATABASE_DEFAULT_ENGINE',
'django.db.backends.sqlite3'),
}
}
if 'OPENSHIFT_DATA_DIR' in os.environ:
DATABASES['default']['NAME'] = os.path.join(DATA_DIR, 'db.sqlite')
if 'OPENSHIFT_POSTGRESQL_DB_HOST' in os.environ:
DATABASES['default']['NAME'] = os.environ['OPENSHIFT_APP_NAME']
DATABASES['default']['USER'] = os.environ['OPENSHIFT_POSTGRESQL_DB_USERNAME']
DATABASES['default']['PASSWORD'] = os.environ['OPENSHIFT_POSTGRESQL_DB_PASSWORD']
DATABASES['default']['HOST'] = os.environ['OPENSHIFT_POSTGRESQL_DB_HOST']
DATABASES['default']['PORT'] = os.environ['OPENSHIFT_POSTGRESQL_DB_PORT']
DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, 'fixtures'),
]
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
},
'cities_light': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
},
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
from socket import gethostname
ALLOWED_HOSTS = [
gethostname(),
]
DNS = os.environ.get('OPENSHIFT_APP_DNS', None)
if DNS:
    ALLOWED_HOSTS.append(DNS)
SITE_ID = 1
STATIC_URL = '/public/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'public', 'static')
if DATA_DIR:
MEDIA_URL = '/static/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
if PUBLIC_DIR:
STATIC_URL = '/static/collected/'
STATIC_ROOT = os.path.join(PUBLIC_DIR, 'collected')
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^le6=#%$1z63o!#z^qr(r+^ix&iqx)@h*u$@8$bu&n8cv6m)go'
ROOT_URLCONF = 'test_project.urls'
if django.VERSION < (1, 8):
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages'
)
else:
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': TEMPLATE_CONTEXT_PROCESSORS,
},
'DIRS': TEMPLATE_DIRS,
},
]
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_project.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'autocomplete_light',
'django.contrib.admin',
'cities_light',
'autocomplete_light.example_apps.basic',
'autocomplete_light.example_apps.music',
'autocomplete_light.example_apps.autocomplete_test_case_app',
'autocomplete_light.example_apps.security_test',
'autocomplete_light.example_apps.dependant_autocomplete',
'autocomplete_light.example_apps.non_admin_add_another',
'autocomplete_light.example_apps.create_choice_on_the_fly',
'navigation_autocomplete',
'admin_autocomplete_in_row',
'bootstrap_modal'
)
if django.VERSION < (1, 7):
INSTALLED_APPS += ('south',)
elif django.VERSION >= (1, 7):
INSTALLED_APPS += (
'autocomplete_light.example_apps.app_config_with_registry_file',
'autocomplete_light.example_apps.app_config_without_registry_file.apps.AppConfigWithoutRegistryFile',
)
if django.VERSION >= (1, 5):
INSTALLED_APPS += (
'autocomplete_light.example_apps.unuseable_virtualfield',
)
try:
import genericm2m
except ImportError:
pass
else:
INSTALLED_APPS += ('genericm2m',)
try:
import taggit
except ImportError:
pass
else:
INSTALLED_APPS += ('taggit',)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
SOUTH_MIGRATION_MODULES = {
'taggit': 'taggit.south_migrations',
'cities_light': 'cities_light.south_migrations',
'admin_autocomplete_in_row': 'ignore',
}
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Produces the training and dev data for --problem into --data_dir.
Produces sharded and shuffled TFRecord files of tensorflow.Example protocol
buffers for a variety of registered datasets.
All Problems are registered with @registry.register_problem or are in
_SUPPORTED_PROBLEM_GENERATORS in this file. Each entry maps a string name
(selectable on the command-line with --problem) to a function that takes 2
arguments - input_directory and mode (one of "train" or "dev") - and yields for
each training example a dictionary mapping string feature names to lists of
{string, int, float}. The generator will be run once for each mode.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import random
import tempfile
# Dependency imports
import numpy as np
from tensor2tensor.data_generators import algorithmic_math
from tensor2tensor.data_generators import all_problems # pylint: disable=unused-import
from tensor2tensor.data_generators import audio
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import snli
from tensor2tensor.data_generators import wsj_parsing
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("data_dir", "", "Data directory.")
flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen",
"Temporary storage directory.")
flags.DEFINE_string("problem", "",
"The name of the problem to generate data for.")
flags.DEFINE_string("exclude_problems", "",
"Comma-separates list of problems to exclude.")
flags.DEFINE_integer("num_shards", 0, "How many shards to use. Ignored for "
"registered Problems.")
flags.DEFINE_integer("max_cases", 0,
"Maximum number of cases to generate (unbounded if 0).")
flags.DEFINE_bool("only_list", False,
"If true, we only list the problems that will be generated.")
flags.DEFINE_integer("random_seed", 429459, "Random seed to use.")
flags.DEFINE_integer("task_id", -1, "For distributed data generation.")
flags.DEFINE_integer("task_id_start", -1, "For distributed data generation.")
flags.DEFINE_integer("task_id_end", -1, "For distributed data generation.")
flags.DEFINE_integer(
"num_concurrent_processes", 10,
"Applies only to problems for which multiprocess_generate=True.")
flags.DEFINE_string("t2t_usr_dir", "",
"Path to a Python module that will be imported. The "
"__init__.py file should include the necessary imports. "
"The imported files should contain registrations, "
"e.g. @registry.register_problem calls, that will then be "
"available to t2t-datagen.")
# Mapping from problems that we can generate data for to their generators.
# pylint: disable=g-long-lambda
_SUPPORTED_PROBLEM_GENERATORS = {
"algorithmic_algebra_inverse": (
lambda: algorithmic_math.algebra_inverse(26, 0, 2, 100000),
lambda: algorithmic_math.algebra_inverse(26, 3, 3, 10000)),
"parsing_english_ptb8k": (
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 2**13, 2**9),
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 2**13, 2**9)),
"parsing_english_ptb16k": (
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 2**14, 2**9),
lambda: wsj_parsing.parsing_token_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 2**14, 2**9)),
"inference_snli32k": (
lambda: snli.snli_token_generator(FLAGS.tmp_dir, True, 2**15),
lambda: snli.snli_token_generator(FLAGS.tmp_dir, False, 2**15),
),
"audio_timit_characters_test": (
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 1718),
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 626)),
"audio_timit_tokens_8k_test": (
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 1718,
vocab_filename="vocab.endefr.%d" % 2**13, vocab_size=2**13),
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 626,
vocab_filename="vocab.endefr.%d" % 2**13, vocab_size=2**13)),
"audio_timit_tokens_32k_test": (
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, True, 1718,
vocab_filename="vocab.endefr.%d" % 2**15, vocab_size=2**15),
lambda: audio.timit_generator(
FLAGS.data_dir, FLAGS.tmp_dir, False, 626,
vocab_filename="vocab.endefr.%d" % 2**15, vocab_size=2**15)),
}
# pylint: enable=g-long-lambda
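# Illustrative sketch only (not a registered problem): each entry in
# _SUPPORTED_PROBLEM_GENERATORS above is a pair of zero-argument callables, one
# for training and one for dev, each returning a generator that yields one dict
# per example mapping feature names to lists of ints/floats/strings, e.g.:
#
#   def _toy_generator(nbr_cases):
#       for i in range(nbr_cases):
#           yield {"inputs": [i + 1, i + 2], "targets": [i + 3]}
#
#   "toy_copy": (lambda: _toy_generator(100000), lambda: _toy_generator(10000))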
def set_random_seed():
"""Set the random seed from flag everywhere."""
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
# Calculate the list of problems to generate.
problems = sorted(
list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_problems())
for exclude in FLAGS.exclude_problems.split(","):
if exclude:
problems = [p for p in problems if exclude not in p]
if FLAGS.problem and FLAGS.problem[-1] == "*":
problems = [p for p in problems if p.startswith(FLAGS.problem[:-1])]
elif FLAGS.problem:
problems = [p for p in problems if p == FLAGS.problem]
else:
problems = []
# Remove TIMIT if paths are not given.
if not FLAGS.timit_paths:
problems = [p for p in problems if "timit" not in p]
# Remove parsing if paths are not given.
if not FLAGS.parsing_path:
problems = [p for p in problems if "parsing" not in p]
if not problems:
problems_str = "\n * ".join(
sorted(list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_problems()))
error_msg = ("You must specify one of the supported problems to "
"generate data for:\n * " + problems_str + "\n")
error_msg += ("TIMIT and parsing need data_sets specified with "
"--timit_paths and --parsing_path.")
raise ValueError(error_msg)
if not FLAGS.data_dir:
FLAGS.data_dir = tempfile.gettempdir()
tf.logging.warning("It is strongly recommended to specify --data_dir. "
"Data will be written to default data_dir=%s.",
FLAGS.data_dir)
FLAGS.data_dir = os.path.expanduser(FLAGS.data_dir)
tf.gfile.MakeDirs(FLAGS.data_dir)
tf.logging.info("Generating problems:\n%s"
% registry.display_list_by_prefix(problems,
starting_spaces=4))
if FLAGS.only_list:
return
for problem in problems:
set_random_seed()
if problem in _SUPPORTED_PROBLEM_GENERATORS:
generate_data_for_problem(problem)
else:
generate_data_for_registered_problem(problem)
def generate_data_for_problem(problem):
"""Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS."""
training_gen, dev_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]
num_shards = FLAGS.num_shards or 10
tf.logging.info("Generating training data for %s.", problem)
train_output_files = generator_utils.train_data_filenames(
problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, num_shards)
generator_utils.generate_files(training_gen(), train_output_files,
FLAGS.max_cases)
tf.logging.info("Generating development data for %s.", problem)
dev_output_files = generator_utils.dev_data_filenames(
problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, 1)
generator_utils.generate_files(dev_gen(), dev_output_files)
all_output_files = train_output_files + dev_output_files
generator_utils.shuffle_dataset(all_output_files)
def generate_data_in_process(arg):
problem_name, data_dir, tmp_dir, task_id = arg
problem = registry.problem(problem_name)
problem.generate_data(data_dir, tmp_dir, task_id)
def generate_data_for_registered_problem(problem_name):
tf.logging.info("Generating data for %s.", problem_name)
if FLAGS.num_shards:
raise ValueError("--num_shards should not be set for registered Problem.")
problem = registry.problem(problem_name)
task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
data_dir = os.path.expanduser(FLAGS.data_dir)
tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
if task_id is None and problem.multiprocess_generate:
if FLAGS.task_id_start != -1:
assert FLAGS.task_id_end != -1
task_id_start = FLAGS.task_id_start
task_id_end = FLAGS.task_id_end
else:
task_id_start = 0
task_id_end = problem.num_generate_tasks
pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
problem.prepare_to_generate(data_dir, tmp_dir)
args = [(problem_name, data_dir, tmp_dir, task_id)
for task_id in range(task_id_start, task_id_end)]
pool.map(generate_data_in_process, args)
else:
problem.generate_data(data_dir, tmp_dir, task_id)
if __name__ == "__main__":
tf.app.run()
|
|
"""CherryPy Benchmark Tool
Usage:
benchmark.py --null --notests --help --cpmodpy --modpython --ab=path --apache=path
--null: use a null Request object (to bench the HTTP server only)
--notests: start the server but do not run the tests; this allows
you to check the tested pages with a browser
--help: show this help message
--cpmodpy: run tests via apache on 54583 (with the builtin _cpmodpy)
--modpython: run tests via apache on 54583 (with modpython_gateway)
--ab=path: Use the ab script/executable at 'path' (see below)
--apache=path: Use the apache script/exe at 'path' (see below)
To run the benchmarks, the Apache Benchmark tool "ab" must either be on
your system path, or specified via the --ab=path option.
To run the modpython tests, the "apache" executable or script must be
on your system path, or provided via the --apache=path option. On some
platforms, "apache" may be called "apachectl" or "apache2ctl"--create
a symlink to them if needed.
"""
import getopt
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import re
import sys
import time
import traceback
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy import _cperror, _cpmodpy
from cherrypy.lib import httputil
AB_PATH = ""
APACHE_PATH = "apache"
SCRIPT_NAME = "/cpbench/users/rdelon/apps/blog"
__all__ = ['ABSession', 'Root', 'print_report',
'run_standard_benchmarks', 'safe_threads',
'size_report', 'startup', 'thread_report',
]
size_cache = {}
class Root:
def index(self):
return """<html>
<head>
<title>CherryPy Benchmark</title>
</head>
<body>
<ul>
<li><a href="hello">Hello, world! (14 byte dynamic)</a></li>
<li><a href="static/index.html">Static file (14 bytes static)</a></li>
<li><form action="sizer">Response of length:
<input type='text' name='size' value='10' /></form>
</li>
</ul>
</body>
</html>"""
index.exposed = True
def hello(self):
return "Hello, world\r\n"
hello.exposed = True
def sizer(self, size):
resp = size_cache.get(size, None)
if resp is None:
size_cache[size] = resp = "X" * int(size)
return resp
sizer.exposed = True
cherrypy.config.update({
'log.error.file': '',
'environment': 'production',
'server.socket_host': '127.0.0.1',
'server.socket_port': 54583,
'server.max_request_header_size': 0,
'server.max_request_body_size': 0,
'engine.deadlock_poll_freq': 0,
})
# Cheat mode on ;)
del cherrypy.config['tools.log_tracebacks.on']
del cherrypy.config['tools.log_headers.on']
del cherrypy.config['tools.trailing_slash.on']
appconf = {
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir,
},
}
app = cherrypy.tree.mount(Root(), SCRIPT_NAME, appconf)
class NullRequest:
"""A null HTTP request class, returning 200 and an empty body."""
def __init__(self, local, remote, scheme="http"):
pass
def close(self):
pass
def run(self, method, path, query_string, protocol, headers, rfile):
cherrypy.response.status = "200 OK"
cherrypy.response.header_list = [("Content-Type", 'text/html'),
("Server", "Null CherryPy"),
("Date", httputil.HTTPDate()),
("Content-Length", "0"),
]
cherrypy.response.body = [""]
return cherrypy.response
class NullResponse:
pass
class ABSession:
"""A session of 'ab', the Apache HTTP server benchmarking tool.
Example output from ab:
This is ApacheBench, Version 2.0.40-dev <$Revision: 1.121.2.1 $> apache-2.0
Copyright (c) 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Copyright (c) 1998-2002 The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 100 requests
Completed 200 requests
Completed 300 requests
Completed 400 requests
Completed 500 requests
Completed 600 requests
Completed 700 requests
Completed 800 requests
Completed 900 requests
Server Software: CherryPy/3.1beta
Server Hostname: 127.0.0.1
Server Port: 54583
Document Path: /static/index.html
Document Length: 14 bytes
Concurrency Level: 10
Time taken for tests: 9.643867 seconds
Complete requests: 1000
Failed requests: 0
Write errors: 0
Total transferred: 189000 bytes
HTML transferred: 14000 bytes
Requests per second: 103.69 [#/sec] (mean)
Time per request: 96.439 [ms] (mean)
Time per request: 9.644 [ms] (mean, across all concurrent requests)
Transfer rate: 19.08 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 2.9 0 10
Processing: 20 94 7.3 90 130
Waiting: 0 43 28.1 40 100
Total: 20 95 7.3 100 130
Percentage of the requests served within a certain time (ms)
50% 100
66% 100
75% 100
80% 100
90% 100
95% 100
98% 100
99% 110
100% 130 (longest request)
Finished 1000 requests
"""
parse_patterns = [('complete_requests', 'Completed',
ntob(r'^Complete requests:\s*(\d+)')),
('failed_requests', 'Failed',
ntob(r'^Failed requests:\s*(\d+)')),
('requests_per_second', 'req/sec',
ntob(r'^Requests per second:\s*([0-9.]+)')),
('time_per_request_concurrent', 'msec/req',
ntob(r'^Time per request:\s*([0-9.]+).*concurrent requests\)$')),
('transfer_rate', 'KB/sec',
ntob(r'^Transfer rate:\s*([0-9.]+)')),
]
def __init__(self, path=SCRIPT_NAME + "/hello", requests=1000, concurrency=10):
self.path = path
self.requests = requests
self.concurrency = concurrency
def args(self):
port = cherrypy.server.socket_port
assert self.concurrency > 0
assert self.requests > 0
# Don't use "localhost".
# Cf http://mail.python.org/pipermail/python-win32/2008-March/007050.html
return ("-k -n %s -c %s http://127.0.0.1:%s%s" %
(self.requests, self.concurrency, port, self.path))
def run(self):
# Parse output of ab, setting attributes on self
try:
self.output = _cpmodpy.read_process(AB_PATH or "ab", self.args())
except:
print(_cperror.format_exc())
raise
for attr, name, pattern in self.parse_patterns:
val = re.search(pattern, self.output, re.MULTILINE)
if val:
val = val.group(1)
setattr(self, attr, val)
else:
setattr(self, attr, None)
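# Illustrative usage of ABSession (assumes the "ab" executable is on the system path):
#   sess = ABSession(path=SCRIPT_NAME + "/hello", requests=1000, concurrency=10)
#   sess.run()
#   print(sess.requests_per_second)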
safe_threads = (25, 50, 100, 200, 400)
if sys.platform in ("win32",):
# For some reason, ab crashes with > 50 threads on my Win2k laptop.
safe_threads = (10, 20, 30, 40, 50)
def thread_report(path=SCRIPT_NAME + "/hello", concurrency=safe_threads):
sess = ABSession(path)
attrs, names, patterns = list(zip(*sess.parse_patterns))
avg = dict.fromkeys(attrs, 0.0)
yield ('threads',) + names
for c in concurrency:
sess.concurrency = c
sess.run()
row = [c]
for attr in attrs:
val = getattr(sess, attr)
if val is None:
print(sess.output)
row = None
break
val = float(val)
avg[attr] += float(val)
row.append(val)
if row:
yield row
# Add a row of averages.
yield ["Average"] + [str(avg[attr] / len(concurrency)) for attr in attrs]
def size_report(sizes=(10, 100, 1000, 10000, 100000, 100000000),
concurrency=50):
sess = ABSession(concurrency=concurrency)
attrs, names, patterns = list(zip(*sess.parse_patterns))
yield ('bytes',) + names
for sz in sizes:
sess.path = "%s/sizer?size=%s" % (SCRIPT_NAME, sz)
sess.run()
yield [sz] + [getattr(sess, attr) for attr in attrs]
def print_report(rows):
for row in rows:
print("")
for i, val in enumerate(row):
sys.stdout.write(str(val).rjust(10) + " | ")
print("")
def run_standard_benchmarks():
print("")
print("Client Thread Report (1000 requests, 14 byte response body, "
"%s server threads):" % cherrypy.server.thread_pool)
print_report(thread_report())
print("")
print("Client Thread Report (1000 requests, 14 bytes via staticdir, "
"%s server threads):" % cherrypy.server.thread_pool)
print_report(thread_report("%s/static/index.html" % SCRIPT_NAME))
print("")
print("Size Report (1000 requests, 50 client threads, "
"%s server threads):" % cherrypy.server.thread_pool)
print_report(size_report())
# modpython and other WSGI #
def startup_modpython(req=None):
"""Start the CherryPy app server in 'serverless' mode (for modpython/WSGI)."""
if cherrypy.engine.state == cherrypy._cpengine.STOPPED:
if req:
if "nullreq" in req.get_options():
cherrypy.engine.request_class = NullRequest
cherrypy.engine.response_class = NullResponse
ab_opt = req.get_options().get("ab", "")
if ab_opt:
global AB_PATH
AB_PATH = ab_opt
cherrypy.engine.start()
if cherrypy.engine.state == cherrypy._cpengine.STARTING:
cherrypy.engine.wait()
return 0 # apache.OK
def run_modpython(use_wsgi=False):
print("Starting mod_python...")
pyopts = []
# Pass the null and ab=path options through Apache
if "--null" in opts:
pyopts.append(("nullreq", ""))
if "--ab" in opts:
pyopts.append(("ab", opts["--ab"]))
s = _cpmodpy.ModPythonServer
if use_wsgi:
pyopts.append(("wsgi.application", "cherrypy::tree"))
pyopts.append(("wsgi.startup", "cherrypy.test.benchmark::startup_modpython"))
handler = "modpython_gateway::handler"
s = s(port=54583, opts=pyopts, apache_path=APACHE_PATH, handler=handler)
else:
pyopts.append(("cherrypy.setup", "cherrypy.test.benchmark::startup_modpython"))
s = s(port=54583, opts=pyopts, apache_path=APACHE_PATH)
try:
s.start()
run()
finally:
s.stop()
if __name__ == '__main__':
longopts = ['cpmodpy', 'modpython', 'null', 'notests',
'help', 'ab=', 'apache=']
try:
switches, args = getopt.getopt(sys.argv[1:], "", longopts)
opts = dict(switches)
except getopt.GetoptError:
print(__doc__)
sys.exit(2)
if "--help" in opts:
print(__doc__)
sys.exit(0)
if "--ab" in opts:
AB_PATH = opts['--ab']
if "--notests" in opts:
# Return without stopping the server, so that the pages
# can be tested from a standard web browser.
def run():
port = cherrypy.server.socket_port
print("You may now open http://127.0.0.1:%s%s/" %
(port, SCRIPT_NAME))
if "--null" in opts:
print("Using null Request object")
else:
def run():
end = time.time() - start
print("Started in %s seconds" % end)
if "--null" in opts:
print("\nUsing null Request object")
try:
try:
run_standard_benchmarks()
except:
print(_cperror.format_exc())
raise
finally:
cherrypy.engine.exit()
print("Starting CherryPy app server...")
class NullWriter(object):
"""Suppresses the printing of socket errors."""
def write(self, data):
pass
sys.stderr = NullWriter()
start = time.time()
if "--cpmodpy" in opts:
run_modpython()
elif "--modpython" in opts:
run_modpython(use_wsgi=True)
else:
if "--null" in opts:
cherrypy.server.request_class = NullRequest
cherrypy.server.response_class = NullResponse
cherrypy.engine.start_with_callback(run)
cherrypy.engine.block()
|
|
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.core import urlresolvers
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import RequestContext, loader
from django.template.context import _current_app_undefined
from django.template.engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, dirs=_dirs_undefined,
dictionary=_dictionary_undefined, using=None):
"""
Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
if (context_instance is _context_instance_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
content = loader.render_to_string(template_name, context, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, current_app=_current_app_undefined,
dirs=_dirs_undefined, dictionary=_dictionary_undefined,
using=None):
"""
Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
if (context_instance is _context_instance_undefined
and current_app is _current_app_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
# In Django 2.0, request should become a positional argument.
content = loader.render_to_string(
template_name, context, request=request, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
if context_instance is not _context_instance_undefined:
if current_app is not _current_app_undefined:
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
context_instance = RequestContext(request)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of render is deprecated. "
"Set the current_app attribute of request instead.",
RemovedInDjango20Warning, stacklevel=2)
request.current_app = current_app
# Directly set the private attribute to avoid triggering the
# warning in RequestContext.__init__.
context_instance._current_app = current_app
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
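# Illustrative usage inside a view (hypothetical template name and context):
#   return render(request, 'polls/detail.html', {'question': question})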
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
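# Illustrative usage (hypothetical view name, model instance and URL):
#   return redirect('order-detail', order_id=order.pk)   # reverse() by view name
#   return redirect(order)                                # uses get_absolute_url()
#   return redirect('/thanks/', permanent=True)           # plain URL, permanent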
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: As with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
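# Illustrative usage (hypothetical models and lookups):
#   author = get_object_or_404(Author, pk=author_id)
#   books = get_list_or_404(Book, author=author, published=True)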
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, six.string_types):
# Handle relative URLs
if any(to.startswith(path) for path in ('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
|
|
#!/usr/bin/env ccp4-python
import copy
import pickle
import glob
import logging
import os
import shutil
import sys
# Hack to make sure we can find the modules we need
if __name__ == "__main__":
root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])
sys.path.insert(0, os.path.join(root, "scripts"))
from ample.util import ample_util, mrbump_cmd, printTable
from mrbump.parsers import parse_arpwarp, parse_buccaneer, parse_phaser
TOP_KEEP = 3 # How many of the top shelxe/phaser results to keep for the gui
MRBUMP_RUNTIME = 172800 # allow 48 hours for each mrbump job
REBUILD_MAX_PERMITTED_RESOLUTION = 4.0
SHELXE_MAX_PERMITTED_RESOLUTION = 3.0
SHELXE_MAX_PERMITTED_RESOLUTION_CC = 3.5
# Values to determine when a job has succeeded - required at module level as these may be set by AMPLE from
# the command line
SUCCESS_PHASER_TFZ = 8.0
SUCCESS_PHASER_LLG = 120
SUCCESS_RFREE = 0.4
SUCCESS_SHELXE_CC = 25.0
SUCCESS_SHELXE_ACL = 10
# We need a null handler so that this module can be used without requiring the caller to configure logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
class ResultsSummary(object):
"""
Summarise the results for a series of MRBUMP runs
"""
def __init__(self, results=None, results_pkl=None):
"""
Parameters
----------
results : list, optional
    A list of MrBUMP result dictionaries
results_pkl : str, optional
    Path to a pickled AMPLE results dictionary
"""
self.results = []
self.pname = "archive"
self.pdir = None
self.success = False
if results_pkl and os.path.isfile(results_pkl):
with open(results_pkl) as f:
resd = pickle.load(f)
mkey = 'mrbump_results'
if mkey in resd and len(resd[mkey]):
self.results = resd[mkey]
elif results:
self.results = results
return
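# Illustrative usage (hypothetical paths):
#   summary = ResultsSummary(results_pkl="/path/to/ample_results.pkl")
#   summary.extractResults("/path/to/MRBUMP", purge=False)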
def analyseResult(self, result):
mrDir = result["MR_directory"]
# result.ensembleName = result.name[9:-6]
if result["MR_program"] == "PHASER":
if result["PHASER_pdbout"]:
phaserP = parse_phaser.PhaserPdbParser(result["PHASER_pdbout"])
result["PHASER_LLG"] = phaserP.LLG
result["PHASER_TFZ"] = phaserP.TFZ
phaserLog = os.path.join(
mrDir, "{0}_loc0_ALL_{1}_UNMOD.log".format(result["MR_program"].lower(), result['ensemble_name'])
)
if os.path.isfile(phaserLog):
phaserP = parse_phaser.PhaserLogParser(phaserLog, noLLG=True)
# result.phaserLog = phaserLog
result["PHASER_time"] = phaserP.time
result["PHASER_killed"] = phaserP.killed
buccaneerLog = os.path.join(mrDir, "build/shelxe/rebuild/buccaneer", "buccaneer.log")
bp = parse_buccaneer.BuccaneerLogParser()
if os.path.isfile(buccaneerLog):
bp.parse(buccaneerLog)
result["SXRBUCC_final_Rfree"] = bp.finalRfree
result["SXRBUCC_final_Rfact"] = bp.finalRfact
arpLog = os.path.join(mrDir, "build/shelxe/rebuild/arpwarp", "arpwarp.log")
if os.path.isfile(arpLog):
ap = parse_arpwarp.ArpwarpLogParser()
ap.parse(arpLog)
result["SXRARP_final_Rfact"] = ap.finalRfact
result["SXRARP_final_Rfree"] = ap.finalRfree
return
def createDict(self):
d = {}
# our additional keys
d['ensemble_name'] = None
d['MR_program'] = None
d['name'] = None
d['Search_directory'] = None
d['MR_directory'] = None
d['Solution_Type'] = None
d['PHASER_LLG'] = None
d['PHASER_TFZ'] = None
d['PHASER_RFZ'] = None
d['PHASER_time'] = None
d['PHASER_killed'] = None
d['PHASER_pdbout'] = None
d['PHASER_mtzout'] = None
d['PHASER_logfile'] = None
d['PHASER_version'] = None
d['PHASER_error'] = None
d['MOLREP_score'] = None
d['MOLREP_time'] = None
d['MOLREP_pdbout'] = None
d['MOLREP_logfile'] = None
d['MOLREP_version'] = None
d['REFMAC_Rfact'] = None
d['REFMAC_Rfree'] = None
d['REFMAC_pdbout'] = None
d['REFMAC_mtzout'] = None
d['REFMAC_logfile'] = None
d['REFMAC_version'] = None
d['BUCC_final_Rfact'] = None
d['BUCC_final_Rfree'] = None
d['BUCC_pdbout'] = None
d['BUCC_mtzout'] = None
d['BUCC_logfile'] = None
d['BUCC_version'] = None
d['ARP_final_Rfact'] = None
d['ARP_final_Rfree'] = None
d['ARP_pdbout'] = None
d['ARP_mtzout'] = None
d['ARP_logfile'] = None
d['ARP_version'] = None
d['SHELXE_CC'] = None
d['SHELXE_ACL'] = None
d['SHELXE_MCL'] = None
d['SHELXE_NC'] = None
d['SHELXE_wMPE'] = None
d['SHELXE_os'] = None
d['SHELXE_time'] = None
d['SHELXE_pdbout'] = None
d['SHELXE_phsout'] = None
d['SHELXE_mtzout'] = None
d['SHELXE_logfile'] = None
d['SHELXE_version'] = None
d['SXRBUCC_version'] = None
d['SXRBUCC_final_Rfact'] = None
d['SXRBUCC_final_Rfree'] = None
d['SXRBUCC_pdbout'] = None
d['SXRBUCC_mtzout'] = None
d['SXRBUCC_logfile'] = None
d['SXRARP_version'] = None
d['SXRARP_final_Rfact'] = None
d['SXRARP_final_Rfree'] = None
d['SXRARP_pdbout'] = None
d['SXRARP_mtzout'] = None
d['SXRARP_logfile'] = None
return d
def _extractPurged(self, mrbump_dir):
"""Recreate a list of the jobs that have been purged"""
purged_results = {}
self.pdir = os.path.join(mrbump_dir, self.pname)
if not os.path.isdir(self.pdir):
os.mkdir(self.pdir)
pkls = glob.glob(os.path.join(self.pdir, "*.pkl"))
if pkls:
for p in pkls:
with open(p) as f:
d = pickle.load(f)
purged_results[d['ensemble_name']] = d
return purged_results
def extractResults(self, mrbump_dir, purge=False, max_loglevel=logging.INFO):
if not mrbump_dir or not os.path.isdir(mrbump_dir):
raise RuntimeError("Cannot find mrbump_dir: {0}".format(mrbump_dir))
purged_results = {}
if purge:
purged_results = self._extractPurged(mrbump_dir)
with ample_util.disable_logging(logger, max_loglevel=max_loglevel):
self._extractResults(mrbump_dir, archived_ensembles=purged_results.keys())
if purge:
self._purgeFailed()
self.results += purged_results.values()
self.sortResults()
self.success = any([self.jobSucceeded(r) for r in self.results])
return self.results
def _extractResults(self, mrbump_dir, archived_ensembles=None):
"""
Find the results from running MRBUMP and sort them
"""
mrbump_dir = os.path.abspath(mrbump_dir)
if not os.path.isdir(mrbump_dir):
logger.warn("extractResults - is not a valid directory: {0}".format(mrbump_dir))
return []
# Get a list of the ensembles (could get this from the amopt dictionary)
# For now we just use the submission scripts and assume all have .sh or .sub extension
ext = '.sh'
if sys.platform.startswith("win"):
ext = '.bat'
ensembles = [os.path.splitext(os.path.basename(e))[0] for e in glob.glob(os.path.join(mrbump_dir, "*" + ext))]
if not len(ensembles):
# legacy - try .sub
ensembles = [os.path.splitext(os.path.basename(e))[0] for e in glob.glob(os.path.join(mrbump_dir, "*.sub"))]
if not len(ensembles):
logger.warn("Could not extract any results from directory: {0}".format(mrbump_dir))
return []
# reset any results
results = []
failed = {} # dict mapping failures to what went wrong - need to process at the end
for ensemble in ensembles:
# Skip ones that we've archived
if archived_ensembles and ensemble in archived_ensembles:
continue
# Check job directory
jobDir = os.path.join(mrbump_dir, 'search_' + ensemble + '_mrbump')
if not os.path.isdir(jobDir):
jobDir = os.path.join(mrbump_dir, 'search_' + ensemble)
if not os.path.isdir(jobDir):
# As we call this every time we monitor a job running, we don't want to print this out all the time
# logger.debug("Missing job directory: {0}".format(jobDir))
failed[ensemble] = "no_job_directory"
continue
logger.debug(" -- checking directory for results: {0}".format(jobDir))
# Check if finished
if not os.path.exists(os.path.join(jobDir, "results", "finished.txt")):
logger.debug("Found unfinished job: {0}".format(jobDir))
failed[ensemble] = "unfinished"
continue
# Check resultsTable.dat
resultsDict = os.path.join(jobDir, "results", "resultsTable.pkl")
if os.path.isfile(resultsDict):
results += self.processMrbumpPkl(resultsDict)
else:
logger.debug(" -- Could not find results files: {0}".format(resultsDict))
failed[ensemble] = "missing-results-file"
continue
# Process the failed results
if failed:
results += self._processFailed(mrbump_dir, failed)
if not len(results):
logger.warn("Could not extract any results from directory: {0}".format(mrbump_dir))
self.results = results
return
@staticmethod
def jobSucceeded(job_dict):
success = False
if (
'SHELXE_CC' in job_dict
and job_dict['SHELXE_CC']
and float(job_dict['SHELXE_CC']) >= SUCCESS_SHELXE_CC
and 'SHELXE_ACL' in job_dict
and job_dict['SHELXE_ACL']
and float(job_dict['SHELXE_ACL']) >= SUCCESS_SHELXE_ACL
):
success = True
elif (
'BUCC_final_Rfree' in job_dict
and job_dict['BUCC_final_Rfree']
and float(job_dict['BUCC_final_Rfree']) <= SUCCESS_RFREE
):
success = True
elif (
'ARP_final_Rfree' in job_dict
and job_dict['ARP_final_Rfree']
and float(job_dict['ARP_final_Rfree']) <= SUCCESS_RFREE
):
success = True
elif (
'REFMAC_Rfree' in job_dict and job_dict['REFMAC_Rfree'] and float(job_dict['REFMAC_Rfree']) <= SUCCESS_RFREE
):
success = True
elif (
'PHASER_LLG' in job_dict
and 'PHASER_TFZ' in job_dict
and job_dict['PHASER_LLG']
and job_dict['PHASER_TFZ']
and float(job_dict['PHASER_LLG']) >= SUCCESS_PHASER_LLG
and float(job_dict['PHASER_TFZ']) >= SUCCESS_PHASER_TFZ
):
success = True
return success
def processMrbumpPkl(self, resultsPkl):
"""Process dictionary
"""
with open(resultsPkl) as f:
rD = pickle.load(f)
if not rD:
return []
results = []
for name, d1 in rD.iteritems():
for mrprog, d2 in d1.iteritems():
# Check if all the entries are None - means job didn't run.
# Should probably think of a better way to spot that (Search_directory is always set)
if not any([v for k, v in d2.iteritems() if k != 'Search_directory']):
continue
# Add MR program as dictionary entry
d = copy.copy(d2)
del d['SearchModel_filename']
d['name'] = name
# name is e.g.: loc0_ALL_c1_tl100_r2_allatom_UNMOD
d['ensemble_name'] = name[9:-6]
d['MR_program'] = mrprog
# Hack for old versions
if 'JobDirectory' in d:
d['MR_directory'] = d['JobDirectory']
del d['JobDirectory']
d['Search_directory'] = os.sep.join(d['MR_directory'].split(os.sep)[:-5])
if 'final_Rfree' in d:
d['REFMAC_Rfree'] = d['final_Rfree']
d['REFMAC_Rfact'] = d['final_Rfact']
del d['final_Rfree']
del d['final_Rfact']
results.append(d)
return results
def _processFailed(self, mrbump_dir, failed):
"""Generate dictionaries for failed results
"""
results = []
for ensemble, reason in failed.iteritems():
d = self.createDict()
# name hard-coded
# d['name'] = "loc0_ALL_" + ensemble + "_UNMOD"
d['name'] = "loc0_ALL_" + ensemble + "_UNMOD"
d['ensemble_name'] = ensemble
d['Search_directory'] = os.path.join(mrbump_dir, 'search_' + ensemble + '_mrbump')
d['Solution_Type'] = reason
results.append(d)
logger.debug("Added {0} MRBUMP result failures".format(len(failed)))
return results
def _purgeFailed(self):
"""Remove the MRBUMP directories of any jobs that don't pass the keep criteria and archive their job dictionaries"""
completed = [r for r in self.results if not (job_unfinished(r))]
if completed:
to_keep = []
min_len = min(len(completed), TOP_KEEP)
for r in ResultsSummary.sortResultsStatic(completed, prioritise='SHELXE_CC')[:min_len]:
if r not in to_keep:
to_keep.append(r)
for r in ResultsSummary.sortResultsStatic(completed, prioritise='PHASER_TFZ')[:min_len]:
if r not in to_keep:
to_keep.append(r)
for r in completed:
if r not in to_keep:
pkl = os.path.join(self.pdir, "{0}.pkl".format(r['ensemble_name']))
with open(pkl, 'w') as f:
pickle.dump(r, f)
shutil.rmtree(r['Search_directory'])
def results_table(self, results):
resultsTable = []
keys = ['ensemble_name', 'MR_program', 'Solution_Type']
keys += _resultsKeys(results)
resultsTable.append(keys)
for r in results:
resultsTable.append([r[k] for k in keys])
return resultsTable
def sortResults(self, prioritise=False):
"""Wrapper function to allow calls with self"""
self.results = ResultsSummary.sortResultsStatic(self.results)
@staticmethod
def sortResultsStatic(results, prioritise=False):
"""Sort the results"""
SHELXE = False
BUCC = False
ARP = False
REFMAC = False
PHASER = False
for r in results:
if 'SHELXE_CC' in r and r['SHELXE_CC'] and float(r['SHELXE_CC']) > 0.0:
SHELXE = True
if 'BUCC_final_Rfact' in r and r['BUCC_final_Rfact'] and float(r['BUCC_final_Rfact']) < 1.0:
BUCC = True
if 'ARP_final_Rfree' in r and r['ARP_final_Rfree'] and float(r['ARP_final_Rfree']) < 1.0:
ARP = True
if 'REFMAC_Rfree' in r and r['REFMAC_Rfree'] and float(r['REFMAC_Rfree']) < 1.0:
REFMAC = True
if 'PHASER_TFZ' in r and r['PHASER_TFZ'] and float(r['PHASER_TFZ']) > 0.0:
PHASER = True
reverse = False
sortf = False
if SHELXE and prioritise != "PHASER_TFZ":
reverse = True
sortf = lambda x: float(0) if x['SHELXE_CC'] is None else float(x['SHELXE_CC'])
elif BUCC and not prioritise == "PHASER_TFZ":
sortf = lambda x: float('inf') if x['BUCC_final_Rfact'] is None else float(x['BUCC_final_Rfact'])
elif ARP and not prioritise == "PHASER_TFZ":
sortf = lambda x: float('inf') if x['ARP_final_Rfree'] is None else float(x['ARP_final_Rfree'])
elif REFMAC and not prioritise == "PHASER_TFZ":
sortf = lambda x: float('inf') if x['REFMAC_Rfree'] is None else float(x['REFMAC_Rfree'])
elif PHASER:
reverse = True
sortf = lambda x: float(0) if x['PHASER_TFZ'] is None else float(x['PHASER_TFZ'])
if sortf:
results.sort(key=sortf, reverse=reverse)
return results
def summariseResults(self, mrbump_dir, max_loglevel=logging.INFO):
"""Return a string summarising the results"""
results = self.extractResults(mrbump_dir, max_loglevel=max_loglevel)
if len(results):
return self.summaryString()
else:
return "\n!!! No results found in directory: {0}\n".format(mrbump_dir)
def summaryString(self):
"""Return a string suitable for printing the sorted results"""
resultsTable = self.results_table(self.results)
# Format the results
table = printTable.Table()
summary = table.pprint_table(resultsTable)
r = "\n\nOverall Summary:\n\n"
r += summary
        # Hack - need to think of a better way to do this when there are no valid results
top = self.results[0]
k = None
for p in ['Search_directory', 'MR_directory']:
if p in top.keys():
k = p
assert k, "Missing search directory key in results dictionary"
if top[k]:
r += '\nBest Molecular Replacement results so far are in:\n\n'
r += top[k]
r += '\n\n'
return r
def topFiles(self, num_results=3):
"""Return a list of dictionaries listing the top num_results PDB and MTZ files
Parameters
----------
num_results : int
How many of the top results to return
Returns
-------
topf : list
A list of dictionaries, one per result, with xyz, mtz and info keys
"""
topf = []
# list of PDB, MTZ, Explanation of file type - ordered by their desirability
poss = [
('SXRARP', 'SXRARP_pdbout', 'SXRARP_mtzout', 'ARPWARP rebuild of SHELXE trace of MR result'),
('SXRBUCC', 'SXRBUCC_pdbout', 'SXRBUCC_mtzout', 'BUCCANEER rebuild of SHELXE trace of MR result'),
('SHELXE', 'SHELXE_pdbout', 'SHELXE_mtzout', 'SHELXE trace of MR result'),
('ARP', 'ARP_pdbout', 'ARP_mtzout', 'ARPWARP rebuild of MR result'),
('BUCC', 'BUCC_pdbout', 'BUCC_mtzout', 'BUCCANEER rebuild of MR result'),
('REFMAC,', 'REFMAC_pdbout', 'REFMAC_mtzout', 'REFMAC-refined MR result'),
]
for result in self.results[0 : min(num_results, len(self.results) + 1)]:
for stype, pdb_key, mtz_key, source in poss:
if pdb_key in result and result[pdb_key] and mtz_key in result and result[mtz_key]:
# Don't check paths for now as it screws up unittests as files don't actually exist
# if not (os.path.isfile(result[pdb_key]) and os.path.isfile(result[mtz_key])): continue
topf.append(
{
'name': result['ensemble_name'],
'type': stype,
'info': source,
'pdb': result[pdb_key],
'mtz': result[mtz_key],
}
)
break # Stop as soon as we find one
if len(topf):
return topf
#
# Module functions
#
def _resultsKeys(results):
keys = []
# Build up list of keys we want to print based on what we find in the results
if any([True for r in results if r['PHASER_LLG']]):
keys += ['PHASER_LLG']
if any([True for r in results if r['PHASER_TFZ']]):
keys += ['PHASER_TFZ']
if any([True for r in results if r['REFMAC_Rfree'] and r['REFMAC_Rfree'] < 1.0]):
keys += ['REFMAC_Rfact', 'REFMAC_Rfree']
if any([True for r in results if r['BUCC_final_Rfact'] and r['BUCC_final_Rfact'] < 1.0]):
keys += ['BUCC_final_Rfact', 'BUCC_final_Rfree']
if any([True for r in results if r['ARP_final_Rfact'] and r['ARP_final_Rfact'] < 1.0]):
keys += ['ARP_final_Rfact', 'ARP_final_Rfree']
if any([True for r in results if r['SHELXE_CC']]):
keys += ['SHELXE_CC', 'SHELXE_ACL']
if any([True for r in results if r['SXRBUCC_final_Rfact']]):
keys += ['SXRBUCC_final_Rfact', 'SXRBUCC_final_Rfree']
if any([True for r in results if r['SXRARP_final_Rfact']]):
keys += ['SXRARP_final_Rfact', 'SXRARP_final_Rfree']
return keys
def checkSuccess(script_path):
"""Check if a job ran successfully.
Parameters
----------
script_path : str
Path to the MrBUMP script
Returns
-------
bool
True if success
Notes
-----
    Success is determined by jobSucceeded() on the best (sorted) result, e.g. a SHELXE CC score >= SUCCESS_SHELXE_CC
"""
directory, script = os.path.split(script_path)
scriptname = os.path.splitext(script)[0]
rfile = os.path.join(directory, 'search_' + scriptname + '_mrbump', 'results', 'resultsTable.pkl')
if os.path.isfile(rfile):
results = ResultsSummary().processMrbumpPkl(rfile)
best = ResultsSummary.sortResultsStatic(results)[0]
return ResultsSummary.jobSucceeded(best)
else:
return False
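# Example (sketch): for a submission script /path/to/MRBUMP/ensemble_1.sh this
# looks for /path/to/MRBUMP/search_ensemble_1_mrbump/results/resultsTable.pkl
# and returns True only if the best (sorted) result passes jobSucceeded().
#     checkSuccess("/path/to/MRBUMP/ensemble_1.sh")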
def finalSummary(amoptd):
"""Print a final summary of the job"""
mrbump_data = amoptd['mrbump_results']
if not mrbump_data:
return "Could not find any MRBUMP results in directory: {0}!".format(amoptd['mrbump_dir'])
if 'ensembles_data' in amoptd and not (
amoptd['ideal_helices'] or amoptd['homologs'] or amoptd['single_model_mode']
):
results = []
# Merge dictionaries together
ensembles_data = amoptd['ensembles_data']
for mrb in mrbump_data:
d = copy.copy(mrb)
for ed in ensembles_data:
if ed['name'] == d['ensemble_name']:
d.update(ed)
results.append(d)
keys = ['ensemble_name', 'Solution_Type', 'MR_program']
keys += _resultsKeys(results)
keys += ['subcluster_num_models', 'num_residues']
else:
results = mrbump_data
keys = ['name', 'Solution_Type', 'MR_program']
keys += _resultsKeys(results)
resultsTable = []
resultsTable.append(keys)
for result in results:
resultLine = []
for k in keys:
resultLine.append(result[k])
resultsTable.append(resultLine)
# Format the results
table = printTable.Table()
summary = table.pprint_table(resultsTable)
r = "\n\nOverall Summary:\n\n"
r += summary
if len(results) and "MR_directory" in results[0]:
r += '\nBest Molecular Replacement results so far are in:\n\n'
r += str(results[0]["MR_directory"])
r += '\n\n'
return r
def job_unfinished(job_dict):
    if 'Solution_Type' not in job_dict:
        return True
    return job_dict['Solution_Type'] in ("unfinished", "no_job_directory")
def purge_MRBUMP(amoptd):
"""Remove as much as possible from a MRBUMP directory whilst keeping valid results"""
mkey = 'mrbump_dir'
if mkey not in amoptd or amoptd[mkey] is None:
return
mrbump_dir = amoptd[mkey]
suffixes = ['.pdb', '.mtz', '.log', '.sh', '.mrbump']
if os.path.isdir(mrbump_dir):
for f in os.listdir(mrbump_dir):
_, suffix = os.path.splitext(f)
if suffix in suffixes:
os.remove(os.path.join(mrbump_dir, f))
return
def set_success_criteria(amoptd):
"""Set the module-level success criteria from an AMPLE job dictionary"""
for criteria in ['SHELXE_CC', 'SHELXE_ACL']:
amopt_prefix = 'early_terminate_'
module_prefix = 'SUCCESS_'
amopt_key = amopt_prefix + criteria
if amopt_key in amoptd and amoptd[amopt_key] is not None:
module_criteria = module_prefix + criteria
logger.debug('Updating MRBUMP success criteria \'%s\' to: %s', module_criteria, amoptd[amopt_key])
globals()[module_criteria] = amoptd[amopt_key]
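# Example (sketch): an AMPLE dictionary carrying early-termination criteria
# updates the module-level thresholds used by jobSucceeded(), e.g.
#     set_success_criteria({'early_terminate_SHELXE_CC': 30.0,
#                           'early_terminate_SHELXE_ACL': 12})
# would set SUCCESS_SHELXE_CC = 30.0 and SUCCESS_SHELXE_ACL = 12.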
def unfinished_scripts(amoptd):
"""See if there are any unfinished mrbump jobs in a mrbump directory and return a list of the scripts"""
    if 'mrbump_dir' not in amoptd or amoptd['mrbump_dir'] is None or not os.path.isdir(amoptd['mrbump_dir']):
return []
amoptd['mrbump_results'] = ResultsSummary().extractResults(amoptd['mrbump_dir'])
if not len(amoptd['mrbump_results']):
return []
scripts = []
for r in [r for r in amoptd['mrbump_results'] if job_unfinished(r)]:
scripts.append(os.path.join(amoptd['mrbump_dir'], r['ensemble_name'] + ample_util.SCRIPT_EXT))
return scripts
def write_mrbump_files(ensemble_pdbs, amoptd, job_time=MRBUMP_RUNTIME, ensemble_options=None, directory=None):
"""Write the MRBUMP job files for all the ensembles.
Arguments:
ensemble_pdbs -- list of the ensembles, each a single pdb file.
amoptd -- dictionary with job options.
job_time -- maximum permissible runtime (mainly used for batch queueing systems).
ensemble_options -- dictionary with ensemble-specific keywords e.g. ensemble_options[ensemble_name] = {'ncopies' : ncopies}
directory -- working directory to write files to.
"""
if not directory:
directory = os.getcwd()
job_scripts = []
keyword_options = {}
for ensemble_pdb in ensemble_pdbs:
name = os.path.splitext(os.path.basename(ensemble_pdb))[0] # Get name from pdb path
# Get any options specific to this ensemble
if ensemble_options and name in ensemble_options:
keyword_options = ensemble_options[name]
# Generate dictionary with all the options for this job and write to keyword file
keyword_dict = mrbump_cmd.keyword_dict(ensemble_pdb, name, amoptd, keyword_options)
keyword_file = os.path.join(directory, name + '.mrbump')
keyword_str = mrbump_cmd.mrbump_keyword_file(keyword_dict)
with open(keyword_file, 'w') as f:
f.write(keyword_str)
script = write_jobscript(name, keyword_file, amoptd, directory=directory, job_time=job_time)
job_scripts.append(script)
if not len(job_scripts):
raise RuntimeError("No job scripts created!")
return job_scripts
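# Example call (sketch; the paths, names and the amoptd dictionary are
# hypothetical and would normally come from the AMPLE run):
#     ensemble_pdbs = ["ensembles/c1_t100_r2_allatom.pdb"]
#     ensemble_options = {"c1_t100_r2_allatom": {"ncopies": 2}}
#     scripts = write_mrbump_files(ensemble_pdbs, amoptd,
#                                  ensemble_options=ensemble_options,
#                                  directory="mrbump_scripts")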
def write_jobscript(name, keyword_file, amoptd, directory=None, job_time=86400, extra_options={}):
"""
Create the script to run MrBump for this PDB.
"""
if not directory:
directory = os.getcwd()
# Next the script to run mrbump
script_path = os.path.abspath(os.path.join(directory, name + ample_util.SCRIPT_EXT))
with open(script_path, "w") as job_script:
# Header
if not sys.platform.startswith("win"):
script_header = '#!/bin/bash\n'
script_header += '[[ ! -d $CCP4_SCR ]] && mkdir $CCP4_SCR\n\n'
job_script.write(script_header)
# Get the mrbump command-line
jobcmd = mrbump_cmd.mrbump_cmd(name, amoptd['mtz'], amoptd['mr_sequence'], keyword_file)
job_script.write(jobcmd)
# Make executable
os.chmod(script_path, 0o777)
logger.debug("Wrote MRBUMP script: {0}".format(script_path))
return script_path
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if len(sys.argv) >= 2:
mrbump_dir = os.path.join(os.getcwd(), sys.argv[1])
else:
mrbump_dir = os.getcwd()
r = ResultsSummary()
logging.info(r.summariseResults(mrbump_dir, max_loglevel=logging.DEBUG))
|
|
# EXPERIMENTAL: all may be removed soon
from gym.benchmarks import scoring
from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view # imports used elsewhere
register_benchmark(
id='Atari200M',
scorer=scoring.TotalReward(),
name='Atari200M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
},
])
register_benchmark(
id='Atari40M',
scorer=scoring.TotalReward(),
name='Atari40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
}
])
register_benchmark(
id='AtariExploration40M',
scorer=scoring.TotalReward(),
name='AtariExploration40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'FreewayNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.1,
'reward_ceiling': 31.0,
},
{
'env_id': 'GravitarNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 245.5,
'reward_ceiling': 1000.0,
},
{
'env_id': 'MontezumaRevengeNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 25.0,
'reward_ceiling': 10000.0,
},
{
'env_id': 'PitfallNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -348.8,
'reward_ceiling': 1000.0,
},
{
'env_id': 'PrivateEyeNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 662.8,
'reward_ceiling': 100.0,
},
{
'env_id': 'SolarisNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 2047.2,
'reward_ceiling': 5000.0,
},
{
'env_id': 'VentureNoFrameskip-v3',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 18.0,
'reward_ceiling': 100.0,
}
])
register_benchmark(
id='ClassicControl2-v0',
name='ClassicControl2',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v0',
'trials': 1,
'max_timesteps': 2000,
},
{'env_id': 'Pendulum-v0',
'trials': 1,
'max_timesteps': 1000,
},
])
register_benchmark(
id='ClassicControl-v0',
name='ClassicControl',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': 0.0,
'reward_ceiling': 500.0,
},
{'env_id': 'Acrobot-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MountainCar-v0',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -200.0,
'reward_ceiling': -100.0,
},
{'env_id': 'Pendulum-v0',
'trials': 3,
'max_timesteps': 200000,
'reward_floor': -1400.0,
'reward_ceiling': 0.0,
},
])
### Autogenerated by tinkerbell.benchmark.convert_benchmark.py
register_benchmark(
id='Mujoco10M-v0',
name='Mujoco10M',
view_group="Control",
description='Mujoco benchmark with 10M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'Ant-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Hopper-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Humanoid-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'HumanoidStandup-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Walker2d-v1',
'trials': 1,
'max_timesteps': 1000000,
}
])
register_benchmark(
id='Mujoco1M-v0',
name='Mujoco1M',
view_group="Control",
description='Mujoco benchmark with 1M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'HalfCheetah-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -280.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'Hopper-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 16.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'InvertedDoublePendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 53.0,
'reward_ceiling': 10000.0,
},
{'env_id': 'InvertedPendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 5.6,
'reward_ceiling': 1000.0,
},
{'env_id': 'Reacher-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -43.0,
'reward_ceiling': -0.5,
},
{'env_id': 'Swimmer-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 0.23,
'reward_ceiling': 500.0,
},
{'env_id': 'Walker2d-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 1.6,
'reward_ceiling': 5500.0,
}
])
register_benchmark(
id='MinecraftEasy-v0',
name='MinecraftEasy',
view_group="Minecraft",
description='Minecraft easy benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftBasic-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -2200.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftDefaultFlat1-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MinecraftTrickyArena1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -1000.0,
'reward_ceiling': 2800.0,
},
{'env_id': 'MinecraftEating1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -300.0,
'reward_ceiling': 300.0,
},
])
register_benchmark(
id='MinecraftMedium-v0',
name='MinecraftMedium',
view_group="Minecraft",
description='Minecraft medium benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftCliffWalking1-v0',
'trials': 2,
'max_timesteps': 400000,
'reward_floor': -100.0,
'reward_ceiling': 100.0,
},
{'env_id': 'MinecraftVertical-v0',
'trials': 2,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 8040.0,
},
{'env_id': 'MinecraftMaze1-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftMaze2-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftHard-v0',
name='MinecraftHard',
view_group="Minecraft",
description='Minecraft hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftObstacles-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 2080.0,
},
{'env_id': 'MinecraftSimpleRoomMaze-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 4160.0,
},
{'env_id': 'MinecraftAttic-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1040.0,
},
{'env_id': 'MinecraftComplexityUsage-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftVeryHard-v0',
name='MinecraftVeryHard',
view_group="Minecraft",
description='Minecraft very hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftMedium-v0',
'trials': 2,
'max_timesteps': 1800000,
'reward_floor': -10000.0,
'reward_ceiling': 16280.0,
},
{'env_id': 'MinecraftHard-v0',
'trials': 2,
'max_timesteps': 2400000,
'reward_floor': -10000.0,
'reward_ceiling': 32640.0,
},
])
register_benchmark(
id='MinecraftImpossible-v0',
name='MinecraftImpossible',
view_group="Minecraft",
description='Minecraft impossible benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftDefaultWorld1-v0',
'trials': 2,
'max_timesteps': 6000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
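# Example lookup of a registered benchmark (a sketch using the benchmark_spec
# helper imported above; exact attribute names depend on the gym version):
#     spec = benchmark_spec('Atari40M')
#     for task in spec.tasks:
#         print(task.env_id, task.max_timesteps)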
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
"""This tests was madded to always check that the results expected by the
original FATS documentation are the same of feets.
"""
# =============================================================================
# IMPORTS
# =============================================================================
from feets import extractors
import numpy as np
import pytest
# =============================================================================
# CASES
# =============================================================================
def test_FATS_doc_Amplitude():
ext = extractors.Amplitude()
value = ext.fit(np.arange(0, 1001))["Amplitude"]
assert value == 475
@pytest.mark.xfail(reason="FATS say must be 0.2, but actual is -0.60")
def test_FATS_doc_AndersonDarling():
random = np.random.RandomState(42)
ext = extractors.AndersonDarling()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
values[idx] = ext.fit(mags)["AndersonDarling"]
np.testing.assert_allclose(values.mean(), 0.25)
def test_FATS_doc_Beyond1Std():
random = np.random.RandomState(42)
ext = extractors.Beyond1Std()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
errors = random.normal(scale=0.001, size=1000)
values[idx] = ext.fit(mags, errors)["Beyond1Std"]
np.testing.assert_allclose(values.mean(), 0.32972600000000002)
def test_FATS_doc_Con():
random = np.random.RandomState(42)
ext = extractors.Con()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
values[idx] = ext.fit(mags, consecutiveStar=1)["Con"]
np.testing.assert_allclose(values.mean(), 0.045557)
def test_FATS_doc_MeanVariance():
random = np.random.RandomState(42)
ext = extractors.MeanVariance()
values = np.empty(1000)
for idx in range(values.size):
mags = random.uniform(size=1000)
values[idx] = ext.fit(magnitude=mags)["Meanvariance"]
np.testing.assert_allclose(values.mean(), 0.57664232208148747)
def test_FATS_doc_MedianAbsDev():
random = np.random.RandomState(42)
ext = extractors.MedianAbsDev()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
values[idx] = ext.fit(magnitude=mags)["MedianAbsDev"]
np.testing.assert_allclose(values.mean(), 0.67490807679242459)
def test_FATS_doc_RCS():
random = np.random.RandomState(42)
ext = extractors.RCS()
values = np.empty(1000)
for idx in range(values.size):
mags = random.uniform(size=1000)
values[idx] = ext.fit(magnitude=mags)["Rcs"]
np.testing.assert_allclose(values.mean(), 0.03902862976795655)
def test_FATS_doc_Skew():
random = np.random.RandomState(42)
ext = extractors.Skew()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
values[idx] = ext.fit(magnitude=mags)["Skew"]
np.testing.assert_allclose(values.mean(), -0.0017170680368871292)
def test_FATS_doc_SmallKurtosis():
random = np.random.RandomState(42)
ext = extractors.SmallKurtosis()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
values[idx] = ext.fit(magnitude=mags)["SmallKurtosis"]
np.testing.assert_allclose(values.mean(), 0.00040502517673364258)
def test_FATS_doc_Std():
random = np.random.RandomState(42)
ext = extractors.Std()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
values[idx] = ext.fit(magnitude=mags)["Std"]
np.testing.assert_allclose(values.mean(), 0.9994202277548033)
@pytest.mark.xfail(reason="FATS say must be 0, but actual is -0.41")
def test_FATS_doc_StetsonJ():
random = np.random.RandomState(42)
ext = extractors.StetsonJ()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
mags2 = mags * random.uniform(0, 1.5, mags.size)
errors = random.normal(scale=0.001, size=1000)
errors2 = random.normal(scale=0.001, size=1000)
values[idx] = ext.fit(
aligned_magnitude=mags,
aligned_magnitude2=mags2,
aligned_error=errors,
aligned_error2=errors2,
)["StetsonJ"]
np.testing.assert_allclose(values.mean(), 0)
@pytest.mark.xfail(reason="FATS say must be 2/pi, but actual is -0.20")
def test_FATS_doc_StetsonK():
random = np.random.RandomState(42)
ext = extractors.StetsonK()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
errors = random.normal(scale=0.001, size=1000)
values[idx] = ext.fit(magnitude=mags, error=errors)["StetsonK"]
np.testing.assert_allclose(values.mean(), 0.798)
def test_FATS_doc_StetsonL():
random = np.random.RandomState(42)
ext = extractors.StetsonL()
values = np.empty(1000)
for idx in range(values.size):
mags = random.normal(size=1000)
mags2 = mags * random.uniform(0, 1.5, mags.size)
errors = random.normal(scale=0.001, size=1000)
errors2 = random.normal(scale=0.001, size=1000)
values[idx] = ext.fit(
aligned_magnitude=mags,
aligned_magnitude2=mags2,
aligned_error=errors,
aligned_error2=errors2,
)["StetsonL"]
np.testing.assert_allclose(values.mean(), -0.0470713296883)
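# The tests above all repeat one pattern: run an extractor over 1000 random
# light curves and compare the mean feature value against the FATS docs. A
# possible helper to factor that out (a sketch only; none of the tests above
# use it) could look like this:
def _mean_feature_value(extractor, feature, make_fit_kwargs,
                        iterations=1000, seed=42):
    # make_fit_kwargs is a callable that builds the fit() keyword arguments
    # from a numpy RandomState, e.g. lambda r: {"magnitude": r.normal(size=1000)}
    random = np.random.RandomState(seed)
    values = np.empty(iterations)
    for idx in range(iterations):
        values[idx] = extractor.fit(**make_fit_kwargs(random))[feature]
    return values.mean()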
|
|
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the ssh module"""
import os
import tempfile
import unittest
import shutil
import testutils
import mocks
from ganeti import constants
from ganeti import utils
from ganeti import ssh
from ganeti import errors
class TestKnownHosts(testutils.GanetiTestCase):
"""Test case for function writing the known_hosts file"""
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpfile = self._CreateTempFile()
def test(self):
cfg = mocks.FakeConfig()
ssh.WriteKnownHostsFile(cfg, self.tmpfile)
self.assertFileContent(self.tmpfile,
"%s ssh-rsa %s\n%s ssh-dss %s\n" %
(cfg.GetClusterName(), mocks.FAKE_CLUSTER_KEY,
cfg.GetClusterName(), mocks.FAKE_CLUSTER_KEY))
class TestGetUserFiles(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
@staticmethod
def _GetNoHomedir(_):
return None
def _GetTempHomedir(self, _):
return self.tmpdir
def testNonExistantUser(self):
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example",
kind=kind, _homedir_fn=self._GetNoHomedir)
def testUnknownKind(self):
kind = "something-else"
assert kind not in constants.SSHK_ALL
self.assertRaises(errors.ProgrammerError, ssh.GetUserFiles, "example4645",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testNoSshDirectory(self):
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example29694",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testSshIsFile(self):
utils.WriteFile(os.path.join(self.tmpdir, ".ssh"), data="")
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example26237",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
def testMakeSshDirectory(self):
sshdir = os.path.join(self.tmpdir, ".ssh")
self.assertEqual(os.listdir(self.tmpdir), [])
for kind in constants.SSHK_ALL:
ssh.GetUserFiles("example20745", mkdir=True, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.stat(sshdir).st_mode & 0777, 0700)
def testFilenames(self):
sshdir = os.path.join(self.tmpdir, ".ssh")
os.mkdir(sshdir)
for kind in constants.SSHK_ALL:
result = ssh.GetUserFiles("example15103", mkdir=False, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(result, [
os.path.join(self.tmpdir, ".ssh", "id_%s" % kind),
os.path.join(self.tmpdir, ".ssh", "id_%s.pub" % kind),
os.path.join(self.tmpdir, ".ssh", "authorized_keys"),
])
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.listdir(sshdir), [])
def testNoDirCheck(self):
self.assertEqual(os.listdir(self.tmpdir), [])
for kind in constants.SSHK_ALL:
ssh.GetUserFiles("example14528", mkdir=False, dircheck=False, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testGetAllUserFiles(self):
result = ssh.GetAllUserFiles("example7475", mkdir=False, dircheck=False,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(result,
(os.path.join(self.tmpdir, ".ssh", "authorized_keys"), {
constants.SSHK_RSA:
(os.path.join(self.tmpdir, ".ssh", "id_rsa"),
os.path.join(self.tmpdir, ".ssh", "id_rsa.pub")),
constants.SSHK_DSA:
(os.path.join(self.tmpdir, ".ssh", "id_dsa"),
os.path.join(self.tmpdir, ".ssh", "id_dsa.pub")),
constants.SSHK_ECDSA:
(os.path.join(self.tmpdir, ".ssh", "id_ecdsa"),
os.path.join(self.tmpdir, ".ssh", "id_ecdsa.pub")),
}))
self.assertEqual(os.listdir(self.tmpdir), [])
def testGetAllUserFilesNoDirectoryNoMkdir(self):
self.assertRaises(errors.OpExecError, ssh.GetAllUserFiles,
"example17270", mkdir=False, dircheck=True,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
class TestSshKeys(testutils.GanetiTestCase):
"""Test case for the AddAuthorizedKey function"""
KEY_A = "ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a"
KEY_B = ('command="/usr/bin/fooserver -t --verbose",from="198.51.100.4" '
"ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b")
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpname = self._CreateTempFile()
handle = open(self.tmpname, "w")
try:
handle.write("%s\n" % TestSshKeys.KEY_A)
handle.write("%s\n" % TestSshKeys.KEY_B)
finally:
handle.close()
def testHasAuthorizedKey(self):
self.assertTrue(ssh.HasAuthorizedKey(self.tmpname, self.KEY_A))
self.assertFalse(ssh.HasAuthorizedKey(
self.tmpname, "I am the key of the pink bunny!"))
def testAddingNewKey(self):
ssh.AddAuthorizedKey(self.tmpname,
"ssh-dss AAAAB3NzaC1kc3MAAACB root@test")
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n"
"ssh-dss AAAAB3NzaC1kc3MAAACB root@test\n")
def testAddingDuplicateKeys(self):
ssh.AddAuthorizedKey(self.tmpname,
"ssh-dss AAAAB3NzaC1kc3MAAACB root@test")
ssh.AddAuthorizedKeys(self.tmpname,
["ssh-dss AAAAB3NzaC1kc3MAAACB root@test",
"ssh-dss AAAAB3NzaC1kc3MAAACB root@test"])
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n"
"ssh-dss AAAAB3NzaC1kc3MAAACB root@test\n")
def testAddingSeveralKeysAtOnce(self):
ssh.AddAuthorizedKeys(self.tmpname, ["aaa", "bbb", "ccc"])
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n"
"aaa\nbbb\nccc\n")
ssh.AddAuthorizedKeys(self.tmpname, ["bbb", "ddd", "eee"])
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n"
"aaa\nbbb\nccc\nddd\neee\n")
def testAddingAlmostButNotCompletelyTheSameKey(self):
ssh.AddAuthorizedKey(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@test")
# Only significant fields are compared, therefore the key won't be
# updated/added
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n")
def testAddingExistingKeyWithSomeMoreSpaces(self):
ssh.AddAuthorizedKey(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a")
ssh.AddAuthorizedKey(self.tmpname,
"ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22")
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n"
"ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22\n")
def testRemovingExistingKeyWithSomeMoreSpaces(self):
ssh.RemoveAuthorizedKey(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a")
self.assertFileContent(self.tmpname,
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n")
def testRemovingNonExistingKey(self):
ssh.RemoveAuthorizedKey(self.tmpname,
"ssh-dss AAAAB3Nsdfj230xxjxJjsjwjsjdjU root@test")
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n")
def testAddingNewKeys(self):
ssh.AddAuthorizedKeys(self.tmpname,
["ssh-dss AAAAB3NzaC1kc3MAAACB root@test"])
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n"
"ssh-dss AAAAB3NzaC1kc3MAAACB root@test\n")
ssh.AddAuthorizedKeys(self.tmpname,
["ssh-dss AAAAB3asdfasdfaYTUCB laracroft@test",
"ssh-dss AasdfliuobaosfMAAACB frodo@test"])
self.assertFileContent(self.tmpname,
"ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
'command="/usr/bin/fooserver -t --verbose",from="198.51.100.4"'
" ssh-dss AAAAB3NzaC1w520smc01ms0jfJs22 root@key-b\n"
"ssh-dss AAAAB3NzaC1kc3MAAACB root@test\n"
"ssh-dss AAAAB3asdfasdfaYTUCB laracroft@test\n"
"ssh-dss AasdfliuobaosfMAAACB frodo@test\n")
def testOtherKeyTypes(self):
key_rsa = "ssh-rsa AAAAimnottypingallofthathere0jfJs22 test@test"
key_ed25519 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOlcZ6cpQTGow0LZECRHWn9"\
"7Yvn16J5un501T/RcbfuF fast@secure"
key_ecdsa = "ecdsa-sha2-nistp256 AAAAE2VjZHNtoolongk/TNhVbEg= secure@secure"
def _ToFileContent(keys):
return '\n'.join(keys) + '\n'
ssh.AddAuthorizedKeys(self.tmpname, [key_rsa, key_ed25519, key_ecdsa])
self.assertFileContent(self.tmpname,
_ToFileContent([self.KEY_A, self.KEY_B, key_rsa,
key_ed25519, key_ecdsa]))
ssh.RemoveAuthorizedKey(self.tmpname, key_ed25519)
self.assertFileContent(self.tmpname,
_ToFileContent([self.KEY_A, self.KEY_B, key_rsa,
key_ecdsa]))
ssh.RemoveAuthorizedKey(self.tmpname, key_rsa)
ssh.RemoveAuthorizedKey(self.tmpname, key_ecdsa)
self.assertFileContent(self.tmpname,
_ToFileContent([self.KEY_A, self.KEY_B]))
class TestPublicSshKeys(testutils.GanetiTestCase):
"""Test case for the handling of the list of public ssh keys."""
KEY_A = "ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a"
KEY_B = "ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b"
UUID_1 = "123-456"
UUID_2 = "789-ABC"
def setUp(self):
testutils.GanetiTestCase.setUp(self)
def testAddingAndRemovingPubKey(self):
pub_key_file = self._CreateTempFile()
ssh.AddPublicKey(self.UUID_1, self.KEY_A, key_file=pub_key_file)
ssh.AddPublicKey(self.UUID_2, self.KEY_B, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"789-ABC ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n")
ssh.RemovePublicKey(self.UUID_2, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n")
def testAddingExistingPubKey(self):
expected_file_content = \
"123-456 ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n" + \
"789-ABC ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n"
pub_key_file = self._CreateTempFile()
ssh.AddPublicKey(self.UUID_1, self.KEY_A, key_file=pub_key_file)
ssh.AddPublicKey(self.UUID_2, self.KEY_B, key_file=pub_key_file)
self.assertFileContent(pub_key_file, expected_file_content)
ssh.AddPublicKey(self.UUID_1, self.KEY_A, key_file=pub_key_file)
self.assertFileContent(pub_key_file, expected_file_content)
ssh.AddPublicKey(self.UUID_1, self.KEY_B, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"789-ABC ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n"
"123-456 ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n")
def testRemoveNonexistingKey(self):
pub_key_file = self._CreateTempFile()
ssh.AddPublicKey(self.UUID_1, self.KEY_B, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n")
ssh.RemovePublicKey(self.UUID_2, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n")
def testRemoveAllExistingKeys(self):
pub_key_file = self._CreateTempFile()
ssh.AddPublicKey(self.UUID_1, self.KEY_A, key_file=pub_key_file)
ssh.AddPublicKey(self.UUID_1, self.KEY_B, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"123-456 ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n")
ssh.RemovePublicKey(self.UUID_1, key_file=pub_key_file)
self.assertFileContent(pub_key_file, "")
def testRemoveKeyFromEmptyFile(self):
pub_key_file = self._CreateTempFile()
ssh.RemovePublicKey(self.UUID_2, key_file=pub_key_file)
self.assertFileContent(pub_key_file, "")
def testRetrieveKeys(self):
pub_key_file = self._CreateTempFile()
ssh.AddPublicKey(self.UUID_1, self.KEY_A, key_file=pub_key_file)
ssh.AddPublicKey(self.UUID_2, self.KEY_B, key_file=pub_key_file)
result = ssh.QueryPubKeyFile(self.UUID_1, key_file=pub_key_file)
self.assertEquals([self.KEY_A], result[self.UUID_1])
target_uuids = [self.UUID_1, self.UUID_2, "non-existing-UUID"]
result = ssh.QueryPubKeyFile(target_uuids, key_file=pub_key_file)
self.assertEquals([self.KEY_A], result[self.UUID_1])
self.assertEquals([self.KEY_B], result[self.UUID_2])
self.assertEquals(2, len(result))
# Query all keys
target_uuids = None
result = ssh.QueryPubKeyFile(target_uuids, key_file=pub_key_file)
self.assertEquals([self.KEY_A], result[self.UUID_1])
self.assertEquals([self.KEY_B], result[self.UUID_2])
def testReplaceNameByUuid(self):
pub_key_file = self._CreateTempFile()
name = "my.precious.node"
ssh.AddPublicKey(name, self.KEY_A, key_file=pub_key_file)
ssh.AddPublicKey(self.UUID_2, self.KEY_A, key_file=pub_key_file)
ssh.AddPublicKey(name, self.KEY_B, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"my.precious.node ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"789-ABC ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"my.precious.node ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n")
ssh.ReplaceNameByUuid(self.UUID_1, name, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"789-ABC ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"123-456 ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n")
def testParseEmptyLines(self):
pub_key_file = self._CreateTempFile()
ssh.AddPublicKey(self.UUID_1, self.KEY_A, key_file=pub_key_file)
# Add an empty line
fd = open(pub_key_file, 'a')
fd.write("\n")
fd.close()
ssh.AddPublicKey(self.UUID_2, self.KEY_B, key_file=pub_key_file)
# Add a whitespace line
fd = open(pub_key_file, 'a')
fd.write(" \n")
fd.close()
result = ssh.QueryPubKeyFile(self.UUID_1, key_file=pub_key_file)
self.assertEquals([self.KEY_A], result[self.UUID_1])
def testClearPubKeyFile(self):
pub_key_file = self._CreateTempFile()
ssh.AddPublicKey(self.UUID_2, self.KEY_A, key_file=pub_key_file)
ssh.ClearPubKeyFile(key_file=pub_key_file)
self.assertFileContent(pub_key_file, "")
def testOverridePubKeyFile(self):
pub_key_file = self._CreateTempFile()
key_map = {self.UUID_1: [self.KEY_A, self.KEY_B],
self.UUID_2: [self.KEY_A]}
ssh.OverridePubKeyFile(key_map, key_file=pub_key_file)
self.assertFileContent(pub_key_file,
"123-456 ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n"
"123-456 ssh-dss BAasjkakfa234SFSFDA345462AAAB root@key-b\n"
"789-ABC ssh-dss AAAAB3NzaC1w5256closdj32mZaQU root@key-a\n")
class TestGetUserFiles(testutils.GanetiTestCase):
_PRIV_KEY = "my private key"
_PUB_KEY = "my public key"
_AUTH_KEYS = "a\nb\nc"
def _setUpFakeKeys(self):
ssh_tmpdir = os.path.join(self.tmpdir, ".ssh")
os.makedirs(ssh_tmpdir)
self.priv_filename = os.path.join(ssh_tmpdir, "id_dsa")
utils.WriteFile(self.priv_filename, data=self._PRIV_KEY)
self.pub_filename = os.path.join(ssh_tmpdir, "id_dsa.pub")
utils.WriteFile(self.pub_filename, data=self._PUB_KEY)
self.auth_filename = os.path.join(ssh_tmpdir, "authorized_keys")
utils.WriteFile(self.auth_filename, data=self._AUTH_KEYS)
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
self._setUpFakeKeys()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _GetTempHomedir(self, _):
return self.tmpdir
def testNewKeysOverrideOldKeys(self):
ssh.InitSSHSetup("dsa", 1024, _homedir_fn=self._GetTempHomedir)
self.assertFileContentNotEqual(self.priv_filename, self._PRIV_KEY)
self.assertFileContentNotEqual(self.pub_filename, self._PUB_KEY)
def testSuffix(self):
suffix = "_pinkbunny"
ssh.InitSSHSetup("dsa", 1024, _homedir_fn=self._GetTempHomedir,
_suffix=suffix)
self.assertFileContent(self.priv_filename, self._PRIV_KEY)
self.assertFileContent(self.pub_filename, self._PUB_KEY)
self.assertTrue(os.path.exists(self.priv_filename + suffix))
self.assertTrue(os.path.exists(self.priv_filename + suffix + ".pub"))
class TestDetermineKeyBits(testutils.GanetiTestCase):
  def testCompleteness(self):
    self.assertEquals(constants.SSHK_ALL, ssh.SSH_KEY_VALID_BITS.keys())
  def testAdoptDefault(self):
    self.assertEquals(2048, ssh.DetermineKeyBits("rsa", None, None, None))
    self.assertEquals(1024, ssh.DetermineKeyBits("dsa", None, None, None))
  def testAdoptOldKeySize(self):
    self.assertEquals(4098, ssh.DetermineKeyBits("rsa", None, "rsa", 4098))
    self.assertEquals(2048, ssh.DetermineKeyBits("rsa", None, "dsa", 1024))
  def testDsaSpecificValues(self):
    self.assertRaises(errors.OpPrereqError, ssh.DetermineKeyBits, "dsa", 2048,
                      None, None)
    self.assertRaises(errors.OpPrereqError, ssh.DetermineKeyBits, "dsa", 512,
                      None, None)
    self.assertEquals(1024, ssh.DetermineKeyBits("dsa", None, None, None))
  def testEcdsaSpecificValues(self):
    self.assertRaises(errors.OpPrereqError, ssh.DetermineKeyBits, "ecdsa", 2048,
                      None, None)
    for b in [256, 384, 521]:
      self.assertEquals(b, ssh.DetermineKeyBits("ecdsa", b, None, None))
  def testRsaSpecificValues(self):
    self.assertRaises(errors.OpPrereqError, ssh.DetermineKeyBits, "dsa", 766,
                      None, None)
    for b in [768, 769, 2048, 2049, 4096]:
      self.assertEquals(b, ssh.DetermineKeyBits("rsa", b, None, None))
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
|
# -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu(cls):
""" Compose Menu """
# Modules menus
main_menu = MM()(
cls.menu_modules(),
cls.menu_personal(),
cls.menu_lang(),
)
current.menu.footer = cls.menu_footer()
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
return [
homepage(name = " ",
left = True,
icon = "%s/static/themes/img/logo-small.png" % \
current.request.application,
),
MM("Newsfeed", c="cms", f="newsfeed", m="datalist"),
MM("Organizations", c="org", f="organisation"),
MM("Activities", c="project", f="activity", m="summary"),
MM("Projects", c="project", f="project"),
#MM("Aid Requests", link=False),
#MM("Aid Deliveries", link=False),
MM("Map", c="gis", f="index"),
]
# -------------------------------------------------------------------------
@classmethod
def menu_footer(cls):
""" Footer menu """
return MF()(
MF("Newsfeed", c="cms", f="newsfeed", m="datalist"),
MF("Organizations", c="org", f="organisation"),
MF("Projects", c="project", f="project"),
#MF("Aid Requests", link=False),
#MM("Aid Deliveries", link=False),
MF("Map", c="gis", f="index"),
)
# -------------------------------------------------------------------------
@classmethod
def menu_personal(cls):
""" Custom Personal Menu """
auth = current.auth
s3 = current.response.s3
settings = current.deployment_settings
if not auth.is_logged_in():
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = settings.get_security_self_registration()
menu_personal = MM(icon="icon-cog", link=False)(
MM("Register", c="default", f="user",
m="register",
check = self_registration,
),
MM("Login", c="default", f="user",
m="login",
vars = {"_next": login_next},
),
MM("Lost Password", c="default", f="user",
m="retrieve_password",
),
)
else:
s3_has_role = auth.s3_has_role
is_org_admin = lambda i: s3_has_role("ORG_ADMIN") and \
not s3_has_role("ADMIN")
menu_personal = MM(icon="user", link=False)(
MM("Administration", c="admin", f="index",
restrict = "ADMIN",
),
MM("Administration", c="admin", f="user",
check = is_org_admin,
),
MM("Change Password", c="default", f="user",
m = "change_password",
),
MM("Logout", c="default", f="user",
m = "logout",
),
)
return menu_personal
# -------------------------------------------------------------------------
@classmethod
def menu_lang(cls):
""" Language Selector """
s3 = current.response.s3
menu_lang = ML("Language", right=True)
for language in s3.l10n_languages.items():
code, name = language
menu_lang(
ML(name, translate=False, lang_code=code, lang_name=name)
)
return menu_lang
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
""" Custom Controller Menus """
# -------------------------------------------------------------------------
@staticmethod
def gis():
""" GIS / GIS Controllers """
if not current.auth.is_logged_in():
# No Side Menu
return None
MAP_ADMIN = current.session.s3.system_roles.MAP_ADMIN
settings = current.deployment_settings
gis_menu = settings.get_gis_menu()
def pois(i):
poi_resources = settings.get_gis_poi_create_resources()
if not poi_resources:
return False
for res in poi_resources:
if res["table"] == "gis_poi":
return True
return False
def config_menu(i):
auth = current.auth
if not auth.is_logged_in():
# Anonymous users can never configure the Map
return False
s3db = current.s3db
if auth.s3_has_permission("create",
s3db.gis_config):
# If users can create configs then they can see the menu item
return True
# Look for this user's config
table = s3db.gis_config
query = (table.pe_id == auth.user.pe_id)
config = current.db(query).select(table.id,
limitby=(0, 1),
cache=s3db.cache).first()
if config:
return True
def config_args():
auth = current.auth
if not auth.user:
# Won't show anyway due to check
return []
if auth.s3_has_role(MAP_ADMIN):
# Full List
return []
# Look for this user's config
s3db = current.s3db
table = s3db.gis_config
query = (table.pe_id == auth.user.pe_id)
config = current.db(query).select(table.id,
limitby=(0, 1),
cache=s3db.cache).first()
if config:
# Link direct to the User's config
return [config.id, "layer_entity"]
# Link to the Create form
return ["create"]
return M(c="gis")(
M("Fullscreen Map", c="gis", f="map_viewing_client"),
# Geocoding is not currently supported
#M("Bulk Uploader", c="doc", f="bulk_upload"),
M("Locations", c="gis", f="location")(
M("Create", m="create"),
#M("Create Location Group", m="create", vars={"group": 1}),
M("Import from CSV", m="import", restrict=[MAP_ADMIN]),
M("Import from OpenStreetMap", m="import_poi",
restrict=[MAP_ADMIN]),
#M("Geocode", f="geocode_manual"),
),
M("PoIs", c="gis", f="poi", check=pois)(),
#M("Population Report", f="location", m="report",
# vars=dict(rows="name",
# fact="sum(population)",
# ),
# ),
M("Configuration", c="gis", f="config", args=config_args(),
_id="gis_menu_config",
check=config_menu),
M("Admin", c="gis", restrict=[MAP_ADMIN])(
M("Hierarchy", f="hierarchy"),
M("Layers", f="catalog"),
M("Markers", f="marker"),
M("Menu", f="menu",
check=[gis_menu]),
M("PoI Types", f="poi_type",
check=[pois]),
M("Projections", f="projection"),
M("Styles", f="style"),
)
)
# -------------------------------------------------------------------------
@classmethod
def hrm(cls):
""" HRM / Human Resources Management """
return cls.org()
# -------------------------------------------------------------------------
def inv(self):
""" Aid Delivery """
if not current.auth.is_logged_in():
# No Side Menu
return None
ADMIN = current.session.s3.system_roles.ADMIN
#current.s3db.inv_recv_crud_strings()
#inv_recv_list = current.response.s3.crud_strings.inv_recv.title_list
#settings = current.deployment_settings
#use_adjust = lambda i: not settings.get_inv_direct_stock_edits()
#use_commit = lambda i: settings.get_req_use_commit()
return M()(
#M("Home", f="index"),
#M("Warehouses", c="inv", f="warehouse")(
# M("Create", m="create"),
# M("Import", m="import", p="create"),
#),
#M("Warehouse Stock", c="inv", f="inv_item")(
# M("Adjust Stock Levels", f="adj", check=use_adjust),
# M("Kitting", f="kitting"),
# M("Import", f="inv_item", m="import", p="create"),
#),
#M("Reports", c="inv", f="inv_item")(
# M("Warehouse Stock", f="inv_item", m="report"),
# M("Expiration Report", c="inv", f="track_item",
# vars=dict(report="exp")),
# M("Monetization Report", c="inv", f="inv_item",
# vars=dict(report="mon")),
# M("Utilization Report", c="inv", f="track_item",
# vars=dict(report="util")),
# M("Summary of Incoming Supplies", c="inv", f="track_item",
# vars=dict(report="inc")),
# M("Summary of Releases", c="inv", f="track_item",
# vars=dict(report="rel")),
#),
#M(inv_recv_list, c="inv", f="recv", translate=False)( # Already T()
# M("Create", m="create"),
# M("Timeline", args="timeline"),
#),
M("Shipments", c="inv", f="send")(
M("Create", m="create"),
M("Search Shipped Items", f="track_item"),
M("Timeline", args="timeline"),
),
M("Items", c="supply", f="item", m="summary")(
M("Create", m="create"),
M("Import", f="catalog_item", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
#M("Create", m="create"),
#),
#M("Brands", c="supply", f="brand",
# restrict=[ADMIN])(
# M("Create", m="create"),
#),
M("Catalogs", c="supply", f="catalog")(
M("Create", m="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("Create", m="create"),
),
M("Suppliers", c="inv", f="supplier")(
M("Create", m="create"),
M("Import", m="import", p="create"),
),
#M("Facilities", c="inv", f="facility")(
# M("Create", m="create", t="org_facility"),
#),
#M("Facility Types", c="inv", f="facility_type",
# restrict=[ADMIN])(
# M("Create", m="create"),
#),
#M("Warehouse Types", c="inv", f="warehouse_type",
# restrict=[ADMIN])(
# M("Create", m="create"),
#),
#M("Requests", c="req", f="req")(
# M("Create", m="create"),
# M("Requested Items", f="req_item"),
#),
#M("Commitments", c="req", f="commit", check=use_commit)(
#),
)
# -------------------------------------------------------------------------
@staticmethod
def org():
""" ORG / Organization Registry """
if not current.auth.is_logged_in():
# No Side Menu
return None
else:
system_roles = current.session.s3.system_roles
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
INDIVIDUALS = current.deployment_settings.get_hrm_staff_label()
return M()(
M("Organizations", c="org", f="organisation")(
M("Create", m="create",
restrict=AUTHENTICATED),
),
M(INDIVIDUALS, c="hrm", f=("staff", "person"),
t="hrm_human_resource")(
#M("Search"),
M("Create", m="create"),
),
M("Service Locations", c="org", f="service_location",
m="summary")(
M("Search", m="summary"),
),
M("Administration", c=("org", "hrm"),
link=False, restrict=ADMIN)(
M("Organisation Types", c="org", f="organisation_type"),
M("Sectors", c="org", f="sector"),
M("Service Types", c="org", f="service"),
M("Facility Types", c="org", f="facility_type"),
M("Job Title Catalog", c="hrm", f="job_title"),
),
)
# -------------------------------------------------------------------------
@classmethod
def pr(cls):
""" Person Registry """
if not current.auth.is_logged_in():
# No Side Menu
return None
else:
return cls.org()
# -------------------------------------------------------------------------
@staticmethod
def project():
""" Project Management """
ADMIN = current.session.s3.system_roles.ADMIN
return M(c="project")(
M("Activities (4W)", f="activity", m="summary")(
M("Create", m="create"),
M("Map", m="summary", vars={"t": "2"}),
),
M("Projects", f="project")(
M("Create", m="create"),
M("Map", f="location", m="map"),
),
M("Administration", link=False, restrict=ADMIN)(
M("Hazards", f="hazard"),
M("Status", f="status"),
),
)
# -------------------------------------------------------------------------
@staticmethod
def req():
""" REQ / Request Management """
if not current.auth.is_logged_in():
# No Side Menu
return None
ADMIN = current.session.s3.system_roles.ADMIN
#settings = current.deployment_settings
#types = settings.get_req_req_type()
#if len(types) == 1:
# t = types[0]
# if t == "Stock":
# create_menu = M("Create", m="create", vars={"type": 1})
# elif t == "People":
# create_menu = M("Create", m="create", vars={"type": 2})
# else:
# create_menu = M("Create", m="create")
#else:
# create_menu = M("Create", m="create")
#recurring = lambda i: settings.get_req_recurring()
#use_commit = lambda i: settings.get_req_use_commit()
#req_items = lambda i: "Stock" in types
#req_skills = lambda i: "People" in types
return M(c="req")(
M("Requests", f="req")(
M("Create", m="create", vars={"type": 1}),
#M("List Recurring Requests", f="req_template", check=recurring),
#M("Map", m="map"),
#M("Report", m="report"),
M("Search All Requested Items", f="req_item",
#check=req_items
),
#M("Search All Requested Skills", f="req_skill",
# check=req_skills),
),
#M("Commitments", f="commit", check=use_commit)(
#),
M("Items", c="supply", f="item")(
M("Create", m="create"),
M("Report", m="report"),
M("Import", m="import", p="create"),
),
# Catalog Items moved to be next to the Item Categories
#M("Catalog Items", c="supply", f="catalog_item")(
#M("Create", m="create"),
#),
M("Catalogs", c="supply", f="catalog")(
M("Create", m="create"),
),
M("Item Categories", c="supply", f="item_category",
restrict=[ADMIN])(
M("Create", m="create"),
),
)
# END =========================================================================
|
|
#!/usr/bin/env python
#
# Protokollen - probe a bunch of hosts for their preferred URL
#
# -*- coding: utf-8 -*-
# vim: ts=4 sts=4 sw=4 noexpandtab
#
from __future__ import absolute_import
import sys
import re
import json
# http://www.angryobjects.com/2011/10/15/http-with-python-pycurl-by-example/
# http://pycurl.sourceforge.net/doc/unicode.html
import pycurl
from io import BytesIO
# https://stackoverflow.com/questions/2087370/decode-html-entities-in-python-string
# NOTE: Requires Python 2
import HTMLParser
import dns.name
import dns.resolver
from dns.exception import DNSException
# https://docs.python.org/2/library/urlparse.html
# NOTE: For Python 3, use import urllib.parse
from urlparse import urlparse
userAgentFmt = 'Mozilla/5.0 (Macintosh; Intel Mac OS X) AppleWebKit/534.34 (KHTML, like Gecko) Safari/534.34 (Protokollen; {0})'
def check_url(url, resolve=None, accept_language=None):
buf = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.USERAGENT, userAgentFmt.format('www pref test'))
c.setopt(c.ENCODING, 'gzip')
c.setopt(c.FOLLOWLOCATION, 1)
c.setopt(c.CONNECTTIMEOUT, 15)
c.setopt(c.TIMEOUT, 20)
if resolve:
c.setopt(c.RESOLVE, resolve)
hostname, port, ip = resolve[0].split(':', 2)
port = int(port)
else:
ip = None
parts = urlparse(url)
hostname = parts.netloc
if parts.port:
port = int(parts.port)
elif scheme == "http":
port = 80
elif scheme == "https":
port = 443
if accept_language:
c.setopt(c.HTTPHEADER, ['Accept-Language: ' + accept_language])
if 'openssl' in pycurl.version.lower():
c.setopt(c.OPT_CERTINFO, 1)
c.setopt(c.WRITEFUNCTION, buf.write)
error = None
status = -1
last_hostname = None
last_ip = None
last_port = None
last_url = None
title = None
content_type = 'text/plain'
charset = 'iso-8859-1'
http_charset = False
try:
c.perform()
ct_header = c.getinfo(c.CONTENT_TYPE)
if ct_header:
content_type, _, charset = ct_header.partition(';')
_, _, charset = charset.partition('=')
charset = charset.strip().lower()
if len(charset):
# Workaround for broken MIME configs
charset = charset.replace('ansi-8859-1', 'iso-8859-1')
http_charset = True
else:
charset = 'iso-8859-1'
last_ip = c.getinfo(c.PRIMARY_IP)
last_url = c.getinfo(c.EFFECTIVE_URL)
parts = urlparse(last_url)
last_hostname = parts.netloc
last_port = parts.port
if parts.port:
last_port = int(parts.port)
elif parts.scheme == "http":
last_port = 80
elif parts.scheme == "https":
last_port = 443
status = c.getinfo(c.HTTP_CODE)
if 'openssl' in pycurl.version.lower():
certinfo = c.getinfo(c.INFO_CERTINFO)
certinfo_dict = {}
for entry in certinfo:
certinfo_dict[entry[0]] = entry[1]
#print certinfo_dict
except pycurl.error as e:
errno, errstr = e.args
error = '{0} ({1})'.format(errstr, errno).strip()
# Remove variable data from error messages
# 'Connection timed out after 5001 milliseconds (28)'
# 'Resolving timed out after 5001 milliseconds (28)'
error = re.sub(r'(\w+ timed out).*(\s+\(\d+\))', r'\1\2', error)
finally:
c.close()
binary_data = buf.getvalue()
if error is None and content_type == 'text/html':
# Attempt to decode using HTTP header first
try:
html = binary_data.decode(charset, 'ignore')
except (LookupError, TypeError):  # unknown charset name -> fall back to UTF-8
try:
html = binary_data.decode('utf-8', 'ignore')
charset = 'utf-8'
except TypeError:
html = ''
if http_charset is False:
# Look for <meta charset=""> tag (HTML5)
pattern = re.compile(r'<meta\s+charset=["\']*([^"\'> ]*)', flags=re.IGNORECASE)
matches = pattern.search(html)
if matches:
charset = matches.group(1).strip().lower()
else:
# Look for <meta http-equiv="content-type">
# <meta http-equiv="Content-Type" content="text/html; charset=utf-8"
pattern = re.compile(r'<meta\s+http-equiv=.content-type.[^>]*content=.([^"\']*)', flags=re.IGNORECASE)
matches = pattern.search(html)
if not matches:
pattern = re.compile(r'<meta\s+content=.([^"\']*).\s+http-equiv=.content-type.[^>]', flags=re.IGNORECASE)
matches = pattern.search(html)
if matches:
content_type, _, charset = matches.group(1).partition(';')
_, _, charset = charset.partition('=')
charset = charset.strip().lower()
if len(charset):
html = binary_data.decode(charset, 'ignore')
pattern = re.compile("<title>(.+?)</title>", flags=re.IGNORECASE|re.DOTALL)
matches = pattern.search(html)
if matches:
title = matches.group(1).strip()
buf.close()
try:
h = HTMLParser.HTMLParser()
decoded_title = h.unescape(title)
if decoded_title:
title = decoded_title
except:
pass
# Convert IDN hostname to Unicode
hostname = dns.name.from_text(hostname).to_unicode(omit_final_dot=True)
res = {
'charset': charset,
'contentType': content_type,
'error': error,
'hostname': hostname,
'ip': ip,
'lastHostname': last_hostname,
'lastIp': last_ip,
'lastPort': last_port,
'location': last_url,
'port': port,
'status': status,
'title': title,
'url': url
}
return res
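# A minimal usage sketch (hypothetical host and address). The resolve list uses
# curl's RESOLVE format, "hostname:port:ip", so retries and same-host redirects
# are pinned to a single address:
#   res = check_url('https://www.example.se/',
#                   resolve=['www.example.se:443:192.0.2.1'],
#                   accept_language='sv-SE')
#   print res['status'], res['location']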
if len(sys.argv) < 4:
print('Usage: %s <protocol> <hostname> <port> [[<protocol> <hostname> <port>], ..]' % sys.argv[0])
raise SystemExit
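# Example invocation (hypothetical hostnames), probing both schemes of one site:
#   python <this-script> https www.example.se 443 http www.example.se 80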
# Build list of schemes and hostnames
schemes = set([])
sites = set()
for i in xrange(1, len(sys.argv), 3):
scheme = sys.argv[i].lower()
hostname = sys.argv[i+1]
port = sys.argv[i+2]
if scheme == 'http' and port != '80':
hostname += ':' + port
elif scheme == 'https' and port != '443':
hostname += ':' + port
schemes.add(scheme)
sites.add((scheme, hostname))
r = dns.resolver.Resolver()
r.timeout = 15
final_res = {}
for scheme, hostport in sites:
if not scheme in final_res:
final_res[scheme] = []
hostname, _, explicit_port = hostport.partition(':')
error = None
rd = []
try:
domain = dns.name.from_unicode(unicode(hostname, 'utf-8'))
qr = r.query(domain, 'A')
for rr in qr.rrset:
rd.append(rr.to_text())
#except (Timeout, NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers) as e:
except dns.resolver.NXDOMAIN as e:
error = 'NXDOMAIN'
except DNSException as e:
error = e.__class__.__name__
try:
domain = dns.name.from_unicode(unicode(hostname, 'utf-8'))
qr = r.query(domain, 'AAAA')
for rr in qr.rrset:
rd.append(rr.to_text())
except dns.resolver.NXDOMAIN as e:
error = 'NXDOMAIN'
except DNSException as e:
error = e.__class__.__name__
# Curl needs punycode-encoded (IDNA) hostnames
hostname = domain.to_text(omit_final_dot=True)
for ip in sorted(rd):
url = scheme + '://' + hostname
if explicit_port:
port = explicit_port
url += ':' + port
elif scheme == 'http':
port = "80"
elif scheme == 'https':
port = "443"
url += '/'
resolve = [ hostname + ':' + port + ':' + ip ]
# Also use for redirects to other schemes on the same host
if port != "443":
resolve.append(hostname + ':443:' + ip)
if port != "80":
resolve.append(hostname + ':80:' + ip)
#print "Trying URL '%s' with resolve opts '%s'" % (url, resolve)
res = check_url(url, resolve, 'sv-SE,sv;q=0.8,en-US;q=0.6,en;q=0.4')
final_res[scheme].append(res)
obj = {}
for scheme in final_res:
for res in final_res[scheme]:
if res['error']:
continue
# Replace Java session IDs with something static
if res['url']:
res['url'] = re.sub(r';jsessionid=[0-9A-F]{32}', r';jsessionid=1234567890ABCDEF1234567890ABCDEF', res['url'])
if res['location']:
res['location'] = re.sub(r';jsessionid=[0-9A-F]{32}', r';jsessionid=1234567890ABCDEF1234567890ABCDEF', res['location'])
last_url = res['location']
url = res['url']
if not obj.has_key(last_url):
obj[last_url] = {}
obj[last_url][url] = res
final_res['preferred'] = None
if obj:
# http:// will sort before https://
primary_url = sorted(list(obj))[0]
primary_title = None
for url in obj[primary_url]:
res = obj[primary_url][url]
if res['location'] == primary_url:
primary_title = res['title']
break
#print("Primary URL:%s, title:%s" % (primary_url, primary_title))
final_res['preferred'] = {'url': primary_url, 'title': primary_title}
# Dump JSON with sorted keys so JSONs can be compared later
print json.dumps(final_res, indent=2, sort_keys=True)
|
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the TensorFlow CLIP model. """
import inspect
import os
import tempfile
import unittest
from importlib import import_module
import requests
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
from transformers.file_utils import is_tf_available, is_vision_available
from transformers.testing_utils import is_pt_tf_cross_test, require_tf, require_vision, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCLIPModel, TFCLIPTextModel, TFCLIPVisionModel, TFSharedEmbeddings
from transformers.models.clip.modeling_tf_clip import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import CLIPProcessor
class TFCLIPVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return CLIPVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = TFCLIPVisionModel(config=config)
result = model(pixel_values, training=False)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFCLIPVisionModelTest(TFModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (TFCLIPVisionModel,) if is_tf_available() else ()
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFCLIPVisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
# CLIP does not use inputs_embeds
pass
def test_graph_mode_with_inputs_embeds(self):
# CLIP does not use inputs_embeds
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# CLIP has a different seq_length
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = num_patches + 1
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCLIPVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class TFCLIPTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return CLIPTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = TFCLIPTextModel(config=config)
result = model(input_ids, attention_mask=input_mask, training=False)
result = model(input_ids, training=False)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFCLIPTextModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFCLIPTextModel,) if is_tf_available() else ()
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFCLIPTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_inputs_embeds(self):
# CLIP does not use inputs_embeds
pass
@slow
def test_model_from_pretrained(self):
for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCLIPTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class TFCLIPModelTester:
def __init__(self, parent, is_training=True):
self.parent = parent
self.text_model_tester = TFCLIPTextModelTester(parent)
self.vision_model_tester = TFCLIPVisionModelTester(parent)
self.is_training = is_training
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, pixel_values
def get_config(self):
return CLIPConfig.from_text_vision_configs(
self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64
)
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
model = TFCLIPModel(config)
result = model(input_ids, pixel_values, attention_mask, training=False)
self.parent.assertEqual(
result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
"return_loss": True,
}
return config, inputs_dict
@require_tf
class TFCLIPModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFCLIPModel,) if is_tf_available() else ()
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
test_onnx = False
def setUp(self):
self.model_tester = TFCLIPModelTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
# hidden_states are tested in individual model tests
def test_hidden_states_output(self):
pass
# input_embeds are tested in individual model tests
def test_inputs_embeds(self):
pass
# CLIPModel does not have input/output embeddings
def test_model_common_attributes(self):
pass
# overwrite from common since `TFCLIPModelTester` sets `return_loss` to `True`, which causes the preparation of
# `symbolic_inputs` to fail.
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# remove `return_loss` to make code work
if self.__class__.__name__ == "TFCLIPModelTest":
inputs_dict.pop("return_loss", None)
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = inputs_dict.pop("use_cache", None)
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
# overwrite from common since CLIPModel/TFCLIPModel return CLIPOutput/TFCLIPOutput
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self):
import torch
import transformers
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
config.output_hidden_states = True
tf_model = model_class(config)
pt_model = pt_model_class(config)
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
)
pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
pt_inputs_dict[name] = key
elif name == "input_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
elif name == "pixel_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
self.assertEqual(len(tfo), len(pto), "Output lengths differ between TF and PyTorch")
for tf_output, pt_output in zip(tfo.to_tuple(), pto.to_tuple()):
if not (isinstance(tf_output, tf.Tensor) and isinstance(pt_output, torch.Tensor)):
continue
tf_out = tf_output.numpy()
pt_out = pt_output.numpy()
self.assertEqual(tf_out.shape, pt_out.shape, "Output component shapes differ between TF and PyTorch")
if len(tf_out.shape) > 0:
tf_nans = np.copy(np.isnan(tf_out))
pt_nans = np.copy(np.isnan(pt_out))
pt_out[tf_nans] = 0
tf_out[tf_nans] = 0
pt_out[pt_nans] = 0
tf_out[pt_nans] = 0
max_diff = np.amax(np.abs(tf_out - pt_out))
self.assertLessEqual(max_diff, 4e-2)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
key = np.array(key, dtype=bool)
pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
elif name == "input_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
elif name == "pixel_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(len(tfo), len(pto), "Output lengths differ between TF and PyTorch")
for tf_output, pt_output in zip(tfo.to_tuple(), pto.to_tuple()):
if not (isinstance(tf_output, tf.Tensor) and isinstance(pt_output, torch.Tensor)):
continue
tf_out = tf_output.numpy()
pt_out = pt_output.numpy()
self.assertEqual(tf_out.shape, pt_out.shape, "Output component shapes differ between TF and PyTorch")
if len(tf_out.shape) > 0:
tf_nans = np.copy(np.isnan(tf_out))
pt_nans = np.copy(np.isnan(pt_out))
pt_out[tf_nans] = 0
tf_out[tf_nans] = 0
pt_out[pt_nans] = 0
tf_out[pt_nans] = 0
max_diff = np.amax(np.abs(tf_out - pt_out))
self.assertLessEqual(max_diff, 4e-2)
@slow
def test_model_from_pretrained(self):
for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCLIPModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_tf
class TFCLIPModelIntegrationTest(unittest.TestCase):
@slow
def test_inference(self):
model_name = "openai/clip-vit-base-patch32"
model = TFCLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(
text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf"
)
outputs = model(**inputs, training=False)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = tf.constant([[24.5701, 19.3049]])
tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)
|
|
# -*- coding: utf-8 -*-
import string
from datetime import timedelta
from django.conf import settings
# the available parsers for rendering question content
ASK_CONTENT_DEFAULT_PARSER = (
('rst', 'restructured text'),
('markdown', 'markdown'),
('texttile', 'textile'),
)
ASK_CONTENT_PARSER = getattr(settings, 'ASK_CONTENT_DEFAULT_PARSER', ASK_CONTENT_DEFAULT_PARSER)
# The name of the editor group
MODERATOR_GROUP = 'mod_group'
# the minimal reputation needed to
MIN_REP = 1
# post/user score change during an upvote
POST_SCORE_CHANGE = 1
USER_SCORE_CHANGE = 1
# this is how many votes can be cast per session
MAX_VOTES_PER_SESSION = 1
# in seconds, the time intervals to reset vote limit
VOTE_SESSION_LENGTH = 60
VOTE_SESSION_LENGTH = timedelta(seconds=VOTE_SESSION_LENGTH)
FIRST_SESSION = 'first-session'
LASTSORT_SESSION = 'last-sort'
# Add new post types at the end (existing numeric values must keep their meaning)
(POST_QUESTION, POST_ANSWER, POST_COMMENT, POST_TUTORIAL, POST_BLOG,
POST_FORUM, POST_NEWS, POST_REVIEW, POST_TOOL, POST_FIXME, POST_VIDEO,
POST_JOB, POST_PUBLICATION, POST_TIP, POST_OTHER) = range(1, 16)
POST_TYPES = (
(POST_ANSWER, 'Answer'),
(POST_COMMENT, 'Comment'),
(POST_QUESTION, 'Question'),
(POST_TUTORIAL, 'Tutorial'),
(POST_TIP, 'Tip'),
(POST_BLOG, 'Blog'),
(POST_FORUM, 'Forum'),
(POST_NEWS, 'News'),
(POST_REVIEW, 'Review'),
(POST_TOOL, 'Tool'),
(POST_VIDEO, 'Video'),
(POST_FIXME, 'FixMe'),
(POST_JOB, 'Job'),
(POST_PUBLICATION, 'Research Paper'),
)
# direct mapping for quick lookups
POST_MAP = dict(POST_TYPES)
# reverse mapping for quick lookups
POST_REV_MAP = dict((y.lower(), x) for (x, y) in POST_MAP.items())
# entities that will be displayed on the navigation bar
POST_NAV_BAR = []
POST_NAV_BAR_LOWER = map(string.lower, POST_NAV_BAR)
# the valid sort orders
SORT_MAP = dict(
rank="-rank", views="-views", creation="-creation_date",
activity="-lastedit_date", votes="-full_score", answers="-answer_count",
)
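# A hypothetical illustration of how the sort map is consumed: the request's
# "sort" parameter selects an order_by() expression, falling back to rank, e.g.
#   posts = Post.objects.order_by(SORT_MAP.get(sort, "-rank"))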
# valid pill entries
VALID_PILLS = set("mytags all news questions unanswered tutorials tools \
videos jobs planet".split())
# valid tab entries
VALID_TABS = set("recent planet sticky".split()) | VALID_PILLS
# posts that only have content, no title or tags
POST_CONTENT_ONLY = set([POST_ANSWER, POST_COMMENT])
# these posts must have parent
POST_SUBLEVEL = set([POST_ANSWER, POST_COMMENT])
# posts excluded from the main level listing
POST_EXCLUDE = set([POST_ANSWER, POST_COMMENT, POST_BLOG])
# toplevel posts may stand alone and must have title and tags
POST_TOPLEVEL = set(POST_MAP.keys()) - POST_SUBLEVEL
# posts that will go under the forum section
POST_FORUMLEVEL = set((POST_FORUM, POST_NEWS, POST_REVIEW))
# the session key that stores new post counts
SESSION_POST_COUNT = 'session-post-count'
SESSION_VIEW_COUNT = 'view-count'
# the type of messages that the system maintains
NOTE_USER, NOTE_MODERATOR, NOTE_ADMIN, NOTE_AWARD, NOTE_SITE = range(1, 6)
NOTE_TYPES = (
(NOTE_USER, 'User'),
(NOTE_MODERATOR, 'Moderator'),
(NOTE_ADMIN, 'Admin'),
(NOTE_AWARD, 'Award'),
(NOTE_SITE, "Site"),
)
# user types
USER_NEW, USER_MEMBER, USER_MODERATOR, USER_ADMIN, USER_BLOG, USER_SPECIAL = range(1, 7)
USER_TYPES = (
(USER_NEW, 'New'),
(USER_MEMBER, 'Member'),
(USER_MODERATOR, 'Moderator'),
(USER_ADMIN, 'Administrator'),
(USER_BLOG, 'Blog'),
(USER_SPECIAL, 'Special'),
)
# user status types
USER_ACTIVE, USER_SUSPENDED, USER_BANNED = 1, 2, 3
USER_STATUS_TYPES = (
(USER_ACTIVE, 'Active'),
(USER_SUSPENDED, 'Suspended'),
(USER_BANNED, 'Banned'),
)
# post status types
POST_OPEN, POST_CLOSED, POST_DELETED = 100, 200, 300
POST_STATUS_TYPES = (
(POST_OPEN, 'Open'),
(POST_CLOSED, 'Closed'),
(POST_DELETED, 'Deleted'),
)
# the time between registering two post views
# from the same IP, in minutes
POST_VIEW_UPDATE = 30
# revision constants
REV_NONE, REV_CLOSE, REV_REOPEN, REV_DELETE, REV_UNDELETE = range(1000, 1005)
REV_ACTIONS = (
(REV_NONE, ''), (REV_CLOSE, 'Close'), (REV_REOPEN, 'Reopen'),
(REV_DELETE, 'Delete'), (REV_UNDELETE, 'Undelete')
)
REV_ACTION_MAP = dict(REV_ACTIONS)
# this stores the counts in the cache
CACHE_COUNT_KEY = "cache-count-key"
# moderation actions
USER_MODERATION, POST_MODERATION = 0, 1
USER_MOD_TYPES = [(USER_MODERATION, 'Usermod'), (POST_MODERATION, 'Postmod')]
# voting related constants
VOTE_UP, VOTE_DOWN, VOTE_ACCEPT, VOTE_BOOKMARK = range(1, 5)
VOTE_TYPES = (
(VOTE_UP, 'Upvote'),
(VOTE_DOWN, 'Downvote'),
(VOTE_ACCEPT, 'Accept'),
(VOTE_BOOKMARK, 'Bookmark'),
)
OPPOSING_VOTES = {VOTE_UP: VOTE_DOWN, VOTE_DOWN: VOTE_UP}
BADGE_BRONZE, BADGE_SILVER, BADGE_GOLD = 0, 1, 2
BADGE_TYPES = (
(BADGE_BRONZE, 'bronze'),
(BADGE_SILVER, 'silver'),
(BADGE_GOLD, 'gold'),
)
BETA_TESTER_BADGE = "Beta Tester"
TARGET_COUNT_MAP = {
POST_NEWS: "News",
POST_QUESTION: "Question",
POST_TOOL: "Tool",
POST_TUTORIAL: "Tutorial",
POST_JOB: "Job",
POST_BLOG: "Blog",
POST_VIDEO: "Video",
"unanswered": "Unanswered",
}
MIN_POST_SIZE = 6
MAX_POST_SIZE = 250000
# google analytics tracker and domain
GOOGLE_TRACKER = ""
GOOGLE_DOMAIN = ""
# needs to be turned on explicitly
CONTENT_INDEXING = True
# rank gains expressed in hours
POST_UPVOTE_RANK_GAIN = 1
POST_VIEW_RANK_GAIN = 0.1
BLOG_VIEW_RANK_GAIN = 0.1
# if this is set together with DEBUG mode, it allows test logins
# don't turn it on in production servers!
SELENIUM_TEST_LOGIN_TOKEN = None
# no external authentication by default
# dictionary keyed by name containing the tuple of (secret key, template)
EXTERNAL_AUTHENICATION = {
}
# setting the session for multiple servers
SESSION_COOKIE_DOMAIN = ""
MIN_POST_SIZE = 15
MAX_POST_SIZE = 20000
RECENT_VOTE_COUNT = 10
RECENT_TAG_COUNT = 30
# the tag names to be displayed on the main page
IMPORTANT_TAG_NAMES = "rna-seq chip-seq assembly snp metagenomics vcf cnv mirna indel bwa bowtie bedtools biopython bioperl".split()
# the interval specified in hours
# that user activity throttling is computed over
TRUST_INTERVAL = 3
# how many posts may a new user make in a trust interval
# a new user is one who joined within the last trust interval
TRUST_NEW_USER_MAX_POST = 3
# how many posts may a trusted user make within a trust interval
TRUST_USER_MAX_POST = 15
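# A minimal sketch (not part of these settings) of how the throttle could be
# applied, assuming a hypothetical recent_post_count() helper:
#   window = timedelta(hours=TRUST_INTERVAL)
#   limit = TRUST_NEW_USER_MAX_POST if is_new_user else TRUST_USER_MAX_POST
#   if recent_post_count(user, since=now() - window) >= limit:
#       deny_post("posting limit reached, please try again later")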
# TEMPLATE LAYOUT,
# One may override these variables from the settings file
#
# this data governs the layout of the PILL_BAR
# bar name, link url, link name, counter key
ANON_PILL_BAR = [
("all", "", "Show All", ""),
("best", "show/best", "Popular", "Popular"),
("bookmarked", "show/bookmarked", "Bookmarked", "Bookmarked"),
("questions", "show/questions/", "Questions", "Question"),
("unanswered", "show/unanswered/", "Unanswered", "Unanswered"),
("howto", "show/howto/", "How To", "How To"),
("galaxy", "show/galaxy/", "Galaxy", "Galaxy"),
("forum", "show/forum/", "Forum", "Forum"),
("jobs", "show/jobs/", "Jobs", "Job"),
("planet", "show/planet/", "Planet", "Blog"),
]
USER_PILL_BAR = list(ANON_PILL_BAR)
USER_PILL_BAR.insert(1, ("mytags", "show/mytags/", "My Tags", ""))
#
# remapping the templates to local versions
# a row is the way a post is rendered on a page
# list below the templates to be loaded for a post type
# to reduce clutter there is a default mapper that
# for missing types attempts to map each type to rows/row.type.html
# django template lookup rules apply
#
TEMPLATE_ROWS = {
'job': "rows/row.job.html",
}
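# A hedged illustration of the default mapper described above: post types missing
# from TEMPLATE_ROWS fall back to a per-type template name, e.g.
#   template = TEMPLATE_ROWS.get(post_type, "rows/row.%s.html" % post_type)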
POSTS_PER_PAGE = 20
from django.conf import settings
__CURR_DIR = settings.PROJECT_ROOT
#HOME_DIR = path(__CURR_DIR )
#DATABASE_DIR = path(HOME_DIR, 'db')
#DATABASE_NAME = path(DATABASE_DIR, 'biostar.db')
#TEMPLATE_DIR = path(HOME_DIR, 'main', 'templates')
#STATIC_DIR = path(HOME_DIR, 'static')
EXPORT_DIR = 'export'
#WHOOSH_INDEX = path(HOME_DIR, 'db', 'index')
#PLANET_DIR = path(HOME_DIR, 'db', 'planet')
|
|
from pyasn1.type import tag, namedtype, univ
from pyasn1.codec.ber import encoder
from pyasn1.compat.octets import ints2octs
from pyasn1.error import PyAsn1Error
from sys import version_info
if version_info[0:2] < (2, 7) or \
version_info[0:2] in ( (3, 0), (3, 1) ):
try:
import unittest2 as unittest
except ImportError:
import unittest
else:
import unittest
class LargeTagEncoderTestCase(unittest.TestCase):
def setUp(self):
self.o = univ.Integer().subtype(
value=1, explicitTag=tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0xdeadbeaf)
)
def testEncoder(self):
assert encoder.encode(self.o) == ints2octs((127, 141, 245, 182, 253, 47, 3, 2, 1, 1))
class IntegerEncoderTestCase(unittest.TestCase):
def testPosInt(self):
assert encoder.encode(univ.Integer(12)) == ints2octs((2, 1, 12))
def testNegInt(self):
assert encoder.encode(univ.Integer(-12)) == ints2octs((2, 1, 244))
def testZero(self):
assert encoder.encode(univ.Integer(0)) == ints2octs((2, 1, 0))
def testCompactZero(self):
encoder.IntegerEncoder.supportCompactZero = True
substrate = encoder.encode(univ.Integer(0))
encoder.IntegerEncoder.supportCompactZero = False
assert substrate == ints2octs((2, 0))
def testMinusOne(self):
assert encoder.encode(univ.Integer(-1)) == ints2octs((2, 1, 255))
def testPosLong(self):
assert encoder.encode(
univ.Integer(0xffffffffffffffff)
) == ints2octs((2, 9, 0, 255, 255, 255, 255, 255, 255, 255, 255))
def testNegLong(self):
assert encoder.encode(
univ.Integer(-0xffffffffffffffff)
) == ints2octs((2, 9, 255, 0, 0, 0, 0, 0, 0, 0, 1))
class BooleanEncoderTestCase(unittest.TestCase):
def testTrue(self):
assert encoder.encode(univ.Boolean(1)) == ints2octs((1, 1, 1))
def testFalse(self):
assert encoder.encode(univ.Boolean(0)) == ints2octs((1, 1, 0))
class BitStringEncoderTestCase(unittest.TestCase):
def setUp(self):
self.b = univ.BitString((1,0,1,0,1,0,0,1,1,0,0,0,1,0,1))
def testDefMode(self):
assert encoder.encode(self.b) == ints2octs((3, 3, 1, 169, 138))
def testIndefMode(self):
assert encoder.encode(
self.b, defMode=0
) == ints2octs((3, 3, 1, 169, 138))
def testDefModeChunked(self):
assert encoder.encode(
self.b, maxChunkSize=1
) == ints2octs((35, 8, 3, 2, 0, 169, 3, 2, 1, 138))
def testIndefModeChunked(self):
assert encoder.encode(
self.b, defMode=0, maxChunkSize=1
) == ints2octs((35, 128, 3, 2, 0, 169, 3, 2, 1, 138, 0, 0))
def testEmptyValue(self):
assert encoder.encode(univ.BitString(())) == ints2octs((3, 1, 0))
class OctetStringEncoderTestCase(unittest.TestCase):
def setUp(self):
self.o = univ.OctetString('Quick brown fox')
def testDefMode(self):
assert encoder.encode(self.o) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
def testIndefMode(self):
assert encoder.encode(
self.o, defMode=0
) == ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
def testDefModeChunked(self):
assert encoder.encode(
self.o, maxChunkSize=4
) == ints2octs((36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120))
def testIndefModeChunked(self):
assert encoder.encode(
self.o, defMode=0, maxChunkSize=4
) == ints2octs((36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0))
class ExpTaggedOctetStringEncoderTestCase(unittest.TestCase):
def setUp(self):
self.o = univ.OctetString().subtype(
value='Quick brown fox',
explicitTag=tag.Tag(tag.tagClassApplication,tag.tagFormatSimple,5)
)
def testDefMode(self):
assert encoder.encode(self.o) == ints2octs((101, 17, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120))
def testIndefMode(self):
assert encoder.encode(
self.o, defMode=0
) == ints2octs((101, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
def testDefModeChunked(self):
assert encoder.encode(
self.o, defMode=1, maxChunkSize=4
) == ints2octs((101, 25, 36, 23, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120))
def testIndefModeChunked(self):
assert encoder.encode(
self.o, defMode=0, maxChunkSize=4
) == ints2octs((101, 128, 36, 128, 4, 4, 81, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 4, 111, 119, 110, 32, 4, 3, 102, 111, 120, 0, 0, 0, 0))
class NullEncoderTestCase(unittest.TestCase):
def testNull(self):
assert encoder.encode(univ.Null('')) == ints2octs((5, 0))
class ObjectIdentifierEncoderTestCase(unittest.TestCase):
def testOne(self):
assert encoder.encode(
univ.ObjectIdentifier((1,3,6,0,0xffffe))
) == ints2octs((6, 6, 43, 6, 0, 191, 255, 126))
def testEdge1(self):
assert encoder.encode(
univ.ObjectIdentifier((0,39))
) == ints2octs((6,1,39))
def testEdge2(self):
assert encoder.encode(
univ.ObjectIdentifier((1,39))
) == ints2octs((6,1,79))
def testEdge3(self):
#01111000
assert encoder.encode(
univ.ObjectIdentifier((2,40))
) == ints2octs((6,1,120))
def testEdge4(self):
#10010000|10000000|10000000|10000000|01001111
assert encoder.encode(
univ.ObjectIdentifier((2,0xffffffff))
) == ints2octs((6,5,0x90,0x80,0x80,0x80,0x4F))
def testEdge5(self):
#01111111
assert encoder.encode(
univ.ObjectIdentifier((2,47))
) == ints2octs((6,1,0x7F))
def testEdge6(self):
#10000001|00000000
assert encoder.encode(
univ.ObjectIdentifier((2,48))
) == ints2octs((6,2,0x81,0x00))
def testEdge7(self):
#10000001|00110100|00000011
assert encoder.encode(
univ.ObjectIdentifier((2,100,3))
) == ints2octs((6,3,0x81,0x34,0x03))
def testEdge8(self):
#10000101|00000000
assert encoder.encode(
univ.ObjectIdentifier((2,560))
) == ints2octs((6,2,133,0))
def testEdge9(self):
#10001000|10000100|10000111|00000010
assert encoder.encode(
univ.ObjectIdentifier((2,16843570))
) == ints2octs((6,4,0x88,0x84,0x87,0x02))
def testImpossible1(self):
try:
encoder.encode(univ.ObjectIdentifier((3,1,2)))
except PyAsn1Error:
pass
else:
assert 0, 'impossible leading arc tolerated'
def testImpossible2(self):
try:
encoder.encode(univ.ObjectIdentifier((0,)))
except PyAsn1Error:
pass
else:
assert 0, 'single arc OID tolerated'
def testImpossible3(self):
try:
encoder.encode(univ.ObjectIdentifier((0,40)))
except PyAsn1Error:
pass
else:
assert 0, 'second arc overflow tolerated'
def testImpossible4(self):
try:
encoder.encode(univ.ObjectIdentifier((1,40)))
except PyAsn1Error:
pass
else:
assert 0, 'second arc overflow tolerated'
def testLarge1(self):
assert encoder.encode(
univ.ObjectIdentifier((2,18446744073709551535184467440737095))
) == ints2octs((0x06,0x11,0x83,0xC6,0xDF,0xD4,0xCC,0xB3,0xFF,0xFF,0xFE,0xF0,0xB8,0xD6,0xB8,0xCB,0xE2,0xB7,0x17))
def testLarge2(self):
assert encoder.encode(
univ.ObjectIdentifier((2,999,18446744073709551535184467440737095))
) == ints2octs((0x06,0x13,0x88,0x37,0x83,0xC6,0xDF,0xD4,0xCC,0xB3,0xFF,0xFF,0xFE,0xF0,0xB8,0xD6,0xB8,0xCB,0xE2,0xB6,0x47))
class RealEncoderTestCase(unittest.TestCase):
def testChar(self):
assert encoder.encode(
univ.Real((123, 10, 11))
) == ints2octs((9, 7, 3, 49, 50, 51, 69, 49, 49))
def testBin1(self):
assert encoder.encode( # default binEncBase = 2
univ.Real((0.5, 2, 0)) # check encbase = 2 and exponent = -1
) == ints2octs((9, 3, 128, 255, 1))
def testBin2(self):
r = univ.Real((3.25, 2, 0))
r.binEncBase = 8 # change binEncBase only for this instance of Real
assert encoder.encode(
r # check encbase = 8
) == ints2octs((9, 3, 148, 255, 13))
def testBin3(self):
# change binEncBase in the RealEncoder instance => for all further Reals
encoder.tagMap[univ.Real.tagSet].binEncBase = 16
assert encoder.encode(
univ.Real((0.00390625, 2, 0)) # check encbase = 16
) == ints2octs((9, 3, 160, 254, 1))
def testBin4(self):
# choose binEncBase automatically for all further Reals (testBin[4-7])
encoder.tagMap[univ.Real.tagSet].binEncBase = None
assert encoder.encode(
univ.Real((1, 2, 0)) # check exponent = 0
) == ints2octs((9, 3, 128, 0, 1))
def testBin5(self):
assert encoder.encode(
univ.Real((3, 2, -1020)) # case of 2 octs for exponent and
# negative exponent and abs(exponent) is
# all 1's and fills the whole octet(s)
) == ints2octs((9, 4, 161, 255, 1, 3))
def testBin6(self):
assert encoder.encode(
univ.Real((1, 2, 262140)) # case of 3 octs for exponent and
# check that first 9 bits for exponent
# are not all 1's
) == ints2octs((9, 5, 162, 0, 255, 255, 1))
def testBin7(self):
assert encoder.encode(
univ.Real((-1, 2, 76354972)) # case of >3 octs for exponent and
# mantissa < 0
) == ints2octs((9, 7, 227, 4, 1, 35, 69, 103, 1))
def testPlusInf(self):
assert encoder.encode(univ.Real('inf')) == ints2octs((9, 1, 64))
def testMinusInf(self):
assert encoder.encode(univ.Real('-inf')) == ints2octs((9, 1, 65))
def testZero(self):
assert encoder.encode(univ.Real(0)) == ints2octs((9, 0))
class SequenceEncoderTestCase(unittest.TestCase):
def setUp(self):
self.s = univ.Sequence(componentType=namedtype.NamedTypes(
namedtype.NamedType('place-holder', univ.Null('')),
namedtype.OptionalNamedType('first-name', univ.OctetString('')),
namedtype.DefaultedNamedType('age', univ.Integer(33)),
))
def __init(self):
self.s.clear()
self.s.setComponentByPosition(0)
def __initWithOptional(self):
self.s.clear()
self.s.setComponentByPosition(0)
self.s.setComponentByPosition(1, 'quick brown')
def __initWithDefaulted(self):
self.s.clear()
self.s.setComponentByPosition(0)
self.s.setComponentByPosition(2, 1)
def __initWithOptionalAndDefaulted(self):
self.s.clear()
self.s.setComponentByPosition(0, univ.Null(''))
self.s.setComponentByPosition(1, univ.OctetString('quick brown'))
self.s.setComponentByPosition(2, univ.Integer(1))
def testDefMode(self):
self.__init()
assert encoder.encode(self.s) == ints2octs((48, 2, 5, 0))
def testIndefMode(self):
self.__init()
assert encoder.encode(
self.s, defMode=0
) == ints2octs((48, 128, 5, 0, 0, 0))
def testDefModeChunked(self):
self.__init()
assert encoder.encode(
self.s, defMode=1, maxChunkSize=4
) == ints2octs((48, 2, 5, 0))
def testIndefModeChunked(self):
self.__init()
assert encoder.encode(
self.s, defMode=0, maxChunkSize=4
) == ints2octs((48, 128, 5, 0, 0, 0))
def testWithOptionalDefMode(self):
self.__initWithOptional()
assert encoder.encode(self.s) == ints2octs((48, 15, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110))
def testWithOptionalIndefMode(self):
self.__initWithOptional()
assert encoder.encode(
self.s, defMode=0
) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 0, 0))
def testWithOptionalDefModeChunked(self):
self.__initWithOptional()
assert encoder.encode(
self.s, defMode=1, maxChunkSize=4
) == ints2octs((48, 21, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110))
def testWithOptionalIndefModeChunked(self):
self.__initWithOptional()
assert encoder.encode(
self.s, defMode=0, maxChunkSize=4
) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 0, 0))
def testWithDefaultedDefMode(self):
self.__initWithDefaulted()
assert encoder.encode(self.s) == ints2octs((48, 5, 5, 0, 2, 1, 1))
def testWithDefaultedIndefMode(self):
self.__initWithDefaulted()
assert encoder.encode(
self.s, defMode=0
) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))
def testWithDefaultedDefModeChunked(self):
self.__initWithDefaulted()
assert encoder.encode(
self.s, defMode=1, maxChunkSize=4
) == ints2octs((48, 5, 5, 0, 2, 1, 1))
def testWithDefaultedIndefModeChunked(self):
self.__initWithDefaulted()
assert encoder.encode(
self.s, defMode=0, maxChunkSize=4
) == ints2octs((48, 128, 5, 0, 2, 1, 1, 0, 0))
def testWithOptionalAndDefaultedDefMode(self):
self.__initWithOptionalAndDefaulted()
assert encoder.encode(self.s) == ints2octs((48, 18, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1))
def testWithOptionalAndDefaultedIndefMode(self):
self.__initWithOptionalAndDefaulted()
assert encoder.encode(
self.s, defMode=0
) == ints2octs((48, 128, 5, 0, 4, 11, 113, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 2, 1, 1, 0, 0))
def testWithOptionalAndDefaultedDefModeChunked(self):
self.__initWithOptionalAndDefaulted()
assert encoder.encode(
self.s, defMode=1, maxChunkSize=4
) == ints2octs((48, 24, 5, 0, 36, 17, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 2, 1, 1))
def testWithOptionalAndDefaultedIndefModeChunked(self):
self.__initWithOptionalAndDefaulted()
assert encoder.encode(
self.s, defMode=0, maxChunkSize=4
) == ints2octs((48, 128, 5, 0, 36, 128, 4, 4, 113, 117, 105, 99, 4, 4, 107, 32, 98, 114, 4, 3, 111, 119, 110, 0, 0, 2, 1, 1, 0, 0))
class ChoiceEncoderTestCase(unittest.TestCase):
def setUp(self):
self.s = univ.Choice(componentType=namedtype.NamedTypes(
namedtype.NamedType('place-holder', univ.Null('')),
namedtype.NamedType('number', univ.Integer(0)),
namedtype.NamedType('string', univ.OctetString())
))
def testEmpty(self):
try:
encoder.encode(self.s)
except PyAsn1Error:
pass
else:
assert 0, 'encoded unset choice'
def testFilled(self):
self.s.setComponentByPosition(0, univ.Null(''))
assert encoder.encode(self.s) == ints2octs((5, 0))
def testTagged(self):
s = self.s.subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,4)
)
s.setComponentByPosition(0, univ.Null(''))
assert encoder.encode(s) == ints2octs((164, 2, 5, 0))
def testUndefLength(self):
self.s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
assert encoder.encode(self.s, defMode=False, maxChunkSize=3) == ints2octs((36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0))
def testTaggedUndefLength(self):
s = self.s.subtype(
explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,4)
)
s.setComponentByPosition(2, univ.OctetString('abcdefgh'))
assert encoder.encode(s, defMode=False, maxChunkSize=3) == ints2octs((164, 128, 36, 128, 4, 3, 97, 98, 99, 4, 3, 100, 101, 102, 4, 2, 103, 104, 0, 0, 0, 0))
class AnyEncoderTestCase(unittest.TestCase):
def setUp(self):
self.s = univ.Any(encoder.encode(univ.OctetString('fox')))
def testUntagged(self):
assert encoder.encode(self.s) == ints2octs((4, 3, 102, 111, 120))
def testTaggedEx(self):
s = self.s.subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
)
assert encoder.encode(s) == ints2octs((164, 5, 4, 3, 102, 111, 120))
def testTaggedIm(self):
s = self.s.subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)
)
assert encoder.encode(s) == ints2octs((132, 5, 4, 3, 102, 111, 120))
if __name__ == '__main__': unittest.main()
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Software construction toolkit site_scons configuration.
This module sets up SCons for use with this toolkit. This should contain setup
which occurs outside of environments. If a method operates within the context
of an environment, it should instead go in a tool in site_tools and be invoked
for the target environment.
"""
import __builtin__
import os
import sys
import time
import SCons
import usage_log
def CheckSConsLocation():
"""Check that the version of scons we are running lives in the native_client
tree.
Without this, if system scons is used then it produces rather cryptic error
messages.
"""
scons_location = os.path.dirname(os.path.abspath(SCons.__file__))
nacl_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if not scons_location.startswith(nacl_dir):
raise SCons.Errors.UserError('native_client must be built with its local '
'version of SCons.\n You are running SCons '
'from %s' % scons_location)
def _HostPlatform():
"""Returns the current host platform.
That is, the platform we're actually running SCons on. You shouldn't use
this inside your SConscript files; instead, include the appropriate
target_platform tool for your environments. When you call
BuildEnvironments(), only environments with the current host platform will be
built. If for some reason you really need to examine the host platform,
check env.Bit('host_windows') / env.Bit('host_linux') / env.Bit('host_mac').
Returns:
The host platform name - one of ('WINDOWS', 'LINUX', 'MAC').
"""
platform_map = {
'win32': 'WINDOWS',
'cygwin': 'WINDOWS',
'linux': 'LINUX',
'linux2': 'LINUX',
'linux3': 'LINUX',
'darwin': 'MAC',
}
if sys.platform not in platform_map:
    print ('site_init.py warning: platform "%s" is not in platform map.' %
sys.platform)
return platform_map.get(sys.platform, sys.platform)
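# For example, _HostPlatform() returns 'LINUX' for sys.platform 'linux2' or 'linux3'
# and 'MAC' for 'darwin'; for an unmapped platform it prints the warning above and
# falls back to returning sys.platform itself.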
def BuildEnvironmentSConscripts(env):
"""Evaluates SConscripts for the environment.
Called by BuildEnvironments().
"""
# Read SConscript for each component
# TODO: Remove BUILD_COMPONENTS once all projects have transitioned to the
# BUILD_SCONSCRIPTS nomenclature.
for c in env.SubstList2('$BUILD_SCONSCRIPTS', '$BUILD_COMPONENTS'):
# Clone the environment so components can't interfere with each other
ec = env.Clone()
if ec.Entry(c).isdir():
# The component is a directory, so assume it contains a SConscript
# file.
c_dir = ec.Dir(c)
# Use 'build.scons' as the default filename, but if that doesn't
# exist, fall back to 'SConscript'.
c_script = c_dir.File('build.scons')
if not c_script.exists():
c_script = c_dir.File('SConscript')
else:
# The component is a SConscript file.
c_script = ec.File(c)
c_dir = c_script.dir
# Make c_dir a string.
c_dir = str(c_dir)
# Use build_dir differently depending on where the SConscript is.
if not ec.RelativePath('$TARGET_ROOT', c_dir).startswith('..'):
# The above expression means: if c_dir is $TARGET_ROOT or anything
# under it. Going from c_dir to $TARGET_ROOT and dropping the not fails
# to include $TARGET_ROOT.
# We want to be able to allow people to use addRepository to back things
# under $TARGET_ROOT/$OBJ_ROOT with things from above the current
# directory. When we are passed a SConscript that is already under
# $TARGET_ROOT, we should not use build_dir.
start = time.clock()
ec.SConscript(c_script, exports={'env': ec}, duplicate=0)
if SCons.Script.ARGUMENTS.get('verbose'):
print "[%5d] Loaded" % (1000 * (time.clock() - start)), c_script
elif not ec.RelativePath('$MAIN_DIR', c_dir).startswith('..'):
# The above expression means: if c_dir is $MAIN_DIR or anything
      # under it. Going from c_dir to $MAIN_DIR and dropping the not fails
# to include $MAIN_DIR.
# Also, if we are passed a SConscript that
# is not under $MAIN_DIR, we should fail loudly, because it is unclear how
# this will correspond to things under $OBJ_ROOT.
start = time.clock()
ec.SConscript(c_script, variant_dir='$OBJ_ROOT/' + c_dir,
exports={'env': ec}, duplicate=0)
if SCons.Script.ARGUMENTS.get('verbose'):
print "[%5d] Loaded" % (1000 * (time.clock() - start)), c_script
else:
raise SCons.Errors.UserError(
'Bad location for a SConscript. "%s" is not under '
'\$TARGET_ROOT or \$MAIN_DIR' % c_script)
def FilterEnvironments(environments):
"""Filters out the environments to be actually build from the specified list
Args:
environments: List of SCons environments.
Returns:
List of environments which were matched
"""
# Get options
build_modes = SCons.Script.GetOption('build_mode')
# TODO: Remove support legacy MODE= argument, once everyone has transitioned
# to --mode.
legacy_mode_option = SCons.Script.ARGUMENTS.get('MODE')
if legacy_mode_option:
build_modes = legacy_mode_option
environment_map = dict((env['BUILD_TYPE'], env) for env in environments)
# Add aliases for the host platform so that the caller of Scons does
# not need to work out which platform they are running on.
platform_map = {
'win32': 'win',
'cygwin': 'win',
'linux': 'linux',
'linux2': 'linux',
'darwin': 'mac',
}
if sys.platform in platform_map:
name = platform_map[sys.platform]
environment_map['opt-host'] = environment_map['opt-%s' % name]
environment_map['dbg-host'] = environment_map['dbg-%s' % name]
environment_map['coverage-host'] = environment_map['coverage-%s' % name]
matched_envs = []
for mode in build_modes.split(','):
if mode not in environment_map:
raise SCons.Errors.UserError('Build mode "%s" is not defined' % mode)
matched_envs.append(environment_map[mode])
return matched_envs
def BuildEnvironments(environments):
"""Build a collection of SConscripts under a collection of environments.
The environments are subject to filtering (c.f. FilterEnvironments)
Args:
environments: List of SCons environments.
Returns:
List of environments which were actually evaluated (built).
"""
usage_log.log.AddEntry('BuildEnvironments start')
for e in environments:
# Make this the root environment for deferred functions, so they don't
# execute until our call to ExecuteDefer().
e.SetDeferRoot()
# Defer building the SConscripts, so that other tools can do
# per-environment setup first.
e.Defer(BuildEnvironmentSConscripts)
# Execute deferred functions
e.ExecuteDefer()
# Add help on targets.
AddTargetHelp()
usage_log.log.AddEntry('BuildEnvironments done')
#------------------------------------------------------------------------------
def _ToolExists():
"""Replacement for SCons tool module exists() function, if one isn't present.
Returns:
True. This enables modules which always exist not to need to include a
dummy exists() function.
"""
return True
def _ToolModule(self):
"""Thunk for SCons.Tool.Tool._tool_module to patch in exists() function.
Returns:
The module from the original SCons.Tool.Tool._tool_module call, with an
exists() method added if it wasn't present.
"""
module = self._tool_module_orig()
if not hasattr(module, 'exists'):
module.exists = _ToolExists
return module
#------------------------------------------------------------------------------
def AddSiteDir(site_dir):
"""Adds a site directory, as if passed to the --site-dir option.
Args:
site_dir: Site directory path to add, relative to the location of the
SConstruct file.
This may be called from the SConscript file to add a local site scons
directory for a project. This does the following:
* Adds site_dir/site_scons to sys.path.
* Imports site_dir/site_init.py.
* Adds site_dir/site_scons to the SCons tools path.
"""
# Call the same function that SCons does for the --site-dir option.
SCons.Script.Main._load_site_scons_dir(
SCons.Node.FS.get_default_fs().SConstruct_dir, site_dir)
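# Example (sketch; the path is hypothetical):
#   AddSiteDir('third_party/some_project')
# adds third_party/some_project/site_scons to sys.path and the SCons tools path and
# imports the associated site_init.py, exactly as if --site-dir had been passed.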
#------------------------------------------------------------------------------
_new_options_help = '''
Additional options for SCons:
--mode=MODE Specify build mode, e.g. "dbg-linux,nacl".
--host-platform=PLATFORM Force SCons to use PLATFORM as the host platform,
instead of the actual platform on which SCons is
run. Useful for examining the dependency tree
which would be created, but not useful for
actually running the build because it'll attempt
to use the wrong tools for your actual platform.
--site-path=DIRLIST Comma-separated list of additional site
directory paths; each is processed as if passed
to --site-dir.
--usage-log=FILE Write XML usage log to FILE.
'''
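# For example (sketch), a debug Linux build with usage logging could be requested with:
#   scons --mode=dbg-linux,nacl --usage-log=usage.xml
# where the --mode values must match BUILD_TYPE names of the environments passed to
# BuildEnvironments() (c.f. FilterEnvironments above).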
def SiteInitMain():
"""Main code executed in site_init."""
# Bail out if we've been here before. This is needed to handle the case where
# this site_init.py has been dropped into a project directory.
if hasattr(__builtin__, 'BuildEnvironments'):
return
CheckSConsLocation()
usage_log.log.AddEntry('Software Construction Toolkit site init')
# Let people use new global methods directly.
__builtin__.AddSiteDir = AddSiteDir
__builtin__.FilterEnvironments = FilterEnvironments
__builtin__.BuildEnvironments = BuildEnvironments
# Legacy method names
# TODO: Remove these once they're no longer used anywhere.
__builtin__.BuildComponents = BuildEnvironments
# Set list of default tools for component_setup
__builtin__.component_setup_tools = [
# Defer must be first so other tools can register environment
# setup/cleanup functions.
'defer',
# Component_targets must precede component_builders so builders can
# define target groups.
'component_targets',
'command_output',
'component_bits',
'component_builders',
'environment_tools',
'publish',
'replicate',
'wix',
]
# Patch Tool._tool_module method to fill in an exists() method for the
# module if it isn't present.
# TODO: This functionality should be patched into SCons itself by changing
# Tool.__init__().
SCons.Tool.Tool._tool_module_orig = SCons.Tool.Tool._tool_module
SCons.Tool.Tool._tool_module = _ToolModule
# Add our options
SCons.Script.AddOption(
'--mode', '--build-mode',
dest='build_mode',
nargs=1, type='string',
action='store',
metavar='MODE',
default='opt-host,nacl',
help='build mode(s)')
SCons.Script.AddOption(
'--host-platform',
dest='host_platform',
nargs=1, type='string',
action='store',
metavar='PLATFORM',
      help='host platform to build for')
SCons.Script.AddOption(
'--site-path',
dest='site_path',
nargs=1, type='string',
action='store',
metavar='PATH',
help='comma-separated list of site directories')
SCons.Script.AddOption(
'--usage-log',
dest='usage_log',
nargs=1, type='string',
action='store',
metavar='PATH',
help='file to write XML usage log to')
SCons.Script.Help(_new_options_help)
# Set up usage log
usage_log_file = SCons.Script.GetOption('usage_log')
if usage_log_file:
usage_log.log.SetOutputFile(usage_log_file)
# Set current host platform
host_platform = SCons.Script.GetOption('host_platform')
if not host_platform:
host_platform = _HostPlatform()
__builtin__.HOST_PLATFORM = host_platform
# Check for site path. This is a list of site directories which each are
# processed as if they were passed to --site-dir.
site_path = SCons.Script.GetOption('site_path')
if site_path:
for site_dir in site_path.split(','):
AddSiteDir(site_dir)
# Since our site dir was specified on the SCons command line, SCons will
# normally only look at our site dir. Add back checking for project-local
# site_scons directories.
if not SCons.Script.GetOption('no_site_dir'):
SCons.Script.Main._load_site_scons_dir(
SCons.Node.FS.get_default_fs().SConstruct_dir, None)
# Run main code
SiteInitMain()
|
|
import pytest
from americano import parse, ParseError
"""
List of parameters to test. Follows the function signature: f(expression, expected, context={})
"""
test_eval_parameters = [
# Literals
['\"\\"\"', '"'],
["\'\\'\'", "'"],
['"4"', '4'],
["'4'", '4'],
['"with space"', 'with space'],
["'with space'", 'with space'],
['"A"', 'A'],
["'A'", 'A'],
['"+"', '+'],
["'+'", '+'],
['"$var"', '$var'],
["'$var'", '$var'],
['1', 1],
['100', 100],
['0.0', 0.0],
['0.12', 0.12],
['12.0', 12.0],
['12.34', 12.34],
['true', True],
['false', False],
['null', None],
# Variables
['var', 1, {'var': 1}],
['var', 'A', {'var': 'A'}],
['var1 + var2', 3, {'var1': 1, 'var2': 2}],
# Arithmetic operators
['1 + 2', 3],
['1.2 + 3', 4.2],
['1.2 + 3.4', 4.6],
['"A" + "B"', 'AB'],
['null + "A"', 'nullA'],
['"A" + null', 'Anull'],
['"A" + var', 'Anull', {'var': None}],
['"A" + 1', 'A1'],
['"A" + true', 'Atrue'],
['"A" + false', 'Afalse'],
['1 + true', 2],
['1 + false', 1],
['true + false', 1],
['1 - 2', -1],
['1.2 - 3', -1.8],
['1.2 - 3.4', -2.2],
['2 - 1', 1],
['3 - 1.2', 1.8],
['3.4 - 2.2', 1.2],
['"2" - 1', 1],
['"2.5" - 1', 1.5],
['2 * 3', 6],
['2.1 * 3', 6.3],
['2.1 * 3.5', 7.35],
['true * 5', 5],
['false * 5', 0],
['null * 5', 0],
['"3" * 5', 15],
['6 / 3', 2.0],
['5 / 2', 2.5],
['2.6 / 2', 1.3],
['6 / 2.4', 2.5],
['10 / "2"', 5],
# Unary operators
['-1', -1],
['+1', 1],
['+1.7', 1.7],
['+"1"', 1],
['+"1.5"', 1.5],
['+true', 1],
['+false', 0],
['+null', 0],
['!true', False],
['!false', True],
['!0', True],
['!1', False],
# Logic operators
['true && true', True],
['true && false', False],
['false && true', False],
['false && false', False],
['1 && 1', 1],
['1 && 0', 0],
['0 && 1', 0],
['0 && 0', 0],
['true || true', True],
['true || false', True],
['false || true', True],
['false || false', False],
['1 || 1', 1],
['1 || 0', 1],
['0 || 1', 1],
['0 || 0', 0],
# Comparison operators
['1 < 2', True],
['2 < 1', False],
['2 < 2', False],
['1 <= 2', True],
['2 <= 1', False],
['2 <= 2', True],
['1 >= 2', False],
['2 >= 1', True],
['2 >= 2', True],
['1 > 2', False],
['2 > 1', True],
['2 > 2', False],
['1 === 1', True],
['1 === 2', False],
['1 === "1"', False],
['1 === 1.0', True],
['1 === true', False],
['"A" === "A"', True],
['"A" === "B"', False],
['null === null', True],
['null === 0', False],
['null === 1', False],
['1 == 1', True],
['1 == 2', False],
['1 == "1"', True],
['1 == 1.0', True],
['1 == true', True],
['null == null', True],
['null == 0', False],
['null == 1', False],
['"A" == "A"', True],
['"A" == "B"', False],
['"A" == 1', False],
['1 !== 1', False],
['1 !== 2', True],
['1 !== "1"', True],
['1 !== 1.0', False],
['1 !== true', True],
['null !== null', False],
['null !== 0', True],
['null !== 1', True],
['"A" !== "A"', False],
['"A" !== "B"', True],
['1 != 1', False],
['1 != 2', True],
['1 != "1"', False],
['1 != 1.0', False],
['1 != true', False],
['null != null', False],
['null != 0', True],
['null != 1', True],
['"A" != "A"', False],
['"A" != "B"', True],
['-1 < null', True],
['-1 > null', False],
['-1 <= null', True],
['-1 >= null', False],
['0 < null', False],
['0 > null', False],
['0 <= null', True],
['0 >= null', True],
['1 < null', False],
['1 > null', True],
['1 <= null', False],
['1 >= null', True],
['null < null', False],
['null > null', False],
['null <= null', True],
['null >= null', True],
['"" < null', False],
['"" > null', False],
['"" <= null', True],
['"" >= null', True],
['"A" < null', False],
['"A" > null', False],
['"A" <= null', False],
['"A" >= null', False],
# Parentheses
['(1)', 1],
['(1 + 2)', 3],
['((1))', 1],
['(1 + 2) * (3 + 7)', 30],
['(var)', 1, {'var': 1}],
['([1])', [1]],
['[(1)]', [1]],
# Array
['[]', []],
['[1]', [1]],
['[1,]', [1]],
['[1, 2]', [1, 2]],
['[1, 2] == [1, 2]', True],
['[1, 2] == var', True, {'var': [1, 2]}],
['[1, 2] == var', False, {'var': [0]}],
# Properties
['a.b', 'c', {'a': {'b': 'c'}}],
['a.b', 1, {'a': {'b': 1}}],
['a.b + 1', 3, {'a': {'b': 2}}],
['1 + a.b', 3, {'a': {'b': 2}}],
['a["b"]', 'c', {'a': {'b': 'c'}}],
['a["b"]', 1, {'a': {'b': 1}}],
['a["b"] + 1', 3, {'a': {'b': 2}}],
['1 + a["b"]', 3, {'a': {'b': 2}}],
# Precedence
['3 + 4 - 2', 5],
['4 - 2 + 3', 5],
['6 / 3 * 4', 8],
['3 * 4 / 6', 2],
['1 + 2 * 3', 7],
['2 * 3 + 1', 7],
['1 - 2 * 3', -5],
['2 * 3 - 1', 5],
['1 + 6 / 2', 4],
['6 / 2 + 1', 4],
['-1 + 3', 2],
['3 + -1', 2],
['true !== true || true', True],
['3 === 2 + 1', True],
['4 * (1 + 2)', 12],
['3 * (1 + (5 * 7))', 108],
# Functions
['is_true(true)', True, {'is_true': lambda x: x is True}],
['is_true(false)', False, {'is_true': lambda x: x is True}],
# Ternary operators
['true ? "a" : "b"', 'a'],
['false ? "a" : "b"', 'b'],
['("a" !== undefined ? "a" : "")', 'a'],
]
for parameter_list in test_eval_parameters:
if len(parameter_list) == 2:
parameter_list.append({})
@pytest.mark.parametrize('expression,expected,context', test_eval_parameters)
def test_eval(expression, expected, context):
expected = pytest.approx(expected) if isinstance(expected, float) else expected
p = parse(expression)
result = p.eval(context)
assert result == expected
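def test_parse_reuse():
    # Minimal usage sketch beyond the parameter table above; assumes a parsed
    # expression can be evaluated repeatedly against different contexts (mirrors
    # the 'var1 + var2' row of test_eval_parameters).
    p = parse('var1 + var2')
    assert p.eval({'var1': 1, 'var2': 2}) == 3
    assert p.eval({'var1': 2, 'var2': 3}) == 5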
def test_no_nud():
with pytest.raises(ParseError):
parse('?')
|
|
# Copyright 2015 Openstack Foundation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The view list module handles creating Jenkins List views.
To create a list view specify ``list`` in the ``view-type`` attribute
to the :ref:`view_list` definition.
:View Parameters:
* **name** (`str`): The name of the view.
* **view-type** (`str`): The type of view.
* **description** (`str`): A description of the view. (default '')
* **filter-executors** (`bool`): Show only executors that can
execute the included views. (default false)
* **filter-queue** (`bool`): Show only included jobs in builder
queue. (default false)
* **job-name** (`list`): List of jobs to be included.
* **job-filters** (`dict`): Job filters to be included. Requires
:jenkins-wiki:`View Job Filters <View+Job+Filters>`
* **most-recent** (`dict`)
:most-recent:
* **max-to-include** (`int`): Maximum number of jobs
to include. (default 0)
* **check-start-time** (`bool`): Check job start
time. (default false)
* **build-duration** (`dict`)
:build-duration:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **build-duration-type** ('str'): Duration of the
build. (default Latest)
* **amount-type**: ('str'): Duration in hours,
days or builds. (default Hours)
* **amount**: ('int'): How far back to check.
(default 0)
* **less-than**: ('bool'): Check build duration
less than or more than. (default True)
* **build-duration-minutes**: ('int'): Build
duration minutes. (default 0)
* **build-trend** (`dict`)
:build-trend:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **build-trend-type** ('str'): Duration of the
build. (default Latest)
* **amount-type**: ('str'): Duration in hours,
days or builds. (default Hours)
* **amount**: ('int'): How far back to check.
(default 0)
* **status**: ('str'): Job status.
(default Completed)
* **job-status** (`dict`)
:job-status:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **unstable** ('bool'): Jobs with status
unstable. (default False)
* **failed** ('bool'): Jobs with status
failed. (default False)
* **aborted** ('bool'): Jobs with status
aborted. (default False)
* **disabled** ('bool'): Jobs with status
disabled. (default False)
* **stable** ('bool'): Jobs with status
stable. (default False)
* **fallback** (`dict`)
:fallback:
* **fallback-type** ('str'): Fallback type to include/exclude
for all jobs in a view, if no jobs have been included by
previous filters. (default REMOVE_ALL_IF_ALL_INCLUDED)
* **build-status** (`dict`)
:build-status:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **never-built** ('bool'): Jobs that are never
built. (default False)
* **building** ('bool'): Jobs that are being
built. (default False)
* **in-build-queue** ('bool'): Jobs that are in
the build queue. (default False)
* **user-relevence** (`dict`)
:user-relevence:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **build-count** ('str'): Count of builds.
(default AtLeastOne)
* **amount-type**: ('str'): Duration in hours,
days or builds. (default Hours)
* **amount**: ('int'): How far back to check.
(default 0)
* **match-user-id** ('bool'): Jobs matching
user-id. (default False)
* **match-user-fullname** ('bool'): Jobs
matching user fullname. (default False)
* **ignore-case** ('bool'): Ignore case.
(default False)
* **ignore-whitespace** ('bool'): Ignore
whitespace. (default False)
* **ignore-non-alphaNumeric** ('bool'): Ignore
non-alphaNumeric. (default False)
* **match-builder** ('bool'): Jobs matching
builder. (default False)
* **match-email** ('bool'): Jobs matching
email. (default False)
* **match-scm-changes** ('bool'): Jobs matching
scm changes. (default False)
* **regex-job** (`dict`)
:regex-job:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **regex-name** ('str'): Regular expression name.
(default '')
* **regex** ('str'): Regular expression. (default '')
    * **job-type** (`dict`)
:job-type:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **job-type** ('str'): Type of Job.
(default hudson.model.FreeStyleProject)
* **parameter** (`dict`)
:parameter:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **name** ('str'): Job name to match. (default '')
* **value** ('str'): Value to match. (default '')
* **desc** ('str'): Description to match. (default '')
* **use-default-value** ('bool'): Use default value.
(default False)
* **match-builds-in-progress** ('bool'): Match build in
progress. (default False)
* **match-all-builds** ('bool'): Match all builds.
(default False)
* **max-builds-to-match** ('int'): Maximum builds to match.
(default 0)
* **other-views** (`dict`)
:other-views:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **view-name** ('str'): View name.
(default select a view other than this one)
* **scm** (`dict`)
:scm:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **scm-type** ('str'): Type of SCM.
(default hudson.scm.NullSCM)
* **secured-job** (`dict`)
:secured-job:
* **match-type** ('str'): Jobs that match a filter
to include. (default includeMatched)
* **user-permissions** (`dict`)
:user-permissions:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **configure** ('bool'): User with configure permissions.
(default false)
            * **build** ('bool'): User with build permissions.
              (default false)
            * **workspace** ('bool'): User with workspace permissions.
              (default false)
* **permission-check**: ('str'): Match user permissions.
(default MustMatchAll)
* **upstream-downstream** (`dict`)
:upstream-downstream:
* **include-upstream** ('bool'): Jobs that match upstream.
(default False)
* **include-downstream** ('bool'): Jobs that match downstream.
(default False)
* **recursive** ('bool'): Jobs that are recursive.
(default False)
* **exclude-originals** ('bool'): Jobs that are originals.
(default False)
* **unclassified** (`dict`)
:unclassified:
* **match-type** ('str'): Jobs that match a filter to include.
(default includeMatched)
* **columns** (`list`): List of columns to be shown in view.
    * **regex** (`str`): Regular expression for selecting jobs.
      (optional)
    * **recurse** (`bool`): Recurse in subfolders. (default false)
* **status-filter** (`bool`): Filter job list by enabled/disabled
status. (optional)
Example:
.. literalinclude::
/../../tests/views/fixtures/view_list001.yaml
Example:
.. literalinclude::
/../../tests/views/fixtures/view_list002.yaml
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
COLUMN_DICT = {
'status': 'hudson.views.StatusColumn',
'weather': 'hudson.views.WeatherColumn',
'job': 'hudson.views.JobColumn',
'last-success': 'hudson.views.LastSuccessColumn',
'last-failure': 'hudson.views.LastFailureColumn',
'last-duration': 'hudson.views.LastDurationColumn',
'build-button': 'hudson.views.BuildButtonColumn',
'last-stable': 'hudson.views.LastStableColumn',
'robot-list': 'hudson.plugins.robot.view.RobotListViewColumn',
'find-bugs': 'hudson.plugins.findbugs.FindBugsColumn',
'jacoco': 'hudson.plugins.jacococoveragecolumn.JaCoCoColumn',
'git-branch': 'hudson.plugins.git.GitBranchSpecifierColumn',
'schedule-build':
'org.jenkinsci.plugins.schedulebuild.ScheduleBuildButtonColumn',
'priority-sorter': 'jenkins.advancedqueue.PrioritySorterJobColumn',
'build-filter': 'hudson.views.BuildFilterColumn',
'desc': 'jenkins.branch.DescriptionColumn',
'policy-violations':
'com.sonatype.insight.ci.hudson.QualityColumn '
'plugin="sonatype-clm-ci"',
'member-graph-view':
'com.barchart.jenkins.cascade.GraphViewColumn '
'plugin="maven-release-cascade"',
'extra-tests-total': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>2</testResultFormat>'],
'extra-tests-failed': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>3</testResultFormat>'],
'extra-tests-passed': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>4</testResultFormat>'],
'extra-tests-skipped': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>5</testResultFormat>'],
'extra-tests-format-0': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>0</testResultFormat>'],
'extra-tests-format-1': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>1</testResultFormat>'],
'extra-build-description': [
['jenkins.plugins.extracolumns.BuildDescriptionColumn',
{'plugin': 'extra-columns'}],
'<columnWidth>3</columnWidth>', '<forceWidth>false</forceWidth>'],
'extra-build-parameters': [
['jenkins.plugins.extracolumns.BuildParametersColumn',
{'plugin': 'extra-columns'}],
'<singlePara>false</singlePara>', '<parameterName/>'],
'extra-last-user-name':
'jenkins.plugins.extracolumns.UserNameColumn'
' plugin="extra-columns"',
'extra-last-output':
'jenkins.plugins.extracolumns.LastBuildConsoleColumn'
' plugin="extra-columns"',
'extra-workspace-link':
'jenkins.plugins.extracolumns.WorkspaceColumn '
'plugin="extra-columns"',
'extra-configure-button':
'jenkins.plugins.extracolumns.ConfigureProjectColumn'
' plugin="extra-columns"',
}
DEFAULT_COLUMNS = ['status', 'weather', 'job', 'last-success', 'last-failure',
'last-duration', 'build-button']
class List(jenkins_jobs.modules.base.Base):
sequence = 0
def root_xml(self, data):
root = XML.Element('hudson.model.ListView')
mapping = [
('name', 'name', None),
('description', 'description', ''),
('filter-executors', 'filterExecutors', False),
('filter-queue', 'filterQueue', False),
]
helpers.convert_mapping_to_xml(root, data, mapping, fail_required=True)
XML.SubElement(root, 'properties',
{'class': 'hudson.model.View$PropertyList'})
jn_xml = XML.SubElement(root, 'jobNames')
jobnames = data.get('job-name', None)
XML.SubElement(
jn_xml,
'comparator', {
'class': 'hudson.util.CaseInsensitiveComparator'
}
)
if jobnames is not None:
# Job names must be sorted in the xml
jobnames = sorted(jobnames, key=str.lower)
for jobname in jobnames:
XML.SubElement(jn_xml, 'string').text = str(jobname)
job_filter_xml = XML.SubElement(root, 'jobFilters')
jobfilters = data.get('job-filters', [])
for jobfilter in jobfilters:
if jobfilter == 'most-recent':
mr_xml = XML.SubElement(job_filter_xml,
'hudson.views.MostRecentJobsFilter')
mr_xml.set('plugin', 'view-job-filters')
mr_data = jobfilters.get('most-recent')
mapping = [
('max-to-include', 'maxToInclude', '0'),
('check-start-time', 'checkStartTime', False),
]
helpers.convert_mapping_to_xml(mr_xml, mr_data, mapping,
fail_required=True)
if jobfilter == 'build-duration':
bd_xml = XML.SubElement(job_filter_xml,
'hudson.views.BuildDurationFilter')
bd_xml.set('plugin', 'view-job-filters')
bd_data = jobfilters.get('build-duration')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('build-duration-type', 'buildCountTypeString', 'Latest'),
('amount-type', 'amountTypeString', 'Hours'),
('amount', 'amount', '0'),
('less-than', 'lessThan', True),
('build-duration-minutes', 'buildDurationMinutes', '0'),
]
helpers.convert_mapping_to_xml(bd_xml, bd_data, mapping,
fail_required=True)
if jobfilter == 'build-trend':
bt_xml = XML.SubElement(job_filter_xml,
'hudson.views.BuildTrendFilter')
bt_xml.set('plugin', 'view-job-filters')
bt_data = jobfilters.get('build-trend')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('build-trend-type', 'buildCountTypeString', 'Latest'),
('amount-type', 'amountTypeString', 'Hours'),
('amount', 'amount', '0'),
('status', 'statusTypeString', 'Completed'),
]
helpers.convert_mapping_to_xml(bt_xml, bt_data, mapping,
fail_required=True)
if jobfilter == 'job-status':
js_xml = XML.SubElement(job_filter_xml,
'hudson.views.JobStatusFilter')
js_xml.set('plugin', 'view-job-filters')
js_data = jobfilters.get('job-status')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('unstable', 'unstable', False),
('failed', 'failed', False),
('aborted', 'aborted', False),
('disabled', 'disabled', False),
('stable', 'stable', False),
]
helpers.convert_mapping_to_xml(js_xml, js_data, mapping,
fail_required=True)
if jobfilter == 'upstream-downstream':
ud_xml = XML.SubElement(
job_filter_xml,
'hudson.views.UpstreamDownstreamJobsFilter'
)
ud_xml.set('plugin', 'view-job-filters')
ud_data = jobfilters.get('upstream-downstream')
mapping = [
('include-upstream', 'includeUpstream',
False),
('include-downstream', 'includeDownstream', False),
('recursive', 'recursive', False),
('exclude-originals', 'excludeOriginals', False),
]
helpers.convert_mapping_to_xml(ud_xml, ud_data, mapping,
fail_required=True)
if jobfilter == 'fallback':
fb_xml = XML.SubElement(
job_filter_xml,
'hudson.views.AddRemoveFallbackFilter'
)
fb_xml.set('plugin', 'view-job-filters')
fb_data = jobfilters.get('fallback')
mapping = [
('fallback-type', 'fallbackTypeString',
'REMOVE_ALL_IF_ALL_INCLUDED'),
('fallback-type', 'fallbackType',
'REMOVE_ALL_IF_ALL_INCLUDED'),
]
helpers.convert_mapping_to_xml(fb_xml, fb_data, mapping,
fail_required=True)
if jobfilter == 'build-status':
bs_xml = XML.SubElement(job_filter_xml,
'hudson.views.BuildStatusFilter')
bs_xml.set('plugin', 'view-job-filters')
bs_data = jobfilters.get('build-status')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('never-built', 'neverBuilt', False),
('building', 'building', False),
('in-build-queue', 'inBuildQueue', False),
]
helpers.convert_mapping_to_xml(bs_xml, bs_data, mapping,
fail_required=True)
if jobfilter == 'user-relevence':
ur_xml = XML.SubElement(job_filter_xml,
'hudson.views.UserRelevanceFilter')
ur_xml.set('plugin', 'view-job-filters')
ur_data = jobfilters.get('user-relevence')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('build-count', 'buildCountTypeString', 'AtLeastOne'),
('amount-type', 'amountTypeString', 'Hours'),
('amount', 'amount', '0'),
('match-user-id', 'matchUserId', False),
('match-user-fullname', 'matchUserFullName', False),
('ignore-case', 'ignoreCase', False),
('ignore-whitespace', 'ignoreWhitespace', False),
('ignore-non-alphaNumeric', 'ignoreNonAlphaNumeric',
False),
('match-builder', 'matchBuilder', False),
('match-email', 'matchEmail', False),
('match-scm-changes', 'matchScmChanges', False),
]
helpers.convert_mapping_to_xml(ur_xml, ur_data, mapping,
fail_required=True)
if jobfilter == 'regex-job':
rj_xml = XML.SubElement(job_filter_xml,
'hudson.views.RegExJobFilter')
rj_xml.set('plugin', 'view-job-filters')
rj_data = jobfilters.get('regex-job')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('regex-name', 'valueTypeString', ''),
('regex', 'regex', ''),
]
helpers.convert_mapping_to_xml(rj_xml, rj_data, mapping,
fail_required=True)
if jobfilter == 'job-type':
jt_xml = XML.SubElement(job_filter_xml,
'hudson.views.JobTypeFilter')
jt_xml.set('plugin', 'view-job-filters')
jt_data = jobfilters.get('job-type')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('job-type', 'jobType', 'hudson.model.FreeStyleProject'),
]
helpers.convert_mapping_to_xml(jt_xml, jt_data, mapping,
fail_required=True)
if jobfilter == 'parameter':
pr_xml = XML.SubElement(job_filter_xml,
'hudson.views.ParameterFilter')
pr_xml.set('plugin', 'view-job-filters')
pr_data = jobfilters.get('parameter')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('name', 'nameRegex', ''),
('value', 'valueRegex', ''),
('description', 'descriptionRegex', ''),
('use-default', 'useDefaultValue', False),
('match-builds-in-progress', 'matchBuildsInProgress',
False),
('match-all-builds', 'matchAllBuilds', False),
('max-builds-to-match', 'maxBuildsToMatch', 0),
]
helpers.convert_mapping_to_xml(pr_xml, pr_data, mapping,
fail_required=True)
if jobfilter == 'other-views':
ov_xml = XML.SubElement(job_filter_xml,
'hudson.views.OtherViewsFilter')
ov_xml.set('plugin', 'view-job-filters')
ov_data = jobfilters.get('other-views')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('view-name', 'otherViewName',
'<select a view other than this one>'),
]
helpers.convert_mapping_to_xml(ov_xml, ov_data, mapping,
fail_required=True)
if jobfilter == 'scm':
st_xml = XML.SubElement(job_filter_xml,
'hudson.views.ScmTypeFilter')
st_xml.set('plugin', 'view-job-filters')
st_data = jobfilters.get('scm')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('scm-type', 'scmType', 'hudson.scm.NullSCM'),
]
helpers.convert_mapping_to_xml(st_xml, st_data, mapping,
fail_required=True)
if jobfilter == 'secured-job':
sj_xml = XML.SubElement(job_filter_xml,
'hudson.views.SecuredJobsFilter')
sj_xml.set('plugin', 'view-job-filters')
sj_data = jobfilters.get('secured-job')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
]
helpers.convert_mapping_to_xml(sj_xml, sj_data, mapping,
fail_required=True)
if jobfilter == 'user-permissions':
up_xml = XML.SubElement(job_filter_xml,
'hudson.views.SecurityFilter')
up_xml.set('plugin', 'view-job-filters')
up_data = jobfilters.get('user-permissions')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('configure', 'configure', False),
('build', 'build', False),
('workspace', 'workspace', False),
('permission-check', 'permissionCheckType',
'MustMatchAll'),
]
helpers.convert_mapping_to_xml(up_xml, up_data, mapping,
fail_required=True)
if jobfilter == 'unclassified':
uc_xml = XML.SubElement(job_filter_xml,
'hudson.views.UnclassifiedJobsFilter')
uc_xml.set('plugin', 'view-job-filters')
uc_data = jobfilters.get('unclassified')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
]
helpers.convert_mapping_to_xml(uc_xml, uc_data, mapping,
fail_required=True)
c_xml = XML.SubElement(root, 'columns')
columns = data.get('columns', DEFAULT_COLUMNS)
for column in columns:
if isinstance(column, dict):
if 'extra-build-parameter' in column:
p_name = column['extra-build-parameter']
x = XML.SubElement(
c_xml,
'jenkins.plugins.extracolumns.BuildParametersColumn',
plugin='extra-columns'
)
x.append(XML.fromstring(
'<singlePara>true</singlePara>'))
x.append(XML.fromstring(
'<parameterName>%s</parameterName>' % p_name))
else:
if column in COLUMN_DICT:
if isinstance(COLUMN_DICT[column], list):
x = XML.SubElement(c_xml, COLUMN_DICT[column][0][0],
**COLUMN_DICT[column][0][1])
for tag in COLUMN_DICT[column][1:]:
x.append(XML.fromstring(tag))
else:
XML.SubElement(c_xml, COLUMN_DICT[column])
mapping = [
('regex', 'includeRegex', None),
('recurse', 'recurse', False),
('status-filter', 'statusFilter', None),
]
helpers.convert_mapping_to_xml(
root, data, mapping, fail_required=False)
return root
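# A minimal sketch of the input this module consumes (hypothetical values; in normal
# use the dict comes from the parsed YAML view definition, as in the literalinclude
# examples referenced in the module docstring):
#
#   data = {
#       'name': 'example-view',
#       'job-name': ['job-b', 'job-a'],            # sorted case-insensitively into <jobNames>
#       'columns': ['status', 'weather', 'job', 'build-button'],
#       'regex': '.*-nightly',
#       'recurse': True,
#   }
#   root = List(registry).root_xml(data)           # registry as supplied by JJB
#   XML.tostring(root)                             # serialized <hudson.model.ListView>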
|
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.legacy.visa
~~~~~~~~~~~~~~~~~~~~~~~~~
Implements base classes for drivers that communicate with instruments using visalib.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from lantz import Driver
from lantz.drivers.legacy.textual import TextualMixin
from lantz.errors import LantzTimeoutError
import visa
class LantzVisaTimeoutError(LantzTimeoutError):
pass
"""
BYTESIZE = {5: 5, 6: 6,
7: 7, 8: 8}
PARITY = {'none': Constants.ASRL_PAR_NONE, 'even': Constants.ASRL_PAR_EVEN,
'odd': Constants.ASRL_PAR_ODD, 'mark': Constants.ASRL_PAR_MARK,
'space': Constants.ASRL_PAR_SPACE}
STOPBITS = {1: Constants.ASRL_STOP_ONE, 1.5: Constants.ASRL_STOP_ONE5,
2: Constants.ASRL_STOP_TWO}
"""
class VisaDriver(object):
def __new__(cls, resource_name, *args, **kwargs):
library_path = kwargs.get('library_path', None)
if library_path:
manager = visa.ResourceManager(library_path)
else:
manager = visa.ResourceManager()
name = manager.resource_info(resource_name).resource_name
if name.startswith('GPIB'):
return GPIBVisaDriver(resource_name, *args, **kwargs)
elif name.startswith('ASRL'):
return SerialVisaDriver(resource_name, *args, **kwargs)
elif name.startswith('TCPIP'):
return TCPVisaDriver(resource_name, *args, **kwargs)
elif name.startswith('USB'):
return USBVisaDriver(resource_name, *args, **kwargs)
else:
raise ValueError('Unknown resource type: {}'.format(name))
class MessageVisaDriver(TextualMixin, Driver):
"""Base class for drivers that communicate with instruments
via serial or parallel port using pyserial
:param resource_name: name or alias of the resource to open.
"""
RECV_TERMINATION = '\n'
SEND_TERMINATION = '\n'
ENCODING = 'ascii'
RECV_CHUNK = -1
def __init__(self, resource_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self._init_attributes = {}
library_path = kwargs.get('library_path', None)
if library_path:
self.resource_manager = visa.ResourceManager(library_path)
else:
self.resource_manager = visa.ResourceManager()
self.resource = None
self.resource_name = resource_name
self.log_debug('Created Instrument {}', self.resource_name)
def raw_send(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
        :type data: bytes
"""
try:
self.resource.write_raw(data)
except Exception as e:
raise Exception(str(e))
def initialize(self):
"""Open port
"""
if not self.is_open():
self.log_debug('Opening {}', self.resource_name)
self.resource = self.resource_manager.open_resource(self.resource_name)
for key, value in self._init_attributes.items():
self.resource.set_visa_attribute(key, value)
self.log_debug('The session for {} is {}', self.resource_name, self.resource.session)
else:
self.log_debug('{} is already open', self.resource_name)
def finalize(self):
"""Close port
"""
self.log_debug('Closing port {}', self.resource_name)
self.resource.close()
def is_open(self):
if self.resource is None:
return False
return self.resource.session is not None
class SerialVisaDriver(MessageVisaDriver):
"""Base class for drivers that communicate with instruments
via serial port using visa.
:param resource_name: the visa resource name or alias (e.g. 'ASRL1::INSTR')
"""
#: communication parameters
BAUDRATE = 9600
BYTESIZE = 8
PARITY = 'none'
STOPBITS = 1
#: flow control flags
RTSCTS = False
DSRDTR = False
XONXOFF = False
RECV_CHUNK = -1
def __init__(self, resource_name, *args, **kwargs):
super().__init__(resource_name, *args, **kwargs)
kw = {}
        kw['ASRL_BAUD'] = kwargs.get('baudrate', self.BAUDRATE)
        kw['ASRL_DATA_BITS'] = BYTESIZE[kwargs.get('bytesize', self.BYTESIZE)]
        kw['ASRL_PARITY'] = PARITY[kwargs.get('parity', self.PARITY)]
        kw['ASRL_STOP_BITS'] = STOPBITS[kwargs.get('stopbits', self.STOPBITS)]
flow = Constants.ASRL_FLOW_NONE
if kwargs.get('rtscts', getattr(self, 'RTSCTS')):
flow |= Constants.ASRL_FLOW_RTS_CTS
if kwargs.get('dsrdtr', getattr(self, 'DSRDTR')):
flow |= Constants.ASRL_FLOW_DTR_DSR
if kwargs.get('xonxoff', getattr(self, 'XONXOFF')):
flow |= Constants.ASRL_FLOW_XON_XOFF
kw['ASRL_FLOW_CNTRL'] = flow
if self.RECV_TERMINATION and self.RECV_CHUNK > 1:
kw['TERMCHAR'] = ord(self.RECV_TERMINATION)
kw['ASRL_END_IN'] = Constants.ASRL_END_TERMCHAR
else:
kw['ASRL_END_IN'] = Constants.ASRL_END_NONE
self._init_attributes.update(kw)
def raw_recv(self, size):
"""Receive raw bytes to the instrument.
:param size: number of bytes to receive
:return: received bytes
:return type: bytes
If a timeout is set, it may return less bytes than requested.
If size == -1, then the number of available bytes will be read.
"""
if size == -1:
size = self.visa.get_attribute(self.vi, 'ASRL_AVAIL_NUM')
if not size:
return bytes()
if not size:
size = 1
data = self.visa.read(self.vi, size)
return data
class GPIBVisaDriver(MessageVisaDriver):
def raw_recv(self, size):
"""Receive raw bytes to the instrument.
:param size: number of bytes to receive
:return: received bytes
:return type: bytes
If a timeout is set, it may return less bytes than requested.
If size == -1, then the number of available bytes will be read.
"""
if not size:
size = 1
data = self.resource.read_raw(1)
return data
class TCPVisaDriver(MessageVisaDriver):
pass
class USBVisaDriver(MessageVisaDriver):
def raw_recv(self, size):
"""Receive raw bytes to the instrument.
:param size: number of bytes to receive
:return: received bytes
:return type: bytes
If a timeout is set, it may return less bytes than requested.
If size == -1, then the number of available bytes will be read.
"""
if not size:
size = 1
data = self.resource.read_raw(1)
return data
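# Minimal usage sketch (hypothetical resource name; requires a working VISA backend):
#
#   inst = VisaDriver('GPIB0::9::INSTR')    # the resource prefix selects GPIBVisaDriver
#   inst.initialize()                       # opens the resource via the ResourceManager
#   # ... talk to the instrument through the TextualMixin send/recv helpers ...
#   inst.finalize()                         # closes the VISA session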
|
|
# pylint:disable=too-many-lines, protected-access, redefined-outer-name, not-callable,
# pylint:disable=no-member
from __future__ import absolute_import, print_function
import sys
import os
import traceback
import signal as signalmodule
# pylint:disable=undefined-all-variable
__all__ = [
'get_version',
'get_header_version',
'supported_backends',
'recommended_backends',
'embeddable_backends',
'time',
'loop',
]
import gevent.libev._corecffi as _corecffi # pylint:disable=no-name-in-module
ffi = _corecffi.ffi # pylint:disable=no-member
libev = _corecffi.lib # pylint:disable=no-member
if hasattr(libev, 'vfd_open'):
# Must be on windows
assert sys.platform.startswith("win"), "vfd functions only needed on windows"
vfd_open = libev.vfd_open
vfd_free = libev.vfd_free
vfd_get = libev.vfd_get
else:
vfd_open = vfd_free = vfd_get = lambda fd: fd
#####
## NOTE on Windows:
# The C implementation does several things specially for Windows;
# a possibly incomplete list is:
#
# - the loop runs a periodic signal checker;
# - the io watcher constructor is different and it has a destructor;
# - the child watcher is not defined
#
# The CFFI implementation does none of these things, and so
# is possibly NOT FUNCTIONALLY CORRECT on Win32
#####
#####
## Note on CFFI objects, callbacks and the lifecycle of watcher objects
#
# Each subclass of `watcher` allocates a C structure of the
# appropriate type e.g., struct gevent_ev_io and holds this pointer in
# its `_gwatcher` attribute. When that watcher instance is garbage
# collected, then the C structure is also freed. The C structure is
# passed to libev from the watcher's start() method and then to the
# appropriate C callback function, e.g., _gevent_ev_io_callback, which
# passes it back to python's _python_callback where we need the
# watcher instance. Therefore, as long as that callback is active (the
# watcher is started), the watcher instance must not be allowed to get
# GC'd---any access at the C level or even the FFI level to the freed
# memory could crash the process.
#
# However, the typical idiom calls for writing something like this:
# loop.io(fd, python_cb).start()
# thus forgetting the newly created watcher subclass and allowing it to be immediately
# GC'd. To combat this, when the watcher is started, it places itself into the loop's
# `_keepaliveset`, and it only removes itself when the watcher's `stop()` method is called.
# Often, this is the *only* reference keeping the watcher object, and hence its C structure,
# alive.
#
# This is slightly complicated by the fact that the python-level
# callback, called from the C callback, could choose to manually stop
# the watcher. When we return to the C level callback, we now have an
# invalid pointer, and attempting to pass it back to Python (e.g., to
# handle an error) could crash. Hence, _python_callback,
# _gevent_io_callback, and _python_handle_error cooperate to make sure
# that the watcher instance stays in the loop's `_keepaliveset` while
# the C code could be running---and if it gets removed, to not call back
# to Python again.
# See also https://github.com/gevent/gevent/issues/676
####
@ffi.callback("int(void* handle, int revents)")
def _python_callback(handle, revents):
"""
Returns an integer having one of three values:
- -1
An exception occurred during the callback and you must call
:func:`_python_handle_error` to deal with it. The Python watcher
object will have the exception tuple saved in ``_exc_info``.
- 0
Everything went according to plan. You should check to see if the libev
watcher is still active, and call :func:`_python_stop` if so. This will
clean up the memory.
- 1
Everything went according to plan, but the watcher has already
been stopped. Its memory may no longer be valid.
"""
try:
# Even dereferencing the handle needs to be inside the try/except;
# if we don't return normally (e.g., a signal) then we wind up going
# to the 'onerror' handler, which
# is not what we want; that can permanently wedge the loop depending
# on which callback was executing
the_watcher = ffi.from_handle(handle)
args = the_watcher.args
if args is None:
# Legacy behaviour from corecext: convert None into ()
# See test__core_watcher.py
args = _NOARGS
if args and args[0] == GEVENT_CORE_EVENTS:
args = (revents, ) + args[1:]
the_watcher.callback(*args)
except: # pylint:disable=bare-except
the_watcher._exc_info = sys.exc_info()
# Depending on when the exception happened, the watcher
# may or may not have been stopped. We need to make sure its
# memory stays valid so we can stop it at the ev level if needed.
the_watcher.loop._keepaliveset.add(the_watcher)
return -1
else:
if the_watcher in the_watcher.loop._keepaliveset:
# It didn't stop itself
return 0
return 1 # It stopped itself
libev.python_callback = _python_callback
@ffi.callback("void(void* handle, int revents)")
def _python_handle_error(handle, revents):
try:
watcher = ffi.from_handle(handle)
exc_info = watcher._exc_info
del watcher._exc_info
watcher.loop.handle_error(watcher, *exc_info)
finally:
# XXX Since we're here on an error condition, and we
# made sure that the watcher object was put in loop._keepaliveset,
# what about not stopping the watcher? Looks like a possible
# memory leak?
if revents & (libev.EV_READ | libev.EV_WRITE):
try:
watcher.stop()
except: # pylint:disable=bare-except
watcher.loop.handle_error(watcher, *sys.exc_info())
return # pylint:disable=lost-exception
libev.python_handle_error = _python_handle_error
@ffi.callback("void(void* handle)")
def _python_stop(handle):
watcher = ffi.from_handle(handle)
watcher.stop()
libev.python_stop = _python_stop
UNDEF = libev.EV_UNDEF
NONE = libev.EV_NONE
READ = libev.EV_READ
WRITE = libev.EV_WRITE
TIMER = libev.EV_TIMER
PERIODIC = libev.EV_PERIODIC
SIGNAL = libev.EV_SIGNAL
CHILD = libev.EV_CHILD
STAT = libev.EV_STAT
IDLE = libev.EV_IDLE
PREPARE = libev.EV_PREPARE
CHECK = libev.EV_CHECK
EMBED = libev.EV_EMBED
FORK = libev.EV_FORK
CLEANUP = libev.EV_CLEANUP
ASYNC = libev.EV_ASYNC
CUSTOM = libev.EV_CUSTOM
ERROR = libev.EV_ERROR
READWRITE = libev.EV_READ | libev.EV_WRITE
MINPRI = libev.EV_MINPRI
MAXPRI = libev.EV_MAXPRI
BACKEND_PORT = libev.EVBACKEND_PORT
BACKEND_KQUEUE = libev.EVBACKEND_KQUEUE
BACKEND_EPOLL = libev.EVBACKEND_EPOLL
BACKEND_POLL = libev.EVBACKEND_POLL
BACKEND_SELECT = libev.EVBACKEND_SELECT
FORKCHECK = libev.EVFLAG_FORKCHECK
NOINOTIFY = libev.EVFLAG_NOINOTIFY
SIGNALFD = libev.EVFLAG_SIGNALFD
NOSIGMASK = libev.EVFLAG_NOSIGMASK
class _EVENTSType(object):
def __repr__(self):
return 'gevent.core.EVENTS'
EVENTS = GEVENT_CORE_EVENTS = _EVENTSType()
def get_version():
return 'libev-%d.%02d' % (libev.ev_version_major(), libev.ev_version_minor())
def get_header_version():
return 'libev-%d.%02d' % (libev.EV_VERSION_MAJOR, libev.EV_VERSION_MINOR)
_flags = [(libev.EVBACKEND_PORT, 'port'),
(libev.EVBACKEND_KQUEUE, 'kqueue'),
(libev.EVBACKEND_EPOLL, 'epoll'),
(libev.EVBACKEND_POLL, 'poll'),
(libev.EVBACKEND_SELECT, 'select'),
(libev.EVFLAG_NOENV, 'noenv'),
(libev.EVFLAG_FORKCHECK, 'forkcheck'),
(libev.EVFLAG_SIGNALFD, 'signalfd'),
(libev.EVFLAG_NOSIGMASK, 'nosigmask')]
_flags_str2int = dict((string, flag) for (flag, string) in _flags)
_events = [(libev.EV_READ, 'READ'),
(libev.EV_WRITE, 'WRITE'),
(libev.EV__IOFDSET, '_IOFDSET'),
(libev.EV_PERIODIC, 'PERIODIC'),
(libev.EV_SIGNAL, 'SIGNAL'),
(libev.EV_CHILD, 'CHILD'),
(libev.EV_STAT, 'STAT'),
(libev.EV_IDLE, 'IDLE'),
(libev.EV_PREPARE, 'PREPARE'),
(libev.EV_CHECK, 'CHECK'),
(libev.EV_EMBED, 'EMBED'),
(libev.EV_FORK, 'FORK'),
(libev.EV_CLEANUP, 'CLEANUP'),
(libev.EV_ASYNC, 'ASYNC'),
(libev.EV_CUSTOM, 'CUSTOM'),
(libev.EV_ERROR, 'ERROR')]
def _flags_to_list(flags):
result = []
for code, value in _flags:
if flags & code:
result.append(value)
flags &= ~code
if not flags:
break
if flags:
result.append(flags)
return result
if sys.version_info[0] >= 3:
basestring = (bytes, str)
integer_types = (int,)
else:
import __builtin__ # pylint:disable=import-error
basestring = __builtin__.basestring,
integer_types = (int, __builtin__.long)
def _flags_to_int(flags):
# Note, that order does not matter, libev has its own predefined order
if not flags:
return 0
if isinstance(flags, integer_types):
return flags
result = 0
try:
if isinstance(flags, basestring):
flags = flags.split(',')
for value in flags:
value = value.strip().lower()
if value:
result |= _flags_str2int[value]
except KeyError as ex:
raise ValueError('Invalid backend or flag: %s\nPossible values: %s' % (ex, ', '.join(sorted(_flags_str2int.keys()))))
return result
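# For example (based on the _flags table above), _flags_to_int('epoll,noenv') returns
# libev.EVBACKEND_EPOLL | libev.EVFLAG_NOENV, _flags_to_int(None) and _flags_to_int(0)
# return 0, and an unknown name raises the ValueError above.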
def _str_hex(flag):
if isinstance(flag, integer_types):
return hex(flag)
return str(flag)
def _check_flags(flags):
as_list = []
flags &= libev.EVBACKEND_MASK
if not flags:
return
if not flags & libev.EVBACKEND_ALL:
raise ValueError('Invalid value for backend: 0x%x' % flags)
if not flags & libev.ev_supported_backends():
as_list = [_str_hex(x) for x in _flags_to_list(flags)]
raise ValueError('Unsupported backend: %s' % '|'.join(as_list))
def _events_to_str(events):
result = []
for (flag, string) in _events:
c_flag = flag
if events & c_flag:
result.append(string)
events = events & (~c_flag)
if not events:
break
if events:
result.append(hex(events))
return '|'.join(result)
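# For example, _events_to_str(READWRITE) returns 'READ|WRITE'; any bits not named in
# the _events table are appended as a hex literal.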
def supported_backends():
return _flags_to_list(libev.ev_supported_backends())
def recommended_backends():
return _flags_to_list(libev.ev_recommended_backends())
def embeddable_backends():
return _flags_to_list(libev.ev_embeddable_backends())
def time():
return libev.ev_time()
_default_loop_destroyed = False
def _loop_callback(*args, **kwargs):
return ffi.callback(*args, **kwargs)
class loop(object):
# pylint:disable=too-many-public-methods
error_handler = None
def __init__(self, flags=None, default=None):
self._in_callback = False
self._callbacks = []
# self._check is a watcher that runs in each iteration of the
# mainloop, just after the blocking call
self._check = ffi.new("struct ev_check *")
self._check_callback_ffi = _loop_callback("void(*)(struct ev_loop *, void*, int)",
self._check_callback,
onerror=self._check_callback_handle_error)
libev.ev_check_init(self._check, self._check_callback_ffi)
# self._prepare is a watcher that runs in each iteration of the mainloop,
# just before the blocking call
self._prepare = ffi.new("struct ev_prepare *")
self._prepare_callback_ffi = _loop_callback("void(*)(struct ev_loop *, void*, int)",
self._run_callbacks,
onerror=self._check_callback_handle_error)
libev.ev_prepare_init(self._prepare, self._prepare_callback_ffi)
# A timer we start and stop on demand. If we have callbacks,
# too many to run in one iteration of _run_callbacks, we turn this
# on so as to have the next iteration of the run loop return to us
# as quickly as possible.
# TODO: There may be a more efficient way to do this using ev_timer_again;
# see the "ev_timer" section of the ev manpage (http://linux.die.net/man/3/ev)
self._timer0 = ffi.new("struct ev_timer *")
libev.ev_timer_init(self._timer0, libev.gevent_noop, 0.0, 0.0)
# TODO: We may be able to do something nicer and use the existing python_callback
# combined with onerror and the class check/timer/prepare to simplify things
# and unify our handling
c_flags = _flags_to_int(flags)
_check_flags(c_flags)
c_flags |= libev.EVFLAG_NOENV
c_flags |= libev.EVFLAG_FORKCHECK
if default is None:
default = True
if _default_loop_destroyed:
default = False
if default:
self._ptr = libev.gevent_ev_default_loop(c_flags)
if not self._ptr:
raise SystemError("ev_default_loop(%s) failed" % (c_flags, ))
else:
self._ptr = libev.ev_loop_new(c_flags)
if not self._ptr:
raise SystemError("ev_loop_new(%s) failed" % (c_flags, ))
if default or globals()["__SYSERR_CALLBACK"] is None:
set_syserr_cb(self._handle_syserr)
libev.ev_prepare_start(self._ptr, self._prepare)
self.unref()
libev.ev_check_start(self._ptr, self._check)
self.unref()
self._keepaliveset = set()
def _check_callback_handle_error(self, t, v, tb):
# None as the context argument causes the exception to be raised
# in the main greenlet.
self.handle_error(None, t, v, tb)
def _check_callback(self, *args):
# If we have the onerror callback, this is a no-op; all the real
# work to rethrow the exception is done by the onerror callback
pass
def _run_callbacks(self, _evloop, _, _revents):
count = 1000
libev.ev_timer_stop(self._ptr, self._timer0)
while self._callbacks and count > 0:
callbacks = self._callbacks
self._callbacks = []
for cb in callbacks:
self.unref()
callback = cb.callback
args = cb.args
if callback is None or args is None:
# it's been stopped
continue
cb.callback = None
try:
callback(*args)
except: # pylint:disable=bare-except
# If we allow an exception to escape this method (while we are running the ev callback),
# then CFFI will print the error and libev will continue executing.
# There are two problems with this. The first is that the code after
# the loop won't run. The second is that any remaining callbacks scheduled
# for this loop iteration will be silently dropped; they won't run, but they'll
# also not be *stopped* (which is not a huge deal unless you're looking for
# consistency or checking the boolean/pending status; the loop doesn't keep
# a reference to them like it does to watchers...*UNLESS* the callback itself had
# a reference to a watcher; then I don't know what would happen, it depends on
# the state of the watcher---a leak or crash is not totally inconceivable).
# The Cython implementation in core.pyx uses gevent_call from callbacks.c
# to run the callback, which uses gevent_handle_error to handle any errors the
# Python callback raises...it unconditionally simply prints any error raised
# by loop.handle_error and clears it, so callback handling continues.
# We take a similar approach (but are extra careful about printing)
try:
self.handle_error(cb, *sys.exc_info())
except: # pylint:disable=bare-except
try:
print("Exception while handling another error", file=sys.stderr)
traceback.print_exc()
except: # pylint:disable=bare-except
pass # Nothing we can do here
finally:
# NOTE: this must be reset here, because cb.args is used as a flag in
# the callback class so that bool(cb) of a callback that has been run
# becomes False
cb.args = None
count -= 1
if self._callbacks:
libev.ev_timer_start(self._ptr, self._timer0)
def _stop_aux_watchers(self):
if libev.ev_is_active(self._prepare):
self.ref()
libev.ev_prepare_stop(self._ptr, self._prepare)
if libev.ev_is_active(self._check):
self.ref()
libev.ev_check_stop(self._ptr, self._check)
def destroy(self):
global _default_loop_destroyed
if self._ptr:
self._stop_aux_watchers()
if globals()["__SYSERR_CALLBACK"] == self._handle_syserr:
set_syserr_cb(None)
if libev.ev_is_default_loop(self._ptr):
_default_loop_destroyed = True
libev.ev_loop_destroy(self._ptr)
self._ptr = ffi.NULL
@property
def ptr(self):
return self._ptr
@property
def WatcherType(self):
return watcher
@property
def MAXPRI(self):
return libev.EV_MAXPRI
@property
def MINPRI(self):
return libev.EV_MINPRI
def _handle_syserr(self, message, errno):
try:
errno = os.strerror(errno)
except: # pylint:disable=bare-except
traceback.print_exc()
try:
message = '%s: %s' % (message, errno)
except: # pylint:disable=bare-except
traceback.print_exc()
self.handle_error(None, SystemError, SystemError(message), None)
def handle_error(self, context, type, value, tb):
handle_error = None
error_handler = self.error_handler
if error_handler is not None:
# we do want to do getattr every time so that setting Hub.handle_error property just works
handle_error = getattr(error_handler, 'handle_error', error_handler)
handle_error(context, type, value, tb)
else:
self._default_handle_error(context, type, value, tb)
def _default_handle_error(self, context, type, value, tb): # pylint:disable=unused-argument
# note: Hub sets its own error handler so this is not used by gevent
# this is here to make core.loop usable without the rest of gevent
traceback.print_exception(type, value, tb)
libev.ev_break(self._ptr, libev.EVBREAK_ONE)
def run(self, nowait=False, once=False):
flags = 0
if nowait:
flags |= libev.EVRUN_NOWAIT
if once:
flags |= libev.EVRUN_ONCE
libev.ev_run(self._ptr, flags)
def reinit(self):
libev.ev_loop_fork(self._ptr)
def ref(self):
libev.ev_ref(self._ptr)
def unref(self):
libev.ev_unref(self._ptr)
def break_(self, how=libev.EVBREAK_ONE):
libev.ev_break(self._ptr, how)
def verify(self):
libev.ev_verify(self._ptr)
def now(self):
return libev.ev_now(self._ptr)
def update(self):
libev.ev_now_update(self._ptr)
def __repr__(self):
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self._format())
@property
def default(self):
return True if libev.ev_is_default_loop(self._ptr) else False
@property
def iteration(self):
return libev.ev_iteration(self._ptr)
@property
def depth(self):
return libev.ev_depth(self._ptr)
@property
def backend_int(self):
return libev.ev_backend(self._ptr)
@property
def backend(self):
backend = libev.ev_backend(self._ptr)
for key, value in _flags:
if key == backend:
return value
return backend
@property
def pendingcnt(self):
return libev.ev_pending_count(self._ptr)
def io(self, fd, events, ref=True, priority=None):
return io(self, fd, events, ref, priority)
def timer(self, after, repeat=0.0, ref=True, priority=None):
return timer(self, after, repeat, ref, priority)
def signal(self, signum, ref=True, priority=None):
return signal(self, signum, ref, priority)
def idle(self, ref=True, priority=None):
return idle(self, ref, priority)
def prepare(self, ref=True, priority=None):
return prepare(self, ref, priority)
def check(self, ref=True, priority=None):
return check(self, ref, priority)
def fork(self, ref=True, priority=None):
return fork(self, ref, priority)
def async(self, ref=True, priority=None):
return async(self, ref, priority)
if sys.platform != "win32":
def child(self, pid, trace=0, ref=True):
return child(self, pid, trace, ref)
def install_sigchld(self):
libev.gevent_install_sigchld_handler()
def reset_sigchld(self):
libev.gevent_reset_sigchld_handler()
def stat(self, path, interval=0.0, ref=True, priority=None):
return stat(self, path, interval, ref, priority)
def callback(self, priority=None):
return callback(self, priority)
def run_callback(self, func, *args):
cb = callback(func, args)
self._callbacks.append(cb)
self.ref()
return cb
def _format(self):
if not self._ptr:
return 'destroyed'
msg = self.backend
if self.default:
msg += ' default'
msg += ' pending=%s' % self.pendingcnt
msg += self._format_details()
return msg
def _format_details(self):
msg = ''
fileno = self.fileno()
try:
activecnt = self.activecnt
except AttributeError:
activecnt = None
if activecnt is not None:
msg += ' ref=' + repr(activecnt)
if fileno is not None:
msg += ' fileno=' + repr(fileno)
#if sigfd is not None and sigfd != -1:
# msg += ' sigfd=' + repr(sigfd)
return msg
def fileno(self):
if self._ptr:
fd = self._ptr.backend_fd
if fd >= 0:
return fd
@property
def activecnt(self):
if not self._ptr:
raise ValueError('operation on destroyed loop')
return self._ptr.activecnt
# For times when *args is captured but often not passed (empty),
# we can avoid keeping the new tuple that was created for *args
# around by using a constant.
_NOARGS = ()
class callback(object):
__slots__ = ('callback', 'args')
def __init__(self, callback, args):
self.callback = callback
self.args = args or _NOARGS
def stop(self):
self.callback = None
self.args = None
# Note that __nonzero__ and pending are different
# bool() is used in contexts where we need to know whether to schedule another callback,
# so it's true if it's pending or currently running
# 'pending' has the same meaning as libev watchers: it is cleared before actually
# running the callback
def __nonzero__(self):
# it's nonzero if it's pending or currently executing
# NOTE: This depends on loop._run_callbacks setting the args property
# to None.
return self.args is not None
__bool__ = __nonzero__
@property
def pending(self):
return self.callback is not None
def _format(self):
return ''
def __repr__(self):
result = "<%s at 0x%x" % (self.__class__.__name__, id(self))
if self.pending:
result += " pending"
if self.callback is not None:
result += " callback=%r" % (self.callback, )
if self.args is not None:
result += " args=%r" % (self.args, )
if self.callback is None and self.args is None:
result += " stopped"
return result + ">"
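# Hedged sketch of the callback lifecycle described above (illustrative only):
#
# cb = callback(print, ('hello',))
# bool(cb) # True: args is not None, so it is pending or currently running
# cb.pending # True: the callback function is still set
# # loop._run_callbacks clears cb.callback before invoking it and cb.args afterwards,
# # so a callback that has run (or been stopped) is falsey and no longer pending.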
class watcher(object):
def __init__(self, _loop, ref=True, priority=None, args=_NOARGS):
self.loop = _loop
if ref:
self._flags = 0
else:
self._flags = 4
self._args = None
self._callback = None
self._handle = ffi.new_handle(self)
self._watcher = ffi.new(self._watcher_struct_pointer_type)
self._watcher.data = self._handle
if priority is not None:
libev.ev_set_priority(self._watcher, priority)
self._watcher_init(self._watcher,
self._watcher_callback,
*args)
# A string identifying the type of libev object we watch, e.g., 'ev_io'
# This should be a class attribute.
_watcher_type = None
# A class attribute that is the callback on the libev object that init's the C struct,
# e.g., libev.ev_io_init. If None, will be set by _init_subclasses.
_watcher_init = None
# A class attribute that is the callback on the libev object that starts the C watcher,
# e.g., libev.ev_io_start. If None, will be set by _init_subclasses.
_watcher_start = None
# A class attribute that is the callback on the libev object that stops the C watcher,
# e.g., libev.ev_io_stop. If None, will be set by _init_subclasses.
_watcher_stop = None
# A cffi ctype object identifying the struct pointer we create.
# This is a class attribute set based on the _watcher_type
_watcher_struct_pointer_type = None
# The attribute of the libev object identifying the custom
# callback function for this type of watcher. This is a class
# attribute set based on the _watcher_type in _init_subclasses.
_watcher_callback = None
@classmethod
def _init_subclasses(cls):
for subclass in cls.__subclasses__(): # pylint:disable=no-member
watcher_type = subclass._watcher_type
subclass._watcher_struct_pointer_type = ffi.typeof('struct ' + watcher_type + '*')
subclass._watcher_callback = ffi.addressof(libev,
'_gevent_generic_callback')
for name in 'start', 'stop', 'init':
ev_name = watcher_type + '_' + name
watcher_name = '_watcher' + '_' + name
if getattr(subclass, watcher_name) is None:
setattr(subclass, watcher_name,
getattr(libev, ev_name))
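# For example, after _init_subclasses() runs (see the module-level call further below),
# the io subclass ends up with, roughly:
# io._watcher_struct_pointer_type == ffi.typeof('struct ev_io*')
# io._watcher_init == libev.ev_io_init
# io._watcher_start == libev.ev_io_start
# io._watcher_stop == libev.ev_io_stop
# io._watcher_callback == ffi.addressof(libev, '_gevent_generic_callback')
# (illustrative summary of what the loop above does; not additional behaviour)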
# this is not needed, since we keep alive the watcher while it's started
#def __del__(self):
# self._watcher_stop(self.loop._ptr, self._watcher)
def __repr__(self):
formats = self._format()
result = "<%s at 0x%x%s" % (self.__class__.__name__, id(self), formats)
if self.pending:
result += " pending"
if self.callback is not None:
result += " callback=%r" % (self.callback, )
if self.args is not None:
result += " args=%r" % (self.args, )
if self.callback is None and self.args is None:
result += " stopped"
result += " handle=%s" % (self._watcher.data)
return result + ">"
def _format(self):
return ''
def _libev_unref(self):
if self._flags & 6 == 4:
self.loop.unref()
self._flags |= 2
def _get_ref(self):
return False if self._flags & 4 else True
def _set_ref(self, value):
if value:
if not self._flags & 4:
return # ref is already True
if self._flags & 2: # ev_unref was called, undo
self.loop.ref()
self._flags &= ~6 # do not want unref, no outstanding unref
else:
if self._flags & 4:
return # ref is already False
self._flags |= 4
if not self._flags & 2 and libev.ev_is_active(self._watcher):
self.loop.unref()
self._flags |= 2
ref = property(_get_ref, _set_ref)
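# Summary of the _flags bits as used in __init__, _libev_unref, _set_ref, stop() and feed()
# (derived from this file; illustrative notes only):
# 1 - set by feed() to record that a reference to the watcher was (conceptually) taken
# (Py_INCREF in the Cython implementation)
# 2 - loop.unref() has been called on this watcher's behalf and must be undone by loop.ref()
# 4 - the user passed ref=False, i.e. this watcher should not keep the loop alive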
def _get_callback(self):
return self._callback
def _set_callback(self, cb):
if not callable(cb) and cb is not None:
raise TypeError("Expected callable, not %r" % (cb, ))
self._callback = cb
callback = property(_get_callback, _set_callback)
def _get_args(self):
return self._args
def _set_args(self, args):
if not isinstance(args, tuple) and args is not None:
raise TypeError("args must be a tuple or None")
self._args = args
args = property(_get_args, _set_args)
def start(self, callback, *args):
if callback is None:
raise TypeError('callback must be callable, not None')
self.callback = callback
self.args = args or _NOARGS
self._libev_unref()
self.loop._keepaliveset.add(self)
self._watcher_start(self.loop._ptr, self._watcher)
def stop(self):
if self._flags & 2:
self.loop.ref()
self._flags &= ~2
self._watcher_stop(self.loop._ptr, self._watcher)
self.loop._keepaliveset.discard(self)
self._callback = None
self.args = None
def _get_priority(self):
return libev.ev_priority(self._watcher)
def _set_priority(self, priority):
if libev.ev_is_active(self._watcher):
raise AttributeError("Cannot set priority of an active watcher")
libev.ev_set_priority(self._watcher, priority)
priority = property(_get_priority, _set_priority)
def feed(self, revents, callback, *args):
self.callback = callback
self.args = args or _NOARGS
if self._flags & 6 == 4:
self.loop.unref()
self._flags |= 2
libev.ev_feed_event(self.loop._ptr, self._watcher, revents)
if not self._flags & 1:
# Py_INCREF(<PyObjectPtr>self)
self._flags |= 1
@property
def active(self):
return True if libev.ev_is_active(self._watcher) else False
@property
def pending(self):
return True if libev.ev_is_pending(self._watcher) else False
class io(watcher):
_watcher_type = 'ev_io'
def __init__(self, loop, fd, events, ref=True, priority=None):
# XXX: Win32: Need to vfd_open the fd and free the old one?
# XXX: Win32: Need a destructor to free the old fd?
if fd < 0:
raise ValueError('fd must be non-negative: %r' % fd)
if events & ~(libev.EV__IOFDSET | libev.EV_READ | libev.EV_WRITE):
raise ValueError('illegal event mask: %r' % events)
watcher.__init__(self, loop, ref=ref, priority=priority, args=(fd, events))
def start(self, callback, *args, **kwargs):
# pylint:disable=arguments-differ
args = args or _NOARGS
if kwargs.get('pass_events'):
args = (GEVENT_CORE_EVENTS, ) + args
watcher.start(self, callback, *args)
def _get_fd(self):
return vfd_get(self._watcher.fd)
def _set_fd(self, fd):
if libev.ev_is_active(self._watcher):
raise AttributeError("'io' watcher attribute 'fd' is read-only while watcher is active")
vfd = vfd_open(fd)
vfd_free(self._watcher.fd)
self._watcher_init(self._watcher, self._watcher_callback, vfd, self._watcher.events)
fd = property(_get_fd, _set_fd)
def _get_events(self):
return self._watcher.events
def _set_events(self, events):
if libev.ev_is_active(self._watcher):
raise AttributeError("'io' watcher attribute 'events' is read-only while watcher is active")
self._watcher_init(self._watcher, self._watcher_callback, self._watcher.fd, events)
events = property(_get_events, _set_events)
@property
def events_str(self):
return _events_to_str(self._watcher.events)
def _format(self):
return ' fd=%s events=%s' % (self.fd, self.events_str)
class timer(watcher):
_watcher_type = 'ev_timer'
def __init__(self, loop, after=0.0, repeat=0.0, ref=True, priority=None):
if repeat < 0.0:
raise ValueError("repeat must be positive or zero: %r" % repeat)
watcher.__init__(self, loop, ref=ref, priority=priority, args=(after, repeat))
def start(self, callback, *args, **kw):
# pylint:disable=arguments-differ
update = kw.get("update", True)
if update:
# Quoth the libev doc: "This is a costly operation and is
# usually done automatically within ev_run(). This
# function is rarely useful, but when some event callback
# runs for a very long time without entering the event
# loop, updating libev's idea of the current time is a
# good idea."
# So do we really need to default to true?
libev.ev_now_update(self.loop._ptr)
watcher.start(self, callback, *args)
@property
def at(self):
return self._watcher.at
def again(self, callback, *args, **kw):
# Exactly the same as start(), just with a different initializer
# function
self._watcher_start = libev.ev_timer_again
try:
self.start(callback, *args, **kw)
finally:
del self._watcher_start
class signal(watcher):
_watcher_type = 'ev_signal'
def __init__(self, loop, signalnum, ref=True, priority=None):
if signalnum < 1 or signalnum >= signalmodule.NSIG:
raise ValueError('illegal signal number: %r' % signalnum)
# still possible to crash on one of libev's asserts:
# 1) "libev: ev_signal_start called with illegal signal number"
# EV_NSIG might be different from signal.NSIG on some platforms
# 2) "libev: a signal must not be attached to two different loops"
# we probably could check that in LIBEV_EMBED mode, but not in general
watcher.__init__(self, loop, ref=ref, priority=priority, args=(signalnum, ))
class idle(watcher):
_watcher_type = 'ev_idle'
class prepare(watcher):
_watcher_type = 'ev_prepare'
class check(watcher):
_watcher_type = 'ev_check'
class fork(watcher):
_watcher_type = 'ev_fork'
class async(watcher):
_watcher_type = 'ev_async'
def send(self):
libev.ev_async_send(self.loop._ptr, self._watcher)
@property
def pending(self):
return True if libev.ev_async_pending(self._watcher) else False
class child(watcher):
_watcher_type = 'ev_child'
def __init__(self, loop, pid, trace=0, ref=True):
if not loop.default:
raise TypeError('child watchers are only available on the default loop')
loop.install_sigchld()
watcher.__init__(self, loop, ref=ref, args=(pid, trace))
def _format(self):
return ' pid=%r rstatus=%r' % (self.pid, self.rstatus)
@property
def pid(self):
return self._watcher.pid
@property
def rpid(self, ):
return self._watcher.rpid
@rpid.setter
def rpid(self, value):
self._watcher.rpid = value
@property
def rstatus(self):
return self._watcher.rstatus
@rstatus.setter
def rstatus(self, value):
self._watcher.rstatus = value
class stat(watcher):
_watcher_type = 'ev_stat'
@staticmethod
def _encode_path(path):
if isinstance(path, bytes):
return path
# encode for the filesystem. Not all systems (e.g., Unix)
# will have an encoding specified
encoding = sys.getfilesystemencoding() or 'utf-8'
try:
path = path.encode(encoding, 'surrogateescape')
except LookupError:
# Can't encode it, and the error handler doesn't
# exist. Probably on Python 2 with an astral character.
# Not sure how to handle this.
raise UnicodeEncodeError("Can't encode path to filesystem encoding")
return path
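# Hedged example (illustrative only; assumes a UTF-8 filesystem encoding):
# stat._encode_path(u'caf\xe9.txt') == b'caf\xc3\xa9.txt', while bytes paths pass through unchanged.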
def __init__(self, _loop, path, interval=0.0, ref=True, priority=None):
# Store the encoded path in the same attribute that corecext does
self._paths = self._encode_path(path)
# Keep the original path to avoid re-encoding, especially on Python 3
self._path = path
# Although CFFI would automatically convert a bytes object into a char* when
# calling ev_stat_init(..., char*, ...), on PyPy the char* pointer is not
# guaranteed to live past the function call. On CPython, only with a constant/interned
# bytes object is the pointer guaranteed to last past the function call. (And since
# Python 3 is pretty much guaranteed to produce a newly-encoded bytes object above, that's
# rarely the case). Therefore, we must keep a reference to the produced cdata object
# so that the struct ev_stat_watcher's `path` pointer doesn't become invalid/deallocated
self._cpath = ffi.new('char[]', self._paths)
watcher.__init__(self, _loop, ref=ref, priority=priority,
args=(self._cpath,
interval))
@property
def path(self):
return self._path
@property
def attr(self):
if not self._watcher.attr.st_nlink:
return
return self._watcher.attr
@property
def prev(self):
if not self._watcher.prev.st_nlink:
return
return self._watcher.prev
@property
def interval(self):
return self._watcher.interval
# All watcher subclasses must be declared above. Now we do some
# initialization; this is not only a minor optimization, it protects
# against later runtime typos and attribute errors
watcher._init_subclasses()
def _syserr_cb(msg):
try:
msg = ffi.string(msg)
__SYSERR_CALLBACK(msg, ffi.errno)
except:
set_syserr_cb(None)
raise # let cffi print the traceback
_syserr_cb._cb = ffi.callback("void(*)(char *msg)", _syserr_cb)
def set_syserr_cb(callback):
global __SYSERR_CALLBACK
if callback is None:
libev.ev_set_syserr_cb(ffi.NULL)
__SYSERR_CALLBACK = None
elif callable(callback):
libev.ev_set_syserr_cb(_syserr_cb._cb)
__SYSERR_CALLBACK = callback
else:
raise TypeError('Expected callable or None, got %r' % (callback, ))
__SYSERR_CALLBACK = None
LIBEV_EMBED = True
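# Hedged usage sketch for the loop/watcher API defined above (illustrative only; assumes
# the embedded libev and the cffi bindings referenced in this module compiled successfully):
#
# l = loop()
# def on_timer():
# print('timer fired at', l.now())
# t = l.timer(0.05)
# t.start(on_timer)
# l.run(once=True) # dispatches the timer, then returns
# t.stop()
# l.destroy()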
|
|
"""
Taken from the sklearn.gaussian_process module and stripped down to the bare minimum
"""
from scipy import linalg as LA
import scipy as sp
from sklearn.utils import array2d
from sklearn.gaussian_process import correlation_models
import Cholesky
MACHINE_EPSILON = sp.finfo(sp.double).eps
def kernel(d, theta, correlation='squared_exponential'):
if correlation == 'absolute_exponential':
return sp.exp(-d / theta) # correlation_models.absolute_exponential(theta, d)
elif correlation == 'squared_exponential':
return sp.exp(-d**2 / (2.0 * theta**2)) # correlation_models.squared_exponential(theta, d)
elif correlation == 'generalized_exponential':
return correlation_models.generalized_exponential(theta, d)
elif correlation == 'cubic':
return correlation_models.cubic(theta, d)
elif correlation == 'linear':
return correlation_models.linear(theta, d)
else:
print("Correlation model %s not understood" % correlation)
return None
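# Hedged example of the kernel above (synthetic numbers, illustrative only):
#
# d = sp.array([0.0, 1.0, 2.0])
# kernel(d, 1.0) # squared exponential: exp(-d**2 / 2) = [1., 0.6065..., 0.1353...]
# kernel(d, 1.0, correlation='absolute_exponential') # exp(-d) = [1., 0.3678..., 0.1353...]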
def matrix_distance(A, B):
# matrix distance = sum of the distances between corresponding rows
A = sp.asarray(A)
B = sp.asarray(B)
if A.shape != B.shape:
raise ValueError("A and B must have the same shape")
return sp.array([sp.linalg.norm(u-v) for u, v in zip(A,B)]).sum()
def symmat_to_vector(A):
n = A.shape[0]
v = [] # sp.zeros(n * (n-1) / 2)
for i, row in enumerate(A):
for a in row[i+1:]:
v.append(a)
return sp.array(v)
class GaussianProcess:
"""
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
NOT YET 'generalized_exponential', 'cubic', 'linear'
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
normalise : integer, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalise = 1 so that both input and output data are normalised
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
"""
def __init__(self, corr='squared_exponential', verbose=False, theta0=1e-1,
normalise=1, nugget=10. * MACHINE_EPSILON,
low_memory=False, do_features_projection=False, metric='euclidean'):
self.corr = corr
self.verbose = verbose
self.theta0 = theta0
self.normalise = normalise
self.nugget = nugget
self.low_memory = low_memory
self.do_features_projection = do_features_projection
self.metric = metric
def flush_data(self):
self.X = None
self.y = None
if not self.low_memory:
self.D = None
self.K = None
self.inverse = None
self.alpha = None
self.X_mean, self.X_std = None, None
self.y_mean, self.y_std = None, None
def a2b_distance(self, Xa, Xb, return_k=False):
"""
Given two sets of samples Xa and Xb, return the distance between
each element of Xa and Xb. If required, calculate the kernel of such distances
according to the GP model in use.
"""
Xa, Xb = sp.asarray(Xa), sp.asarray(Xb)
d = sp.spatial.distance.cdist(Xa, Xb, metric=self.metric)
if return_k:
return d, kernel(d, self.theta0, correlation=self.corr)
else:
return d
def calc_kernel_matrix(self, X):
"""
Perform only the calculation of the covariance matrix given the GP and a dataset
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
Returns
-------
gp : adds properties self.D and self.K
"""
# Force data to 2D numpy.array
X = array2d(X)
n_samples, n_features = X.shape
# Normalise input data or not. Do if normalise is 1 (all normalise) or 2 (input normalise)
if self.normalise > 0:
X_mean = sp.mean(X, axis=0)
X_std = sp.std(X, axis=0)
X_std[X_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
else:
X_mean = 0.0
X_std = 1.0
# Calculate the pairwise distance matrix in condensed (vector) form, then expand it to
# square form with scipy.spatial.distance.squareform
D = sp.spatial.distance.pdist(X, metric=self.metric)
D = sp.spatial.distance.squareform(D)
# Divide each distance ij by sqrt(N_i * N_j)
if self.normalise == -1:
natoms = (X != 0.).sum(1)
D = D / sp.sqrt(sp.outer(natoms, natoms))
# Covariance matrix K
# sklearn correlation doesn't work. Probably correlation_models needs some different inputs
K = kernel(D, self.theta0, correlation=self.corr)
self.X = X
if not self.low_memory:
self.D = D
self.K = K
self.X_mean, self.X_std = X_mean, X_std
return K
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
K = self.calc_kernel_matrix(X)
# # Force data to 2D numpy.array
X = array2d(X)
n_samples, n_features = X.shape
y = sp.asarray(y)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, sp.newaxis]
_, n_targets = y.shape
# # Normalise output data or not
if self.normalise == 1:
y_mean = sp.mean(y, axis=0)
y_std = sp.std(y, axis=0)
y_std[y_std == 0.] = 1.
y = (y - y_mean) / y_std
else:
y_mean = 0.0
y_std = 1.0
err = 'Dummy error message'
inverse = K + self.nugget * sp.eye(n_samples)
try:
# print "is symmetric", Cholesky.isSymmetric(inverse)
# upper_triang = Cholesky.Cholesky(inverse)
# inverse = Cholesky.CholeskyInverse(upper_triang)
inverse = LA.inv(inverse)
except LA.LinAlgError as err:
print "inv failed: %s. Switching to pinvh" % err
try:
inverse = LA.pinvh(inverse)
except LA.LinAlgError as err:
print "pinvh failed: %s. Switching to pinv2" % err
try:
inverse = LA.pinv2(inverse)
except LA.LinAlgError as err:
print "pinv2 failed: %s. Failed to invert matrix." % err
inverse = None
# alpha is the vector of regression coefficients of GaussianProcess
alpha = sp.dot(inverse, y)
self.y = y
self.y_mean, self.y_std = y_mean, y_std
if not self.low_memory:
self.inverse = inverse
self.alpha = sp.array(alpha)
def predict(self, X, eval_MSE=False, return_k=False):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
return_k : boolean, optional
A boolean specifying whether the function should return the kernel
vector (kernel of distances between test configurations and database ones).
Default is False.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
k : array_like, optional (if return_k == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y
"""
# Check input shapes
X = array2d(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
X = (X - self.X_mean) / self.X_std
# Initialize output
y = sp.zeros(n_eval)
if eval_MSE:
MSE = sp.zeros(n_eval)
# Get distances between each new point in X and all input training set
# dx = sp.asarray([[ LA.norm(p-q) for q in self.X] for p in X]) # SLOW!!!
if self.metric == 'euclidean':
dx = (((self.X - X[:,None])**2).sum(axis=2))**0.5
elif self.metric == 'cityblock':
dx = (sp.absolute(self.X - X[:,None])).sum(axis=2)
else:
raise ValueError("metric %r not understood" % self.metric)
if self.normalise == -1:
natoms_db = (self.X != 0.).sum(1)
natoms_t = (X != 0.).sum(1)
dx = dx / sp.sqrt(natoms_db * natoms_t[:, None])
# Evaluate correlation
k = kernel(dx, self.theta0, self.corr)
# UNNECESSARY: feature relevance
if self.do_features_projection:
self.feat_proj = self.alpha.flatten() * k
y_scaled = self.feat_proj.sum(axis=1)
else:
# Scaled predictor
y_scaled = sp.dot(k, self.alpha)
# Predictor
y = (self.y_mean + self.y_std * y_scaled).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Calculate mean square error of each prediction
if eval_MSE:
MSE = sp.dot(sp.dot(k, self.inverse), k.T)
if k.ndim > 1: MSE = sp.diagonal(MSE)
MSE = kernel(0.0, self.theta0, self.corr) + self.nugget - MSE
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < MACHINE_EPSILON] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
if return_k:
return y, MSE, k
else:
return y, MSE
elif return_k:
return y, k
else:
return y
def teach_database_plusone(self, X, y, X_t, y_t):
"""
Gaussian Process model fitting; the target is to obtain the correct regression coefficient
for each configuration (X_i, y_i), i in t, as if configuration i had been included in the teaching set.
Parameters
----------
X, X_t : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y, y_t : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
alpha: an array of regression coefficients with shape (n_samples, ).
"""
self.flush_data()
# Force all data to be numpy arrays
X, y = sp.asarray(X), sp.asarray(y)
X_t, y_t = sp.asarray(X_t), sp.asarray(y_t)
# From a fixed database (X,y), get alpha of some new configurations if added one at a time
alphas = []
for i, (X_test, y_test) in enumerate(zip(X_t, y_t)):
if y_test.size != 1:
print "ERROR: output space must be 1D. Exiting..."
return
# Test configuration is placed at position 0
X_plus = sp.row_stack((X_test, X))
y_plus = sp.append(y_test, y)
self.fit(X_plus, y_plus)
alphas.append((self.alpha[0]).flatten().copy())
self.flush_data()
return sp.array(alphas).flatten()
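# Hedged example of teach_database_plusone (X_db, y_db, X_new, y_new are hypothetical
# arrays of matching feature dimension; illustrative only):
#
# alphas = gp.teach_database_plusone(X_db, y_db, X_new, y_new)
# # alphas[i] is the regression coefficient configuration X_new[i] would receive
# # if it were prepended to the fixed database (X_db, y_db) and the model refitted.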
# def fitKoK(self, X, y):
# # Business as usual, but now X is a list of matrices and y is a list of vectors:
# # each element of X is the kernel matrix for a given \theta_i, each element of y is the regression coefficients vector for a given \theta_i
# # Force data to numpy.array
# X = sp.asarray(X)
# y = sp.asarray(y)
# D = sp.zeros([len(X), len(X)])
# for i, A in enumerate(X):
# for j,B in enumerate(X[:i]):
# D[i,j] = matrix_distance(A,B)
# D[j,i] = D[i,j]
# # D = sp.spatial.distance.squareform(D)
# # Covariance matrix K
# # sklearn correlation doesn't work. Probably correlation_models needs some different inputs
# K = kernel(D, self.theta0, correlation=self.corr)
# err = 'bb'
# # Cholesky.CholeskyInverse(Cholesky.Cholesky(K + self.nugget * sp.eye(n_samples))) This method should work but doesn't
# try:
# inverse = LA.inv(K + self.nugget * sp.ones(n_samples))
# except LA.LinAlgError as err:
# print "inv failed: %s. Switching to pinvh" % err
# try:
# inverse = LA.pinvh(K + self.nugget * sp.eye(n_samples))
# except LA.LinAlgError as err:
# print "pinvh failed: %s. Switching to pinvh" % err
# try:
# inverse = LA.pinv2(K + self.nugget * sp.eye(n_samples))
# except LA.LinAlgError as err:
# print "pinv2 failed: %s. Failed to invert matrix." % err
# inverse = None
# # alpha is the vector of regression coefficients of GaussianProcess
# alpha = sp.dot(inverse, y)
# self.X = X
# self.y = y
# if not self.low_memory:
# self.D = D
# self.K = K
# self.inverse = inverse
# self.alpha = sp.array(alpha)
# self.X_mean, self.X_std = 1.0, 0.0
# self.y_mean, self.y_std = 1.0, 0.0
# def predict_KoK(self, X):
# """
# This function evaluates the Gaussian Process model at a set of points X.
# Parameters
# X : array_like
# An array with shape (n_eval, n_features) giving the point(s) at
# which the prediction(s) should be made.
# Returns
# -------
# y : array_like, shape (n_samples, ) or (n_samples, n_targets)
# An array with shape (n_eval, ) if the Gaussian Process was trained
# on an array of shape (n_samples, ) or an array with shape
# (n_eval, n_targets) if the Gaussian Process was trained on an array
# of shape (n_samples, n_targets) with the Best Linear Unbiased
# Prediction at x.
# """
# # Check input shapes
# X = array2d(X)
# n_eval, _ = X.shape
# n_samples, n_features = self.X.shape
# n_samples_y, n_targets = self.y.shape
# if X.shape[1] != n_features:
# raise ValueError(("The number of features in X (X.shape[1] = %d) "
# "should match the number of features used "
# "for fit() "
# "which is %d.") % (X.shape[1], n_features))
# X = (X - self.X_mean) / self.X_std
# # Initialize output
# y = sp.zeros(n_eval)
# if eval_MSE:
# MSE = sp.zeros(n_eval)
# # Get distances between each new point in X and all input training set
# # dx = sp.asarray([[ LA.norm(p-q) for q in self.X] for p in X]) # SLOW!!!
# dx = (((self.X - X[:,None])**2).sum(axis=2))**0.5
# # Evaluate correlation
# k = kernel(dx, self.theta0, self.corr)
# # UNNECESSARY: feature relevance
# if self.do_features_projection:
# self.feat_proj = self.alpha.flatten() * k
# y_scaled = self.feat_proj.sum(axis=1)
# else:
# # Scaled predictor
# y_scaled = sp.dot(k, self.alpha)
# # Predictor
# y = (self.y_mean + self.y_std * y_scaled).reshape(n_eval, n_targets)
# if self.y_ndim_ == 1:
# y = y.ravel()
# # Calculate mean square error of each prediction
# if eval_MSE:
# MSE = sp.dot(sp.dot(k, self.inverse), k.T)
# if k.ndim > 1: MSE = sp.diagonal(MSE)
# MSE = kernel(0.0, self.theta0, self.corr) + self.nugget - MSE
# # Mean Squared Error might be slightly negative depending on
# # machine precision: force to zero!
# MSE[MSE < MACHINE_EPSILON] = 0.
# if self.y_ndim_ == 1:
# MSE = MSE.ravel()
# return y, MSE
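# Hedged usage sketch for the GaussianProcess class above (synthetic data, illustrative only;
# requires the legacy sklearn.utils.array2d import at the top of this module to resolve):
#
# X = sp.random.rand(20, 3) # 20 samples, 3 features
# y = sp.sin(X).sum(axis=1) # toy targets
# gp = GaussianProcess(corr='squared_exponential', theta0=0.5, normalise=1)
# gp.fit(X, y)
# y_pred, mse = gp.predict(sp.random.rand(5, 3), eval_MSE=True)
# y_pred, mse, k = gp.predict(sp.random.rand(5, 3), eval_MSE=True, return_k=True)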
|
|
from __future__ import absolute_import
# Project imports
import mock
import os
import re
import shutil
import sys
import time
from datetime import datetime
from datetime import timedelta
from tempfile import gettempdir
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))))
from . import helper
from elodie.config import load_config
from elodie.filesystem import FileSystem
from elodie.media.text import Text
from elodie.media.media import Media
from elodie.media.photo import Photo
from elodie.media.video import Video
from nose.plugins.skip import SkipTest
from elodie.external.pyexiftool import ExifTool
from elodie.dependencies import get_exiftool
from elodie import constants
os.environ['TZ'] = 'GMT'
def setup_module():
exiftool_addedargs = [
u'-config',
u'"{}"'.format(constants.exiftool_config)
]
ExifTool(executable_=get_exiftool(), addedargs=exiftool_addedargs).start()
def teardown_module():
ExifTool().terminate()
def test_create_directory_success():
filesystem = FileSystem()
folder = os.path.join(helper.temp_dir(), helper.random_string(10))
status = filesystem.create_directory(folder)
# Needs to be a subdirectory
assert helper.temp_dir() != folder
assert status == True
assert os.path.isdir(folder) == True
assert os.path.exists(folder) == True
# Clean up
shutil.rmtree(folder)
def test_create_directory_recursive_success():
filesystem = FileSystem()
folder = os.path.join(helper.temp_dir(), helper.random_string(10), helper.random_string(10))
status = filesystem.create_directory(folder)
# Needs to be a subdirectory
assert helper.temp_dir() != folder
assert status == True
assert os.path.isdir(folder) == True
assert os.path.exists(folder) == True
shutil.rmtree(folder)
@mock.patch('elodie.filesystem.os.makedirs')
def test_create_directory_invalid_permissions(mock_makedirs):
if os.name == 'nt':
raise SkipTest("It isn't implemented on Windows")
# Mock the case where makedirs raises an OSError because the user does
# not have permission to create the given directory.
mock_makedirs.side_effect = OSError()
filesystem = FileSystem()
status = filesystem.create_directory('/apathwhichdoesnotexist/afolderwhichdoesnotexist')
assert status == False
def test_delete_directory_if_empty():
filesystem = FileSystem()
folder = os.path.join(helper.temp_dir(), helper.random_string(10))
os.makedirs(folder)
assert os.path.isdir(folder) == True
assert os.path.exists(folder) == True
filesystem.delete_directory_if_empty(folder)
assert os.path.isdir(folder) == False
assert os.path.exists(folder) == False
def test_delete_directory_if_empty_when_not_empty():
filesystem = FileSystem()
folder = os.path.join(helper.temp_dir(), helper.random_string(10), helper.random_string(10))
os.makedirs(folder)
parent_folder = os.path.dirname(folder)
assert os.path.isdir(folder) == True
assert os.path.exists(folder) == True
assert os.path.isdir(parent_folder) == True
assert os.path.exists(parent_folder) == True
filesystem.delete_directory_if_empty(parent_folder)
assert os.path.isdir(folder) == True
assert os.path.exists(folder) == True
assert os.path.isdir(parent_folder) == True
assert os.path.exists(parent_folder) == True
shutil.rmtree(parent_folder)
def test_get_all_files_success():
filesystem = FileSystem()
folder = helper.populate_folder(5)
files = set()
files.update(filesystem.get_all_files(folder))
shutil.rmtree(folder)
length = len(files)
assert length == 5, files
def test_get_all_files_by_extension():
filesystem = FileSystem()
folder = helper.populate_folder(5)
files = set()
files.update(filesystem.get_all_files(folder))
length = len(files)
assert length == 5, length
files = set()
files.update(filesystem.get_all_files(folder, 'jpg'))
length = len(files)
assert length == 3, length
files = set()
files.update(filesystem.get_all_files(folder, 'txt'))
length = len(files)
assert length == 2, length
files = set()
files.update(filesystem.get_all_files(folder, 'gif'))
length = len(files)
assert length == 0, length
shutil.rmtree(folder)
def test_get_all_files_with_only_invalid_file():
filesystem = FileSystem()
folder = helper.populate_folder(0, include_invalid=True)
files = set()
files.update(filesystem.get_all_files(folder))
shutil.rmtree(folder)
length = len(files)
assert length == 0, length
def test_get_all_files_with_invalid_file():
filesystem = FileSystem()
folder = helper.populate_folder(5, include_invalid=True)
files = set()
files.update(filesystem.get_all_files(folder))
shutil.rmtree(folder)
length = len(files)
assert length == 5, length
def test_get_all_files_for_loop():
filesystem = FileSystem()
folder = helper.populate_folder(5)
files = set()
files.update()
counter = 0
for file in filesystem.get_all_files(folder):
counter += 1
shutil.rmtree(folder)
assert counter == 5, counter
def test_get_current_directory():
filesystem = FileSystem()
assert os.getcwd() == filesystem.get_current_directory()
def test_get_file_name_definition_default():
filesystem = FileSystem()
name_template, definition = filesystem.get_file_name_definition()
assert name_template == '%date-%original_name-%title.%extension', name_template
assert definition == [[('date', '%Y-%m-%d_%H-%M-%S')], [('original_name', '')], [('title', '')], [('extension', '')]], definition #noqa
@mock.patch('elodie.config.config_file', '%s/config.ini-custom-filename' % gettempdir())
def test_get_file_name_definition_custom():
with open('%s/config.ini-custom-filename' % gettempdir(), 'w') as f:
f.write("""
[File]
date=%Y-%m-%b
name=%date-%original_name.%extension
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
name_template, definition = filesystem.get_file_name_definition()
if hasattr(load_config, 'config'):
del load_config.config
assert name_template == '%date-%original_name.%extension', name_template
assert definition == [[('date', '%Y-%m-%b')], [('original_name', '')], [('extension', '')]], definition #noqa
def test_get_file_name_plain():
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
assert file_name == helper.path_tz_fix('2015-12-05_00-59-26-plain.jpg'), file_name
def test_get_file_name_with_title():
filesystem = FileSystem()
media = Photo(helper.get_file('with-title.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
assert file_name == helper.path_tz_fix('2015-12-05_00-59-26-with-title-some-title.jpg'), file_name
def test_get_file_name_with_original_name_exif():
filesystem = FileSystem()
media = Photo(helper.get_file('with-filename-in-exif.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
assert file_name == helper.path_tz_fix('2015-12-05_00-59-26-foobar.jpg'), file_name
def test_get_file_name_with_original_name_title_exif():
filesystem = FileSystem()
media = Photo(helper.get_file('with-filename-and-title-in-exif.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
assert file_name == helper.path_tz_fix('2015-12-05_00-59-26-foobar-foobar-title.jpg'), file_name
def test_get_file_name_with_uppercase_and_spaces():
filesystem = FileSystem()
media = Photo(helper.get_file('Plain With Spaces And Uppercase 123.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
assert file_name == helper.path_tz_fix('2015-12-05_00-59-26-plain-with-spaces-and-uppercase-123.jpg'), file_name
@mock.patch('elodie.config.config_file', '%s/config.ini-filename-custom' % gettempdir())
def test_get_file_name_custom():
with open('%s/config.ini-filename-custom' % gettempdir(), 'w') as f:
f.write("""
[File]
date=%Y-%m-%b
name=%date-%original_name.%extension
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert file_name == helper.path_tz_fix('2015-12-dec-plain.jpg'), file_name
@mock.patch('elodie.config.config_file', '%s/config.ini-filename-custom-with-title' % gettempdir())
def test_get_file_name_custom_with_title():
with open('%s/config.ini-filename-custom-with-title' % gettempdir(), 'w') as f:
f.write("""
[File]
date=%Y-%m-%d
name=%date-%original_name-%title.%extension
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('with-title.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert file_name == helper.path_tz_fix('2015-12-05-with-title-some-title.jpg'), file_name
@mock.patch('elodie.config.config_file', '%s/config.ini-filename-custom-with-empty-value' % gettempdir())
def test_get_file_name_custom_with_empty_value():
with open('%s/config.ini-filename-custom-with-empty-value' % gettempdir(), 'w') as f:
f.write("""
[File]
date=%Y-%m-%d
name=%date-%original_name-%title.%extension
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert file_name == helper.path_tz_fix('2015-12-05-plain.jpg'), file_name
@mock.patch('elodie.config.config_file', '%s/config.ini-filename-custom-with-lowercase' % gettempdir())
def test_get_file_name_custom_with_lower_capitalization():
with open('%s/config.ini-filename-custom-with-lowercase' % gettempdir(), 'w') as f:
f.write("""
[File]
date=%Y-%m-%d
name=%date-%original_name-%title.%extension
capitalization=lower
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert file_name == helper.path_tz_fix('2015-12-05-plain.jpg'), file_name
@mock.patch('elodie.config.config_file', '%s/config.ini-filename-custom-with-invalidcase' % gettempdir())
def test_get_file_name_custom_with_invalid_capitalization():
with open('%s/config.ini-filename-custom-with-invalidcase' % gettempdir(), 'w') as f:
f.write("""
[File]
date=%Y-%m-%d
name=%date-%original_name-%title.%extension
capitalization=garbage
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert file_name == helper.path_tz_fix('2015-12-05-plain.jpg'), file_name
@mock.patch('elodie.config.config_file', '%s/config.ini-filename-custom-with-uppercase' % gettempdir())
def test_get_file_name_custom_with_upper_capitalization():
with open('%s/config.ini-filename-custom-with-uppercase' % gettempdir(), 'w') as f:
f.write("""
[File]
date=%Y-%m-%d
name=%date-%original_name-%title.%extension
capitalization=upper
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
file_name = filesystem.get_file_name(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert file_name == helper.path_tz_fix('2015-12-05-PLAIN.JPG'), file_name
def test_get_folder_path_plain():
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
assert path == os.path.join('2015-12-Dec','Unknown Location'), path
def test_get_folder_path_with_title():
filesystem = FileSystem()
media = Photo(helper.get_file('with-title.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
assert path == os.path.join('2015-12-Dec','Unknown Location'), path
def test_get_folder_path_with_location():
filesystem = FileSystem()
media = Photo(helper.get_file('with-location.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
assert path == os.path.join('2015-12-Dec','Sunnyvale'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-original-with-camera-make-and-model' % gettempdir())
def test_get_folder_path_with_camera_make_and_model():
with open('%s/config.ini-original-with-camera-make-and-model' % gettempdir(), 'w') as f:
f.write("""
[Directory]
full_path=%camera_make/%camera_model
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('Canon', 'Canon EOS REBEL T2i'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-original-with-camera-make-and-model-fallback' % gettempdir())
def test_get_folder_path_with_camera_make_and_model_fallback():
with open('%s/config.ini-original-with-camera-make-and-model-fallback' % gettempdir(), 'w') as f:
f.write("""
[Directory]
full_path=%camera_make|"nomake"/%camera_model|"nomodel"
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('no-exif.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('nomake', 'nomodel'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-int-in-component-path' % gettempdir())
def test_get_folder_path_with_int_in_config_component():
# gh-239
with open('%s/config.ini-int-in-component-path' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%Y
full_path=%date
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('2015'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-combined-date-and-album' % gettempdir())
def test_get_folder_path_with_combined_date_and_album():
# gh-239
with open('%s/config.ini-combined-date-and-album' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%Y-%m-%b
custom=%date %album
full_path=%custom
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('with-album.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == '2015-12-Dec Test Album', path
@mock.patch('elodie.config.config_file', '%s/config.ini-combined-date-album-location-fallback' % gettempdir())
def test_get_folder_path_with_album_and_location_fallback():
# gh-279
with open('%s/config.ini-combined-date-album-location-fallback' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%Y-%m-%b
custom=%album
full_path=%custom|%city
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
# Test with no location
media = Photo(helper.get_file('plain.jpg'))
path_plain = filesystem.get_folder_path(media.get_metadata())
# Test with City
media = Photo(helper.get_file('with-location.jpg'))
path_city = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path_plain == 'Unknown Location', path_plain
assert path_city == 'Sunnyvale', path_city
def test_get_folder_path_with_int_in_source_path():
# gh-239
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder('int')
origin = os.path.join(folder,'plain.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
media = Photo(origin)
path = filesystem.get_folder_path(media.get_metadata())
assert path == os.path.join('2015-12-Dec','Unknown Location'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-original-default-unknown-location' % gettempdir())
def test_get_folder_path_with_original_default_unknown_location():
with open('%s/config.ini-original-default-unknown-location' % gettempdir(), 'w') as f:
f.write('')
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('2015-12-Dec','Unknown Location'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-custom-path' % gettempdir())
def test_get_folder_path_with_custom_path():
with open('%s/config.ini-custom-path' % gettempdir(), 'w') as f:
f.write("""
[MapQuest]
key=czjNKTtFjLydLteUBwdgKAIC8OAbGLUx
[Directory]
date=%Y-%m-%d
location=%country-%state-%city
full_path=%date/%location
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('with-location.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('2015-12-05','United States of America-California-Sunnyvale'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-fallback' % gettempdir())
def test_get_folder_path_with_fallback_folder():
with open('%s/config.ini-fallback' % gettempdir(), 'w') as f:
f.write("""
[Directory]
year=%Y
month=%m
full_path=%year/%month/%album|%"No Album Fool"/%month
""")
#full_path=%year/%album|"No Album"
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('2015','12','No Album Fool','12'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-location-date' % gettempdir())
def test_get_folder_path_with_with_more_than_two_levels():
with open('%s/config.ini-location-date' % gettempdir(), 'w') as f:
f.write("""
[MapQuest]
key=czjNKTtFjLydLteUBwdgKAIC8OAbGLUx
[Directory]
year=%Y
month=%m
location=%city, %state
full_path=%year/%month/%location
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('with-location.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('2015','12','Sunnyvale, California'), path
@mock.patch('elodie.config.config_file', '%s/config.ini-location-date' % gettempdir())
def test_get_folder_path_with_with_only_one_level():
with open('%s/config.ini-location-date' % gettempdir(), 'w') as f:
f.write("""
[Directory]
year=%Y
full_path=%year
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
media = Photo(helper.get_file('plain.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
if hasattr(load_config, 'config'):
del load_config.config
assert path == os.path.join('2015'), path
def test_get_folder_path_with_location_and_title():
filesystem = FileSystem()
media = Photo(helper.get_file('with-location-and-title.jpg'))
path = filesystem.get_folder_path(media.get_metadata())
assert path == os.path.join('2015-12-Dec','Sunnyvale'), path
def test_parse_folder_name_default():
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
place_name = {'default': u'California', 'country': u'United States of America', 'state': u'California', 'city': u'Sunnyvale'}
mask = '%city'
location_parts = re.findall('(%[^%]+)', mask)
path = filesystem.parse_mask_for_location(mask, location_parts, place_name)
if hasattr(load_config, 'config'):
del load_config.config
assert path == 'Sunnyvale', path
def test_parse_folder_name_multiple():
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
place_name = {'default': u'California', 'country': u'United States of America', 'state': u'California', 'city': u'Sunnyvale'}
mask = '%city-%state-%country'
location_parts = re.findall('(%[^%]+)', mask)
path = filesystem.parse_mask_for_location(mask, location_parts, place_name)
if hasattr(load_config, 'config'):
del load_config.config
assert path == 'Sunnyvale-California-United States of America', path
def test_parse_folder_name_static_chars():
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
place_name = {'default': u'California', 'country': u'United States of America', 'state': u'California', 'city': u'Sunnyvale'}
mask = '%city-is-the-city'
location_parts = re.findall('(%[^%]+)', mask)
path = filesystem.parse_mask_for_location(mask, location_parts, place_name)
if hasattr(load_config, 'config'):
del load_config.config
assert path == 'Sunnyvale-is-the-city', path
def test_parse_folder_name_key_not_found():
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
place_name = {'default': u'California', 'country': u'United States of America', 'state': u'California'}
mask = '%city'
location_parts = re.findall('(%[^%]+)', mask)
path = filesystem.parse_mask_for_location(mask, location_parts, place_name)
if hasattr(load_config, 'config'):
del load_config.config
assert path == 'California', path
def test_parse_folder_name_key_not_found_with_static_chars():
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
place_name = {'default': u'California', 'country': u'United States of America', 'state': u'California'}
mask = '%city-is-not-found'
location_parts = re.findall('(%[^%]+)', mask)
path = filesystem.parse_mask_for_location(mask, location_parts, place_name)
if hasattr(load_config, 'config'):
del load_config.config
assert path == 'California', path
def test_parse_folder_name_multiple_keys_not_found():
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
place_name = {'default': u'United States of America', 'country': u'United States of America'}
mask = '%city-%state'
location_parts = re.findall('(%[^%]+)', mask)
path = filesystem.parse_mask_for_location(mask, location_parts, place_name)
if hasattr(load_config, 'config'):
del load_config.config
assert path == 'United States of America', path
def test_process_file_invalid():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('invalid.jpg'), origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
assert destination is None
def test_process_file_plain():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-12-Dec','Unknown Location','2015-12-05_00-59-26-photo.jpg')) in destination, destination
def test_process_file_with_title():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = '%s/photo.jpg' % folder
shutil.copyfile(helper.get_file('with-title.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-12-Dec','Unknown Location','2015-12-05_00-59-26-photo-some-title.jpg')) in destination, destination
def test_process_file_with_location():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('with-location.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-12-Dec','Sunnyvale','2015-12-05_00-59-26-photo.jpg')) in destination, destination
def test_process_file_validate_original_checksum():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None, origin_checksum_preprocess
assert origin_checksum is not None, origin_checksum
assert destination_checksum is not None, destination_checksum
assert origin_checksum_preprocess == origin_checksum, (origin_checksum_preprocess, origin_checksum)
# See https://github.com/jmathai/elodie/issues/330
def test_process_file_no_exif_date_is_correct_gh_330():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('no-exif.jpg'), origin)
atime = 1330712100
utime = 1330712900
os.utime(origin, (atime, utime))
media = Photo(origin)
metadata = media.get_metadata()
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert '/2012-03-Mar/' in destination, destination
assert '/2012-03-02_18-28-20' in destination, destination
def test_process_file_with_location_and_title():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('with-location-and-title.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-12-Dec','Sunnyvale','2015-12-05_00-59-26-photo-some-title.jpg')) in destination, destination
def test_process_file_with_album():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('with-album.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-12-Dec','Test Album','2015-12-05_00-59-26-photo.jpg')) in destination, destination
def test_process_file_with_album_and_title():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('with-album-and-title.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-12-Dec','Test Album','2015-12-05_00-59-26-photo-some-title.jpg')) in destination, destination
def test_process_file_with_album_and_title_and_location():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('with-album-and-title-and-location.jpg'), origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
origin_checksum = helper.checksum(origin)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-12-Dec','Test Album','2015-12-05_00-59-26-photo-some-title.jpg')) in destination, destination
# gh-89 (setting album then title reverts album)
def test_process_video_with_album_then_title():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'movie.mov')
shutil.copyfile(helper.get_file('video.mov'), origin)
origin_checksum = helper.checksum(origin)
origin_checksum_preprocess = helper.checksum(origin)
media = Video(origin)
media.set_album('test_album')
media.set_title('test_title')
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
destination_checksum = helper.checksum(destination)
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
assert origin_checksum_preprocess is not None
assert origin_checksum is not None
assert destination_checksum is not None
assert origin_checksum_preprocess == origin_checksum
assert helper.path_tz_fix(os.path.join('2015-01-Jan','test_album','2015-01-19_12-45-11-movie-test_title.mov')) in destination, destination
@mock.patch('elodie.config.config_file', '%s/config.ini-fallback-folder' % gettempdir())
def test_process_file_fallback_folder():
with open('%s/config.ini-fallback-folder' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%Y-%m
full_path=%date/%album|"fallback"
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'plain.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
if hasattr(load_config, 'config'):
del load_config.config
assert helper.path_tz_fix(os.path.join('2015-12', 'fallback', '2015-12-05_00-59-26-plain.jpg')) in destination, destination
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
@mock.patch('elodie.config.config_file', '%s/config.ini-multiple-directories' % gettempdir())
def test_process_twice_more_than_two_levels_of_directories():
with open('%s/config.ini-multiple-directories' % gettempdir(), 'w') as f:
f.write("""
[Directory]
year=%Y
month=%m
day=%d
full_path=%year/%month/%day
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'plain.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
if hasattr(load_config, 'config'):
del load_config.config
assert helper.path_tz_fix(os.path.join('2015','12','05', '2015-12-05_00-59-26-plain.jpg')) in destination, destination
if hasattr(load_config, 'config'):
del load_config.config
media_second = Photo(destination)
media_second.set_title('foo')
destination_second = filesystem.process_file(destination, temporary_folder, media_second, allowDuplicate=True)
if hasattr(load_config, 'config'):
del load_config.config
assert destination.replace('.jpg', '-foo.jpg') == destination_second, destination_second
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
def test_process_existing_file_without_changes():
# gh-210
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'plain.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
assert helper.path_tz_fix(os.path.join('2015-12-Dec', 'Unknown Location', '2015-12-05_00-59-26-plain.jpg')) in destination, destination
media_second = Photo(destination)
destination_second = filesystem.process_file(destination, temporary_folder, media_second, allowDuplicate=True)
assert destination_second is None, destination_second
shutil.rmtree(folder)
shutil.rmtree(os.path.dirname(os.path.dirname(destination)))
@mock.patch('elodie.config.config_file', '%s/config.ini-plugin-throw-error' % gettempdir())
def test_process_file_with_plugin_throw_error():
with open('%s/config.ini-plugin-throw-error' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=ThrowError
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'plain.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
if hasattr(load_config, 'config'):
del load_config.config
assert destination is None, destination
@mock.patch('elodie.config.config_file', '%s/config.ini-plugin-runtime-error' % gettempdir())
def test_process_file_with_plugin_runtime_error():
with open('%s/config.ini-plugin-runtime-error' % gettempdir(), 'w') as f:
f.write("""
[Plugins]
plugins=RuntimeError
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'plain.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
media = Photo(origin)
destination = filesystem.process_file(origin, temporary_folder, media, allowDuplicate=True)
if hasattr(load_config, 'config'):
del load_config.config
assert '2015-12-Dec/Unknown Location/2015-12-05_00-59-26-plain.jpg' in destination, destination
def test_set_utime_with_exif_date():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('plain.jpg'), origin)
media_initial = Photo(origin)
metadata_initial = media_initial.get_metadata()
initial_stat = os.stat(origin)
initial_time = int(min(initial_stat.st_mtime, initial_stat.st_ctime))
initial_checksum = helper.checksum(origin)
assert initial_time != time.mktime(metadata_initial['date_taken'])
filesystem.set_utime_from_metadata(media_initial.get_metadata(), media_initial.get_file_path())
final_stat = os.stat(origin)
final_checksum = helper.checksum(origin)
media_final = Photo(origin)
metadata_final = media_final.get_metadata()
shutil.rmtree(folder)
assert initial_stat.st_mtime != final_stat.st_mtime
assert final_stat.st_mtime == time.mktime(metadata_final['date_taken'])
assert initial_checksum == final_checksum
def test_set_utime_without_exif_date():
filesystem = FileSystem()
temporary_folder, folder = helper.create_working_folder()
origin = os.path.join(folder,'photo.jpg')
shutil.copyfile(helper.get_file('no-exif.jpg'), origin)
media_initial = Photo(origin)
metadata_initial = media_initial.get_metadata()
initial_stat = os.stat(origin)
initial_time = int(min(initial_stat.st_mtime, initial_stat.st_ctime))
initial_checksum = helper.checksum(origin)
assert initial_time == time.mktime(metadata_initial['date_taken'])
filesystem.set_utime_from_metadata(media_initial.get_metadata(), media_initial.get_file_path())
final_stat = os.stat(origin)
final_checksum = helper.checksum(origin)
media_final = Photo(origin)
metadata_final = media_final.get_metadata()
shutil.rmtree(folder)
assert initial_time == final_stat.st_mtime
assert final_stat.st_mtime == time.mktime(metadata_final['date_taken']), (final_stat.st_mtime, time.mktime(metadata_final['date_taken']))
assert initial_checksum == final_checksum
def test_should_exclude_with_no_exclude_arg():
filesystem = FileSystem()
result = filesystem.should_exclude('/some/path')
assert result == False, result
def test_should_exclude_with_non_matching_regex():
filesystem = FileSystem()
result = filesystem.should_exclude('/some/path', {re.compile('foobar')})
assert result == False, result
def test_should_exclude_with_matching_regex():
filesystem = FileSystem()
result = filesystem.should_exclude('/some/path', {re.compile('some')})
assert result == True, result
def test_should_not_exclude_with_multiple_with_non_matching_regex():
filesystem = FileSystem()
result = filesystem.should_exclude('/some/path', {re.compile('foobar'), re.compile('dne')})
assert result == False, result
def test_should_exclude_with_multiple_with_one_matching_regex():
filesystem = FileSystem()
result = filesystem.should_exclude('/some/path', {re.compile('foobar'), re.compile('some')})
assert result == True, result
def test_should_exclude_with_complex_matching_regex():
filesystem = FileSystem()
result = filesystem.should_exclude('/var/folders/j9/h192v5v95gd_fhpv63qzyd1400d9ct/T/T497XPQH2R/UATR2GZZTX/2016-04-Apr/London/2016-04-07_11-15-26-valid-sample-title.txt', {re.compile('London.*\.txt$')})
assert result == True, result
@mock.patch('elodie.config.config_file', '%s/config.ini-does-not-exist' % gettempdir())
def test_get_folder_path_definition_default():
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
if hasattr(load_config, 'config'):
del load_config.config
assert path_definition == [[('date', '%Y-%m-%b')], [('album', ''), ('location', '%city'), ('"Unknown Location"', '')]], path_definition
@mock.patch('elodie.config.config_file', '%s/config.ini-date-location' % gettempdir())
def test_get_folder_path_definition_date_location():
with open('%s/config.ini-date-location' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%Y-%m-%d
location=%country
full_path=%date/%location
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
expected = [
[('date', '%Y-%m-%d')], [('location', '%country')]
]
if hasattr(load_config, 'config'):
del load_config.config
assert path_definition == expected, path_definition
@mock.patch('elodie.config.config_file', '%s/config.ini-location-date' % gettempdir())
def test_get_folder_path_definition_location_date():
with open('%s/config.ini-location-date' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%Y-%m-%d
location=%country
full_path=%location/%date
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
expected = [
[('location', '%country')], [('date', '%Y-%m-%d')]
]
if hasattr(load_config, 'config'):
del load_config.config
assert path_definition == expected, path_definition
@mock.patch('elodie.config.config_file', '%s/config.ini-cached' % gettempdir())
def test_get_folder_path_definition_cached():
with open('%s/config.ini-cached' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%Y-%m-%d
location=%country
full_path=%date/%location
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
expected = [
[('date', '%Y-%m-%d')], [('location', '%country')]
]
assert path_definition == expected, path_definition
with open('%s/config.ini-cached' % gettempdir(), 'w') as f:
f.write("""
[Directory]
date=%uncached
location=%uncached
full_path=%date/%location
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
expected = [
[('date', '%Y-%m-%d')], [('location', '%country')]
]
assert path_definition == expected, path_definition
if hasattr(load_config, 'config'):
del load_config.config
@mock.patch('elodie.config.config_file', '%s/config.ini-location-date' % gettempdir())
def test_get_folder_path_definition_with_more_than_two_levels():
with open('%s/config.ini-location-date' % gettempdir(), 'w') as f:
f.write("""
[Directory]
year=%Y
month=%m
day=%d
full_path=%year/%month/%day
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
expected = [
[('year', '%Y')], [('month', '%m')], [('day', '%d')]
]
if hasattr(load_config, 'config'):
del load_config.config
assert path_definition == expected, path_definition
@mock.patch('elodie.config.config_file', '%s/config.ini-location-date' % gettempdir())
def test_get_folder_path_definition_with_only_one_level():
with open('%s/config.ini-location-date' % gettempdir(), 'w') as f:
f.write("""
[Directory]
year=%Y
full_path=%year
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
expected = [
[('year', '%Y')]
]
if hasattr(load_config, 'config'):
del load_config.config
assert path_definition == expected, path_definition
@mock.patch('elodie.config.config_file', '%s/config.ini-multi-level-custom' % gettempdir())
def test_get_folder_path_definition_multi_level_custom():
with open('%s/config.ini-multi-level-custom' % gettempdir(), 'w') as f:
f.write("""
[Directory]
year=%Y
month=%M
full_path=%year/%album|%month|%"foo"/%month
""")
if hasattr(load_config, 'config'):
del load_config.config
filesystem = FileSystem()
path_definition = filesystem.get_folder_path_definition()
expected = [[('year', '%Y')], [('album', ''), ('month', '%M'), ('"foo"', '')], [('month', '%M')]]
if hasattr(load_config, 'config'):
del load_config.config
assert path_definition == expected, path_definition
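# The tests above repeat the same boilerplate: write a temporary config file
# and drop the cached ``load_config.config`` attribute before and after the
# call under test. The context manager below is a minimal sketch of how that
# pattern could be factored out; the name ``temporary_elodie_config`` and the
# usage shown in the trailing comment are illustrative assumptions, not part
# of the elodie test suite.
from contextlib import contextmanager

@contextmanager
def temporary_elodie_config(path, contents):
    # Write the throwaway config and make sure no stale config is cached.
    with open(path, 'w') as f:
        f.write(contents)
    if hasattr(load_config, 'config'):
        del load_config.config
    try:
        yield
    finally:
        # Clear the cache again so later tests start from a clean state.
        if hasattr(load_config, 'config'):
            del load_config.config

# Example usage (sketch). The test would still need a
# @mock.patch('elodie.config.config_file', ...) decorator pointing at the
# same path so elodie actually reads the temporary file:
#   with temporary_elodie_config('%s/config.ini-example' % gettempdir(),
#                                "[Directory]\nyear=%Y\nfull_path=%year\n"):
#       path = FileSystem().get_folder_path(media.get_metadata())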
|
|
"""
The httpauth module defines functions to implement HTTP Digest
Authentication (RFC 2617).
It is fully compliant with the 'Digest' and 'Basic' authentication methods. For
'Digest' it supports both the MD5 and MD5-sess algorithms.
Usage:
First use 'doAuth' to request client authentication for a
certain resource. You should send an httplib.UNAUTHORIZED response to the
client so it knows it has to authenticate itself.
Then use 'parseAuthorization' to retrieve the 'auth_map' used in
'checkResponse'.
To use 'checkResponse' you must have already verified the password
associated with the 'username' key in 'auth_map' dict. Then you use the
'checkResponse' function to verify if the password matches the one sent by
the client.
SUPPORTED_ALGORITHM - list of supported 'Digest' algorithms
SUPPORTED_QOP - list of supported 'Digest' 'qop'.
"""
__version__ = 1, 0, 1
__author__ = "Tiago Cogumbreiro <[email protected]>"
__credits__ = """
Peter van Kampen for his recipe, which implements most of Digest
authentication:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302378
"""
__license__ = """
Copyright (c) 2005, Tiago Cogumbreiro <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Sylvain Hellegouarch nor the names of his contributor
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__all__ = ("digestAuth", "basicAuth", "doAuth", "checkResponse",
"parseAuthorization", "SUPPORTED_ALGORITHM", "md5SessionKey",
"calculateNonce", "SUPPORTED_QOP")
###############################################################################
import time
try:
from base64 import decodebytes as base64_decodebytes
except ImportError:
from base64 import b64decode as base64_decodebytes # NOQA
try:
from urllib.request import parse_http_list, parse_keqv_list
except ImportError:
from urllib2 import parse_http_list, parse_keqv_list # NOQA
from hashlib import md5, sha1
MD5 = "MD5"
SHA1 = "SHA1"
MD5_SESS = "MD5-sess"
AUTH = "auth"
AUTH_INT = "auth-int"
SUPPORTED_ALGORITHM = (MD5, MD5_SESS)
SUPPORTED_QOP = (AUTH, AUTH_INT)
###############################################################################
# doAuth
#
DIGEST_AUTH_ENCODERS = {
MD5: lambda val: md5(val).hexdigest(),
MD5_SESS: lambda val: md5(val).hexdigest(),
SHA1: lambda val: sha1(val).hexdigest(),
}
def calculateNonce(realm, algorithm=MD5):
"""This is an auxaliary function that calculates 'nonce' value. It is used
to handle sessions."""
assert algorithm in SUPPORTED_ALGORITHM
try:
encoder = DIGEST_AUTH_ENCODERS[algorithm]
except KeyError:
raise NotImplementedError(
"The chosen algorithm (%s) does not have "
"an implementation yet" % algorithm
)
s = "%d:%s" % (time.time(), realm)
return encoder(s.encode("utf-8"))
def digestAuth(realm, algorithm=MD5, nonce=None, qop=AUTH):
"""Challenges the client for a Digest authentication."""
assert algorithm in SUPPORTED_ALGORITHM
assert qop in SUPPORTED_QOP
if nonce is None:
nonce = calculateNonce(realm, algorithm)
return 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
realm, nonce, algorithm, qop
)
def basicAuth(realm):
"""Challengenes the client for a Basic authentication."""
assert '"' not in realm, "Realms cannot contain the \" (quote) character."
return 'Basic realm="%s"' % realm
def doAuth(realm):
"""'doAuth' function returns the challenge string b giving priority over
Digest and fallback to Basic authentication when the browser doesn't
support the first one.
This should be set in the HTTP header under the key 'WWW-Authenticate'."""
return digestAuth(realm) + " " + basicAuth(realm)
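# Illustrative sketch (not part of the original module): a server-side handler
# would typically place the result of doAuth() in the 'WWW-Authenticate'
# header of a 401 response. The realm string and helper name below are
# assumptions for the example only.
def _example_challenge_headers(realm="Protected Area"):
    # The single header value carries both challenges, so clients that do not
    # support Digest can fall back to Basic.
    return {"WWW-Authenticate": doAuth(realm)}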
###############################################################################
# Parse authorization parameters
#
def _parseDigestAuthorization(auth_params):
# Convert the auth params to a dict
items = parse_http_list(auth_params)
params = parse_keqv_list(items)
# Now validate the params
# Check for required parameters
required = ["username", "realm", "nonce", "uri", "response"]
for k in required:
if k not in params:
return None
# If qop is sent then cnonce and nc MUST be present
if "qop" in params and not ("cnonce" in params and "nc" in params):
return None
# If qop is not sent, neither cnonce nor nc can be present
if ("cnonce" in params or "nc" in params) and "qop" not in params:
return None
return params
def _parseBasicAuthorization(auth_params):
auth_params = auth_params.encode("utf-8")
username, password = base64_decodebytes(auth_params).split(b":", 1)
username = username.decode("utf-8")
password = password.decode("utf-8")
return {"username": username, "password": password}
AUTH_SCHEMES = {
"basic": _parseBasicAuthorization,
"digest": _parseDigestAuthorization,
}
def parseAuthorization(credentials):
"""parseAuthorization will convert the value of the 'Authorization' key in
the HTTP header to a map itself. If the parsing fails 'None' is returned.
"""
auth_scheme, auth_params = credentials.split(" ", 1)
auth_scheme = auth_scheme.lower()
parser = AUTH_SCHEMES[auth_scheme]
params = parser(auth_params)
if params is None:
return
assert "auth_scheme" not in params
params["auth_scheme"] = auth_scheme
return params
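# Illustrative sketch (not part of the original module): parsing a Basic
# 'Authorization' header value. The credentials are made up for the example.
def _example_parse_basic_header():
    # "dXNlcjpzZWNyZXQ=" is base64 for "user:secret".
    auth_map = parseAuthorization("Basic dXNlcjpzZWNyZXQ=")
    # -> {'username': 'user', 'password': 'secret', 'auth_scheme': 'basic'}
    return auth_map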
###############################################################################
# Check provided response for a valid password
#
def md5SessionKey(params, password):
"""
If the "algorithm" directive's value is "MD5-sess", then A1
[the session key] is calculated only once - on the first request by the
client following receipt of a WWW-Authenticate challenge from the server.
This creates a 'session key' for the authentication of subsequent
requests and responses which is different for each "authentication
session", thus limiting the amount of material hashed with any one
key.
Because the server need only use the hash of the user
credentials in order to create the A1 value, this construction could
be used in conjunction with a third party authentication service so
that the web server would not need the actual password value. The
specification of such a protocol is beyond the scope of this
specification.
"""
keys = ("username", "realm", "nonce", "cnonce")
params_copy = {}
for key in keys:
params_copy[key] = params[key]
params_copy["algorithm"] = MD5_SESS
return _A1(params_copy, password)
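# Illustrative sketch (not part of the original module): with "MD5-sess" the
# server can compute A1 once via md5SessionKey() and reuse it for later
# requests in the same session by passing it as the 'A1' argument of
# checkResponse(). The argument names below are assumptions for the example.
def _example_md5_sess_check(auth_map, password, realm):
    session_a1 = md5SessionKey(auth_map, password)
    return checkResponse(auth_map, password, method="GET",
                         realm=realm, A1=session_a1)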
def _A1(params, password):
algorithm = params.get("algorithm", MD5)
H = DIGEST_AUTH_ENCODERS[algorithm]
if algorithm == MD5:
# If the "algorithm" directive's value is "MD5" or is
# unspecified, then A1 is:
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
return "%s:%s:%s" % (params["username"], params["realm"], password)
elif algorithm == MD5_SESS:
# This is A1 if qop is set
# A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
# ":" unq(nonce-value) ":" unq(cnonce-value)
s = "%s:%s:%s" % (params["username"], params["realm"], password)
h_a1 = H(s.encode("utf-8"))
return "%s:%s:%s" % (h_a1, params["nonce"], params["cnonce"])
def _A2(params, method, kwargs):
# If the "qop" directive's value is "auth" or is unspecified, then A2 is:
# A2 = Method ":" digest-uri-value
qop = params.get("qop", "auth")
if qop == "auth":
return method + ":" + params["uri"]
elif qop == "auth-int":
# If the "qop" value is "auth-int", then A2 is:
# A2 = Method ":" digest-uri-value ":" H(entity-body)
entity_body = kwargs.get("entity_body", "")
H = kwargs["H"]
return "%s:%s:%s" % (
method,
params["uri"],
H(entity_body)
)
else:
raise NotImplementedError("The 'qop' method is unknown: %s" % qop)
def _computeDigestResponse(auth_map, password, method="GET", A1=None,
**kwargs):
"""
Generates a response respecting the algorithm defined in RFC 2617
"""
params = auth_map
algorithm = params.get("algorithm", MD5)
H = DIGEST_AUTH_ENCODERS[algorithm]
def KD(secret, data):
s = secret + ":" + data
return H(s.encode("utf-8"))
qop = params.get("qop", None)
s = _A2(params, method, kwargs)
H_A2 = H(s.encode("utf-8"))
if algorithm == MD5_SESS and A1 is not None:
H_A1 = H(A1.encode("utf-8"))
else:
s = _A1(params, password)
H_A1 = H(s.encode("utf-8"))
if qop in ("auth", "auth-int"):
# If the "qop" value is "auth" or "auth-int":
# request-digest = <"> < KD ( H(A1), unq(nonce-value)
# ":" nc-value
# ":" unq(cnonce-value)
# ":" unq(qop-value)
# ":" H(A2)
# ) <">
request = "%s:%s:%s:%s:%s" % (
params["nonce"],
params["nc"],
params["cnonce"],
params["qop"],
H_A2,
)
elif qop is None:
# If the "qop" directive is not present (this construction is
# for compatibility with RFC 2069):
# request-digest =
# <"> < KD ( H(A1), unq(nonce-value) ":" H(A2) ) > <">
request = "%s:%s" % (params["nonce"], H_A2)
return KD(H_A1, request)
def _checkDigestResponse(auth_map, password, method="GET", A1=None, **kwargs):
"""This function is used to verify the response given by the client when
he tries to authenticate.
Optional arguments:
entity_body - when 'qop' is set to 'auth-int' you MUST provide the
raw data you are going to send to the client (usually the
HTML page.
request_uri - the uri from the request line compared with the 'uri'
directive of the authorization map. They must represent
the same resource (unused at this time).
"""
if auth_map['realm'] != kwargs.get('realm', None):
return False
response = _computeDigestResponse(auth_map, password, method, A1, **kwargs)
return response == auth_map["response"]
def _checkBasicResponse(auth_map, password, method='GET', encrypt=None,
**kwargs):
# Note that the Basic response doesn't provide the realm value so we cannot
# test it
try:
return encrypt(auth_map["password"], auth_map["username"]) == password
except TypeError:
return encrypt(auth_map["password"]) == password
AUTH_RESPONSES = {
"basic": _checkBasicResponse,
"digest": _checkDigestResponse,
}
def checkResponse(auth_map, password, method="GET", encrypt=None, **kwargs):
"""'checkResponse' compares the auth_map with the password and optionally
other arguments that each implementation might need.
If the response is of type 'Basic' then the function has the following
signature:
checkBasicResponse (auth_map, password) -> bool
If the response is of type 'Digest' then the function has the following
signature:
checkDigestResponse (auth_map, password, method = 'GET', A1 = None) -> bool
The 'A1' argument is only used in MD5_SESS algorithm based responses.
Check md5SessionKey() for more info.
"""
checker = AUTH_RESPONSES[auth_map["auth_scheme"]]
return checker(
auth_map, password, method=method, encrypt=encrypt, **kwargs
)
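# Minimal end-to-end sketch of the Basic flow (illustrative only, not part of
# the original module). The realm, the credentials and the plain-text
# ``encrypt`` callable are assumptions; a real application would compare
# against hashed passwords.
if __name__ == "__main__":
    realm = "Protected Area"
    challenge = doAuth(realm)           # sent to the client as WWW-Authenticate
    header = "Basic dXNlcjpzZWNyZXQ="   # base64("user:secret") from the client
    auth_map = parseAuthorization(header)
    # For Basic auth, checkResponse() runs the client-supplied credentials
    # through ``encrypt`` and compares the result with the stored password;
    # the identity function keeps this sketch self-contained.
    ok = checkResponse(auth_map, "secret",
                       encrypt=lambda password, username=None: password)
    print(challenge)
    print(auth_map["username"], ok)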
|
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.lib.robot.api import logger
from robotide.lib.robot.utils import (is_dict_like, is_string, is_truthy, plural_or_not,
seq2str, seq2str2, type_name, unic, Matcher)
from robotide.lib.robot.utils.asserts import assert_equals
from robotide.lib.robot.version import get_version
class _List(object):
def convert_to_list(self, item):
"""Converts the given ``item`` to a Python ``list`` type.
Mainly useful for converting tuples and other iterables to lists.
Use `Create List` from the BuiltIn library for constructing new lists.
"""
return list(item)
def append_to_list(self, list_, *values):
"""Adds ``values`` to the end of ``list``.
Example:
| Append To List | ${L1} | xxx | | |
| Append To List | ${L2} | x | y | z |
=>
| ${L1} = ['a', 'xxx']
| ${L2} = ['a', 'b', 'x', 'y', 'z']
"""
for value in values:
list_.append(value)
def insert_into_list(self, list_, index, value):
"""Inserts ``value`` into ``list`` to the position specified with ``index``.
Index ``0`` adds the value into the first position, ``1`` to the second,
and so on. Inserting from right works with negative indices so that
``-1`` is the second last position, ``-2`` third last, and so on. Use
`Append To List` to add items to the end of the list.
If the absolute value of the index is greater than
the length of the list, the value is added at the end
(positive index) or the beginning (negative index). An index
can be given either as an integer or a string that can be
converted to an integer.
Example:
| Insert Into List | ${L1} | 0 | xxx |
| Insert Into List | ${L2} | ${-1} | xxx |
=>
| ${L1} = ['xxx', 'a']
| ${L2} = ['a', 'xxx', 'b']
"""
list_.insert(self._index_to_int(index), value)
def combine_lists(self, *lists):
"""Combines the given ``lists`` together and returns the result.
The given lists are not altered by this keyword.
Example:
| ${x} = | Combine Lists | ${L1} | ${L2} | |
| ${y} = | Combine Lists | ${L1} | ${L2} | ${L1} |
=>
| ${x} = ['a', 'a', 'b']
| ${y} = ['a', 'a', 'b', 'a']
| ${L1} and ${L2} are not changed.
"""
ret = []
for item in lists:
ret.extend(item)
return ret
def set_list_value(self, list_, index, value):
"""Sets the value of ``list`` specified by ``index`` to the given ``value``.
Index ``0`` means the first position, ``1`` the second and so on.
Similarly, ``-1`` is the last position, ``-2`` second last, and so on.
Using an index that does not exist on the list causes an error.
The index can be either an integer or a string that can be converted to
an integer.
Example:
| Set List Value | ${L3} | 1 | xxx |
| Set List Value | ${L3} | -1 | yyy |
=>
| ${L3} = ['a', 'xxx', 'yyy']
"""
try:
list_[self._index_to_int(index)] = value
except IndexError:
self._index_error(list_, index)
def remove_values_from_list(self, list_, *values):
"""Removes all occurrences of given ``values`` from ``list``.
It is not an error if a value does not exist in the list at all.
Example:
| Remove Values From List | ${L4} | a | c | e | f |
=>
| ${L4} = ['b', 'd']
"""
for value in values:
while value in list_:
list_.remove(value)
def remove_from_list(self, list_, index):
"""Removes and returns the value specified with an ``index`` from ``list``.
Index ``0`` means the first position, ``1`` the second and so on.
Similarly, ``-1`` is the last position, ``-2`` the second last, and so on.
Using an index that does not exist on the list causes an error.
The index can be either an integer or a string that can be converted
to an integer.
Example:
| ${x} = | Remove From List | ${L2} | 0 |
=>
| ${x} = 'a'
| ${L2} = ['b']
"""
try:
return list_.pop(self._index_to_int(index))
except IndexError:
self._index_error(list_, index)
def remove_duplicates(self, list_):
"""Returns a list without duplicates based on the given ``list``.
Creates and returns a new list that contains all items in the given
list so that one item can appear only once. Order of the items in
the new list is the same as in the original except for missing
duplicates. Number of the removed duplicates is logged.
New in Robot Framework 2.7.5.
"""
ret = []
for item in list_:
if item not in ret:
ret.append(item)
removed = len(list_) - len(ret)
logger.info('%d duplicate%s removed.' % (removed, plural_or_not(removed)))
return ret
def get_from_list(self, list_, index):
"""Returns the value specified with an ``index`` from ``list``.
The given list is never altered by this keyword.
Index ``0`` means the first position, ``1`` the second, and so on.
Similarly, ``-1`` is the last position, ``-2`` the second last, and so on.
Using an index that does not exist on the list causes an error.
The index can be either an integer or a string that can be converted
to an integer.
Examples (including Python equivalents in comments):
| ${x} = | Get From List | ${L5} | 0 | # L5[0] |
| ${y} = | Get From List | ${L5} | -2 | # L5[-2] |
=>
| ${x} = 'a'
| ${y} = 'd'
| ${L5} is not changed
"""
try:
return list_[self._index_to_int(index)]
except IndexError:
self._index_error(list_, index)
def get_slice_from_list(self, list_, start=0, end=None):
"""Returns a slice of the given list between ``start`` and ``end`` indexes.
The given list is never altered by this keyword.
If both ``start`` and ``end`` are given, a sublist containing values
from ``start`` to ``end`` is returned. This is the same as
``list[start:end]`` in Python. To get all items from the beginning,
use 0 as the start value, and to get all items until and including
the end, use ``None`` (default) as the end value.
Using ``start`` or ``end`` not found on the list is the same as using
the largest (or smallest) available index.
Examples (incl. Python equivalents in comments):
| ${x} = | Get Slice From List | ${L5} | 2 | 4 | # L5[2:4] |
| ${y} = | Get Slice From List | ${L5} | 1 | | # L5[1:None] |
| ${z} = | Get Slice From List | ${L5} | | -2 | # L5[0:-2] |
=>
| ${x} = ['c', 'd']
| ${y} = ['b', 'c', 'd', 'e']
| ${z} = ['a', 'b', 'c']
| ${L5} is not changed
"""
start = self._index_to_int(start, True)
if end is not None:
end = self._index_to_int(end)
return list_[start:end]
def count_values_in_list(self, list_, value, start=0, end=None):
"""Returns the number of occurrences of the given ``value`` in ``list``.
The search can be narrowed to the selected sublist by the ``start`` and
``end`` indexes having the same semantics as with `Get Slice From List`
keyword. The given list is never altered by this keyword.
Example:
| ${x} = | Count Values In List | ${L3} | b |
=>
| ${x} = 1
| ${L3} is not changed
"""
return self.get_slice_from_list(list_, start, end).count(value)
def get_index_from_list(self, list_, value, start=0, end=None):
"""Returns the index of the first occurrence of the ``value`` on the list.
The search can be narrowed to the selected sublist by the ``start`` and
``end`` indexes having the same semantics as with `Get Slice From List`
keyword. In case the value is not found, -1 is returned. The given list
is never altered by this keyword.
Example:
| ${x} = | Get Index From List | ${L5} | d |
=>
| ${x} = 3
| ${L5} is not changed
"""
if start == '':
start = 0
list_ = self.get_slice_from_list(list_, start, end)
try:
return int(start) + list_.index(value)
except ValueError:
return -1
def copy_list(self, list_):
"""Returns a copy of the given list.
The given list is never altered by this keyword.
"""
return list_[:]
def reverse_list(self, list_):
"""Reverses the given list in place.
Note that the given list is changed and nothing is returned. Use
`Copy List` first, if you need to keep also the original order.
| Reverse List | ${L3} |
=>
| ${L3} = ['c', 'b', 'a']
"""
list_.reverse()
def sort_list(self, list_):
"""Sorts the given list in place.
The strings are sorted alphabetically and the numbers numerically.
Note that the given list is changed and nothing is returned. Use
`Copy List` first, if you need to keep also the original order.
${L} = [2,1,'a','c','b']
| Sort List | ${L} |
=>
| ${L} = [1, 2, 'a', 'b', 'c']
"""
list_.sort()
def list_should_contain_value(self, list_, value, msg=None):
"""Fails if the ``value`` is not found from ``list``.
If the keyword fails, the default error message is ``<list> does
not contain value '<value>'``. A custom message can be given using
the ``msg`` argument.
"""
default = "%s does not contain value '%s'." % (seq2str2(list_), value)
_verify_condition(value in list_, default, msg)
def list_should_not_contain_value(self, list_, value, msg=None):
"""Fails if the ``value`` is not found from ``list``.
See `List Should Contain Value` for an explanation of ``msg``.
"""
default = "%s contains value '%s'." % (seq2str2(list_), value)
_verify_condition(value not in list_, default, msg)
def list_should_not_contain_duplicates(self, list_, msg=None):
"""Fails if any element in the ``list`` is found from it more than once.
The default error message lists all the elements that were found
from the ``list`` multiple times, but it can be overridden by giving
a custom ``msg``. All multiple times found items and their counts are
also logged.
This keyword works with all iterables that can be converted to a list.
The original iterable is never altered.
"""
if not isinstance(list_, list):
list_ = list(list_)
dupes = []
for item in list_:
if item not in dupes:
count = list_.count(item)
if count > 1:
logger.info("'%s' found %d times." % (item, count))
dupes.append(item)
if dupes:
raise AssertionError(msg or
'%s found multiple times.' % seq2str(dupes))
def lists_should_be_equal(self, list1, list2, msg=None, values=True,
names=None):
"""Fails if given lists are unequal.
The keyword first verifies that the lists have equal lengths, and then
it checks that all their values are equal. Possible differences between the
values are listed in the default error message like ``Index 4: ABC !=
Abc``.
The error message can be configured using ``msg`` and ``values``
arguments:
- If ``msg`` is not given, the default error message is used.
- If ``msg`` is given and ``values`` gets a value considered true
(see `Boolean arguments`), the error message starts with the given
``msg`` followed by a newline and the default message.
- If ``msg`` is given and ``values`` is not given a true value,
the error message is just the given ``msg``.
Optional ``names`` argument can be used for naming the indices shown in
the default error message. It can either be a list of names matching
the indices in the lists or a dictionary where keys are indices that
need to be named. It is not necessary to name all of the indices. When
using a dictionary, keys can be either integers or strings that can be
converted to integers.
Examples:
| ${names} = | Create List | First Name | Family Name | Email |
| Lists Should Be Equal | ${people1} | ${people2} | names=${names} |
| ${names} = | Create Dictionary | 0=First Name | 2=Email |
| Lists Should Be Equal | ${people1} | ${people2} | names=${names} |
If the items in index 2 would differ in the above examples, the error
message would contain a row like ``Index 2 (email): [email protected] !=
[email protected]``.
"""
len1 = len(list1)
len2 = len(list2)
default = 'Lengths are different: %d != %d' % (len1, len2)
_verify_condition(len1 == len2, default, msg, values)
names = self._get_list_index_name_mapping(names, len1)
diffs = list(self._yield_list_diffs(list1, list2, names))
default = 'Lists are different:\n' + '\n'.join(diffs)
_verify_condition(diffs == [], default, msg, values)
def _get_list_index_name_mapping(self, names, list_length):
if not names:
return {}
if is_dict_like(names):
return dict((int(index), names[index]) for index in names)
return dict(zip(range(list_length), names))
def _yield_list_diffs(self, list1, list2, names):
for index, (item1, item2) in enumerate(zip(list1, list2)):
name = ' (%s)' % names[index] if index in names else ''
try:
assert_equals(item1, item2, msg='Index %d%s' % (index, name))
except AssertionError as err:
yield unic(err)
def list_should_contain_sub_list(self, list1, list2, msg=None, values=True):
"""Fails if not all of the elements in ``list2`` are found in ``list1``.
The order of values and the number of values are not taken into
account.
See `Lists Should Be Equal` for more information about configuring
the error message with ``msg`` and ``values`` arguments.
"""
diffs = ', '.join(unic(item) for item in list2 if item not in list1)
default = 'Following values were not found from first list: ' + diffs
_verify_condition(not diffs, default, msg, values)
def log_list(self, list_, level='INFO'):
"""Logs the length and contents of the ``list`` using given ``level``.
Valid levels are TRACE, DEBUG, INFO (default), and WARN.
If you only want to log the length, use keyword `Get Length` from
the BuiltIn library.
"""
logger.write('\n'.join(self._log_list(list_)), level)
def _log_list(self, list_):
if not list_:
yield 'List is empty.'
elif len(list_) == 1:
yield 'List has one item:\n%s' % list_[0]
else:
yield 'List length is %d and it contains following items:' % len(list_)
for index, item in enumerate(list_):
yield '%s: %s' % (index, item)
def _index_to_int(self, index, empty_to_zero=False):
if empty_to_zero and not index:
return 0
try:
return int(index)
except ValueError:
raise ValueError("Cannot convert index '%s' to an integer." % index)
def _index_error(self, list_, index):
raise IndexError('Given index %s is out of the range 0-%d.'
% (index, len(list_)-1))
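# Illustrative sketch (not part of the library): the keyword methods above are
# plain Python methods, so they can also be exercised directly from Python.
# The sample data is an assumption for the example only.
def _demo_list_keywords():
    lists = _List()
    l1 = ['a']
    lists.append_to_list(l1, 'xxx')           # l1 == ['a', 'xxx']
    lists.insert_into_list(l1, 0, 'first')    # l1 == ['first', 'a', 'xxx']
    sliced = lists.get_slice_from_list(['a', 'b', 'c', 'd', 'e'], 2, 4)  # ['c', 'd']
    index = lists.get_index_from_list(['a', 'b', 'c'], 'c')              # 2
    return l1, sliced, index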
class _Dictionary(object):
def convert_to_dictionary(self, item):
"""Converts the given ``item`` to a Python ``dict`` type.
Mainly useful for converting other mappings to dictionaries. Use
`Create Dictionary` from the BuiltIn library for constructing new
dictionaries.
New in Robot Framework 2.9.
"""
return dict(item)
def set_to_dictionary(self, dictionary, *key_value_pairs, **items):
"""Adds the given ``key_value_pairs`` and ``items`` to the ``dictionary``.
Giving items as ``key_value_pairs`` means giving keys and values
as separate arguments:
| Set To Dictionary | ${D1} | key | value | second | ${2} |
=>
| ${D1} = {'a': 1, 'key': 'value', 'second': 2}
Starting from Robot Framework 2.8.1, items can also be given as kwargs
using ``key=value`` syntax:
| Set To Dictionary | ${D1} | key=value | second=${2} |
The latter syntax is typically more convenient to use, but it has
a limitation that keys must be strings.
If given keys already exist in the dictionary, their values are updated.
"""
if len(key_value_pairs) % 2 != 0:
raise ValueError("Adding data to a dictionary failed. There "
"should be even number of key-value-pairs.")
for i in range(0, len(key_value_pairs), 2):
dictionary[key_value_pairs[i]] = key_value_pairs[i+1]
dictionary.update(items)
return dictionary
def remove_from_dictionary(self, dictionary, *keys):
"""Removes the given ``keys`` from the ``dictionary``.
If the given ``key`` cannot be found from the ``dictionary``, it
is ignored.
Example:
| Remove From Dictionary | ${D3} | b | x | y |
=>
| ${D3} = {'a': 1, 'c': 3}
"""
for key in keys:
if key in dictionary:
value = dictionary.pop(key)
logger.info("Removed item with key '%s' and value '%s'." % (key, value))
else:
logger.info("Key '%s' not found." % key)
def keep_in_dictionary(self, dictionary, *keys):
"""Keeps the given ``keys`` in the ``dictionary`` and removes all other.
If the given ``key`` cannot be found from the ``dictionary``, it
is ignored.
Example:
| Keep In Dictionary | ${D5} | b | x | d |
=>
| ${D5} = {'b': 2, 'd': 4}
"""
remove_keys = [k for k in dictionary if k not in keys]
self.remove_from_dictionary(dictionary, *remove_keys)
def copy_dictionary(self, dictionary):
"""Returns a copy of the given dictionary.
The given dictionary is never altered by this keyword.
"""
return dictionary.copy()
def get_dictionary_keys(self, dictionary):
"""Returns keys of the given ``dictionary``.
Keys are returned in sorted order. The given ``dictionary`` is never
altered by this keyword.
Example:
| ${keys} = | Get Dictionary Keys | ${D3} |
=>
| ${keys} = ['a', 'b', 'c']
"""
# TODO: Possibility to disable sorting. Can be handy with OrderedDicts.
return sorted(dictionary)
def get_dictionary_values(self, dictionary):
"""Returns values of the given dictionary.
Values are returned sorted according to keys. The given dictionary is
never altered by this keyword.
Example:
| ${values} = | Get Dictionary Values | ${D3} |
=>
| ${values} = [1, 2, 3]
"""
return [dictionary[k] for k in self.get_dictionary_keys(dictionary)]
def get_dictionary_items(self, dictionary):
"""Returns items of the given ``dictionary``.
Items are returned sorted by keys. The given ``dictionary`` is not
altered by this keyword.
Example:
| ${items} = | Get Dictionary Items | ${D3} |
=>
| ${items} = ['a', 1, 'b', 2, 'c', 3]
"""
ret = []
for key in self.get_dictionary_keys(dictionary):
ret.extend((key, dictionary[key]))
return ret
def get_from_dictionary(self, dictionary, key):
"""Returns a value from the given ``dictionary`` based on the given ``key``.
If the given ``key`` cannot be found from the ``dictionary``, this
keyword fails.
The given dictionary is never altered by this keyword.
Example:
| ${value} = | Get From Dictionary | ${D3} | b |
=>
| ${value} = 2
"""
try:
return dictionary[key]
except KeyError:
raise RuntimeError("Dictionary does not contain key '%s'." % key)
def dictionary_should_contain_key(self, dictionary, key, msg=None):
"""Fails if ``key`` is not found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary does not contain key '%s'." % key
_verify_condition(key in dictionary, default, msg)
def dictionary_should_not_contain_key(self, dictionary, key, msg=None):
"""Fails if ``key`` is found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary contains key '%s'." % key
_verify_condition(key not in dictionary, default, msg)
def dictionary_should_contain_item(self, dictionary, key, value, msg=None):
"""An item of ``key``/``value`` must be found in a `dictionary`.
Value is converted to unicode for comparison.
See `Lists Should Be Equal` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
self.dictionary_should_contain_key(dictionary, key, msg)
actual, expected = unic(dictionary[key]), unic(value)
default = "Value of dictionary key '%s' does not match: %s != %s" % (key, actual, expected)
_verify_condition(actual == expected, default, msg)
def dictionary_should_contain_value(self, dictionary, value, msg=None):
"""Fails if ``value`` is not found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary does not contain value '%s'." % value
_verify_condition(value in dictionary.values(), default, msg)
def dictionary_should_not_contain_value(self, dictionary, value, msg=None):
"""Fails if ``value`` is found from ``dictionary``.
See `List Should Contain Value` for an explanation of ``msg``.
The given dictionary is never altered by this keyword.
"""
default = "Dictionary contains value '%s'." % value
_verify_condition(value not in dictionary.values(), default, msg)
def dictionaries_should_be_equal(self, dict1, dict2, msg=None, values=True):
"""Fails if the given dictionaries are not equal.
First the equality of dictionaries' keys is checked and after that all
the key value pairs. If there are differences between the values, those
are listed in the error message.
See `Lists Should Be Equal` for more information about configuring
the error message with ``msg`` and ``values`` arguments.
The given dictionaries are never altered by this keyword.
"""
keys = self._keys_should_be_equal(dict1, dict2, msg, values)
self._key_values_should_be_equal(keys, dict1, dict2, msg, values)
def dictionary_should_contain_sub_dictionary(self, dict1, dict2, msg=None,
values=True):
"""Fails unless all items in ``dict2`` are found from ``dict1``.
See `Lists Should Be Equal` for more information about configuring
the error message with ``msg`` and ``values`` arguments.
The given dictionaries are never altered by this keyword.
"""
keys = self.get_dictionary_keys(dict2)
diffs = [unic(k) for k in keys if k not in dict1]
default = "Following keys missing from first dictionary: %s" \
% ', '.join(diffs)
_verify_condition(not diffs, default, msg, values)
self._key_values_should_be_equal(keys, dict1, dict2, msg, values)
def log_dictionary(self, dictionary, level='INFO'):
"""Logs the size and contents of the ``dictionary`` using given ``level``.
Valid levels are TRACE, DEBUG, INFO (default), and WARN.
If you only want to log the size, use keyword `Get Length` from
the BuiltIn library.
"""
logger.write('\n'.join(self._log_dictionary(dictionary)), level)
def _log_dictionary(self, dictionary):
if not dictionary:
yield 'Dictionary is empty.'
elif len(dictionary) == 1:
yield 'Dictionary has one item:'
else:
yield 'Dictionary size is %d and it contains following items:' % len(dictionary)
for key in self.get_dictionary_keys(dictionary):
yield '%s: %s' % (key, dictionary[key])
def _keys_should_be_equal(self, dict1, dict2, msg, values):
keys1 = self.get_dictionary_keys(dict1)
keys2 = self.get_dictionary_keys(dict2)
miss1 = [unic(k) for k in keys2 if k not in dict1]
miss2 = [unic(k) for k in keys1 if k not in dict2]
error = []
if miss1:
error += ['Following keys missing from first dictionary: %s'
% ', '.join(miss1)]
if miss2:
error += ['Following keys missing from second dictionary: %s'
% ', '.join(miss2)]
_verify_condition(not error, '\n'.join(error), msg, values)
return keys1
def _key_values_should_be_equal(self, keys, dict1, dict2, msg, values):
diffs = list(self._yield_dict_diffs(keys, dict1, dict2))
default = 'Following keys have different values:\n' + '\n'.join(diffs)
_verify_condition(not diffs, default, msg, values)
def _yield_dict_diffs(self, keys, dict1, dict2):
for key in keys:
try:
assert_equals(dict1[key], dict2[key], msg='Key %s' % (key,))
except AssertionError as err:
yield unic(err)
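# Illustrative sketch (not part of the library): direct use of the dictionary
# keywords from Python. The sample data is an assumption for the example only.
def _demo_dictionary_keywords():
    dicts = _Dictionary()
    d3 = {'a': 1, 'b': 2, 'c': 3}
    keys = dicts.get_dictionary_keys(d3)        # ['a', 'b', 'c'] (sorted)
    values = dicts.get_dictionary_values(d3)    # [1, 2, 3] (sorted by key)
    dicts.set_to_dictionary(d3, 'key', 'value', second=2)
    # d3 == {'a': 1, 'b': 2, 'c': 3, 'key': 'value', 'second': 2}
    return keys, values, d3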
class Collections(_List, _Dictionary):
"""A test library providing keywords for handling lists and dictionaries.
``Collections`` is Robot Framework's standard library that provides a
set of keywords for handling Python lists and dictionaries. This
library has keywords, for example, for modifying and getting
values from lists and dictionaries (e.g. `Append To List`, `Get
From Dictionary`) and for verifying their contents (e.g. `Lists
Should Be Equal`, `Dictionary Should Contain Value`).
= Related keywords in BuiltIn =
Following keywords in the BuiltIn library can also be used with
lists and dictionaries:
| = Keyword Name = | = Applicable With = | = Comment = |
| `Create List` | lists |
| `Create Dictionary` | dicts | Was in Collections until RF 2.9. |
| `Get Length` | both |
| `Length Should Be` | both |
| `Should Be Empty` | both |
| `Should Not Be Empty` | both |
| `Should Contain` | both |
| `Should Not Contain` | both |
| `Should Contain X Times` | lists |
| `Should Not Contain X Times` | lists |
| `Get Count` | lists |
= Using with list-like and dictionary-like objects =
List keywords that do not alter the given list can also be used
with tuples, and to some extent also with other iterables.
`Convert To List` can be used to convert tuples and other iterables
to Python ``list`` objects.
Similarly dictionary keywords can, for most parts, be used with other
mappings. `Convert To Dictionary` can be used if real Python ``dict``
objects are needed.
= Boolean arguments =
Some keywords accept arguments that are handled as Boolean values true or
false. If such an argument is given as a string, it is considered false if
it is either empty or case-insensitively equal to ``false`` or ``no``.
Keywords that verify something and allow dropping the actual and expected values
from the possible error message also consider the string ``no values`` to be false.
Other strings are considered true regardless of their value, and other
argument types are tested using the same
[http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
as in Python].
True examples:
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=True | # Strings are generally true. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=yes | # Same as the above. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${TRUE} | # Python ``True`` is true. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${42} | # Numbers other than 0 are true. |
False examples:
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=False | # String ``false`` is false. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=no | # Also string ``no`` is false. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${EMPTY} | # Empty string is false. |
| `Should Contain Match` | ${list} | ${pattern} | case_insensitive=${FALSE} | # Python ``False`` is false. |
| `Lists Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument |
Note that prior to Robot Framework 2.9 some keywords considered all
non-empty strings, including ``False``, to be true.
= Data in examples =
List related keywords use variables in format ``${Lx}`` in their examples.
They mean lists with as many alphabetic characters as specified by ``x``.
For example, ``${L1}`` means ``['a']`` and ``${L3}`` means
``['a', 'b', 'c']``.
Dictionary keywords use similar ``${Dx}`` variables. For example, ``${D1}``
means ``{'a': 1}`` and ``${D3}`` means ``{'a': 1, 'b': 2, 'c': 3}``.
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
def should_contain_match(self, list, pattern, msg=None,
case_insensitive=False,
whitespace_insensitive=False):
"""Fails if ``pattern`` is not found in ``list``.
See `List Should Contain Value` for an explanation of ``msg``.
By default, pattern matching is similar to matching files in a shell
and is case-sensitive and whitespace-sensitive. In the pattern syntax,
``*`` matches anything and ``?`` matches any single character. You
can also prepend ``glob=`` to your pattern to explicitly use this pattern
matching behavior.
If you prepend ``regexp=`` to your pattern, your pattern will be used
according to the Python
[http://docs.python.org/2/library/re.html|re module] regular expression
syntax. Important note: Backslashes are an escape character, and must
be escaped with another backslash (e.g. ``regexp=\\\\d{6}`` to search for
``\\d{6}``). See `BuiltIn.Should Match Regexp` for more details.
If ``case_insensitive`` is given a true value (see `Boolean arguments`),
the pattern matching will ignore case.
If ``whitespace_insensitive`` is given a true value (see `Boolean
arguments`), the pattern matching will ignore whitespace.
Non-string values in lists are ignored when matching patterns.
The given list is never altered by this keyword.
See also ``Should Not Contain Match``.
Examples:
| Should Contain Match | ${list} | a* | | | # Match strings beginning with 'a'. |
| Should Contain Match | ${list} | regexp=a.* | | | # Same as the above but with regexp. |
| Should Contain Match | ${list} | regexp=\\\\d{6} | | | # Match strings containing six digits. |
| Should Contain Match | ${list} | a* | case_insensitive=True | | # Match strings beginning with 'a' or 'A'. |
| Should Contain Match | ${list} | ab* | whitespace_insensitive=yes | | # Match strings beginning with 'ab' with possible whitespace ignored. |
| Should Contain Match | ${list} | ab* | whitespace_insensitive=true | case_insensitive=true | # Same as the above but also ignore case. |
New in Robot Framework 2.8.6.
"""
matches = _get_matches_in_iterable(list, pattern, case_insensitive,
whitespace_insensitive)
default = "%s does not contain match for pattern '%s'." \
% (seq2str2(list), pattern)
_verify_condition(matches, default, msg)
def should_not_contain_match(self, list, pattern, msg=None,
case_insensitive=False,
whitespace_insensitive=False):
"""Fails if ``pattern`` is found in ``list``.
Exact opposite of `Should Contain Match` keyword. See that keyword
for information about arguments and usage in general.
New in Robot Framework 2.8.6.
"""
matches = _get_matches_in_iterable(list, pattern, case_insensitive,
whitespace_insensitive)
default = "%s contains match for pattern '%s'." \
% (seq2str2(list), pattern)
_verify_condition(not matches, default, msg)
def get_matches(self, list, pattern, case_insensitive=False,
whitespace_insensitive=False):
"""Returns a list of matches to ``pattern`` in ``list``.
For more information on ``pattern``, ``case_insensitive``, and
``whitespace_insensitive``, see `Should Contain Match`.
Examples:
| ${matches}= | Get Matches | ${list} | a* | # ${matches} will contain any string beginning with 'a' |
| ${matches}= | Get Matches | ${list} | regexp=a.* | # ${matches} will contain any string beginning with 'a' (regexp version) |
| ${matches}= | Get Matches | ${list} | a* | case_insensitive=${True} | # ${matches} will contain any string beginning with 'a' or 'A' |
New in Robot Framework 2.8.6.
"""
return _get_matches_in_iterable(list, pattern, case_insensitive,
whitespace_insensitive)
def get_match_count(self, list, pattern, case_insensitive=False,
whitespace_insensitive=False):
"""Returns the count of matches to ``pattern`` in ``list``.
For more information on ``pattern``, ``case_insensitive``, and
``whitespace_insensitive``, see `Should Contain Match`.
Examples:
| ${count}= | Get Match Count | ${list} | a* | # ${count} will be the count of strings beginning with 'a' |
| ${count}= | Get Match Count | ${list} | regexp=a.* | # ${count} will be the count of strings beginning with 'a' (regexp version) |
| ${count}= | Get Match Count | ${list} | a* | case_insensitive=${True} | # ${count} will be the count of strings beginning with 'a' or 'A' |
New in Robot Framework 2.8.6.
"""
return len(self.get_matches(list, pattern, case_insensitive,
whitespace_insensitive))
def _verify_condition(condition, default_msg, msg, values=False):
if condition:
return
if not msg:
msg = default_msg
elif is_truthy(values) and str(values).upper() != 'NO VALUES':
msg += '\n' + default_msg
raise AssertionError(msg)
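# A minimal, hedged illustration (hypothetical helper, never called by the
# library) of how the shared ``msg``/``values`` error-message logic in
# ``_verify_condition`` above behaves for the three documented cases.
def _demo_verify_condition():
    # No custom message: the default message is raised as-is.
    try:
        _verify_condition(False, 'Lists differ.', msg=None)
    except AssertionError as error:
        assert str(error) == 'Lists differ.'
    # Custom message with a true ``values``: both messages are shown.
    try:
        _verify_condition(False, 'Lists differ.', msg='Custom', values=True)
    except AssertionError as error:
        assert str(error) == 'Custom\nLists differ.'
    # ``values=no values`` (or any false value): only the custom message is kept.
    try:
        _verify_condition(False, 'Lists differ.', msg='Custom', values='no values')
    except AssertionError as error:
        assert str(error) == 'Custom'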
def _get_matches_in_iterable(iterable, pattern, case_insensitive=False,
whitespace_insensitive=False):
if not is_string(pattern):
raise TypeError("Pattern must be string, got '%s'." % type_name(pattern))
regexp = False
if pattern.startswith('regexp='):
pattern = pattern[7:]
regexp = True
elif pattern.startswith('glob='):
pattern = pattern[5:]
matcher = Matcher(pattern,
caseless=is_truthy(case_insensitive),
spaceless=is_truthy(whitespace_insensitive),
regexp=regexp)
return [string for string in iterable
if is_string(string) and matcher.match(string)]
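# A hedged, self-contained sketch (not part of this library) of the ``glob=``
# and ``regexp=`` prefix convention documented in `Should Contain Match` above.
# It approximates the behaviour with the standard library instead of
# ``robot.utils.Matcher``, so edge cases (anchoring, whitespace insensitivity)
# may differ.  The helper name ``_demo_prefix_matching`` is hypothetical.
def _demo_prefix_matching(strings, pattern, case_insensitive=False):
    import fnmatch
    import re
    strings = [s for s in strings if isinstance(s, str)]  # non-strings are ignored
    if pattern.startswith('regexp='):
        flags = re.IGNORECASE if case_insensitive else 0
        regexp = re.compile(pattern[7:], flags)
        return [s for s in strings if regexp.search(s)]
    if pattern.startswith('glob='):
        pattern = pattern[5:]
    if case_insensitive:
        return [s for s in strings if fnmatch.fnmatch(s.lower(), pattern.lower())]
    return [s for s in strings if fnmatch.fnmatchcase(s, pattern)]
# Example: _demo_prefix_matching(['apple', 'Apple', 42], 'a*') -> ['apple']
#          _demo_prefix_matching(['apple', 'Apple', 42], 'a*', True) -> ['apple', 'Apple']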
|
|
"""A Python module for interacting with Slack's Web API."""
import copy
import hashlib
import hmac
import io
import json
import logging
import mimetypes
import urllib
import uuid
import warnings
from base64 import b64encode
from http.client import HTTPResponse
from ssl import SSLContext
from typing import BinaryIO, Dict, List
from typing import Optional, Union
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen, OpenerDirector, ProxyHandler, HTTPSHandler
import slack_sdk.errors as err
from slack_sdk.errors import SlackRequestError
from .deprecation import show_2020_01_deprecation
from .internal_utils import (
convert_bool_to_0_or_1,
get_user_agent,
_get_url,
_build_req_args,
_build_unexpected_body_error_message,
)
from .slack_response import SlackResponse
from slack_sdk.http_retry import default_retry_handlers
from slack_sdk.http_retry.handler import RetryHandler
from slack_sdk.http_retry.request import HttpRequest as RetryHttpRequest
from slack_sdk.http_retry.response import HttpResponse as RetryHttpResponse
from slack_sdk.http_retry.state import RetryState
from slack_sdk.proxy_env_variable_loader import load_http_proxy_from_env
class BaseClient:
BASE_URL = "https://www.slack.com/api/"
def __init__(
self,
token: Optional[str] = None,
base_url: str = BASE_URL,
timeout: int = 30,
ssl: Optional[SSLContext] = None,
proxy: Optional[str] = None,
headers: Optional[dict] = None,
user_agent_prefix: Optional[str] = None,
user_agent_suffix: Optional[str] = None,
# for Org-Wide App installation
team_id: Optional[str] = None,
logger: Optional[logging.Logger] = None,
retry_handlers: Optional[List[RetryHandler]] = None,
):
self.token = None if token is None else token.strip()
self.base_url = base_url
self.timeout = timeout
self.ssl = ssl
self.proxy = proxy
self.headers = headers or {}
self.headers["User-Agent"] = get_user_agent(
user_agent_prefix, user_agent_suffix
)
self.default_params = {}
if team_id is not None:
self.default_params["team_id"] = team_id
self._logger = logger if logger is not None else logging.getLogger(__name__)
self.retry_handlers = (
retry_handlers if retry_handlers is not None else default_retry_handlers()
)
if self.proxy is None or len(self.proxy.strip()) == 0:
env_variable = load_http_proxy_from_env(self._logger)
if env_variable is not None:
self.proxy = env_variable
def api_call( # skipcq: PYL-R1710
self,
api_method: str,
*,
http_verb: str = "POST",
files: dict = None,
data: Union[dict] = None,
params: dict = None,
json: dict = None, # skipcq: PYL-W0621
headers: dict = None,
auth: dict = None,
) -> SlackResponse:
"""Create a request and execute the API call to Slack.
Args:
api_method (str): The target Slack API method.
e.g. 'chat.postMessage'
http_verb (str): HTTP Verb. e.g. 'POST'
files (dict): Files to multipart upload.
e.g. {image OR file: file_object OR file_path}
data: The body to attach to the request. If a dictionary is
provided, form-encoding will take place.
e.g. {'key1': 'value1', 'key2': 'value2'}
params (dict): The URL parameters to append to the URL.
e.g. {'key1': 'value1', 'key2': 'value2'}
json (dict): JSON for the body to attach to the request
(if files or data is not specified).
e.g. {'key1': 'value1', 'key2': 'value2'}
headers (dict): Additional request headers
auth (dict): A dictionary that consists of client_id and client_secret
Returns:
(SlackResponse)
The server's response to an HTTP request. Data
from the response can be accessed like a dict.
If the response included 'next_cursor' it can
be iterated on to execute subsequent requests.
Raises:
SlackApiError: The following Slack API call failed:
'chat.postMessage'.
SlackRequestError: Json data can only be submitted as
POST requests.
"""
api_url = _get_url(self.base_url, api_method)
headers = headers or {}
headers.update(self.headers)
req_args = _build_req_args(
token=self.token,
http_verb=http_verb,
files=files,
data=data,
default_params=self.default_params,
params=params,
json=json, # skipcq: PYL-W0621
headers=headers,
auth=auth,
ssl=self.ssl,
proxy=self.proxy,
)
show_2020_01_deprecation(api_method)
return self._sync_send(api_url=api_url, req_args=req_args)
# =================================================================
# urllib based WebClient
# =================================================================
def _sync_send(self, api_url, req_args) -> SlackResponse:
params = req_args["params"] if "params" in req_args else None
data = req_args["data"] if "data" in req_args else None
files = req_args["files"] if "files" in req_args else None
_json = req_args["json"] if "json" in req_args else None
headers = req_args["headers"] if "headers" in req_args else None
token = params.get("token") if params and "token" in params else None
auth = (
req_args["auth"] if "auth" in req_args else None
) # Basic Auth for oauth.v2.access / oauth.access
if auth is not None:
if isinstance(auth, str):
headers["Authorization"] = auth
elif isinstance(auth, dict):
client_id, client_secret = auth["client_id"], auth["client_secret"]
value = b64encode(
f"{client_id}:{client_secret}".encode("utf-8")
).decode("ascii")
headers["Authorization"] = f"Basic {value}"
else:
self._logger.warning(
f"As the auth: {auth}: {type(auth)} is unsupported, skipped"
)
body_params = {}
if params:
body_params.update(params)
if data:
body_params.update(data)
return self._urllib_api_call(
token=token,
url=api_url,
query_params={},
body_params=body_params,
files=files,
json_body=_json,
additional_headers=headers,
)
def _request_for_pagination(self, api_url, req_args) -> Dict[str, any]:
"""This method is supposed to be used only for SlackResponse pagination
You can paginate using Python's for iterator as below:
for response in client.conversations_list(limit=100):
# do something with each response here
"""
response = self._perform_urllib_http_request(url=api_url, args=req_args)
return {
"status_code": int(response["status"]),
"headers": dict(response["headers"]),
"data": json.loads(response["body"]),
}
def _urllib_api_call(
self,
*,
token: str = None,
url: str,
query_params: Dict[str, str] = {},
json_body: Dict = {},
body_params: Dict[str, str] = {},
files: Dict[str, io.BytesIO] = {},
additional_headers: Dict[str, str] = {},
) -> SlackResponse:
"""Performs a Slack API request and returns the result.
Args:
token: Slack API Token (either bot token or user token)
url: Complete URL (e.g., https://www.slack.com/api/chat.postMessage)
query_params: Query string
json_body: JSON data structure (it's still a dict at this point),
if you give this argument, body_params and files will be skipped
body_params: Form body params
files: Files to upload
additional_headers: Request headers to append
Returns:
API response
"""
files_to_close: List[BinaryIO] = []
try:
# True/False -> "1"/"0"
query_params = convert_bool_to_0_or_1(query_params)
body_params = convert_bool_to_0_or_1(body_params)
if self._logger.level <= logging.DEBUG:
def convert_params(values: dict) -> dict:
if not values or not isinstance(values, dict):
return {}
return {
k: ("(bytes)" if isinstance(v, bytes) else v)
for k, v in values.items()
}
headers = {
k: "(redacted)" if k.lower() == "authorization" else v
for k, v in additional_headers.items()
}
self._logger.debug(
f"Sending a request - url: {url}, "
f"query_params: {convert_params(query_params)}, "
f"body_params: {convert_params(body_params)}, "
f"files: {convert_params(files)}, "
f"json_body: {json_body}, "
f"headers: {headers}"
)
request_data = {}
if files is not None and isinstance(files, dict) and len(files) > 0:
if body_params:
for k, v in body_params.items():
request_data.update({k: v})
for k, v in files.items():
if isinstance(v, str):
f: BinaryIO = open(v.encode("utf-8", "ignore"), "rb")
files_to_close.append(f)
request_data.update({k: f})
elif isinstance(v, (bytearray, bytes)):
request_data.update({k: io.BytesIO(v)})
else:
request_data.update({k: v})
request_headers = self._build_urllib_request_headers(
token=token or self.token,
has_json=json_body is not None,
has_files=files is not None,
additional_headers=additional_headers,
)
request_args = {
"headers": request_headers,
"data": request_data,
"params": body_params,
"files": files,
"json": json_body,
}
if query_params:
q = urlencode(query_params)
url = f"{url}&{q}" if "?" in url else f"{url}?{q}"
response = self._perform_urllib_http_request(url=url, args=request_args)
response_body = response.get("body", None) # skipcq: PTC-W0039
response_body_data: Optional[Union[dict, bytes]] = response_body
if response_body is not None and not isinstance(response_body, bytes):
try:
response_body_data = json.loads(response["body"])
except json.decoder.JSONDecodeError:
message = _build_unexpected_body_error_message(
response.get("body", "")
)
raise err.SlackApiError(message, response)
if query_params:
all_params = copy.copy(body_params)
all_params.update(query_params)
else:
all_params = body_params
request_args["params"] = all_params # for backward-compatibility
return SlackResponse(
client=self,
http_verb="POST", # you can use POST method for all the Web APIs
api_url=url,
req_args=request_args,
data=response_body_data,
headers=dict(response["headers"]),
status_code=response["status"],
).validate()
finally:
for f in files_to_close:
if not f.closed:
f.close()
def _perform_urllib_http_request(
self, *, url: str, args: Dict[str, Dict[str, any]]
) -> Dict[str, any]:
"""Performs an HTTP request and parses the response.
Args:
url: Complete URL (e.g., https://www.slack.com/api/chat.postMessage)
args: args has "headers", "data", "params", and "json"
"headers": Dict[str, str]
"data": Dict[str, any]
"params": Dict[str, str],
"json": Dict[str, any],
Returns:
dict {status: int, headers: Headers, body: str}
"""
headers = args["headers"]
if args["json"]:
body = json.dumps(args["json"])
headers["Content-Type"] = "application/json;charset=utf-8"
elif args["data"]:
boundary = f"--------------{uuid.uuid4()}"
sep_boundary = b"\r\n--" + boundary.encode("ascii")
end_boundary = sep_boundary + b"--\r\n"
body = io.BytesIO()
data = args["data"]
for key, value in data.items():
readable = getattr(value, "readable", None)
if readable and value.readable():
filename = "Uploaded file"
name_attr = getattr(value, "name", None)
if name_attr:
filename = (
name_attr.decode("utf-8")
if isinstance(name_attr, bytes)
else name_attr
)
if "filename" in data:
filename = data["filename"]
mimetype = (
mimetypes.guess_type(filename)[0] or "application/octet-stream"
)
title = (
f'\r\nContent-Disposition: form-data; name="{key}"; filename="{filename}"\r\n'
+ f"Content-Type: {mimetype}\r\n"
)
value = value.read()
else:
title = f'\r\nContent-Disposition: form-data; name="{key}"\r\n'
value = str(value).encode("utf-8")
body.write(sep_boundary)
body.write(title.encode("utf-8"))
body.write(b"\r\n")
body.write(value)
body.write(end_boundary)
body = body.getvalue()
headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
headers["Content-Length"] = len(body)
elif args["params"]:
body = urlencode(args["params"])
headers["Content-Type"] = "application/x-www-form-urlencoded"
else:
body = None
if isinstance(body, str):
body = body.encode("utf-8")
# NOTE: Intentionally ignore the `http_verb` here
# Slack APIs accept requests for any API method sent as POST
req = Request(method="POST", url=url, data=body, headers=headers)
resp = None
last_error = None
retry_state = RetryState()
counter_for_safety = 0
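# Retry loop: each iteration performs a single HTTP attempt.  On an HTTPError
# or a lower-level failure, the configured retry_handlers are consulted; if a
# handler marks retry_state.next_attempt_requested, the request is retried,
# otherwise the error response is returned (for HTTP errors) or the exception
# is re-raised.  counter_for_safety caps the loop at 100 attempts.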
while counter_for_safety < 100:
counter_for_safety += 1
# If this is a retry, the next try started here. We can reset the flag.
retry_state.next_attempt_requested = False
try:
resp = self._perform_urllib_http_request_internal(url, req)
# The resp is a 200 OK response
return resp
except HTTPError as e:
resp = {"status": e.code, "headers": e.headers}
if e.code == 429:
# for compatibility with aiohttp
resp["headers"]["Retry-After"] = resp["headers"]["retry-after"]
# read the response body here
charset = e.headers.get_content_charset() or "utf-8"
response_body: str = e.read().decode(charset)
resp["body"] = response_body
# Try to find a retry handler for this error
retry_request = RetryHttpRequest.from_urllib_http_request(req)
retry_response = RetryHttpResponse(
status_code=e.code,
headers={k: [v] for k, v in e.headers.items()},
data=response_body.encode("utf-8")
if response_body is not None
else None,
)
for handler in self.retry_handlers:
if handler.can_retry(
state=retry_state,
request=retry_request,
response=retry_response,
error=e,
):
if self._logger.level <= logging.DEBUG:
self._logger.info(
f"A retry handler found: {type(handler).__name__} for {req.method} {req.full_url} - {e}"
)
handler.prepare_for_next_attempt(
state=retry_state,
request=retry_request,
response=retry_response,
error=e,
)
break
if retry_state.next_attempt_requested is False:
return resp
except Exception as err:
last_error = err
self._logger.error(
f"Failed to send a request to Slack API server: {err}"
)
# Try to find a retry handler for this error
retry_request = RetryHttpRequest.from_urllib_http_request(req)
for handler in self.retry_handlers:
if handler.can_retry(
state=retry_state,
request=retry_request,
response=None,
error=err,
):
if self._logger.level <= logging.DEBUG:
self._logger.info(
f"A retry handler found: {type(handler).__name__} for {req.method} {req.full_url} - {err}"
)
handler.prepare_for_next_attempt(
state=retry_state,
request=retry_request,
response=None,
error=err,
)
self._logger.info(
f"Going to retry the same request: {req.method} {req.full_url}"
)
break
if retry_state.next_attempt_requested is False:
raise err
if resp is not None:
return resp
raise last_error
def _perform_urllib_http_request_internal(
self,
url: str,
req: Request,
) -> Dict[str, any]:
# urllib not only opens http:// or https:// URLs, but also ftp:// and file://.
# With this it might be possible to open local files on the executing machine
# which might be a security risk if the URL to open can be manipulated by an external user.
# (BAN-B310)
if url.lower().startswith("http"):
opener: Optional[OpenerDirector] = None
if self.proxy is not None:
if isinstance(self.proxy, str):
opener = urllib.request.build_opener(
ProxyHandler({"http": self.proxy, "https": self.proxy}),
HTTPSHandler(context=self.ssl),
)
else:
raise SlackRequestError(
f"Invalid proxy detected: {self.proxy} must be a str value"
)
# NOTE: BAN-B310 is already checked above
resp: Optional[HTTPResponse] = None
if opener:
resp = opener.open(req, timeout=self.timeout) # skipcq: BAN-B310
else:
resp = urlopen( # skipcq: BAN-B310
req, context=self.ssl, timeout=self.timeout
)
if resp.headers.get_content_type() == "application/gzip":
# admin.analytics.getFile
body: bytes = resp.read()
if self._logger.level <= logging.DEBUG:
self._logger.debug(
"Received the following response - "
f"status: {resp.code}, "
f"headers: {dict(resp.headers)}, "
f"body: (binary)"
)
return {"status": resp.code, "headers": resp.headers, "body": body}
charset = resp.headers.get_content_charset() or "utf-8"
body: str = resp.read().decode(charset) # read the response body here
if self._logger.level <= logging.DEBUG:
self._logger.debug(
"Received the following response - "
f"status: {resp.code}, "
f"headers: {dict(resp.headers)}, "
f"body: {body}"
)
return {"status": resp.code, "headers": resp.headers, "body": body}
raise SlackRequestError(f"Invalid URL detected: {url}")
def _build_urllib_request_headers(
self, token: str, has_json: bool, has_files: bool, additional_headers: dict
) -> Dict[str, str]:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
headers.update(self.headers)
if token:
headers.update({"Authorization": "Bearer {}".format(token)})
if additional_headers:
headers.update(additional_headers)
if has_json:
headers.update({"Content-Type": "application/json;charset=utf-8"})
if has_files:
# will be set afterwards
headers.pop("Content-Type", None)
return headers
# =================================================================
@staticmethod
def validate_slack_signature(
*, signing_secret: str, data: str, timestamp: str, signature: str
) -> bool:
"""
Slack creates a unique string for your app and shares it with you. Verify
requests from Slack with confidence by verifying signatures using your
signing secret.
On each HTTP request that Slack sends, we add an X-Slack-Signature HTTP
header. The signature is created by combining the signing secret with the
body of the request we're sending using a standard HMAC-SHA256 keyed hash.
https://api.slack.com/docs/verifying-requests-from-slack#how_to_make_a_request_signature_in_4_easy_steps__an_overview
Args:
signing_secret: Your application's signing secret, available in the
Slack API dashboard
data: The raw body of the incoming request - no headers, just the body.
timestamp: from the 'X-Slack-Request-Timestamp' header
signature: from the 'X-Slack-Signature' header - the calculated signature
should match this.
Returns:
True if the signatures match
"""
warnings.warn(
"As this method is deprecated since slackclient 2.6.0, "
"use `from slack.signature import SignatureVerifier` instead",
DeprecationWarning,
)
format_req = str.encode(f"v0:{timestamp}:{data}")
encoded_secret = str.encode(signing_secret)
request_hash = hmac.new(encoded_secret, format_req, hashlib.sha256).hexdigest()
calculated_signature = f"v0={request_hash}"
return hmac.compare_digest(calculated_signature, signature)
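# A hedged, self-contained sketch (not part of slack_sdk) showing how the
# ``v0`` signature checked by ``validate_slack_signature`` above can be
# computed: HMAC-SHA256 over "v0:{timestamp}:{body}" with the signing secret,
# prefixed with "v0=".  The helper name and the example values are hypothetical.
def _demo_compute_slack_signature(signing_secret: str, body: str, timestamp: str) -> str:
    import hashlib
    import hmac
    basestring = f"v0:{timestamp}:{body}".encode("utf-8")
    digest = hmac.new(signing_secret.encode("utf-8"), basestring, hashlib.sha256).hexdigest()
    return f"v0={digest}"
# Example (hypothetical values):
#   signature = _demo_compute_slack_signature("my-signing-secret", "payload", "1531420618")
#   BaseClient.validate_slack_signature(
#       signing_secret="my-signing-secret", data="payload",
#       timestamp="1531420618", signature=signature)  # -> True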
|
|
"""Module to handle image quality calculations."""
#
# iqcalc.py -- image quality calculations on FITS data
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import logging
import threading
import numpy as np
try:
import scipy.optimize as optimize
import scipy.ndimage as ndimage
from scipy.ndimage import maximum_filter
from scipy.interpolate import interp1d
have_scipy = True
except ImportError:
have_scipy = False
from ginga.misc import Bunch
__all__ = ['get_mean', 'get_median', 'IQCalcError', 'IQCalc']
def get_mean(data_np):
"""Calculate mean for valid values.
Parameters
----------
data_np : ndarray
Input array. Can contain masked values.
Returns
-------
result : float
Mean of array values that are finite.
If array contains no finite values, returns NaN.
"""
i = np.isfinite(data_np)
if not np.any(i):
return np.nan
# NOTE: we use "ma" version of mean because this can be used with
# masked arrays created by cutting out non-rectangular shapes
return np.ma.mean(data_np[i])
def get_median(data_np):
"""Like :func:`get_mean` but for median."""
i = np.isfinite(data_np)
if not np.any(i):
return np.nan
# NOTE: we use "ma" version of median because this can be used with
# masked arrays created by cutting out non-rectangular shapes
return np.ma.median(data_np[i])
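# A small, hedged usage sketch (hypothetical helper, not part of this module's
# API): get_mean and get_median ignore NaN/Inf values and return NaN when no
# finite values remain, as described in the docstrings above.
def _demo_robust_stats():
    arr = np.array([1.0, 2.0, np.nan, 4.0, np.inf])
    assert np.isclose(get_mean(arr), np.mean([1.0, 2.0, 4.0]))
    assert np.isclose(get_median(arr), 2.0)
    assert np.isnan(get_mean(np.array([np.nan, np.inf])))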
class IQCalcError(Exception):
"""Base exception for raising errors in this module."""
pass
class IQCalc(object):
"""Class to handle model fitting and FWHM calculations.
Parameters
----------
logger : obj or `None`
Python logger. If not given, one will be created.
Attributes
----------
lock : :py:class:`threading.RLock`
For mutex around `scipy.optimize`, which seems to be non-threadsafe.
skylevel_magnification, skylevel_offset : float
For adjustments to sky background level.
"""
def __init__(self, logger=None):
if not logger:
logger = logging.getLogger('IQCalc')
self.logger = logger
# for mutex around scipy.optimize, which seems to be non-threadsafe
self.lock = threading.RLock()
# for adjustments to background level
self.skylevel_magnification = 1.05
self.skylevel_offset = 40.0
# FWHM CALCULATION
def gaussian(self, x, p):
"""Evaluate Gaussian function in 1D. See :meth:`calc_fwhm`.
Parameters
----------
x : array-like
X values.
p : tuple of float
Parameters for Gaussian, i.e., ``(mean, stddev, amplitude)``.
Returns
-------
y : array-like
Y values.
"""
y = (1.0 / (p[1] * np.sqrt(2 * np.pi)) *
np.exp(-(x - p[0]) ** 2 / (2 * p[1] ** 2))) * p[2]
return y
def calc_fwhm_gaussian(self, arr1d, medv=None, gauss_fn=None):
"""FWHM calculation on a 1D array by using least square fitting of
a Gaussian function on the data.
Parameters
----------
arr1d : array-like
1D array cut in either X or Y direction on the object.
medv : float or `None`
Median of the data. If not given, it is calculated from ``arr1d``.
gauss_fn : func or `None`
Gaussian function for fitting. If not given, :meth:`gaussian`
is used.
Returns
-------
res : `~ginga.misc.Bunch.Bunch`
Fitting results.
Raises
------
IQCalcError
Fitting failed.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
if gauss_fn is None:
gauss_fn = self.gaussian
N = len(arr1d)
X = np.array(list(range(N)))
Y = arr1d
# Fitting works more reliably if we do the following
# a. subtract sky background
if medv is None:
medv = get_median(Y)
Y = Y - medv
maxv = Y.max()
# b. clamp to 0..max (of the sky subtracted field)
Y = Y.clip(0, maxv)
# Fit a gaussian
p0 = [0, N - 1, maxv]  # Initial guess
# Distance to the target function
errfunc = lambda p, x, y: gauss_fn(x, p) - y # noqa
# Least square fit to the gaussian
with self.lock:
# NOTE: without this mutex, optimize.leastsq causes a fatal error
# sometimes--it appears not to be thread safe.
# The error is:
# "SystemError: null argument to internal routine"
# "Fatal Python error: GC object already tracked"
p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
if not success:
raise IQCalcError("FWHM Gaussian fitting failed")
mu, sdev, maxv = p1
self.logger.debug("mu=%f sdev=%f maxv=%f" % (mu, sdev, maxv))
# Now that we have the sdev from fitting, we can calculate FWHM
fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sdev
# some routines choke on numpy values and need "pure" Python floats
# e.g. when marshalling through a remote procedure interface
fwhm = float(fwhm)
mu = float(mu)
sdev = float(sdev)
maxv = float(maxv)
res = Bunch.Bunch(fwhm=fwhm, mu=mu, sdev=sdev, maxv=maxv,
fit_fn=gauss_fn, fit_args=[mu, sdev, maxv])
return res
def moffat(self, x, p):
"""Evaluate Moffat function in 1D. See :meth:`calc_fwhm`.
Parameters
----------
x : array-like
X values.
p : tuple of float
Parameters for Moffat, i.e., ``(x_0, gamma, alpha, amplitude)``,
where ``x_0`` a.k.a. mean and ``gamma`` core width.
Returns
-------
y : array-like
Y values.
"""
y = (1.0 + (x - p[0]) ** 2 / p[1] ** 2) ** (-1.0 * p[2]) * p[3]
return y
def calc_fwhm_moffat(self, arr1d, medv=None, moffat_fn=None):
"""FWHM calculation on a 1D array by using least square fitting of
a Moffat function on the data.
Parameters
----------
arr1d : array-like
1D array cut in either X or Y direction on the object.
medv : float or `None`
Median of the data. If not given, it is calculated from ``arr1d``.
moffat_fn : func or `None`
Moffat function for fitting. If not given, :meth:`moffat` is used.
Returns
-------
res : `~ginga.misc.Bunch.Bunch`
Fitting results.
Raises
------
IQCalcError
Fitting failed.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
if moffat_fn is None:
moffat_fn = self.moffat
N = len(arr1d)
X = np.array(list(range(N)))
Y = arr1d
# Fitting works more reliably if we do the following
# a. subtract sky background
if medv is None:
medv = get_median(Y)
Y = Y - medv
maxv = Y.max()
# b. clamp to 0..max (of the sky subtracted field)
Y = Y.clip(0, maxv)
# Fit a moffat
p0 = [0, N - 1, 2, maxv]  # Initial guess
# Distance to the target function
errfunc = lambda p, x, y: moffat_fn(x, p) - y # noqa
# Least square fit to the moffat
with self.lock:
# NOTE: without this mutex, optimize.leastsq causes a fatal error
# sometimes--it appears not to be thread safe.
# The error is:
# "SystemError: null argument to internal routine"
# "Fatal Python error: GC object already tracked"
p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
if not success:
raise IQCalcError("FWHM Moffat fitting failed")
mu, width, power, maxv = p1
width = np.abs(width)
self.logger.debug("mu=%f width=%f power=%f maxv=%f" % (
mu, width, power, maxv))
fwhm = 2.0 * width * np.sqrt(2.0 ** (1.0 / power) - 1.0)
# some routines choke on numpy values and need "pure" Python floats
# e.g. when marshalling through a remote procedure interface
fwhm = float(fwhm)
mu = float(mu)
width = float(width)
power = float(power)
maxv = float(maxv)
res = Bunch.Bunch(fwhm=fwhm, mu=mu, width=width, power=power,
maxv=maxv, fit_fn=moffat_fn,
fit_args=[mu, width, power, maxv])
return res
def calc_fwhm(self, arr1d, medv=None, method_name='gaussian'):
"""Calculate FWHM for the given input array.
Parameters
----------
arr1d : array-like
1D array cut in either X or Y direction on the object.
medv : float or `None`
Median of the data. If not given, it is calculated from ``arr1d``.
method_name : {'gaussian', 'moffat'}
Function to use for fitting.
Returns
-------
res : `~ginga.misc.Bunch.Bunch`
Fitting results.
"""
# Calculate FWHM in each direction
fwhm_fn = self.calc_fwhm_gaussian
if method_name == 'moffat':
fwhm_fn = self.calc_fwhm_moffat
return fwhm_fn(arr1d, medv=medv)
def get_fwhm(self, x, y, radius, data, medv=None, method_name='gaussian'):
"""Get the FWHM values of the object at the given coordinates and
radius.
Parameters
----------
x, y : int
Indices of the object location in data array.
radius : float
Radius of the region encompassing the object.
data : array-like
Data array.
medv, method_name
See :meth:`calc_fwhm`.
Returns
-------
fwhm_x, fwhm_y : float
FWHM in X and Y, respectively.
ctr_x, ctr_y : float
Center in X and Y, respectively.
x_res, y_res : dict
Fit results from :meth:`calc_fwhm` in X and Y, respectively.
"""
if medv is None:
medv = get_median(data)
# Get two cuts of the data, one in X and one in Y
x0, y0, xarr, yarr = self.cut_cross(x, y, radius, data)
# Calculate FWHM in each direction
x_res = self.calc_fwhm(xarr, medv=medv, method_name=method_name)
fwhm_x, cx = x_res.fwhm, x_res.mu
y_res = self.calc_fwhm(yarr, medv=medv, method_name=method_name)
fwhm_y, cy = y_res.fwhm, y_res.mu
ctr_x = x0 + cx
ctr_y = y0 + cy
self.logger.debug("fwhm_x,fwhm_y=%f,%f center=%f,%f" % (
fwhm_x, fwhm_y, ctr_x, ctr_y))
return (fwhm_x, fwhm_y, ctr_x, ctr_y, x_res, y_res)
def starsize(self, fwhm_x, deg_pix_x, fwhm_y, deg_pix_y):
"""Calculate average FWHM in arcseconds.
Parameters
----------
fwhm_x : float
FWHM in X (pixels).
deg_pix_x : float
Plate scale from CDELT1 in degrees per pixel.
fwhm_y : float
FWHM in Y (pixels).
deg_pix_y : float
Plate scale from CDELT2 in degrees per pixel.
Returns
-------
fwhm : float
Average FWHM in arcseconds.
"""
cdelta1 = math.fabs(deg_pix_x)
cdelta2 = math.fabs(deg_pix_y)
fwhm = (fwhm_x * cdelta1 + fwhm_y * cdelta2) / 2.0
fwhm = fwhm * 3600.0
return fwhm
def centroid(self, data, xc, yc, radius):
"""Calculate centroid from center of mass.
Parameters
----------
data : array-like
Data array.
xc, yc : int
X and Y indices of the approximate center.
radius : float
Half-width of the region to consider around the given center.
Returns
-------
x, y : float
Centroid indices.
Raises
------
IQCalcError
Missing dependency.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
xc, yc = int(xc), int(yc)
x0, y0, arr = self.cut_region(xc, yc, int(radius), data)
# See https://stackoverflow.com/questions/25369982/center-of-mass-for-roi-in-python
cp_arr = np.asarray(arr)
cy, cx = ndimage.center_of_mass(cp_arr)
return (x0 + cx, y0 + cy)
# FINDING BRIGHT PEAKS
def get_threshold(self, data, sigma=5.0):
"""Calculate threshold for :meth:`find_bright_peaks`.
Parameters
----------
data : array-like
Data array.
sigma : float
Sigma for the threshold.
Returns
-------
threshold : float
Threshold based on good data, its median, and the given sigma.
"""
# remove masked elements
fdata = data[np.logical_not(np.ma.getmaskarray(data))]
# remove Inf or NaN
fdata = fdata[np.isfinite(fdata)]
# find the median
median = get_median(fdata)
# NOTE: for this method a good default sigma is 5.0
dist = np.fabs(fdata - median).mean()
threshold = median + sigma * dist
# NOTE: for this method a good default sigma is 2.0
## std = np.std(fdata - median)
## threshold = median + sigma * std
self.logger.debug("calc threshold=%f" % (threshold))
return threshold
def find_bright_peaks(self, data, threshold=None, sigma=5, radius=5):
"""Find bright peak candidates in in the given data.
Parameters
----------
data : array-like
Input data to find peaks from.
threshold : float or `None`
Detection threshold. Below this value, an object is not
considered a candidate. If not given, a default is calculated
using :meth:`get_threshold` with the given ``sigma``.
sigma : float
Sigma for the threshold.
radius : float
Pixel radius for determining local maxima. If the
desired objects are larger in size, specify a larger radius.
Returns
-------
peaks : list of tuple
A list of candidate object coordinate tuples ``(x, y)`` in data.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
if threshold is None:
# set threshold to default if none provided
threshold = self.get_threshold(data, sigma=sigma)
self.logger.debug("threshold defaults to %f (sigma=%f)" % (
threshold, sigma))
#self.logger.debug("filtering")
data_max = maximum_filter(data, radius)
maxima = (data == data_max)
diff = data_max > threshold
maxima[diff == 0] = 0
#self.logger.debug("finding")
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
peaks = []
for dy, dx in slices:
xc = (dx.start + dx.stop - 1) / 2.0
yc = (dy.start + dy.stop - 1) / 2.0
# This is only an approximate center; use FWHM or centroid
# calculation to refine further
peaks.append((xc, yc))
self.logger.debug("peaks=%s" % (str(peaks)))
return peaks
def cut_region(self, x, y, radius, data):
"""Return a cut region.
Parameters
----------
x, y : int
Indices of central pixel.
radius : int
Half-width in both X and Y directions.
data : array-like
Data array to cut from.
Returns
-------
x0, y0 : int
Origin of the region.
arr : array-like
Cut region (a view, not copy).
"""
n = radius
ht, wd = data.shape
x0, x1 = max(0, x - n), min(wd - 1, x + n)
y0, y1 = max(0, y - n), min(ht - 1, y + n)
arr = data[y0:y1 + 1, x0:x1 + 1]
return (x0, y0, arr)
def cut_cross(self, x, y, radius, data):
"""Cut data vertically and horizontally at the given position
with the given radius.
Parameters
----------
x, y : int
Indices where vertical and horizontal cuts meet.
radius : float
Radius of both cuts.
data : array-like
Data array to cut from.
Returns
-------
x0 : array-like
Starting pixel of horizontal cut (in X).
y0 : array-like
Starting pixel of vertical cut (in Y).
xarr : array-like
Horizontal cut (in X).
yarr : array-like
Vertical cut (in Y).
"""
n = int(round(radius))
ht, wd = data.shape
x, y = int(round(x)), int(round(y))
x0, x1 = int(max(0, x - n)), int(min(wd - 1, x + n))
y0, y1 = int(max(0, y - n)), int(min(ht - 1, y + n))
xarr = data[y, x0:x1 + 1]
yarr = data[y0:y1 + 1, x]
return (x0, y0, xarr, yarr)
def brightness(self, x, y, radius, medv, data):
"""Return the brightness value found in a region defined by input
location and radius. Region is cut using :meth:`cut_region`.
Parameters
----------
x, y : int
Indices of central pixel.
radius : int
Half-width in both X and Y directions.
medv : float
Background to subtract off.
data : array-like
Data array.
Returns
-------
res : float
Brightness.
"""
x0, y0, arr = self.cut_region(x, y, radius, data)
arr2 = np.sort(arr.flat)
idx = int(len(arr2) * 0.8)
res = arr2[idx] - medv
return float(res)
def fwhm_data(self, x, y, data, radius=15, method_name='gaussian'):
"""Equivalent to :meth:`get_fwhm`."""
return self.get_fwhm(x, y, radius, data, method_name=method_name)
# Encircled and ensquared energies (EE)
def ensquared_energy(self, data):
"""Return a function of ensquared energy across pixel indices.
Ideally, data is already a masked array and is assumed to be centered.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
tot = data.sum()
ny, nx = data.shape
cen_x = int(nx // 2)
cen_y = int(ny // 2)
ee = []
if ny > nx:
n_max = ny
cen = cen_y
else:
n_max = nx
cen = cen_x
if n_max % 2 == 0: # Even
delta_i1 = -1
else: # Odd
delta_i1 = 0
xr = range(n_max - cen)
for i in xr:
ix1 = cen_x - i + delta_i1
if ix1 < 0:
ix1 = 0
ix2 = cen_x + i + 1
if ix2 > nx:
ix2 = nx
iy1 = cen_y - i + delta_i1
if iy1 < 0:
iy1 = 0
iy2 = cen_y + i + 1
if iy2 > ny:
iy2 = ny
ee.append(data[iy1:iy2, ix1:ix2].sum() / tot)
return interp1d(xr, ee, kind='cubic', bounds_error=False,
assume_sorted=True)
# This is adapted from poppy package. See licenses/POPPY_LICENSE.md .
def encircled_energy(self, data):
"""Return a function of encircled energy across pixel indices.
Ideally, data is already a masked array and is assumed to be centered.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
y, x = np.indices(data.shape, dtype=float)
cen = tuple((i - 1) * 0.5 for i in data.shape[::-1])
x -= cen[0]
y -= cen[1]
r = np.sqrt(x * x + y * y)
ind = np.argsort(r.flat)
sorted_r = r.flat[ind]
sorted_data = data.flat[ind]
# data is already masked
csim = sorted_data.cumsum(dtype=float)
sorted_r_int = sorted_r.astype(int)
deltar = sorted_r_int[1:] - sorted_r_int[:-1] # assume all radii represented
rind = np.where(deltar)[0]
ee = csim[rind] / sorted_data.sum() # Normalize
if isinstance(ee, np.ma.MaskedArray):
ee.set_fill_value(0)
ee = ee.filled()
return interp1d(range(ee.size), ee, kind='cubic', bounds_error=False,
assume_sorted=True)
# EVALUATION ON A FIELD
def evaluate_peaks(self, peaks, data, bright_radius=2, fwhm_radius=15,
fwhm_method='gaussian', ee_total_radius=10,
cb_fn=None, ev_intr=None):
"""Evaluate photometry for given peaks in data array.
Parameters
----------
peaks : list of tuple
List of ``(x, y)`` tuples containing indices of peaks.
data : array-like
Data array that goes with the given peaks.
bright_radius : int
**This is not used.**
fwhm_radius, fwhm_method
See :meth:`get_fwhm`.
ee_total_radius : float
Radius, in pixels, where encircled and ensquared energy fractions
are defined as 1.
cb_fn : func or `None`
If applicable, provide a callback function that takes a
`ginga.misc.Bunch.Bunch` containing the result for each peak.
It should not return anything.
ev_intr : :py:class:`threading.Event` or `None`
For threading, if applicable.
Returns
-------
objlist : list of `ginga.misc.Bunch.Bunch`
A list of successful results for the given peaks.
Each result contains the following keys:
* ``objx``, ``objy``: Fitted centroid from :meth:`get_fwhm`.
* ``pos``: A measure of distance from the center of the image.
* ``oid_x``, ``oid_y``: Center-of-mass centroid from :meth:`centroid`.
* ``fwhm_x``, ``fwhm_y``: Fitted FWHM from :meth:`get_fwhm`.
* ``fwhm``: Overall measure of fwhm as a single value.
* ``fwhm_radius``: Input FWHM radius.
* ``brightness``: Average peak value based on :meth:`get_fwhm` fits.
* ``elipse``: A measure of ellipticity.
* ``x``, ``y``: Input indices of the peak.
* ``skylevel``: Sky level estimated from median of data array and
``skylevel_magnification`` and ``skylevel_offset`` attributes.
* ``background``: Median of the input array.
* ``ensquared_energy_fn``: Function of ensquared energy for different pixel radii.
* ``encircled_energy_fn``: Function of encircled energy for different pixel radii.
"""
height, width = data.shape
hh = float(height) / 2.0
ht = float(height)
h4 = float(height) * 4.0
wh = float(width) / 2.0
wd = float(width)
w4 = float(width) * 4.0
# Find the median (sky/background) level
median = float(get_median(data))
#skylevel = median
# Old SOSS qualsize() applied this calculation to skylevel
skylevel = median * self.skylevel_magnification + self.skylevel_offset
# Form a list of objects and their characteristics
objlist = []
for x, y in peaks:
if ev_intr and ev_intr.is_set():
raise IQCalcError("Evaluation interrupted!")
# centroid calculation on local peak
oid_x, oid_y = None, None
try:
oid_x, oid_y = self.centroid(data, x, y, fwhm_radius)
except Exception as e:
# Error doing centroid
self.logger.debug("Error doing centroid on object at %.2f,%.2f: %s" % (
x, y, str(e)))
# Find the fwhm in x and y, using local peak
try:
res = self.fwhm_data(x, y, data, radius=fwhm_radius,
method_name=fwhm_method)
fwhm_x, fwhm_y, ctr_x, ctr_y, x_res, y_res = res
bx = x_res.fit_fn(round(ctr_x),
(ctr_x,) + tuple(x_res.fit_args[1:]))
by = y_res.fit_fn(round(ctr_y),
(ctr_y,) + tuple(y_res.fit_args[1:]))
bright = float((bx + by) / 2.0)
except Exception as e:
# Error doing FWHM, skip this object
self.logger.debug("Error doing FWHM on object at %.2f,%.2f: %s" % (
x, y, str(e)))
continue
self.logger.debug("orig=%f,%f ctr=%f,%f fwhm=%f,%f bright=%f" % (
x, y, ctr_x, ctr_y, fwhm_x, fwhm_y, bright))
# overall measure of fwhm as a single value
fwhm = (math.sqrt(fwhm_x * fwhm_x + fwhm_y * fwhm_y) *
(1.0 / math.sqrt(2.0)))
# calculate a measure of ellipticity
elipse = math.fabs(min(fwhm_x, fwhm_y) / max(fwhm_x, fwhm_y))
# calculate a measure of distance from center of image
dx = wh - ctr_x
dy = hh - ctr_y
dx2 = dx * dx / wd / w4
dy2 = dy * dy / ht / h4
if dx2 > dy2:
pos = 1.0 - dx2
else:
pos = 1.0 - dy2
# EE on background subtracted image
ee_sq_fn = None
ee_circ_fn = None
iy1 = int(ctr_y - ee_total_radius)
iy2 = int(ctr_y + ee_total_radius) + 1
ix1 = int(ctr_x - ee_total_radius)
ix2 = int(ctr_x + ee_total_radius) + 1
if iy1 < 0 or iy2 > height or ix1 < 0 or ix2 > width:
self.logger.debug("Error calculating EE on object at %.2f,%.2f: Box out of range with radius=%.2f" % (x, y, ee_total_radius))
else:
ee_data = data[iy1:iy2, ix1:ix2] - median
try:
ee_sq_fn = self.ensquared_energy(ee_data)
except Exception as e:
self.logger.debug("Error calculating ensquared energy on object at %.2f,%.2f: %s" % (x, y, str(e)))
try:
ee_circ_fn = self.encircled_energy(ee_data)
except Exception as e:
self.logger.debug("Error calculating encircled energy on object at %.2f,%.2f: %s" % (x, y, str(e)))
obj = Bunch.Bunch(objx=ctr_x, objy=ctr_y, pos=pos,
oid_x=oid_x, oid_y=oid_y,
fwhm_x=fwhm_x, fwhm_y=fwhm_y,
fwhm=fwhm, fwhm_radius=fwhm_radius,
brightness=bright, elipse=elipse,
x=int(x), y=int(y),
skylevel=skylevel, background=median,
ensquared_energy_fn=ee_sq_fn,
encircled_energy_fn=ee_circ_fn)
objlist.append(obj)
if cb_fn is not None:
cb_fn(obj)
return objlist
def _sortkey(self, obj):
"""For sorting of result in :meth:`objlist_select`."""
val = obj.brightness * obj.pos / math.sqrt(obj.fwhm)
return val
def objlist_select(self, objlist, width, height,
minfwhm=2.0, maxfwhm=150.0, minelipse=0.5,
edgew=0.01):
"""Filter output from :meth:`evaluate_peaks`.
Parameters
----------
objlist : list of `ginga.misc.Bunch.Bunch`
Output from :meth:`evaluate_peaks`.
width, height : int
Dimension of data array from which ``objlist`` was derived.
minfwhm, maxfwhm : float
Limits for desired FWHM, where ``(minfwhm, maxfwhm)``.
minelipse : float
Minimum value of desired ellipticity (not inclusive).
edgew : float
Factor between 0 and 1 that determines if a location is too close to the edge or not.
Returns
-------
results : list of `ginga.misc.Bunch.Bunch`
Elements of ``objlist`` that contain desired FWHM, ellipticity,
and not too close to the edge.
"""
results = []
count = 0
for obj in objlist:
count += 1
self.logger.debug("%d obj x,y=%.2f,%.2f fwhm=%.2f bright=%.2f" % (
count, obj.objx, obj.objy, obj.fwhm, obj.brightness))
# If peak has a minfwhm < fwhm < maxfwhm and the object
# is inside the frame by edgew pct
if ((minfwhm < obj.fwhm) and (obj.fwhm < maxfwhm) and
(minelipse < obj.elipse) and (width * edgew < obj.x) and
(height * edgew < obj.y) and
(width * (1.0 - edgew) > obj.x) and
(height * (1.0 - edgew) > obj.y)):
results.append(obj)
#results.sort(cmp=self._compare)
results.sort(key=self._sortkey, reverse=True)
return results
def pick_field(self, data, peak_radius=5, bright_radius=2, fwhm_radius=15,
threshold=None,
minfwhm=2.0, maxfwhm=50.0, minelipse=0.5,
edgew=0.01, ee_total_radius=10):
"""Pick the first good object within the given field.
Parameters
----------
data : array-like
Data array of the field.
peak_radius, threshold
See :meth:`find_bright_peaks`.
bright_radius, fwhm_radius, ee_total_radius
See :meth:`evaluate_peaks`.
minfwhm, maxfwhm, minelipse, edgew
See :meth:`objlist_select`.
Returns
-------
result : `ginga.misc.Bunch.Bunch`
This is a single element of ``objlist`` as described in
:meth:`evaluate_peaks`.
Raises
------
IQCalcError
No object matches selection criteria.
"""
height, width = data.shape
# Find the bright peaks in the image
peaks = self.find_bright_peaks(data, radius=peak_radius,
threshold=threshold)
self.logger.debug("peaks=%s" % str(peaks))
if len(peaks) == 0:
raise IQCalcError("Cannot find bright peaks")
# Evaluate those peaks
objlist = self.evaluate_peaks(peaks, data,
bright_radius=bright_radius,
fwhm_radius=fwhm_radius,
ee_total_radius=ee_total_radius)
if len(objlist) == 0:
raise IQCalcError("Error evaluating bright peaks")
results = self.objlist_select(objlist, width, height,
minfwhm=minfwhm, maxfwhm=maxfwhm,
minelipse=minelipse, edgew=edgew)
if len(results) == 0:
raise IQCalcError("No object matches selection criteria")
return results[0]
def qualsize(self, image, x1=None, y1=None, x2=None, y2=None,
radius=5, bright_radius=2, fwhm_radius=15, threshold=None,
minfwhm=2.0, maxfwhm=50.0, minelipse=0.5,
edgew=0.01, ee_total_radius=10):
"""Run :meth:`pick_field` on the given image.
Parameters
----------
image : `ginga.AstroImage.AstroImage`
Image to process.
x1, y1, x2, y2 : int
See :meth:`ginga.BaseImage.BaseImage.cutout_data`.
radius, threshold
See :meth:`find_bright_peaks`.
bright_radius, fwhm_radius, ee_total_radius
See :meth:`evaluate_peaks`.
minfwhm, maxfwhm, minelipse, edgew
See :meth:`objlist_select`.
Returns
-------
qs : `ginga.misc.Bunch.Bunch`
This is a single element of ``objlist`` as described in
:meth:`evaluate_peaks`.
"""
if x1 is None:
x1 = 0
if y1 is None:
y1 = 0
if x2 is None:
x2 = image.width
if y2 is None:
y2 = image.height
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
data = image.cutout_data(x1, y1, x2, y2, astype=float)
qs = self.pick_field(data, peak_radius=radius,
bright_radius=bright_radius,
fwhm_radius=fwhm_radius,
threshold=threshold,
minfwhm=minfwhm, maxfwhm=maxfwhm,
minelipse=minelipse, edgew=edgew,
ee_total_radius=ee_total_radius)
# Add back in offsets into image to get correct values with respect
# to the entire image
qs.x += x1
qs.y += y1
qs.objx += x1
qs.objy += y1
self.logger.debug("obj=%f,%f fwhm=%f sky=%f bright=%f" % (
qs.objx, qs.objy, qs.fwhm, qs.skylevel, qs.brightness))
return qs
# END
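# A hedged, standalone sketch (not part of ginga) of the FWHM relations used
# by IQCalc.calc_fwhm_gaussian and IQCalc.calc_fwhm_moffat above:
# FWHM_gaussian = 2*sqrt(2*ln 2)*sigma and
# FWHM_moffat   = 2*gamma*sqrt(2**(1/alpha) - 1).
# The parameter values below are made up for illustration.
def _demo_fwhm_relations():
    sigma = 2.5
    fwhm_gaussian = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sigma  # ~5.89 pixels
    gamma, alpha = 3.0, 2.5
    fwhm_moffat = 2.0 * gamma * np.sqrt(2.0 ** (1.0 / alpha) - 1.0)  # ~3.39 pixels
    return fwhm_gaussian, fwhm_moffat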
|
|
import collections
import gzip
import numpy
import os
import pandas
import pysam
import cStringIO as StringIO
import subprocess
from grocsvs import step
from grocsvs import utilities
Readcloud = collections.namedtuple("Readcloud", "chrom start_pos end_pos bc num_reads obs_len hap")
class CombineReadcloudsStep(step.StepChunk):
@staticmethod
def get_steps(options):
for sample, dataset in options.iter_10xdatasets():
yield CombineReadcloudsStep(options, sample, dataset)
def __init__(self, options, sample, dataset):
self.options = options
self.sample = sample
self.dataset = dataset
def __str__(self):
return ".".join([self.__class__.__name__,
self.sample.name,
self.dataset.id])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
readclouds = "readclouds.{}.{}.tsv.gz".format(
self.sample.name,
self.dataset.id
)
barcode_map_file = "barcode_map.{}.{}.pickle".format(
self.sample.name,
self.dataset.id
)
paths = {
"barcode_map": os.path.join(directory, barcode_map_file),
"readclouds": os.path.join(directory, readclouds),
"index": os.path.join(directory, readclouds+".tbi")
}
return paths
def run(self):
outpaths = self.outpaths(final=False)
self.logger.log("Loading read clouds...")
readclouds = []
for i, inpath in enumerate(self.get_input_paths()):
self.logger.log("\t"+str(i)+","+inpath)
try:
readclouds.append(pandas.read_table(inpath, compression="gzip"))
except pandas.io.common.EmptyDataError:
self.logger.log("No read clouds found in {}; skipping".format(inpath))
readclouds = pandas.concat(readclouds)
#goodbcs = set(get_good_barcodes(readclouds))
goodbcs = get_good_barcodes(readclouds)
noclouds = len(readclouds["bc"])
nogoodbcs = len(goodbcs)
isinbcs = [False] * noclouds
bs=0
bcwin=1000000
while bs < noclouds:
be=bs+bcwin
if be > noclouds: be=noclouds
isinbcs[bs:be] = readclouds["bc"][bs:be].isin(goodbcs)
bs+=bcwin
# readclouds = readclouds.loc[readclouds["bc"].isin(goodbcs)]
readclouds = readclouds.loc[isinbcs]
good_barcodes = readclouds["bc"].unique()
barcode_map = get_barcode_map(good_barcodes)
self.logger.log("Writing barcode map to file...")
with open(outpaths["barcode_map"], "w") as outf:
utilities.pickle.dump(barcode_map, outf, protocol=-1)
self.logger.log("Writing readclouds to file...")
tmp_readclouds_path = outpaths["readclouds"][:-3]
readclouds.to_csv(tmp_readclouds_path, sep="\t", index=False)
bgzip = self.options.binary("bgzip")
bgzip_cmd = "{} {}".format(bgzip, tmp_readclouds_path)
bgzip_proc = subprocess.Popen(bgzip_cmd, shell=True)
bgzip_proc.wait()
self.logger.log("Indexing readclouds file...")
# define the chrom, start and end columns; and indicate that
# the first (header) row should be skipped
tabix = self.options.binary("tabix")
tabix_cmd = "{} -s 1 -b 2 -e 3 -S 1 {}".format(tabix, outpaths["readclouds"])
subprocess.check_call(tabix_cmd, shell=True)
def get_input_paths(self):
paths = []
for chrom in self.options.reference.chroms:
input_step = CallReadcloudsStep(self.options, self.sample, self.dataset, chrom)
paths.append(input_step.outpaths(final=True)["readclouds"])
return paths
class EstimateReadCloudParamsStep(step.StepChunk):
@staticmethod
def get_steps(options):
for sample, dataset in options.iter_10xdatasets():
yield EstimateReadCloudParamsStep(options, sample, dataset)
def __init__(self, options, sample, dataset):
self.options = options
self.sample = sample
self.dataset = dataset
def __str__(self):
return ".".join([self.__class__.__name__,
self.sample.name,
self.dataset.id])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
inter_read_distances = "inter_read_distances.{}.{}.pickle".format(
self.sample.name,
self.dataset.id
)
paths = {
"inter_read_distances": os.path.join(directory, inter_read_distances),
}
return paths
def run(self):
outpaths = self.outpaths(final=False)
inter_read_distances = sample_inter_read_distances(self.dataset.bam)
result = {}
try:
result["sampled_inter_read_distances"] = numpy.random.choice(
inter_read_distances, int(0.5e6), replace=False)
except ValueError:
result["sampled_inter_read_distances"] = inter_read_distances
result["read_cloud_clustering_distance"] = \
max(10000, int(numpy.ceil(numpy.percentile(inter_read_distances, 99) / 5000)) * 5000)
self.logger.log("{} {} {}".format(*numpy.percentile(inter_read_distances, [50, 99, 99.9])))
self.logger.log(result["read_cloud_clustering_distance"])
with open(outpaths["inter_read_distances"], "w") as outf:
utilities.pickle.dump(result, outf, protocol=-1)
def sample_inter_read_distances(bam_path, window_size=0.5e6, skip_size=5e7):
bam = pysam.AlignmentFile(bam_path)
window_size = int(window_size)
skip_size = int(skip_size)
if skip_size < window_size:
skip_size = window_size
distances = []
for chrom, length in zip(bam.references, bam.lengths):
if length < 2*skip_size+2*window_size: continue
print chrom
for start in range(skip_size, length-skip_size, skip_size):
bc_last_pos = {}
for read in bam.fetch(chrom, start, start+window_size):
if read.is_secondary or read.is_supplementary or read.is_unmapped or read.is_read2:
continue
if not read.has_tag("BX"):
continue
bc = read.get_tag("BX")
if bc in bc_last_pos:
d = read.pos - bc_last_pos[bc]
if d < 60000:
distances.append(d)
if len(distances) > 10e6: # that's a plenty big sample!
return distances
bc_last_pos[bc] = read.pos
if len(distances) < 25 and skip_size > window_size:
new_skip_size = skip_size / 100
return sample_inter_read_distances(bam_path, window_size, new_skip_size)
return distances
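# A hedged illustration (hypothetical helper, not used by the pipeline) of the
# clustering-distance rule applied in EstimateReadCloudParamsStep.run() above:
# take the 99th percentile of the sampled inter-read distances, round it up to
# the next 5 kb, and never go below 10 kb.
def _demo_clustering_distance(inter_read_distances):
    p99 = numpy.percentile(inter_read_distances, 99)
    return max(10000, int(numpy.ceil(p99 / 5000.0)) * 5000)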
class CallReadcloudsStep(step.StepChunk):
@staticmethod
def get_steps(options):
for sample, dataset in options.iter_10xdatasets():
for chrom in options.reference.chroms:
yield CallReadcloudsStep(
options, sample, dataset, chrom)
def __init__(self, options, sample, dataset, chrom):
self.options = options
self.sample = sample
self.dataset = dataset
self.chrom = chrom
def __str__(self):
return ".".join([self.__class__.__name__,
self.sample.name,
self.dataset.id,
self.chrom])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
readclouds = "readclouds.{}.{}.{}.tsv.gz".format(
self.sample.name,
self.dataset.id,
self.chrom
)
paths = {
"readclouds": os.path.join(directory, readclouds)
}
return paths
def run(self):
outpaths = self.outpaths(final=False)
input_step = EstimateReadCloudParamsStep(self.options, self.sample, self.dataset)
input_path = input_step.outpaths(final=True)["inter_read_distances"]
info = utilities.pickle.load(open(input_path))
max_dist = info["read_cloud_clustering_distance"]
self.logger.log("Using {} for distance between readclouds".format(max_dist))
bam_path = self.dataset.bam
readclouds = call_readclouds(bam_path, self.chrom, max_dist)
readclouds.to_csv(outpaths["readclouds"], sep="\t", index=False, compression="gzip")
def call_readclouds(bam_path, chrom, max_dist):
"""
    we'll load everything into memory so we can easily sort; this isn't strictly
    necessary and could be changed if we start running out of memory
"""
detector = ReadcloudDetector(bam_path, max_dist)
readcloud_iter = detector.get_read_clouds(chrom)
dataframe = pandas.DataFrame(readcloud_iter)
if len(dataframe) > 1:
dataframe = dataframe.sort_values(["start_pos", "end_pos"])
return dataframe
def is_good_read(read):
if not read.has_tag("BX"):
return False
# if read.mapq < 30:
# return False
if read.is_duplicate:
return False
if read.is_unmapped:
return False
return True
class ReadcloudDetector(object):
def __init__(self, bam_path, max_dist, min_end_mapq=30, require_one_mapq=60):
self.bam = pysam.AlignmentFile(bam_path)
self.max_dist = max_dist
self.min_end_mapq = min_end_mapq
self.require_one_mapq = require_one_mapq
self._cur_chrom = None
self._barcodes_to_reads = {}
self._barcodes_to_haplotypes = {}
self._recently_observed_bcs = set()
self.cur_start = 0
def get_read_clouds(self, chrom):
assert self._cur_chrom is None, "can only run on one chrom at once"
self._cur_chrom = chrom
self.cur_start = 0
_progress = int(1e6)
for i, read in enumerate(self.bam.fetch(chrom)):
if i % _progress == 0:
print "{:>15} {:15,.0f}".format(i, read.pos)
# if i > 2e6:
# break
if (read.pos - self.cur_start) > self.max_dist:
self.cur_start = read.pos
for read_cloud in self._flush():
yield read_cloud
read_cloud = self._add_read(read)
if read_cloud is not None:
yield read_cloud
for read_cloud in self._flush():
yield read_cloud
self._cur_chrom = None
def _make_read_cloud(self, bc):
frag_reads = self._barcodes_to_reads.pop(bc)
if max(read.mapq for read in frag_reads) < self.require_one_mapq:
return None
while frag_reads[-1].mapq < self.min_end_mapq:
frag_reads.pop(-1)
count = len(frag_reads)
# This is how 10X counts; not really sure why, since by this approach,
# we could have a 100kb read cloud with only a single read counted
# sum(1 for read in frag_reads if read.mapq >= self.require_one_mapq)
# if count == 0:
# return None
start = min(read.pos for read in frag_reads)
end = max(read.aend for read in frag_reads)
obs_len = end - start
haplotype = self._barcodes_to_haplotypes[bc]
if haplotype is not None:
haplotype = haplotype[1]
read_cloud = Readcloud(self._cur_chrom, start, end, bc, count, obs_len, haplotype)
return read_cloud
def _flush(self):
to_flush = set(self._barcodes_to_reads) - self._recently_observed_bcs
for bc in to_flush:
read_cloud = self._make_read_cloud(bc)
# if bc == "AGTGAAAAGCTTGGTT-1": print read_cloud
if read_cloud:
yield read_cloud
self._recently_observed_bcs = set()
def _add_read(self, read):
if not is_good_read(read):
return None
bc = read.get_tag("BX")
# if bc == "AGTGAAAAGCTTGGTT-1":
# print read.pos, read.mapq, len(self._barcodes_to_reads.get(bc, []))
if bc in self._barcodes_to_reads:
previous_read = self._barcodes_to_reads[bc][-1]
if (read.pos - previous_read.aend) > self.max_dist:
read_cloud = self._make_read_cloud(bc)
self._barcodes_to_reads[bc] = [read]
self._barcodes_to_haplotypes[bc] = get_read_haplotype(read)
self._recently_observed_bcs.add(bc)
return read_cloud
else:
self._barcodes_to_reads[bc].append(read)
self._recently_observed_bcs.add(bc)
elif read.mapq > self.min_end_mapq:
self._barcodes_to_reads[bc] = [read]
self._recently_observed_bcs.add(bc)
self._barcodes_to_haplotypes[bc] = get_read_haplotype(read)
return None
def get_read_haplotype(read):
try:
hp = read.get_tag("HP")
ps = read.get_tag("PS")
return (ps, hp)
except KeyError:
return None
def get_good_barcodes(fragments, proportion=0.90):
"""
    return the top barcodes which together comprise `proportion` (default 90%) of the reads
"""
read_counts = fragments.groupby("bc").sum()["num_reads"].copy()
read_counts.sort_values(inplace=True, ascending=False)
cutoff = proportion * read_counts.sum()
cutoff = numpy.where(read_counts.cumsum() >= cutoff)[0][0]
return sorted(read_counts.index[:cutoff])
def get_barcode_map(barcodes):
barcodes = sorted(barcodes)
return dict(zip(barcodes, range(len(barcodes))))
def load_fragments(options, sample, dataset, chrom=None, start=None, end=None, usecols=None,
min_reads_per_frag=1):
if start is not None:
if start < 0:
raise Exception("start coord is negative: {}:{}-{}".format(chrom, start, end))
if end is not None:
if start >= end:
raise Exception("end coord is before start: {}:{}-{}".format(chrom, start, end))
readclouds_path = os.path.join(
options.results_dir,
"CombineReadcloudsStep",
"readclouds.{}.{}.tsv.gz".format(sample.name, dataset.id))
tabix = pysam.TabixFile(readclouds_path)
if chrom is not None and chrom not in tabix.contigs:
#print("MISSING:", chrom)
return pandas.DataFrame(columns="chrom start_pos end_pos bc num_reads obs_len hap".split())
if usecols is not None and "num_reads" not in usecols:
usecols.append("num_reads")
s = StringIO.StringIO("\n".join(tabix.fetch(chrom, start, end)))
readclouds = pandas.read_table(s, header=None, names=Readcloud._fields, usecols=usecols)
readclouds["chrom"] = readclouds["chrom"].astype("string")
if min_reads_per_frag > 0:
readclouds = readclouds.loc[readclouds["num_reads"]>min_reads_per_frag]
return readclouds
|
|
from .ast_builder import ASTBuilder
from ..query_data_structures.constraint import Constraint
from ..query_data_structures.element import Variable, Constant, Wildcard
from ..query_data_structures.query import ConjunctiveQuery
from ..query_data_structures.relation import RelationInQuery
__author__ = 'caioseguin'
# Function that compares the ASTBuilder output with an expected answer.
# It compares the two using the str() method.
# Unfortunately, it does not implement a type matching check.
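# For example, checkAnswer(Constant('2'), Variable('2')) would report a match
# whenever the two happen to render to the same string, even though the types differ.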
def hardCodedTest():
ast_builder = ASTBuilder()
input_list = []
expected_answer_list = []
result_list = []
# q(X,Y):- s(X)
input = ([[['q', ['X', 'Y']], ':-', ['s', ['X']]]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False)], [],
RelationInQuery('q', [Variable('X'), Variable('Y')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X):- s(X)
input = ([[['q', ['X']], ':-', ['s', ['X']]]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False)], [],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X):- s(X), not t(U)
input = ([[['q', ['X']], ':-', ['s', ['X']], ['not', ['t', ['U']]]]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False),
RelationInQuery('t', [Variable('U')], True)], [],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X):- s(X,U), not t(U)
input = ([[['q', ['X']], ':-', ['s', ['X', 'U']], ['not', ['t', ['U']]]]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X'), Variable('U')], False),
RelationInQuery('t', [Variable('U')], True)], [],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X):- s(X), not t(U), U = 2
input = ([[['q', ['X']], ':-', ['s', ['X']], ['not', ['t', ['U']]], ['U', '=', '2']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False),
RelationInQuery('t', [Variable('U')], True)],
[Constraint(Variable('U'), Constant('2'), '=')],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
    # q(X):- s(X), not t(U), U < 2
input = ([[['q', ['X']], ':-', ['s', ['X']], ['not', ['t', ['U']]], ['U', '<', '2']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False),
RelationInQuery('t', [Variable('U')], True)],
[Constraint(Variable('U'), Constant('2'), '<')],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X):- s(X), not t(U), U = X
input = ([[['q', ['X']], ':-', ['s', ['X']], ['not', ['t', ['U']]], ['U', '=', 'X']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False),
RelationInQuery('t', [Variable('U')], True)],
[Constraint(Variable('U'), Variable('X'), '=')],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
    # q(X,Y):- s(X,Y), Y < 3
input = ([[['q', ['X', 'Y']], ':-', ['s', ['X', 'Y']], ['Y', '<', '3']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X'), Variable('Y')], False)],
                                       [Constraint(Variable('Y'), Constant('3'), '<')],
RelationInQuery('q', [Variable('X'), Variable('Y')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X):- s(X), not t(Y), X = Y
input = ([[['q', ['X']], ':-', ['s', ['X']], ['not', ['t', ['Y']]], ['X', '=', 'Y']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False),
RelationInQuery('t', [Variable('Y')], True)],
[Constraint(Variable('X'), Variable('Y'), '=')],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X,Z):- s(X,Y), not t(A,Z), Z = Y
input = ([[['q', ['X', 'Z']], ':-', ['s', ['X', 'Y']], ['not', ['t', ['A', 'Z']]], ['Z', '=', 'Y']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X'), Variable('Y')], False),
RelationInQuery('t', [Variable('A'), Variable('Z')], True)],
[Constraint(Variable('Z'), Variable('Y'), '=')],
RelationInQuery('q', [Variable('X'), Variable('Z')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X):- s(X, Y, Z), Y = 2, Z = Y
input = ([[['q', ['X']], ':-', ['s', ['X', 'Y', 'Z']], ['Y', '=', '2'], ['Z', '=', 'Y']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X'), Variable('Y'), Variable('Z')], False)],
[Constraint(Variable('Y'), Constant('2'), '='),
Constraint(Variable('Z'), Variable('Y'), '=')],
RelationInQuery('q', [Variable('X')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# r(X,'Nantes') :- r(X)
input = ([[['r', ['X', "'Nantes'"]], ':-', ['r', ['X']]]])
expected_answer = ConjunctiveQuery([RelationInQuery('r', [Variable('X')], False)],[],
RelationInQuery('r', [Variable('X'), Constant("'Nantes'")]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# r(X,Y,Z,_,2) :- s(X), Y=X, X=2
input = ([[['r', ['X', 'Y', 'Z', '_', '2']], ':-', ['s', ['X']], ['Y', '=', 'X'], ['X', '=', '2']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X')], False)],
[Constraint(Variable('Y'), Variable('X'), '='),
                                        Constraint(Variable('X'), Constant('2'), '=')],
RelationInQuery('r', [Variable('X'), Variable('Y'), Variable('Z'),
Wildcard(), Constant('2')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# q(X,Y) :- s(_,Y), t(X,_), u(_), v(_,_)
input = ([[['q', ['X', 'Y']], ':-', ['s', ['_', 'Y']], ['t', ['X', '_']], ['u', ['_']], ['v', ['_', '_']]]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Wildcard(),Variable('Y')], False),
RelationInQuery('t', [Variable('X'), Wildcard()], False),
RelationInQuery('u', [Wildcard()], False),
RelationInQuery('v', [Wildcard(),Wildcard()], False)],[],
RelationInQuery('q', [Variable('X'), Variable('Y')]))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# answer(X,Y):-S(X,Z),S(Y,Z),X>Y
input = ([[['answer', ['X', 'Y']], ':-', ['s', ['X', 'Z']], ['s', ['Y', 'Z']], ['X', '>', 'Y']]])
expected_answer = ConjunctiveQuery([RelationInQuery('s', [Variable('X'),Variable('Z')],False),
RelationInQuery('s', [Variable('Y'),Variable('Z')],False)],
[Constraint(Variable('X'), Variable('Y'),'>')],
RelationInQuery('answer', [Variable('X'),Variable('Y')], False))
output = ast_builder.buildAST(input)[0]
input_list.append(input)
expected_answer_list.append(expected_answer)
result_list.append(checkAnswer(output, expected_answer))
# ---------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------------- #
failed_test_list = []
    for i, result in enumerate(result_list):
        if not result:
            failed_test_list.append(i + 1)
test_score = str(len(result_list) - len(failed_test_list)) + '/' + str(len(result_list))
print('*** Test Results ***', '\n',
'*** Score: ', test_score, '\n',
#'*** Inputs: ', '\n',
#input_list, '\n',
#"\n".join(item[0] for item in input_list), '\n',
#'*** Expected answers: ', '\n',
#expected_answer_list, '\n'
#"\n".join(item[0] for item in expected_answer_list), '\n',
'*** Failed tests: ', failed_test_list)
def checkAnswer(answer_1, answer_2):
return str(answer_1) == str(answer_2)
def main():
    hardCodedTest()
if __name__ == '__main__':
    main()
|
|
import os
from functools import partial
from fabric.context_managers import cd
from fabric.state import env
from fabric.utils import indent, abort, warn
from blues.application.project import git_repository_path
from refabric.context_managers import sudo, silent
from refabric.operations import run
from refabric.utils import info
from refabric.contrib import blueprints
from .providers import get_providers
from .. import debian
from .. import git
from .. import user
from .. import python
from .. import util
from .. import virtualenv
__all__ = [
'install_project',
'install_project_user',
'install_project_structure',
'install_system_dependencies',
'install_virtualenv',
'install_requirements',
'install_or_update_source',
'install_source',
'update_source',
'install_providers'
]
blueprint = blueprints.get('blues.app')
def install_project():
create_app_root()
install_project_user()
install_project_structure()
install_system_dependencies()
install_or_update_source()
def create_app_root():
from .project import app_root
with sudo():
# Create global apps root
root_path = app_root()
debian.mkdir(root_path, recursive=True)
def install_project_user():
"""
Create project user and groups.
Create user home dir.
Disable ssh host checking.
Create log dir.
"""
from .project import project_home, user_name, log_path
with sudo():
info('Install application user')
username = user_name()
home_path = project_home()
# Setup groups for project user
project_user_groups = ['app-data', 'www-data']
for group in project_user_groups:
debian.groupadd(group, gid_min=10000)
# Get UID for project user
user.create_system_user(username, groups=project_user_groups,
home=home_path)
# Ensure project home has right owner in case user already exists
debian.mkdir(home_path, owner=username, group=username, mode=1775)
# Create application log path
debian.mkdir(log_path(), owner=username, group='app-data', mode=1775)
# Configure ssh for github
user.set_strict_host_checking(username, 'github.com')
dirs = blueprint.get('directories') or []
if dirs:
info('Create application directories')
for d in dirs:
if isinstance(d, basestring):
d = {'path': d}
info(' %s' % d['path'])
mode = d.get('mode')
debian.mkdir(d['path'], recursive=True,
owner=d.get('owner') or username,
group=d.get('group') or 'app-data',
mode=int(mode) if mode else 1775)
def install_project_structure():
"""
Create project directory structure
"""
from .project import static_base, use_static
with sudo():
info('Install application directory structure')
create_app_root()
if use_static():
# Create static web paths
static_path = os.path.join(static_base(), 'static')
media_path = os.path.join(static_base(), 'media')
debian.mkdir(static_path, group='www-data', mode=1775)
debian.mkdir(media_path, group='www-data', mode=1775)
def install_system_dependencies():
"""
Install system wide packages that application depends on.
"""
with sudo(), silent():
info('Install system dependencies')
system_dependencies = blueprint.get('system_dependencies')
if system_dependencies:
dependencies = []
repositories = []
ppa_dependencies = []
for dependency in system_dependencies:
dep, _, rep = dependency.partition('@')
if rep:
if rep not in repositories:
repositories.append(rep)
ppa_dependencies.append(dep)
elif dep not in dependencies:
dependencies.append(dep)
debian.apt_get_update()
debian.apt_get('install', *dependencies)
if repositories:
for repository in repositories:
debian.add_apt_repository(repository, src=True)
debian.apt_get_update()
debian.apt_get('install', *ppa_dependencies)
def install_virtualenv():
"""
Create a project virtualenv.
"""
from .project import sudo_project, virtualenv_path
with sudo():
virtualenv.install()
with sudo_project():
virtualenv.create(virtualenv_path())
def maybe_install_requirements(previous_commit, current_commit, force=False, update_pip=False):
from .project import requirements_txt, git_repository_path
changed_files = []
commit_range = '{}..{}'.format(previous_commit, current_commit)
installation_files = requirements_txt()
if force:
changed_files = installation_files
else:
with sudo(), cd(git_repository_path()), silent():
tree = util.RequirementTree(paths=installation_files)
for installation_file in tree.all_files():
installation_method = get_installation_method(installation_file)
if not force:
if installation_method == 'pip':
has_changed, added, removed = diff_requirements(
previous_commit,
current_commit,
installation_file)
if has_changed:
info('Requirements have changed, '
'added: {}, removed: {}'
.format(', '.join(added), ', '.join(removed)))
else:
# Check if installation_file has changed
has_changed, _, _ = git.diff_stat(
git_repository_path(),
commit_range,
installation_file)
if has_changed:
changed_files.append(installation_file)
changed_files = tree.get_changed(all_changed_files=changed_files)
if changed_files:
info('Install requirements {}', ', '.join(changed_files))
install_requirements(changed_files, update_pip=update_pip)
else:
info(indent('(requirements not changed in {}...skipping)'),
commit_range)
def diff_requirements(previous_commit, current_commit, filename):
"""
Diff requirements file
:param previous_commit:
:param current_commit:
:param filename:
:return: 3-tuple with (has_changed, additions, removals) where
has_changed is a bool, additions and removals may be sets or None.
"""
try:
return diff_requirements_smart(previous_commit,
current_commit,
filename,
strict=True)
except ValueError:
warn('Smart requirements diff failed, falling back to git diff')
has_changed, insertions, deletions = git.diff_stat(
git_repository_path(),
'{}..{}'.format(previous_commit, current_commit),
filename)
return has_changed, [str(insertions)], [str(deletions)]
def diff_requirements_smart(previous_commit, current_commit, filename,
strict=False):
get_requirements = partial(git.show_file,
repository_path=git_repository_path(),
filename=filename)
force_changed = False
try:
text = get_requirements(revision=previous_commit)
previous = set(util.iter_requirements(text=text))
except ValueError as exc:
warn('Failed to parse previous requirements: {}'.format(exc))
previous = set()
force_changed = True
if strict:
raise
try:
text = get_requirements(revision=current_commit)
current = set(util.iter_requirements(text=text))
except ValueError as exc:
warn('Failed to parse new requirements: {}'.format(exc))
current = set()
force_changed = True
if strict:
raise
additions = current.difference(previous)
removals = previous.difference(current)
has_changed = force_changed or bool(additions or removals)
return has_changed, additions, removals
def get_installation_method(filename):
if filename.endswith('.txt') or \
filename.endswith('.pip'):
return 'pip'
if os.path.basename(filename) == 'setup.py':
return 'setuptools'
def install_requirements(installation_files=None, update_pip=False):
"""
Pip install requirements in project virtualenv.
"""
from .project import sudo_project, virtualenv_path, requirements_txt, \
git_repository_path
if not installation_files:
installation_files = requirements_txt()
if isinstance(installation_files, basestring):
installation_files = [installation_files]
with sudo_project():
path = virtualenv_path()
for installation_file in installation_files:
info('Installing requirements from file {}', installation_file)
with virtualenv.activate(path), cd(git_repository_path()):
installation_method = get_installation_method(installation_file)
if installation_method == 'pip':
if update_pip:
python.update_pip()
python.pip('install', '-r', installation_file)
elif installation_method == 'setuptools':
with cd(git_repository_path()):
run('python {} develop'.format(installation_file))
else:
raise ValueError(
'"{}" is not a valid installation file'.format(
installation_file))
def install_or_update_source():
"""
Try to install source, if already installed then update.
"""
new_install = install_source()
if not new_install:
update_source()
def install_source():
"""
Install git and clone application repository.
:return: True, if repository got cloned
"""
from .project import sudo_project, git_repository, git_root
with sudo():
git.install()
with sudo_project() as username:
path = git_root()
debian.mkdir(path, owner=username, group=username)
with cd(path):
repository = git_repository()
path, cloned = git.clone(repository['url'], branch=repository['branch'])
if cloned is None:
abort('Failed to install source, aborting!')
return cloned
def update_source():
"""
Update application repository with configured branch.
:return: tuple(previous commit, current commit)
"""
from .project import sudo_project, git_repository_path, git_repository
with sudo_project():
# Get current commit
path = git_repository_path()
previous_commit = git.get_commit(path, short=True)
# Update source from git (reset)
repository = git_repository()
current_commit = git.reset(repository['branch'],
repository_path=path,
ignore=blueprint.get('git_force_ignore'))
if current_commit is not None and current_commit != previous_commit:
info(indent('(new version)'))
else:
info(indent('(same commit)'))
return previous_commit, current_commit
def install_providers():
"""
Install application providers on current host.
"""
host = env.host_string
providers = get_providers(host)
for provider in providers.values():
if getattr(provider, 'manager', None) is not None:
provider.manager.install()
provider.install()
|
|
import ptypes, operator
from ptypes import *
from . import codestream, intofdata, dataofint
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)
### JFIF Markers
class Marker(codestream.Marker):
cache, table = {}, [
('SOF0', b'\xff\xc0'),
('SOF1', b'\xff\xc1'),
('SOF2', b'\xff\xc2'),
('SOF3', b'\xff\xc3'),
('DHT', b'\xff\xc4'),
('SOF5', b'\xff\xc5'),
('SOF6', b'\xff\xc6'),
('SOF7', b'\xff\xc7'),
('JPG', b'\xff\xc8'),
('SOF9', b'\xff\xc9'),
('SOF10', b'\xff\xca'),
('SOF11', b'\xff\xcb'),
('DAC', b'\xff\xcc'),
('SOF13', b'\xff\xcd'),
('SOF14', b'\xff\xce'),
('SOF15', b'\xff\xcf'),
('RST0', b'\xff\xd0'),
('RST1', b'\xff\xd1'),
('RST2', b'\xff\xd2'),
('RST3', b'\xff\xd3'),
('RST4', b'\xff\xd4'),
('RST5', b'\xff\xd5'),
('RST6', b'\xff\xd6'),
('RST7', b'\xff\xd7'),
('SOI', b'\xff\xd8'),
('EOI', b'\xff\xd9'),
('SOS', b'\xff\xda'),
('DQT', b'\xff\xdb'),
('DNL', b'\xff\xdc'),
('DRI', b'\xff\xdd'),
('DHP', b'\xff\xde'),
('EXP', b'\xff\xdf'),
('APP0', b'\xff\xe0'),
('APP1', b'\xff\xe1'),
('APP2', b'\xff\xe2'),
('APP3', b'\xff\xe3'),
('APP4', b'\xff\xe4'),
('APP5', b'\xff\xe5'),
('APP6', b'\xff\xe6'),
('APP7', b'\xff\xe7'),
('APP8', b'\xff\xe8'),
('APP9', b'\xff\xe9'),
('APP10', b'\xff\xea'),
('APP11', b'\xff\xeb'),
('APP12', b'\xff\xec'),
('APP13', b'\xff\xed'),
('APP14', b'\xff\xee'),
('APP15', b'\xff\xef'),
('JPG0', b'\xff\xf0'),
('JPG1', b'\xff\xf1'),
('JPG2', b'\xff\xf2'),
('JPG3', b'\xff\xf3'),
('JPG4', b'\xff\xf4'),
('JPG5', b'\xff\xf5'),
('JPG6', b'\xff\xf6'),
('SOF48', b'\xff\xf7'),
('LSE', b'\xff\xf8'),
('JPG9', b'\xff\xf9'),
('JPG10', b'\xff\xfa'),
('JPG11', b'\xff\xfb'),
('JPG12', b'\xff\xfc'),
('JPG13', b'\xff\xfd'),
('COM', b'\xff\xfe'),
]
class MarkerType(codestream.MarkerType):
_values_ = Marker.table
class StreamData(codestream.StreamData):
_fields_ = codestream.StreamData._fields_[:1] + [
(pint.uint_t, 'Lp')
] + codestream.StreamData._fields_[1:]
class StreamMarker(codestream.StreamMarker):
Type, Table = MarkerType, Marker
def __Type(self):
return self.Type
def __Value(self):
if self.blocksize() <= sum(self[fld].li.size() for fld in ['Type', 'Lp']):
return ptype.undefined
t, res = self.Table.withdefault(self['Type'].li.serialize()), self['Lp'].li
if issubclass(t, ptype.block):
return dyn.clone(t, length=res.int() - self['Type'].size())
return dyn.clone(t, blocksize=lambda self, cb=res.int() - self['Type'].size(): cb)
def __Extra(self):
fields = ['Type', 'Lp', 'Value']
t = dyn.block(self.blocksize() - sum(self[fld].li.size() for fld in fields))
if hasattr(self['Value'], 'EncodedQ') and self['Value'].EncodedQ():
return dyn.clone(codestream.ByteStuffer, _value_=t)
return t
_fields_ = [
(__Type, 'Type'),
(lambda self: pint.uint_t if self.blocksize() < 4 else pint.uint16_t, 'Lp'),
(__Value, 'Value'),
(__Extra, 'Extra'),
]
def alloc(self, **fields):
res = super(StreamMarker, self).alloc(**fields)
return res if operator.contains(fields, 'Lp') else res.set(Lp=res['Value'].size())
### Marker definitions
@Marker.define
class SOI(ptype.block):
pass
class SOF(pstruct.type):
class _Cn(pstruct.type):
class _F(pbinary.struct):
_fields_ = [
(4, 'H'),
(4, 'V'),
]
_fields_ = [
(pint.uint8_t, 'C'),
(_F, 'F'),
(pint.uint8_t, 'Tq')
]
_fields_ = [
(pint.uint8_t, 'P'),
(pint.uint16_t, 'Y'),
(pint.uint16_t, 'X'),
(pint.uint8_t, 'Nf'),
(lambda self, Cn=_Cn: dyn.array(Cn, self['Nf'].li.int()), 'Cn')
]
def alloc(self, **fields):
res = super(SOF, self).alloc(**fields)
return res if operator.contains(fields, 'Nf') else res.set(Nf=len(res['Cn']))
@Marker.define
class SOF0(SOF): pass
@Marker.define
class SOF1(SOF): pass
@Marker.define
class SOF2(SOF): pass
@Marker.define
class SOF3(SOF): pass
#@Marker.define
#class SOF4(SOF): pass
@Marker.define
class SOF5(SOF): pass
@Marker.define
class SOF6(SOF): pass
@Marker.define
class SOF7(SOF): pass
#@Marker.define
#class SOF8(SOF): pass
@Marker.define
class SOF9(SOF): pass
@Marker.define
class SOF10(SOF): pass
@Marker.define
class SOF11(SOF): pass
#@Marker.define
#class SOF12(SOF): pass
@Marker.define
class SOF13(SOF): pass
@Marker.define
class SOF14(SOF): pass
@Marker.define
class SOF15(SOF): pass
@Marker.define
class DQT(parray.block):
class Table(pstruct.type):
class _Y(pbinary.struct):
_fields_ = [
(4, 'Pq'),
(4, 'Tq')
]
class _Qk(parray.type):
length, _object_ = 64, pint.uint8_t
def matrix(self):
iterable = (item.int() for item in self)
rows = 8 * [iter(iterable)]
return [ list(item) for item in zip(*rows) ]
_fields_ = [
(_Y, 'Y'),
(_Qk, 'Qk')
]
def repr(self):
Fprefix = lambda instance, name: "[{:#x}] {:s}{:s}".format(instance.getoffset(), ptypes.utils.repr_instance(instance.classname(), name), " {{{:s}}}".format(','.join(u"{:s}={!r}".format(k, v) for k, v in instance.properties().items())) if instance.properties() else '')
res = ["{:s} {:s}".format(Fprefix(self['Y'], self['Y'].name()), self['Y'].summary())]
for index, Mk in enumerate(self['Qk'].matrix()):
offset = 8 * index
item = self['Qk'][offset : len(Mk) + offset]
row = ','.join(map("{:>3d}".format, Mk))
res.append("{:s} {:s} [{:s}]".format(Fprefix(item, "Qk[{:>2d}:{:<2d}]".format(offset, len(Mk) + offset - 1)), item.__element__(), row))
return '\n'.join(res)
_object_ = Table
@Marker.define
class DHT(parray.block):
class Table(pstruct.type):
class _Th(pbinary.struct):
class _Tc(pbinary.enum):
length, _values_ = 1, [
('DC', 0),
('AC', 1),
]
_fields_ = [
(3, 'Reserved'),
(_Tc, 'Tc'),
(4, 'Td'),
]
class _Li(parray.type):
length, _object_ = 16, pint.uint8_t
def summary(self):
iterable = map("{:+d}".format, (item.int() for item in self))
return "[ {:s} ]".format(', '.join(iterable))
class _Vij(parray.type):
length = 16
class V(parray.type):
_object_ = pint.uint8_t
def summary(self):
iterable = map("{:02x}".format, bytearray(self.serialize()))
return "symbols: ({:d}) {:s}".format(self.size(), ' '.join(iterable) or 'none')
_object_ = V
def repr(self):
Fprefix = lambda instance, name: "[{:#x}] {:s}{:s}".format(instance.getoffset(), ptypes.utils.repr_instance(instance.classname(), name), " {{{:s}}}".format(','.join(u"{:s}={!r}".format(k, v) for k, v in instance.properties().items())) if instance.properties() else '')
if len(self) > 1:
offset, res = 0, []
for index, symbols in enumerate(self):
if len(symbols) > 0:
iterable = map("{:02x}".format, bytearray(symbols.serialize()))
res.append("{:s} symbols of size {:+d}: ({:d}) {:s}".format(Fprefix(symbols, symbols.name()), index, len(symbols), ' '.join(iterable)))
offset += len(symbols)
return '\n'.join(res)
return super(DHT.Table._Vij, self).repr()
def __Vij(self):
count = [item.int() for item in self['Li'].li]
def _object_(self, _object_=self._Vij._object_, count=count):
return dyn.clone(_object_, length=count[len(self.value)])
return dyn.clone(self._Vij, _object_=_object_)
_fields_ = [
(_Th, 'Th'),
(_Li, 'Li'),
(__Vij, 'Vij')
]
def alloc(self, **fields):
res = super(DHT.Table, self).alloc(**fields)
if operator.contains(fields, 'Li'):
return res
            return res.set(Li=[item.size() for item in res['Vij']]) if isinstance(res['Vij'], parray.type) else res
_object_ = Table
@Marker.define
class SOS(pstruct.type):
class _Cs(pbinary.struct):
_fields_ = [
(8, 'Cs'),
(4, 'Td'),
(4, 'Ta')
]
class _A(pbinary.struct):
_fields_ = [
(4, 'Ah'),
(4, 'Al'),
]
_fields_ = [
(pint.uint8_t, 'Ns'),
(lambda self, Cs=_Cs: dyn.array(Cs, self['Ns'].li.int()), 'Csn'),
(pint.uint8_t, 'Ss'),
(pint.uint8_t, 'Se'),
(_A, 'A')
]
def alloc(self, **fields):
res = super(SOS, self).alloc(**fields)
return res if operator.contains(fields, 'Ns') else res.set(Ns=len(res['Csn']))
@classmethod
def EncodedQ(cls):
return True
#@Marker.define
class APP0(pstruct.type):
class _Format(pint.enum, pint.uint8_t):
_values_ = [
('JPEG format', 10),
('1 byte per pixel palettized', 11),
('3 byte per pixel RGB format', 13),
]
_fields_ = [
(pint.uint16_t, 'Length'),
(dyn.clone(pstr.string, length=5), 'Identifier'),
(_Format, 'Format'),
(ptype.undefined, 'Thumbnail'),
]
@Marker.define
class COM(ptype.block):
pass
@Marker.define
class DRI(pstruct.type):
_fields_ = [
(pint.uint16_t, 'Ri'),
]
@Marker.define
class EOI(ptype.block):
pass
### JFIF Structures
class extension_type(ptype.definition):
cache = {}
@extension_type.define
class X10(pstruct.type):
type = 0
class _C(pstruct.type):
_fields_ = [
(pint.uint8_t, 'Y'),
(pint.uint8_t, 'Cb'),
(pint.uint8_t, 'Cr'),
]
_fields_ = [
(pint.uint8_t, 'Nf'),
        (lambda self, _C=_C: dyn.array(_C, self['Nf'].li.int()), 'C'),
]
class RGB(pstruct.type):
_fields_ = [(pint.uint8_t, item) for item in 'RGB']
@extension_type.define
class X11(pstruct.type):
type = 1
_fields_ = [
(pint.uint8_t, 'HthumbnailB'),
(pint.uint8_t, 'VthumbnailB'),
(dyn.array(RGB, 0x100), 'Palette'),
(lambda self: dyn.block(self['HthumbnailB'].li.int() * self['VthumbnailB'].li.int()), 'm'),
]
@extension_type.define
class X13(pstruct.type):
type = 3
_fields_ = [
(pint.uint8_t, 'HthumbnailC'),
(pint.uint8_t, 'VthumbnailC'),
(lambda self: dyn.block(self['HthumbnailC'].li.int() * self['VthumbnailC'].li.int()), 'n'),
]
class APP0(pstruct.type):
def __extension_data(self):
res = self['extension_code'].li
return extension_type.lookup(res.int())
_fields_ = [
(dyn.block(5), 'identifier'),
(pint.uint8_t, 'extension_code'),
(__extension_data, 'extension_data'),
]
@Marker.define
class APP0(pstruct.type):
_fields_ = [
(dyn.block(5), 'identifier'),
(pint.uint16_t, 'version'),
(pint.uint8_t, 'units'),
(pint.uint16_t, 'Hdensity'),
(pint.uint16_t, 'Vdensity'),
(lambda self: ptype.undefined if self.blocksize() < 10 else pint.uint8_t, 'HthumbnailA'),
(lambda self: ptype.undefined if self.blocksize() < 11 else pint.uint8_t, 'VthumbnailA'),
(lambda self: ptype.undefined if self.blocksize() < 12 else dyn.array(RGB, self['HthumbnailA'].li.int() * self['VthumbnailA'].li.int()), 'k'),
]
class ImageResourceBlock(pstruct.type):
class _name(pstruct.type):
_fields_ = [
(pint.uint8_t, 'length'),
(lambda self: dyn.clone(pstr.string, length=1 + self['length'].li.int()), 'string'),
]
_fields_ = [
(dyn.clone(pstr.string, length=4), 'signature'),
(pint.uint16_t, 'identifier'),
(_name, 'name'),
(pint.uint32_t, 'size'),
(lambda self: dyn.block(self['size'].li.int()), 'data'),
(dyn.padding(2), 'padding(data)'),
]
@Marker.define
class APP13(pstruct.type):
def __extra(self):
bs, res = self.blocksize(), sum(self[fld].li.size() for fld in ['identifier', 'resources'])
return dyn.block(max(0, bs - res))
def __resources(self):
bs, res = self.blocksize(), self['identifier'].li.size()
return dyn.blockarray(ImageResourceBlock, bs - res)
_fields_ = [
(pstr.szstring, 'identifier'),
(__resources, 'resources'),
(__extra, 'extra'),
]
class File(codestream.Stream):
class _object_(codestream.DecodedStream):
Element, Data = StreamMarker, StreamData
def StartOfDataMarkerQ(self, marker):
# If we see the SOS code, then that's our data marker and the rest of our data
# are all compressed scanlines.
return intofdata(marker) in {0xffda}
def DataMarkerQ(self, marker):
res = intofdata(marker)
if 0xffc0 <= res < 0xffff:
# FIXME: it'd be nice if we could split our image data across these RST codes, but
# our codestream.Stream implementation doesn't support it.
return res not in {0xffd0, 0xffd1, 0xffd2, 0xffd3, 0xffd4, 0xffd5, 0xffd6, 0xffd7}
return False
def EndOfDataMarkerQ(self, marker):
# This EOI marker represents the end of our image data.
return intofdata(marker) in {0xffd9}
def _value_(self):
return dyn.clone(ptype.block, length=self.source.size())
if __name__ == '__main__':
import sys, ptypes, image.jpeg.jfif as jfif
source = ptypes.setsource(ptypes.prov.file(sys.argv[1], 'rb'))
# Read the contents of the jfif file as an individual stream
z = jfif.File(source=source)
z = z.l
# Decode the jfif's codestream into its separate chunks
a = z.d
a = a.l
if False:
#input = getFileContents('Q100-2.JPG')
input = getFileContents('huff_simple0.jpg')
input = bytes(input.replace(b'\xff\x00', b'\xff'))
jpegfile = File()
jpegfile.deserialize(input)
lookup = {type(item).__name__ : item for item in jpegfile}
print(jpegfile[0])
print(jpegfile[1])
else:
lookup = {}
if all(operator.contains(lookup, name) for name in ['DQT', 'DHT', 'SCANDATA']):
print('\n'.join(map("{!r}".format, jpegfile)))
dqt = lookup['DQT']['table']
dht = lookup['DHT']['table']
sosdata = lookup['SCANDATA']
print("{!r}".format(dqt))
print("{!r}".format(dht))
print("{!r}".format(sosdata))
print('\n'.join(map("{!r}".format, dht)))
print('\n'.join(map("{!r}".format, dqt)))
### load_quant_table
if operator.contains(lookup, 'DQT'):
zigzag = [
0, 1, 5, 6,14,15,27,28,
2, 4, 7,13,16,26,29,42,
3, 8,12,17,25,30,41,43,
9,11,18,24,31,40,44,53,
10,19,23,32,39,45,52,54,
20,22,33,38,46,51,55,60,
21,34,37,47,50,56,59,61,
35,36,48,49,57,58,62,63
]
scalefactor = [
1.0, 1.387039845, 1.306562965, 1.175875602,
1.0, 0.785694958, 0.541196100, 0.275899379
]
self = lookup['DQT']['table'][0]
quantizationTable = bytearray(self['value'].serialize())
res, table = [], iter(quantizationTable)
for y in range(8):
for x in range(8):
res.append( next(table) * scalefactor[y] * scalefactor[x] )
continue
scaledQuantizationTable = res
### decode_huffman ->
### decode AC coefficient
### decode DC coefficient
## process dht table
if operator.contains(lookup, 'DHT'):
self = lookup['DHT']['table'][3]
print("{!r}".format(self))
### process scan data
if operator.contains(lookup, 'SOS'):
self = lookup['SOS']
print("{!r}".format(self))
print(self['component'][0])
self = lookup['SOS']
if operator.contains(lookup, 'SOF'):
self = lookup['SOF']
|
|
# (c) 2017, XYSec Labs
from orm_choices import choices_with_unknown as choices
from orm_choices import choices as choices_without_unknown
from ak_vendor.constants import (
RISK_ENUM_PASSED,
RISK_ENUM_LOW,
RISK_ENUM_MEDIUM,
RISK_ENUM_HIGH,
RISK_ENUM_CRITICAL,
)
@choices
class ProductEnum:
class Meta:
APPKNOX = [0, "Appknox"]
DEVKNOX = [1, "Devknox"]
@choices
class AppactionEnum:
class Meta:
NO_PREFERENCE = [0, "NO_PREFERENCE"]
HALT = [1, "HALT"]
PROCEED = [2, "PROCEED"]
@choices
class OrganizationRolesEnum:
class Meta:
MEMBER = [0, "MEMBER"]
OWNER = [1, "OWNER"]
ADMIN = [2, "ADMIN"]
@choices_without_unknown
class OWASPEnum:
"""
OWASP Enum
"""
class Meta:
A1_2013 = ["A1_2013", "A1_2013"]
A2_2013 = ["A2_2013", "A2_2013"]
A3_2013 = ["A3_2013", "A3_2013"]
A4_2013 = ["A4_2013", "A4_2013"]
A5_2013 = ["A5_2013", "A5_2013"]
A6_2013 = ["A6_2013", "A6_2013"]
A7_2013 = ["A7_2013", "A7_2013"]
A8_2013 = ["A8_2013", "A8_2013"]
A9_2013 = ["A9_2013", "A9_2013"]
A10_2013 = ["A10_2013", "A10_2013"]
M1_2016 = ["M1_2016", "M1_2016"]
M2_2016 = ["M2_2016", "M2_2016"]
M3_2016 = ["M3_2016", "M3_2016"]
M4_2016 = ["M4_2016", "M4_2016"]
M5_2016 = ["M5_2016", "M5_2016"]
M6_2016 = ["M6_2016", "M6_2016"]
M7_2016 = ["M7_2016", "M7_2016"]
M8_2016 = ["M8_2016", "M8_2016"]
M9_2016 = ["M9_2016", "M9_2016"]
M10_2016 = ["M10_2016", "M10_2016"]
@choices
class PlatformEnum:
"""
Platform Type Enum
"""
class Meta:
ANDROID = [0, "Android"]
IOS = [1, "iOS"]
WINDOWS = [2, "Windows"]
COMMON = [3, "common (meta)"]
@choices
class DeviceTypeEnum:
"""
Device type enum
"""
class Meta:
NO_PREFERENCE = [0, "No Preference"]
PHONE_REQUIRED = [1, "Phone Required"]
TABLET_REQUIRED = [2, "Tablet Required"]
@choices_without_unknown
class DynamicStatusEnum:
"""
The Dynamic scanning status
"""
class Meta:
ERROR = [-1, "Error"]
NONE = [0, "None"]
INQUEUE = [1, "In Queue"]
BOOTING = [2, "Booting"]
DOWNLOADING = [3, "Downloading Package"]
INSTALLING = [4, "Installing Package"]
LAUNCHING = [5, "Launching App"]
HOOKING = [6, "Hooking"]
READY = [7, "Ready"]
SHUTTING_DOWN = [8, "Shutting Down"]
COMPLETED = [9, "Completed"]
@choices
class RiskEnum:
"""
The risk level that is associated with an analysis
"""
class Meta:
PASSED = [RISK_ENUM_PASSED, "Passed"]
LOW = [RISK_ENUM_LOW, "Low"]
MEDIUM = [RISK_ENUM_MEDIUM, "Medium"]
HIGH = [RISK_ENUM_HIGH, "High"]
CRITICAL = [RISK_ENUM_CRITICAL, "Critical"]
@choices
class AnalysisEnum:
"""
The status of the analysis
"""
class Meta:
ERROR = [0, "Error"]
WAITING = [1, "Waiting"]
RUNNING = [2, "Running"]
COMPLETED = [3, "Completed"]
@choices
class ManualEnum:
"""
Manual Assessment request state
"""
class Meta:
NONE = [0, "None"]
REQUESTED = [1, "Requested"]
ASSESSING = [2, "Assessing"]
DONE = [3, "Done"]
@choices
class NotifyEnum:
class Meta:
INFO = [0, "Info"]
SUCCESS = [2, "Success"]
WARNING = [3, "Warning"]
ALERT = [4, "Alert"]
ERROR = [5, "Error"]
@choices
class SubmissionStatusEnum:
"""
Submission status enum
"""
class Meta:
DOWNLOAD_PREPARE = [0, "Preparing to download the URL"]
DOWNLOADING = [1, "Downloading the URL"]
DOWNLOAD_FAILED = [2, "Failed to download the URL"]
VALIDATE_PREPARE = [3, "Preparing to validate the file"]
VALIDATING = [4, "Validating the file"]
VALIDATE_FAILED = [5, "Failed to validate the file"]
ANALYZE_PREPARE = [6, "Preparing to analyze the file"]
ANALYZING = [7, "The file is being analyzed"]
@choices
class SubmissionSourceEnum:
"""
Submission Source Enum
"""
class Meta:
UPLOAD = [0, "Upload"]
STORE = [1, "Store"]
SCM = [2, "Source Code Management"]
DEVKNOX = [3, "Devknox"]
@choices
class CollaborationRoleEnum:
"""
User project role
"""
class Meta:
ADMIN = [0, "Admin"]
MANAGER = [1, "Manager"]
READ_ONLY = [2, "Read Only"]
@choices
class ContactSourceEnum:
"""
Enum to identify the contact source
"""
class Meta:
HOME_PAGE = [0, "Home Page"]
CONTACT_US = [1, "Contact Us"]
@choices
class ContactStatusEnum:
"""
Enum to recognize the status of the contact
"""
class Meta:
DEAD = [0, "Dead"]
LIVE = [1, "Live"]
@choices
class ContactValidityEnum:
"""
Enum to differentiate between contacts
with valid info and invalid info.
"""
class Meta:
URL_INVALID = [0, "URL Invalid"]
EMAIL_INVALID = [1, "Email Invalid"]
NEW_NAME_SPACE = [2, "New Namespace"]
ALL_VALID = [3, "All Valid"]
@choices
class PaymentSourceEnum:
"""
Where did the payment take place?
"""
class Meta:
PAYPAL = [1, "Paypal"]
STRIPE_MANUAL = [2, "Stripe Manual"]
BANK_TRANSFER = [3, "Bank Transfer"]
MANUAL = [4, "Manual"]
STRIPE_RECURRING = [5, "Stripe Recurring"]
@choices
class PaymentDurationEnum:
"""
Duration - Yearly/Monthly
NOTE:
    This is a special case of an enum: each value represents a number of
    months rather than a sequential number. This way, the codebase stays
    much smaller and cleaner.
    I know I asked that no one change ENUMS after deployment, but I am
    doing this only because I know for a fact that no one has used
    Yearly. Just verified.
    -dhilipsiva
"""
class Meta:
MONTHLY = [1, "Monthly"] # 1 Month
        QUARTERLY = [3, "Quarterly"]  # 3 Months
HALFYEARLY = [6, "Halfyearly"] # 6 months
YEARLY = [10, "Yearly"] # 10 months + 2 free months
@staticmethod
def days_for_duration(duration):
"""
Number of days for given duration
"""
months = duration
DAYS = 31
if duration == PaymentDurationEnum.YEARLY:
months = 12
# 31 days because Appknox is gracious
return months * DAYS
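    # Illustration (not part of the enum): with this month-count encoding,
    # days_for_duration(PaymentDurationEnum.QUARTERLY) returns 3 * 31 = 93 days,
    # and YEARLY is special-cased above to 12 * 31 = 372 days.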
@choices
class VulnerabilityTypeEnum:
"""
Vulnerability Type
"""
class Meta:
STATIC = [1, "Static"]
DYNAMIC = [2, "Dynamic"]
MANUAL = [3, "Manual"]
API = [4, "API"]
@choices
class ConfidenceEnum:
"""
Confidence about the occurrence of a vulnerability
"""
class Meta:
LOW = [1, "Low"]
MEDIUM = [2, "Medium"]
HIGH = [3, "High"]
@choices
class UserTypeEnum:
"""
    User Types
"""
class Meta:
APPKNOX = [1, "Appknox"]
DEVKNOX = [2, "Devknox"]
@choices
class UserRoleEnum:
"""
Role of User at Appknox
"""
class Meta:
CO_FOUNDER = [1, "Co-Founder"]
EMPLOYEE = [2, "Employee"]
PARTNER = [3, "Partner"]
REGULAR = [4, "Regular"]
@choices
class UserDepartmentEnum:
"""
The Department that the user belongs to
"""
class Meta:
TECHNOLOGY = [1, "Technology"]
SECURITY = [2, "Security"]
SALES = [3, "Sales"]
MARKETING = [4, "Marketing"]
DESIGN = [5, "Design"]
@choices
class AttackVectorEnum:
"""
CVSSv3 attack vector
"""
class Meta:
NETWORK = ["N", "Network"]
ADJACENT = ["A", "Adjacent"]
LOCAL = ["L", "Local"]
PHYSICAL = ["P", "Physical"]
@choices
class AttackComplexityEnum:
"""
CVSSv3 attack complexity
"""
class Meta:
LOW = ["L", "Low"]
HIGH = ["H", "High"]
@choices
class PrivilegesRequiredEnum:
"""
CVSSv3 privileges required
"""
class Meta:
NONE = ["N", "None"]
LOW = ["L", "Low"]
HIGH = ["H", "High"]
@choices
class UserInteractionEnum:
"""
CVSSv3 user interaction
"""
class Meta:
NOT_REQUIRED = ["N", "Not Required"]
REQUIRED = ["R", "Required"]
@choices
class ScopeEnum:
"""
CVSSv3 scope
"""
class Meta:
UNCHANGED = ["U", "Unchanged"]
CHANGED = ["C", "Changed"]
@choices
class ConfidentialityImpactEnum:
"""
CVSSv3 confidentiality impact
"""
class Meta:
HIGH = ["H", "High"]
LOW = ["L", "Low"]
NONE = ["N", "None"]
@choices
class IntegrityImpactEnum:
"""
CVSSv3 integrity impact
"""
class Meta:
HIGH = ["H", "High"]
LOW = ["L", "Low"]
NONE = ["N", "None"]
@choices
class AvailabilityImpactEnum:
"""
CVSSv3 availability impact
"""
class Meta:
HIGH = ["H", "High"]
LOW = ["L", "Low"]
NONE = ["N", "None"]
@choices
class MFAMethodEnum:
"""
Multi-factor authentication method
"""
class Meta:
NONE = [0, "None"]
TOTP = [1, "TOTP"]
HOTP = [2, "HOTP"]
@choices
class FileFormatEnum:
"""
The file types.
"""
class Meta:
ANDROID_APK = [0, "APK"]
ANDROID_AAB = [1, "AAB"]
IOS_IPA = [2, "IPA"]
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.meshfix as mf
import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
import nipype.algorithms.misc as misc
from nipype.interfaces.utility import Function
from nipype.workflows.misc.utils import region_list_from_volume, id_list_from_lookup_table
import os, os.path as op
def get_aparc_aseg(files):
"""Return the aparc+aseg.mgz file"""
for name in files:
if 'aparc+aseg' in name:
return name
raise ValueError('aparc+aseg.mgz not found')
def create_getmask_flow(name='getmask', dilate_mask=True):
"""Registers a source file to freesurfer space and create a brain mask in
source space
Requires fsl tools for initializing registration
Parameters
----------
name : string
name of workflow
dilate_mask : boolean
indicates whether to dilate mask or not
Example
-------
>>> getmask = create_getmask_flow()
>>> getmask.inputs.inputspec.source_file = 'mean.nii'
>>> getmask.inputs.inputspec.subject_id = 's1'
>>> getmask.inputs.inputspec.subjects_dir = '.'
>>> getmask.inputs.inputspec.contrast_type = 't2'
Inputs::
inputspec.source_file : reference image for mask generation
inputspec.subject_id : freesurfer subject id
inputspec.subjects_dir : freesurfer subjects directory
inputspec.contrast_type : MR contrast of reference image
Outputs::
outputspec.mask_file : binary mask file in reference image space
outputspec.reg_file : registration file that maps reference image to
freesurfer space
outputspec.reg_cost : cost of registration (useful for detecting misalignment)
"""
"""
Initialize the workflow
"""
getmask = pe.Workflow(name=name)
"""
Define the inputs to the workflow.
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['source_file',
'subject_id',
'subjects_dir',
'contrast_type']),
name='inputspec')
"""
Define all the nodes of the workflow:
fssource: used to retrieve aseg.mgz
threshold : binarize aseg
register : coregister source file to freesurfer space
voltransform: convert binarized aseg to source file space
"""
fssource = pe.Node(nio.FreeSurferSource(),
name = 'fssource')
threshold = pe.Node(fs.Binarize(min=0.5, out_type='nii'),
name='threshold')
register = pe.MapNode(fs.BBRegister(init='fsl'),
iterfield=['source_file'],
name='register')
voltransform = pe.MapNode(fs.ApplyVolTransform(inverse=True),
iterfield=['source_file', 'reg_file'],
name='transform')
"""
Connect the nodes
"""
getmask.connect([
(inputnode, fssource, [('subject_id','subject_id'),
('subjects_dir','subjects_dir')]),
(inputnode, register, [('source_file', 'source_file'),
('subject_id', 'subject_id'),
('subjects_dir', 'subjects_dir'),
('contrast_type', 'contrast_type')]),
(inputnode, voltransform, [('subjects_dir', 'subjects_dir'),
('source_file', 'source_file')]),
(fssource, threshold, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),
(register, voltransform, [('out_reg_file','reg_file')]),
(threshold, voltransform, [('binary_file','target_file')])
])
"""
Add remaining nodes and connections
dilate : dilate the transformed file in source space
threshold2 : binarize transformed file
"""
threshold2 = pe.MapNode(fs.Binarize(min=0.5, out_type='nii'),
iterfield=['in_file'],
name='threshold2')
if dilate_mask:
threshold2.inputs.dilate = 1
getmask.connect([
(voltransform, threshold2, [('transformed_file', 'in_file')])
])
"""
    Set up an outputnode that defines the relevant outputs of the workflow.
"""
outputnode = pe.Node(niu.IdentityInterface(fields=["mask_file",
"reg_file",
"reg_cost"
]),
name="outputspec")
getmask.connect([
(register, outputnode, [("out_reg_file", "reg_file")]),
(register, outputnode, [("min_cost_file", "reg_cost")]),
(threshold2, outputnode, [("binary_file", "mask_file")]),
])
return getmask
def create_get_stats_flow(name='getstats', withreg=False):
"""Retrieves stats from labels
Parameters
----------
name : string
name of workflow
withreg : boolean
indicates whether to register source to label
Example
-------
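    A minimal sketch (filenames are illustrative):
    >>> getstats = create_get_stats_flow()
    >>> getstats.inputs.inputspec.source_file = 'mean.nii'
    >>> getstats.inputs.inputspec.label_file = 'aparc+aseg.nii'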
Inputs::
inputspec.source_file : reference image for mask generation
inputspec.label_file : label file from which to get ROIs
(optionally with registration)
        inputspec.reg_file : bbreg file (assumes reg from source to label)
inputspec.inverse : boolean whether to invert the registration
inputspec.subjects_dir : freesurfer subjects directory
Outputs::
outputspec.stats_file : stats file
"""
"""
Initialize the workflow
"""
getstats = pe.Workflow(name=name)
"""
Define the inputs to the workflow.
"""
if withreg:
inputnode = pe.Node(niu.IdentityInterface(fields=['source_file',
'label_file',
'reg_file',
'subjects_dir']),
name='inputspec')
else:
inputnode = pe.Node(niu.IdentityInterface(fields=['source_file',
'label_file']),
name='inputspec')
statnode = pe.MapNode(fs.SegStats(),
iterfield=['segmentation_file','in_file'],
name='segstats')
"""
Convert between source and label spaces if registration info is provided
"""
if withreg:
voltransform = pe.MapNode(fs.ApplyVolTransform(inverse=True),
iterfield=['source_file', 'reg_file'],
name='transform')
getstats.connect(inputnode, 'reg_file', voltransform, 'reg_file')
getstats.connect(inputnode, 'source_file', voltransform, 'source_file')
getstats.connect(inputnode, 'label_file', voltransform, 'target_file')
getstats.connect(inputnode, 'subjects_dir', voltransform, 'subjects_dir')
def switch_labels(inverse, transform_output, source_file, label_file):
if inverse:
return transform_output, source_file
else:
return label_file, transform_output
chooser = pe.MapNode(niu.Function(input_names = ['inverse',
'transform_output',
'source_file',
'label_file'],
output_names = ['label_file',
'source_file'],
function=switch_labels),
iterfield=['transform_output','source_file'],
name='chooser')
getstats.connect(inputnode,'source_file', chooser, 'source_file')
getstats.connect(inputnode,'label_file', chooser, 'label_file')
getstats.connect(inputnode,'inverse', chooser, 'inverse')
getstats.connect(voltransform, 'transformed_file', chooser, 'transform_output')
getstats.connect(chooser, 'label_file', statnode, 'segmentation_file')
getstats.connect(chooser, 'source_file', statnode, 'in_file')
else:
getstats.connect(inputnode, 'label_file', statnode, 'segmentation_file')
getstats.connect(inputnode, 'source_file', statnode, 'in_file')
"""
    Set up an outputnode that defines the relevant outputs of the workflow.
"""
outputnode = pe.Node(niu.IdentityInterface(fields=["stats_file"
]),
name="outputspec")
getstats.connect([
(statnode, outputnode, [("summary_file", "stats_file")]),
])
return getstats
def create_tessellation_flow(name='tessellate', out_format='stl'):
"""Tessellates the input subject's aseg.mgz volume and returns
the surfaces for each region in stereolithic (.stl) format
Example
-------
>>> from nipype.workflows.smri.freesurfer import create_tessellation_flow
>>> tessflow = create_tessellation_flow()
>>> tessflow.inputs.inputspec.subject_id = 'subj1'
>>> tessflow.inputs.inputspec.subjects_dir = '.'
>>> tessflow.inputs.inputspec.lookup_file = 'FreeSurferColorLUT.txt' # doctest: +SKIP
>>> tessflow.run() # doctest: +SKIP
Inputs::
inputspec.subject_id : freesurfer subject id
inputspec.subjects_dir : freesurfer subjects directory
inputspec.lookup_file : lookup file from freesurfer directory
Outputs::
outputspec.meshes : output region meshes in (by default) stereolithographic (.stl) format
"""
"""
Initialize the workflow
"""
tessflow = pe.Workflow(name=name)
"""
Define the inputs to the workflow.
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['subject_id',
'subjects_dir',
'lookup_file']),
name='inputspec')
"""
Define all the nodes of the workflow:
fssource: used to retrieve aseg.mgz
mri_convert : converts aseg.mgz to aseg.nii
tessellate : tessellates regions in aseg.mgz
surfconvert : converts regions to stereolithographic (.stl) format
smoother: smooths the tessellated regions
"""
fssource = pe.Node(nio.FreeSurferSource(),
name = 'fssource')
volconvert = pe.Node(fs.MRIConvert(out_type='nii'),
name = 'volconvert')
tessellate = pe.MapNode(fs.MRIMarchingCubes(),
iterfield=['label_value','out_file'],
name='tessellate')
surfconvert = pe.MapNode(fs.MRIsConvert(out_datatype='stl'),
iterfield=['in_file'],
name='surfconvert')
smoother = pe.MapNode(mf.MeshFix(),
iterfield=['in_file1'],
name='smoother')
if out_format == 'gii':
stl_to_gifti = pe.MapNode(fs.MRIsConvert(out_datatype=out_format),
iterfield=['in_file'],
name='stl_to_gifti')
smoother.inputs.save_as_stl = True
smoother.inputs.laplacian_smoothing_steps = 1
region_list_from_volume_interface = Function(input_names=["in_file"],
output_names=["region_list"],
function=region_list_from_volume)
id_list_from_lookup_table_interface = Function(input_names=["lookup_file", "region_list"],
output_names=["id_list"],
function=id_list_from_lookup_table)
region_list_from_volume_node = pe.Node(interface=region_list_from_volume_interface, name='region_list_from_volume_node')
id_list_from_lookup_table_node = pe.Node(interface=id_list_from_lookup_table_interface, name='id_list_from_lookup_table_node')
"""
Connect the nodes
"""
tessflow.connect([
(inputnode, fssource, [('subject_id','subject_id'),
('subjects_dir','subjects_dir')]),
(fssource, volconvert, [('aseg', 'in_file')]),
(volconvert, region_list_from_volume_node, [('out_file', 'in_file')]),
(region_list_from_volume_node, tessellate, [('region_list', 'label_value')]),
(region_list_from_volume_node, id_list_from_lookup_table_node, [('region_list', 'region_list')]),
(inputnode, id_list_from_lookup_table_node, [('lookup_file', 'lookup_file')]),
(id_list_from_lookup_table_node, tessellate, [('id_list', 'out_file')]),
(fssource, tessellate, [('aseg', 'in_file')]),
(tessellate, surfconvert, [('surface','in_file')]),
(surfconvert, smoother, [('converted','in_file1')]),
])
"""
    Set up an outputnode that defines the relevant outputs of the workflow.
"""
outputnode = pe.Node(niu.IdentityInterface(fields=["meshes"]),
name="outputspec")
if out_format == 'gii':
tessflow.connect([
(smoother, stl_to_gifti, [("mesh_file", "in_file")]),
])
tessflow.connect([
(stl_to_gifti, outputnode, [("converted", "meshes")]),
])
else:
tessflow.connect([
(smoother, outputnode, [("mesh_file", "meshes")]),
])
return tessflow
|
|
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nurse rostering in Google CP Solver.
This is a simple nurse rostering model using a DFA and
my decomposition of regular constraint.
The DFA is from MiniZinc Tutorial, Nurse Rostering example:
- one day off every 4 days
- no 3 nights in a row.
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
from collections import defaultdict
#
# Global constraint regular
#
# This is a translation of MiniZinc's regular constraint (defined in
# lib/zinc/globals.mzn), via the Comet code referred to above.
# All comments are from the MiniZinc code.
# '''
# The sequence of values in array 'x' (which must all be in the range 1..S)
# is accepted by the DFA of 'Q' states with input 1..S and transition
# function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
# (which must be in 1..Q) and accepting states 'F' (which all must be in
# 1..Q). We reserve state 0 to be an always failing state.
# '''
#
# x : IntVar array
# Q : number of states
# S : input_max
# d : transition matrix
# q0: initial state
# F : accepting states
def regular(x, Q, S, d, q0, F):
solver = x[0].solver()
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
# Comet: int d2[0..Q, 1..S]
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
row.append(0)
else:
row.append(d[i - 1][j])
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
# x[i]. If a[n] is in F, then we succeed (ie. accept the
# string).
x_range = range(0, len(x))
m = 0
n = len(x)
a = [solver.IntVar(0, Q + 1, 'a[%i]' % i) for i in range(m, n + 1)]
# Check that the final state is in F
solver.Add(solver.MemberCt(a[-1], F))
# First state is q0
solver.Add(a[m] == q0)
for i in x_range:
solver.Add(x[i] >= 1)
solver.Add(x[i] <= S)
# Determine a[i+1]: a[i+1] == d2[a[i], x[i]]
solver.Add(
a[i + 1] == solver.Element(d2_flatten, ((a[i]) * S) + (x[i] - 1)))
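# A minimal plain-Python sketch (added for illustration) of the acceptance
# check that the regular() constraint above encodes: walk the DFA with
# 1-based states and inputs, treating state 0 as the always-failing state,
# exactly as the d2 padding does.
def dfa_accepts(seq, d, q0, F):
  state = q0
  for value in seq:
    state = d[state - 1][value - 1] if state > 0 else 0
  return state in F
# Example with the nurse-rostering DFA defined in main() below:
#   dfa_accepts([1, 1, 1, 3], transition_fn, 1, [1, 2, 3, 4, 5, 6])  # True
#   dfa_accepts([2, 2, 2, 3], transition_fn, 1, [1, 2, 3, 4, 5, 6])  # False (3 nights in a row)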
def main():
# Create the solver.
solver = pywrapcp.Solver('Nurse rostering using regular')
#
# data
#
# Note: If you change num_nurses or num_days,
# please also change the constraints
# on nurse_stat and/or day_stat.
num_nurses = 7
num_days = 14
day_shift = 1
night_shift = 2
off_shift = 3
shifts = [day_shift, night_shift, off_shift]
# the DFA (for regular)
n_states = 6
input_max = 3
initial_state = 1 # 0 is for the failing state
accepting_states = [1, 2, 3, 4, 5, 6]
transition_fn = [
# d,n,o
[2, 3, 1], # state 1
[4, 4, 1], # state 2
[4, 5, 1], # state 3
[6, 6, 1], # state 4
[6, 0, 1], # state 5
[0, 0, 1] # state 6
]
days = ['d', 'n', 'o'] # for presentation
#
# declare variables
#
x = {}
for i in range(num_nurses):
for j in range(num_days):
x[i, j] = solver.IntVar(shifts, 'x[%i,%i]' % (i, j))
x_flat = [x[i, j] for i in range(num_nurses) for j in range(num_days)]
# summary of the nurses
nurse_stat = [solver.IntVar(0, num_days, 'nurse_stat[%i]' % i)
for i in range(num_nurses)]
# summary of the shifts per day
day_stat = {}
for i in range(num_days):
for j in shifts:
day_stat[i, j] = solver.IntVar(0, num_nurses, 'day_stat[%i,%i]' % (i, j))
day_stat_flat = [day_stat[i, j] for i in range(num_days) for j in shifts]
#
# constraints
#
for i in range(num_nurses):
reg_input = [x[i, j] for j in range(num_days)]
regular(reg_input, n_states, input_max, transition_fn,
initial_state, accepting_states)
#
# Statistics and constraints for each nurse
#
for i in range(num_nurses):
# number of worked days (day or night shift)
b = [solver.IsEqualCstVar(x[i, j], day_shift) +
solver.IsEqualCstVar(x[i, j], night_shift)
for j in range(num_days)]
solver.Add(nurse_stat[i] == solver.Sum(b))
# Each nurse must work between 7 and 10
# days during this period
solver.Add(nurse_stat[i] >= 7)
solver.Add(nurse_stat[i] <= 10)
#
# Statistics and constraints for each day
#
for j in range(num_days):
for t in shifts:
b = [solver.IsEqualCstVar(x[i, j], t)
for i in range(num_nurses)]
solver.Add(day_stat[j, t] == solver.Sum(b))
#
# Some constraints for this day:
#
    # Note: We have strict requirements on
    # the number of shifts.
# Using atleast constraints is much harder
# in this model.
#
if j % 7 == 5 or j % 7 == 6:
# special constraints for the weekends
solver.Add(day_stat[j, day_shift] == 2)
solver.Add(day_stat[j, night_shift] == 1)
solver.Add(day_stat[j, off_shift] == 4)
else:
# workdays:
# - exactly 3 on day shift
solver.Add(day_stat[j, day_shift] == 3)
# - exactly 2 on night
solver.Add(day_stat[j, night_shift] == 2)
      # - exactly 2 off duty
solver.Add(day_stat[j, off_shift] == 2)
#
# solution and search
#
db = solver.Phase(day_stat_flat + x_flat + nurse_stat,
solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
for i in range(num_nurses):
print 'Nurse%i: ' % i,
this_day_stat = defaultdict(int)
for j in range(num_days):
d = days[x[i, j].Value() - 1]
this_day_stat[d] += 1
print d,
print ' day_stat:', [(d, this_day_stat[d]) for d in this_day_stat],
print 'total:', nurse_stat[i].Value(), 'workdays'
print
print 'Statistics per day:'
for j in range(num_days):
print 'Day%2i: ' % j,
for t in shifts:
print day_stat[j, t].Value(),
print
print
# We just show 2 solutions
if num_solutions >= 2:
break
solver.EndSearch()
print
print 'num_solutions:', num_solutions
print 'failures:', solver.Failures()
print 'branches:', solver.Branches()
print 'WallTime:', solver.WallTime(), 'ms'
if __name__ == '__main__':
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations(object):
"""WebApplicationFirewallPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Lists all of the protection policies within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
"""Gets all the WAF policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def get(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
policy_name, # type: str
parameters, # type: "_models.WebApplicationFirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.WebApplicationFirewallPolicy"
"""Creates or update policy with specified rule set name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:param parameters: Policy to be created.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
|
|
"""
Author: Armon Dadgar
Start Date: April 7th, 2009
Description:
This file provides a Python interface to low-level system calls on the Linux platform.
It is designed to abstract away the C-level detail and provide a high-level method of doing
common management tasks.
"""
import os # Provides some convenience functions
import nix_common_api as nix_api # Import the Common API
import textops # Import seattlelib's text processing lib
import portable_popen # For Popen
import platform
# Determine if we are 32 bit or 64 bit
running_32bit = True
architecture = platform.architecture()
if "64" in architecture[0]:
running_32bit = False
# Manually import the common functions we want
exists_outgoing_network_socket = nix_api.exists_outgoing_network_socket
exists_listening_network_socket = nix_api.exists_listening_network_socket
get_available_interfaces = nix_api.get_available_interfaces
# Libc
libc = nix_api.libc
# Functions
myopen = open # This is an annoying restriction of repy
syscall = libc.syscall # syscall function
# Globals
last_stat_data = None # Store the last array of data from _get_proc_info_by_pid
# Constants
JIFFIES_PER_SECOND = 100.0
PAGE_SIZE = os.sysconf('SC_PAGESIZE')
# Get the thread id of the currently executing thread
if running_32bit:
GETTID = 224
else:
GETTID = 186
# Maps each field in /proc/{pid}/stat to an index when split by spaces
FIELDS = {
"pid":0,
"state":1,
"ppid":2,
"pgrp":3,
"session":4,
"tty_nr":5,
"tpgid":6,
"flags":7,
"minflt":8,
"cminflt":9,
"majflt":10,
"cmajflt":11,
"utime":12,
"stime":13,
"cutime":14,
"cstime":15,
"priority":16,
"nice":17,
"num_threads":18,
"itrealvalue":19,
"starttime":20,
"vsize":21,
"rss":22,
"rlim":23,
"startcode":24,
"endcode":25,
"startstack":26,
"kstkesp":27,
"kstkeoip":28,
"signal":29,
"blocked":30,
"sigignore":31,
"sigcatch":32,
"wchan":33,
"nswap":34,
"cnswap":35,
"exit_signal":36,
"processor":37,
"rt_priority":38,
"policy":39,
"delayacct_blkio_ticks":40
}
# Process a /proc/PID/stat or /proc/PID/task/TID/stat file and returns it as an array
def _process_stat_file(file):
# Get the file in proc
fileo = myopen(file,"r")
# Read in all the data
data = fileo.read()
# Close the file object
fileo.close()
# Strip the newline
data = data.strip("\n")
# Remove the substring that says "(python)", since it changes the field alignment
start_index = data.find("(")
if start_index != -1:
end_index = data.find(")", start_index)
data = data[:start_index-1] + data[end_index+1:]
# Break the data into an array by spaces
return data.split(" ")
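# For example (illustrative only), a raw stat line such as
#   "1234 (python) S 1 ..."
# becomes "1234 S 1 ..." once the parenthesised command name is removed,
# so splitting on spaces lines up with the FIELDS indices defined above.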
def _get_proc_info_by_pid(pid):
"""
<Purpose>
Reads in the data from a process stat file, and stores it
<Arguments>
pid: The process identifier for which data should be fetched.
"""
global last_stat_data
# Get the file in proc
file = "/proc/"+str(pid)+"/stat"
# Process the status file
last_stat_data = _process_stat_file(file)
# Check the state, raise an exception if the process is a zombie
if "Z" in last_stat_data[FIELDS["state"]]:
raise Exception, "Queried Process is a zombie (dead)!"
def get_process_cpu_time(pid):
"""
<Purpose>
Returns the total CPU time used by a process.
<Arguments>
pid: The process identifier for the process to query.
<Returns>
The total cpu time.
"""
global last_stat_data
# Update our data
_get_proc_info_by_pid(pid)
# Get the raw usertime and system time
total_time_raw = int(last_stat_data[FIELDS["utime"]])+int(last_stat_data[FIELDS["stime"]])
# Adjust by the number of jiffies per second
total_time = total_time_raw / JIFFIES_PER_SECOND
return total_time
def get_process_rss(force_update=False, pid=None):
"""
<Purpose>
Returns the Resident Set Size of a process. By default, this will
return the information cached by the last call to _get_proc_info_by_pid.
This call is used in get_process_cpu_time.
<Arguments>
force_update:
Allows the caller to force a data update, instead of using the cached data.
pid:
If force_update is True, this parameter must be specified to force the update.
<Returns>
The RSS of the process in bytes.
"""
global last_stat_data
# Check if an update is being forced
if force_update and pid != None:
# Update the info
_get_proc_info_by_pid(pid)
# Fetch the RSS, convert to an integer
rss_pages = int(last_stat_data[FIELDS["rss"]])
rss_bytes = rss_pages * PAGE_SIZE
# Return the info
return rss_bytes
# Get the id of the currently executing thread
def _get_current_thread_id():
# Syscall for GETTID
return syscall(GETTID)
# Get the CPU time of the current thread
def get_current_thread_cpu_time():
"""
<Purpose>
Gets the total CPU time for the currently executing thread.
<Exceptions>
An exception will be raised if something goes wrong.
<Returns>
A floating amount of time in seconds.
"""
# Get the thread id
thread_id = _get_current_thread_id()
# Get our pid
pid = os.getpid()
# Get the file with our status
file = "/proc/"+str(pid)+"/task/"+str(thread_id)+"/stat"
# Process the status file
thread_stat_data = _process_stat_file(file)
# Get the raw usertime and system time
total_time_raw = int(thread_stat_data[FIELDS["utime"]])+int(thread_stat_data[FIELDS["stime"]])
# Adjust by the number of jiffies per second
total_time = total_time_raw / JIFFIES_PER_SECOND
# Return the total time
return total_time
def get_system_uptime():
"""
<Purpose>
Returns the system uptime.
<Exception>
Raises Exception if /proc/uptime is unavailable
<Returns>
The system uptime.
"""
if os.path.exists("/proc/uptime"):
# Open the file
fh = myopen('/proc/uptime', 'r')
# Read in the whole file
data = fh.read()
    # Split the file by spaces, grab the first number and convert to a float
uptime = float(data.split(" ")[0])
# Close the file
fh.close()
return uptime
else:
raise Exception, "Could not find /proc/uptime!"
def get_uptime_granularity():
"""
<Purpose>
Determines the granularity of the get_system_uptime call.
<Exception>
Raises Exception if /proc/uptime is unavailable
<Returns>
A numerical representation of the minimum granularity.
E.g. 2 digits of granularity would return 0.01
"""
if os.path.exists("/proc/uptime"):
# Open the file
fh = myopen('/proc/uptime', 'r')
# Read in the whole file
data = fh.read()
    # Split the file by spaces, grab the first number
uptime = data.split(" ")[0]
uptime_digits = len(uptime.split(".")[1])
# Close the file
fh.close()
granularity = uptime_digits
# Convert granularity to a number
return pow(10, 0-granularity)
else:
raise Exception, "Could not find /proc/uptime!"
def get_system_thread_count():
"""
<Purpose>
Returns the number of active threads running on the system.
<Returns>
The thread count.
"""
  # Use ps since it can get the info for us
process = portable_popen.Popen(["ps", "axH"])
ps_output, _ = process.communicate()
  # Subtract 1 from the number of lines because the first line is a table
# header: " PID TTY STAT TIME COMMAND"
threads = len(textops.textops_rawtexttolines(ps_output)) - 1
return threads
def get_interface_ip_addresses(interfaceName):
"""
<Purpose>
Returns the IP address associated with the interface.
<Arguments>
interfaceName: The string name of the interface, e.g. eth0
<Returns>
A list of IP addresses associated with the interface.
"""
  # Launch the command and get its output back.
# We use ifconfig with the interface name.
ifconfig_process = portable_popen.Popen(["/sbin/ifconfig", interfaceName.strip()])
ifconfig_output, _ = ifconfig_process.communicate()
ifconfig_lines = textops.textops_rawtexttolines(ifconfig_output)
# Look for ipv4 addresses
target_lines = textops.textops_grep("inet", ifconfig_lines)
# and not ipv6
target_lines = textops.textops_grep("inet6", target_lines, exclude=True)
# Only take the ip(s)
target_lines = textops.textops_cut(target_lines, delimiter=":", fields=[1])
target_lines = textops.textops_cut(target_lines, delimiter=" ", fields=[0])
# Create an array for the ip's
ipaddressList = []
for line in target_lines:
# Strip the newline and any spacing
line = line.strip("\n\t ")
ipaddressList.append(line)
  # Done, return the IP addresses
return ipaddressList
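# Minimal self-check sketch (added for illustration). This assumes the
# seattlelib modules imported above are available, since this module only
# runs inside that environment.
if __name__ == "__main__":
  my_pid = os.getpid()
  print "CPU time for pid %d: %f s" % (my_pid, get_process_cpu_time(my_pid))
  print "RSS: %d bytes" % get_process_rss(force_update=True, pid=my_pid)
  print "Uptime: %f s (granularity %s)" % (get_system_uptime(), get_uptime_granularity())
  print "System thread count:", get_system_thread_count()
  for interface in get_available_interfaces():
    print interface, get_interface_ip_addresses(interface)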
|
|
"""
Tests for lumpy.py.
"""
import os
import tempfile
from Bio import SeqIO
from Bio.Alphabet import generic_dna
from Bio.SeqFeature import FeatureLocation
from Bio.SeqFeature import SeqFeature
from django.conf import settings
from django.test import TestCase
import vcf
from main.models import AlignmentGroup
from main.models import Dataset
from main.models import ExperimentSample
from main.models import ExperimentSampleToAlignment
from main.models import get_dataset_with_type
from main.models import Variant
from main.model_utils import clean_filesystem_location
from main.testing_util import create_common_entities
from main.testing_util import create_sample_and_alignment
from pipeline.read_alignment import get_discordant_read_pairs
from pipeline.read_alignment import get_split_reads
from pipeline.variant_calling import find_variants_with_tool
from pipeline.variant_calling.lumpy import merge_lumpy_vcf
from pipeline.variant_calling.lumpy import run_lumpy
from pipeline.variant_calling.lumpy import process_vcf_post_l_merge
from pipeline.variant_calling import TOOL_LUMPY
from pipeline.variant_calling import VARIANT_TOOL_PARAMS_MAP
from utils.import_util import import_reference_genome_from_local_file
from variants.vcf_parser import parse_alignment_group_vcf
from variants.vcf_parser import SV_REF_VALUE
TEST_DATA_DIR = os.path.join(settings.PWD, 'test_data')
TEST_FASTA = os.path.join(TEST_DATA_DIR, 'fake_genome_and_reads',
'test_genome.fa')
TEST_DISC_SPLIT_BAM = os.path.join(settings.PWD, 'test_data',
'discordant_split_reads', 'bwa_align.bam')
# Test genomes generated using:
# https://github.com/churchlab/structural-variants-testing
DELETION_TEST_DATA_DIR = os.path.join(TEST_DATA_DIR,
'sv_testing', 'deletion_bd5a1123')
DELETION_REF = os.path.join(DELETION_TEST_DATA_DIR, 'small_ref.fa')
# NOTE: Generated below.
DELETION_REF_GENBANK = os.path.join(DELETION_TEST_DATA_DIR, 'small_ref.gb')
# Uncomment/modify to create test data.
# def _create_annotated_ref_genome():
# """Creates annotated ref genome.
# """
# with open(DELETION_REF) as fasta_fh:
# seq_record = SeqIO.read(fasta_fh, 'fasta', alphabet=generic_dna)
# seq_record.features = []
# feature = SeqFeature(
# FeatureLocation(9800, 10200, strand=1), type='CDS', id=1)
# feature.qualifiers['gene'] = ['geneX']
# seq_record.features.append(feature)
# with open(DELETION_REF_GENBANK, 'w') as fh:
# SeqIO.write(seq_record, fh, 'genbank')
# _create_annotated_ref_genome()
DELETION_FASTQ1 = os.path.join(DELETION_TEST_DATA_DIR, 'deletion_bd5a1123.1.fq')
DELETION_FASTQ2 = os.path.join(DELETION_TEST_DATA_DIR, 'deletion_bd5a1123.2.fq')
DELETION_SAMPLE_1_UID = '38d786f2'
DELETION_SAMPLE_1_BWA = os.path.join(DELETION_TEST_DATA_DIR,
'deletion_bd5a1123_sample_uid_38d786f2.bam')
DELETION_SAMPLE_1_UID = 'ds1'
DELETION_SAMPLE_1_BWA = os.path.join(DELETION_TEST_DATA_DIR,
'deletion_bd5a1123_ds1.bam')
DELETION_SAMPLE_2_UID = 'ds2'
DELETION_SAMPLE_2_BWA = os.path.join(DELETION_TEST_DATA_DIR,
'deletion_bd5a1123_ds2.bam')
DELETION_SAMPLE_3_UID = 'ds3'
DELETION_SAMPLE_3_BWA = os.path.join(DELETION_TEST_DATA_DIR,
'deletion_bd5a1123_ds3.bam')
DELETION_f8346a99_TEST_DATA_DIR = os.path.join(
TEST_DATA_DIR, 'sv_testing', 'deletion_f8346a99')
DELETION_SAMPLE_4_UID = 'f8346a99'
DELETION_SAMPLE_4_BWA = os.path.join(DELETION_f8346a99_TEST_DATA_DIR,
'deletion_f8346a99.bam')
INVERSION_TEST_DATA_DIR = os.path.join(
TEST_DATA_DIR, 'sv_testing', 'inversion_5a996d78')
INVERSION_REF = os.path.join(INVERSION_TEST_DATA_DIR, 'small_ref.fa')
INVERSION_SAMPLE_UID = 'group'
INVERSION_SAMPLE_BWA = os.path.join(INVERSION_TEST_DATA_DIR,
'inversion_5a996d78.bam')
L_MERGE_TEST_OUTPUT = os.path.join(
TEST_DATA_DIR, 'sv_testing', 'l_merge_test_data', 'l_merge_output.vcf')
class TestLumpy(TestCase):
def setUp(self):
self.common_data = create_common_entities()
self.project = self.common_data['project']
def test_run_lumpy(self):
TEST_SAMPLE_UID = '8c57e7b9'
# Create a ref genome.
self.reference_genome = import_reference_genome_from_local_file(
self.project, 'ref_genome', TEST_FASTA, 'fasta')
# Create a sample.
self.experiment_sample = ExperimentSample.objects.create(
uid=TEST_SAMPLE_UID, project=self.project, label='sample1')
# Create a new alignment group.
alignment_group = AlignmentGroup.objects.create(
label='test alignment', reference_genome=self.reference_genome)
self.alignment_group = alignment_group
# Create the expected models.
sample_alignment = ExperimentSampleToAlignment.objects.create(
alignment_group=alignment_group,
experiment_sample=self.experiment_sample)
bwa_dataset = Dataset.objects.create(
label=Dataset.TYPE.BWA_ALIGN,
type=Dataset.TYPE.BWA_ALIGN,
status=Dataset.STATUS.READY)
bwa_dataset.filesystem_location = clean_filesystem_location(
TEST_DISC_SPLIT_BAM)
bwa_dataset.save()
sample_alignment.dataset_set.add(bwa_dataset)
sample_alignment.save()
self.bwa_dataset = bwa_dataset
self.sample_alignment = sample_alignment
fasta_ref = get_dataset_with_type(
self.reference_genome,
Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
sample_alignments = [self.sample_alignment]
vcf_output_dir = self.alignment_group.get_model_data_dir()
vcf_output_filename = os.path.join(vcf_output_dir, 'lumpy.vcf')
alignment_type = 'BWA_ALIGN'
# NOTE: Running these functions but not checking results.
get_discordant_read_pairs(self.sample_alignment)
get_split_reads(self.sample_alignment)
run_lumpy(fasta_ref, sample_alignments, vcf_output_dir,
vcf_output_filename, alignment_type)
dataset = Dataset.objects.create(
type=Dataset.TYPE.VCF_LUMPY,
label=Dataset.TYPE.VCF_LUMPY,
filesystem_location=vcf_output_filename,
)
self.alignment_group.dataset_set.add(dataset)
# Parse the resulting vcf, grab variant objects
parse_alignment_group_vcf(self.alignment_group, Dataset.TYPE.VCF_LUMPY)
# Grab the resulting variants.
variants = Variant.objects.filter(reference_genome=self.reference_genome)
# There should be a Variant object for each sv event.
self.assertEqual(2, len(variants))
# One event should be located very close to 25k
va_positions = [v.position for v in variants]
va_offset = [25000 - va_pos for va_pos in va_positions]
self.assertTrue(any([v < 50 for v in va_offset]))
def test_run_lumpy__deletion(self):
"""Tests running Lumpy on data that should have a deletion.
"""
# Create Datasets / import data.
self.reference_genome = import_reference_genome_from_local_file(
self.project, 'ref_genome', DELETION_REF_GENBANK, 'genbank')
# Create an alignment that's already complete, so we can focus on
# testing variant calling only.
self.alignment_group = AlignmentGroup.objects.create(
label='test alignment', reference_genome=self.reference_genome)
r = _create_sample_and_alignment(
self.project, self.alignment_group, DELETION_SAMPLE_1_UID,
DELETION_SAMPLE_1_BWA)
sample_alignment = r['sample_alignment']
# Run lumpy.
lumpy_params = dict(VARIANT_TOOL_PARAMS_MAP[TOOL_LUMPY])
lumpy_params['tool_kwargs'] = {
'region_num': sample_alignment.uid,
'sample_alignments': [sample_alignment]
}
find_variants_with_tool(
self.alignment_group, lumpy_params, project=self.project)
merge_lumpy_vcf(self.alignment_group)
# Grab the resulting variants.
variants = Variant.objects.filter(
reference_genome=self.reference_genome)
# Verify that we have the expected deletion around position 10000 of
# size 1000.
self.assertEqual(1, len(variants))
v = variants[0]
# position/ref
self.assertTrue(9950 < v.position < 10050)
self.assertEqual(SV_REF_VALUE, v.ref_value)
vccd = v.variantcallercommondata_set.all()[0]
# size
size = vccd.data['INFO_END'] - v.position
self.assertTrue(900 < size < 1100)
va = v.variantalternate_set.all()[0]
# Type
self.assertEqual('DEL', va.data['INFO_SVTYPE'])
# SnpEff data
# TODO: Uncomment when Issue #648 is fixed.
# https://github.com/churchlab/millstone/issues/648
# self.assertEqual('geneX', va.data['INFO_EFF_GENE'])
def test_run_lumpy__multiple_samples_of_same_exact_deletion(self):
"""Tests lumpy running on multiple samples.
"""
# Create Datasets / import data.
self.reference_genome = import_reference_genome_from_local_file(
self.project, 'ref_genome', DELETION_REF, 'fasta')
# Create an alignment that's already complete, so we can focus on
# testing variant calling only.
self.alignment_group = AlignmentGroup.objects.create(
label='test alignment', reference_genome=self.reference_genome)
r1 = _create_sample_and_alignment(
self.project, self.alignment_group, DELETION_SAMPLE_1_UID,
DELETION_SAMPLE_1_BWA)
sa1 = r1['sample_alignment']
r2 = _create_sample_and_alignment(
self.project, self.alignment_group, DELETION_SAMPLE_2_UID,
DELETION_SAMPLE_2_BWA)
sa2 = r2['sample_alignment']
r3 = _create_sample_and_alignment(
self.project, self.alignment_group, DELETION_SAMPLE_3_UID,
DELETION_SAMPLE_3_BWA)
sa3 = r3['sample_alignment']
r4 = _create_sample_and_alignment(
self.project, self.alignment_group, DELETION_SAMPLE_4_UID,
DELETION_SAMPLE_4_BWA)
sa4 = r4['sample_alignment']
# Common params for each run of lumpy.
lumpy_params = dict(VARIANT_TOOL_PARAMS_MAP[TOOL_LUMPY])
def _run_lumpy_for_sample_alignment(sa):
"""Helper function to run lumpy for sample alignment.
"""
lumpy_params['tool_kwargs'] = {
'region_num': sa.uid,
'sample_alignments': [sa]
}
find_variants_with_tool(
self.alignment_group, lumpy_params, project=self.project)
_run_lumpy_for_sample_alignment(sa1)
_run_lumpy_for_sample_alignment(sa2)
_run_lumpy_for_sample_alignment(sa3)
_run_lumpy_for_sample_alignment(sa4)
merge_lumpy_vcf(self.alignment_group)
# Grab the resulting variants.
variants = Variant.objects.filter(
reference_genome=self.reference_genome)
# Should have 2 events.
self.assertEqual(2, len(variants))
def test_run_lumpy__inversion(self):
"""Tests running Lumpy on data with single inversion.
"""
# Create Datasets / import data.
self.reference_genome = import_reference_genome_from_local_file(
self.project, 'ref_genome', INVERSION_REF, 'fasta')
# Create an alignment that's already complete, so we can focus on
# testing variant calling only.
self.alignment_group = AlignmentGroup.objects.create(
label='test alignment', reference_genome=self.reference_genome)
r = _create_sample_and_alignment(
self.project, self.alignment_group, INVERSION_SAMPLE_UID,
INVERSION_SAMPLE_BWA)
sample_alignment = r['sample_alignment']
# Run lumpy.
lumpy_params = dict(VARIANT_TOOL_PARAMS_MAP[TOOL_LUMPY])
lumpy_params['tool_kwargs'] = {
'region_num': sample_alignment.uid,
'sample_alignments': [sample_alignment]
}
find_variants_with_tool(
self.alignment_group, lumpy_params, project=self.project)
merge_lumpy_vcf(self.alignment_group)
# Grab the resulting variants.
variants = Variant.objects.filter(
reference_genome=self.reference_genome)
self.assertEqual(1, len(variants))
v = variants[0]
# position
self.assertAlmostEqual(v.position, 30000, delta=2)
# size
vccd = v.variantcallercommondata_set.all()[0]
size = vccd.data['INFO_END'] - v.position
self.assertAlmostEqual(size, 1000, delta=10)
def test_post_l_merge(self):
"""Tests post-processing code following l_sort/l_merge.py on outputs
of lumpy applied on single samples.
"""
_, processed_vcf_path = tempfile.mkstemp()
process_vcf_post_l_merge(L_MERGE_TEST_OUTPUT, processed_vcf_path)
MAP_EXPECTED_VARIANT_POS_TO_SAMPLE_LIST = {
4998: ['f8346a99'],
9999: ['ds1', 'ds2', 'ds3'],
}
with open(processed_vcf_path) as fh:
# Assert expected number of sample cols.
vcf_reader = vcf.Reader(fh)
self.assertEqual(4, len(vcf_reader.samples))
# Assert each sample has proper GT.
for record in vcf_reader:
samples_with_var = MAP_EXPECTED_VARIANT_POS_TO_SAMPLE_LIST[
record.POS]
for sample_call in record.samples:
# error_msg = sample_
if sample_call.gt_nums == '1/1':
self.assertTrue(sample_call.sample in samples_with_var)
else:
self.assertFalse(sample_call.sample in samples_with_var)
###############################################################################
# Helper Functions
###############################################################################
def _count_records_in_vcf(vcf_reader):
record_count = 0
for record in vcf_reader:
record_count += 1
return record_count
def _create_sample_and_alignment(
project, alignment_group, sample_uid, bwa_alignment):
return create_sample_and_alignment(
project, alignment_group, sample_uid, bwa_alignment)
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/master_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.protobuf import master_pb2 as tensorflow_dot_core_dot_protobuf_dot_master__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/master_service.proto',
package='tensorflow.grpc',
syntax='proto3',
serialized_pb=_b('\n-tensorflow/core/protobuf/master_service.proto\x12\x0ftensorflow.grpc\x1a%tensorflow/core/protobuf/master.proto2\xbc\x04\n\rMasterService\x12T\n\rCreateSession\x12 .tensorflow.CreateSessionRequest\x1a!.tensorflow.CreateSessionResponse\x12T\n\rExtendSession\x12 .tensorflow.ExtendSessionRequest\x1a!.tensorflow.ExtendSessionResponse\x12Z\n\x0fPartialRunSetup\x12\".tensorflow.PartialRunSetupRequest\x1a#.tensorflow.PartialRunSetupResponse\x12\x42\n\x07RunStep\x12\x1a.tensorflow.RunStepRequest\x1a\x1b.tensorflow.RunStepResponse\x12Q\n\x0c\x43loseSession\x12\x1f.tensorflow.CloseSessionRequest\x1a .tensorflow.CloseSessionResponse\x12N\n\x0bListDevices\x12\x1e.tensorflow.ListDevicesRequest\x1a\x1f.tensorflow.ListDevicesResponse\x12<\n\x05Reset\x12\x18.tensorflow.ResetRequest\x1a\x19.tensorflow.ResetResponseB3\n\x1aorg.tensorflow.distruntimeB\x13MasterServiceProtosP\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_protobuf_dot_master__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\032org.tensorflow.distruntimeB\023MasterServiceProtosP\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class MasterServiceStub(object):
"""//////////////////////////////////////////////////////////////////////////////
MasterService defines a TensorFlow service with which a client can
interact to execute a distributed TensorFlow computation.
A master service keeps track of multiple "master sessions". Each
session encapsulates a computation graph and its associated state,
and typically corresponds to a single "client session" (e.g. a
`tensorflow::Session` instance).
A session is responsible for the following:
* assigning each node to a device (locally or remotely) using a
placement algorithm. This may make decisions based on collected
statistics from the workers in the system (e.g., memory usage,
bandwidth consumption, etc.)
* inserting intermediate nodes and edges to support cross-device
and cross-process data flows and resource management.
* issuing commands to workers to execute the subgraphs associated
with those workers.
Typically, a client carries out an iterative computation
(e.g. training) by invoking RPCs against the master in a
client-side loop. The client first creates a client session that
connects to a particular master (using gRPC for example). The
master creates a corresponding master session that is hosted on
the master and caches state between the client's invocations.
After the session is established, the master returns an opaque
handle to the client that can be used to associate the client and
master sessions.
The client may send an initial graph to the master in the
CreateSession call, and add nodes to the graph using ExtendSession.
    The most frequent operation a master performs is "RunStep", which implements
the `Session::Run()` API. It supports feeding in arguments,
executing a dataflow computation, and fetching arguments.
Finally, when the client no longer needs the session, it should
close the session by invoking CloseSession, which allows the master
to reclaim resources associated with the session. The master may
implement a garbage collection scheme that closes sessions that
have been inactive for some time.
For example, the following pseudo-code illustrates how a client
interacts with a master:
stub = NewStub("/job:mnist/replica:0/task:0")
{handle} = stub->CreateSession({graph_def})
do {
stub->RunStep({handle, {feeds}, {fetches}})
// The client can evaluate a predicate locally, based on the
// result of `fetches`, to determine whether to terminate. For
// example, it might fetch the loss and evaluate whether it is less
// than some threshold.
} while (!should_stop({fetches}));
stub->CloseSession({handle})
//////////////////////////////////////////////////////////////////////////////
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateSession = channel.unary_unary(
'/tensorflow.grpc.MasterService/CreateSession',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionResponse.FromString,
)
self.ExtendSession = channel.unary_unary(
'/tensorflow.grpc.MasterService/ExtendSession',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionResponse.FromString,
)
self.PartialRunSetup = channel.unary_unary(
'/tensorflow.grpc.MasterService/PartialRunSetup',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupResponse.FromString,
)
self.RunStep = channel.unary_unary(
'/tensorflow.grpc.MasterService/RunStep',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepResponse.FromString,
)
self.CloseSession = channel.unary_unary(
'/tensorflow.grpc.MasterService/CloseSession',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionResponse.FromString,
)
self.ListDevices = channel.unary_unary(
'/tensorflow.grpc.MasterService/ListDevices',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesResponse.FromString,
)
self.Reset = channel.unary_unary(
'/tensorflow.grpc.MasterService/Reset',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetResponse.FromString,
)
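  # Hedged usage sketch (not generated code): a client would normally drive
  # this stub over a gRPC channel roughly as follows; the target address and
  # request contents below are placeholders:
  #
  #   channel = grpc.insecure_channel("localhost:2222")
  #   stub = MasterServiceStub(channel)
  #   create_resp = stub.CreateSession(
  #       tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionRequest())
  #   stub.RunStep(
  #       tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepRequest(
  #           session_handle=create_resp.session_handle))
  #   stub.CloseSession(
  #       tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionRequest(
  #           session_handle=create_resp.session_handle))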
class MasterServiceServicer(object):
"""//////////////////////////////////////////////////////////////////////////////
MasterService defines a TensorFlow service with which a client can
interact to execute a distributed TensorFlow computation.
A master service keeps track of multiple "master sessions". Each
session encapsulates a computation graph and its associated state,
and typically corresponds to a single "client session" (e.g. a
`tensorflow::Session` instance).
A session is responsible for the following:
* assigning each node to a device (locally or remotely) using a
placement algorithm. This may make decisions based on collected
statistics from the workers in the system (e.g., memory usage,
bandwidth consumption, etc.)
* inserting intermediate nodes and edges to support cross-device
and cross-process data flows and resource management.
* issuing commands to workers to execute the subgraphs associated
with those workers.
Typically, a client carries out an iterative computation
(e.g. training) by invoking RPCs against the master in a
client-side loop. The client first creates a client session that
connects to a particular master (using gRPC for example). The
master creates a corresponding master session that is hosted on
the master and caches state between the client's invocations.
After the session is established, the master returns an opaque
handle to the client that can be used to associate the client and
master sessions.
The client may send an initial graph to the master in the
CreateSession call, and add nodes to the graph using ExtendSession.
    The most frequent operation a master performs is "RunStep", which implements
the `Session::Run()` API. It supports feeding in arguments,
executing a dataflow computation, and fetching arguments.
Finally, when the client no longer needs the session, it should
close the session by invoking CloseSession, which allows the master
to reclaim resources associated with the session. The master may
implement a garbage collection scheme that closes sessions that
have been inactive for some time.
For example, the following pseudo-code illustrates how a client
interacts with a master:
stub = NewStub("/job:mnist/replica:0/task:0")
{handle} = stub->CreateSession({graph_def})
do {
stub->RunStep({handle, {feeds}, {fetches}})
// The client can evaluate a predicate locally, based on the
// result of `fetches`, to determine whether to terminate. For
// example, it might fetch the loss and evaluate whether it is less
// than some threshold.
} while (!should_stop({fetches}));
stub->CloseSession({handle})
//////////////////////////////////////////////////////////////////////////////
"""
def CreateSession(self, request, context):
"""Creates a session.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExtendSession(self, request, context):
"""Extends a session.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PartialRunSetup(self, request, context):
"""Prepares future partial run calls.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunStep(self, request, context):
"""Drives the graph computation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CloseSession(self, request, context):
"""Closes a session.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListDevices(self, request, context):
"""List the devices usable by the master.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Reset(self, request, context):
"""Close and abandon all existing sessions. Ongoing computations
will no longer affect fresh ones via the resources in containers listed in
the ResetRequest. See ResetRequest for more details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MasterServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateSession': grpc.unary_unary_rpc_method_handler(
servicer.CreateSession,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionResponse.SerializeToString,
),
'ExtendSession': grpc.unary_unary_rpc_method_handler(
servicer.ExtendSession,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionResponse.SerializeToString,
),
'PartialRunSetup': grpc.unary_unary_rpc_method_handler(
servicer.PartialRunSetup,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupResponse.SerializeToString,
),
'RunStep': grpc.unary_unary_rpc_method_handler(
servicer.RunStep,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepResponse.SerializeToString,
),
'CloseSession': grpc.unary_unary_rpc_method_handler(
servicer.CloseSession,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionResponse.SerializeToString,
),
'ListDevices': grpc.unary_unary_rpc_method_handler(
servicer.ListDevices,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesResponse.SerializeToString,
),
'Reset': grpc.unary_unary_rpc_method_handler(
servicer.Reset,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorflow.grpc.MasterService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaMasterServiceServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""//////////////////////////////////////////////////////////////////////////////
MasterService defines a TensorFlow service with which a client can
interact to execute a distributed TensorFlow computation.
A master service keeps track of multiple "master sessions". Each
session encapsulates a computation graph and its associated state,
and typically corresponds to a single "client session" (e.g. a
`tensorflow::Session` instance).
A session is responsible for the following:
* assigning each node to a device (locally or remotely) using a
placement algorithm. This may make decisions based on collected
statistics from the workers in the system (e.g., memory usage,
bandwidth consumption, etc.)
* inserting intermediate nodes and edges to support cross-device
and cross-process data flows and resource management.
* issuing commands to workers to execute the subgraphs associated
with those workers.
Typically, a client carries out an iterative computation
(e.g. training) by invoking RPCs against the master in a
client-side loop. The client first creates a client session that
connects to a particular master (using gRPC for example). The
master creates a corresponding master session that is hosted on
the master and caches state between the client's invocations.
After the session is established, the master returns an opaque
handle to the client that can be used to associate the client and
master sessions.
The client may send an initial graph to the master in the
CreateSession call, and add nodes to the graph using ExtendSession.
The most frequent operation on a master is "RunStep", which implements
the `Session::Run()` API. It supports feeding in arguments,
executing a dataflow computation, and fetching the resulting output tensors.
Finally, when the client no longer needs the session, it should
close the session by invoking CloseSession, which allows the master
to reclaim resources associated with the session. The master may
implement a garbage collection scheme that closes sessions that
have been inactive for some time.
For example, the following pseudo-code illustrates how a client
interacts with a master:
stub = NewStub("/job:mnist/replica:0/task:0")
{handle} = stub->CreateSession({graph_def})
do {
stub->RunStep({handle, {feeds}, {fetches}})
// The client can evaluate a predicate locally, based on the
// result of `fetches`, to determine whether to terminate. For
// example, it might fetch the loss and evaluate whether it is less
// than some threshold.
} while (!should_stop({fetches}));
stub->CloseSession({handle})
//////////////////////////////////////////////////////////////////////////////
"""
def CreateSession(self, request, context):
"""Creates a session.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ExtendSession(self, request, context):
"""Extends a session.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def PartialRunSetup(self, request, context):
"""Prepares future partial run calls.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RunStep(self, request, context):
"""Drives the graph computation.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CloseSession(self, request, context):
"""Closes a session.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ListDevices(self, request, context):
"""List the devices usable by the master.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Reset(self, request, context):
"""Close and abandon all existing sessions. Ongoing computations
will no longer affect fresh ones via the resources in containers listed in
the ResetRequest. See ResetRequest for more details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaMasterServiceStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""//////////////////////////////////////////////////////////////////////////////
MasterService defines a TensorFlow service with which a client can
interact to execute a distributed TensorFlow computation.
A master service keeps track of multiple "master sessions". Each
session encapsulates a computation graph and its associated state,
and typically corresponds to a single "client session" (e.g. a
`tensorflow::Session` instance).
A session is responsible for the following:
* assigning each node to a device (locally or remotely) using a
placement algorithm. This may make decisions based on collected
statistics from the workers in the system (e.g., memory usage,
bandwidth consumption, etc.)
* inserting intermediate nodes and edges to support cross-device
and cross-process data flows and resource management.
* issuing commands to workers to execute the subgraphs associated
with those workers.
Typically, a client carries out an iterative computation
(e.g. training) by invoking RPCs against the master in a
client-side loop. The client first creates a client session that
connects to a particular master (using gRPC for example). The
master creates a corresponding master session that is hosted on
the master and caches state between the client's invocations.
After the session is established, the master returns an opaque
handle to the client that can be used to associate the client and
master sessions.
The client may send an initial graph to the master in the
CreateSession call, and add nodes to the graph using ExtendSession.
The most frequent operation on a master is "RunStep", which implements
the `Session::Run()` API. It supports feeding in arguments,
executing a dataflow computation, and fetching the resulting output tensors.
Finally, when the client no longer needs the session, it should
close the session by invoking CloseSession, which allows the master
to reclaim resources associated with the session. The master may
implement a garbage collection scheme that closes sessions that
have been inactive for some time.
For example, the following pseudo-code illustrates how a client
interacts with a master:
stub = NewStub("/job:mnist/replica:0/task:0")
{handle} = stub->CreateSession({graph_def})
do {
stub->RunStep({handle, {feeds}, {fetches}})
// The client can evaluate a predicate locally, based on the
// result of `fetches`, to determine whether to terminate. For
// example, it might fetch the loss and evaluate whether it is less
// than some threshold.
} while (!should_stop({fetches}));
stub->CloseSession({handle})
//////////////////////////////////////////////////////////////////////////////
"""
def CreateSession(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Creates a session.
"""
raise NotImplementedError()
CreateSession.future = None
def ExtendSession(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Extends a session.
"""
raise NotImplementedError()
ExtendSession.future = None
def PartialRunSetup(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Prepares future partial run calls.
"""
raise NotImplementedError()
PartialRunSetup.future = None
def RunStep(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Drives the graph computation.
"""
raise NotImplementedError()
RunStep.future = None
def CloseSession(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Closes a session.
"""
raise NotImplementedError()
CloseSession.future = None
def ListDevices(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""List the devices usable by the master.
"""
raise NotImplementedError()
ListDevices.future = None
def Reset(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Close and abandon all existing sessions. Ongoing computations
will no longer affect fresh ones via the resources in containers listed in
the ResetRequest. See ResetRequest for more details.
"""
raise NotImplementedError()
Reset.future = None
def beta_create_MasterService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('tensorflow.grpc.MasterService', 'CloseSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionRequest.FromString,
('tensorflow.grpc.MasterService', 'CreateSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionRequest.FromString,
('tensorflow.grpc.MasterService', 'ExtendSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionRequest.FromString,
('tensorflow.grpc.MasterService', 'ListDevices'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesRequest.FromString,
('tensorflow.grpc.MasterService', 'PartialRunSetup'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupRequest.FromString,
('tensorflow.grpc.MasterService', 'Reset'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetRequest.FromString,
('tensorflow.grpc.MasterService', 'RunStep'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepRequest.FromString,
}
response_serializers = {
('tensorflow.grpc.MasterService', 'CloseSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionResponse.SerializeToString,
('tensorflow.grpc.MasterService', 'CreateSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionResponse.SerializeToString,
('tensorflow.grpc.MasterService', 'ExtendSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionResponse.SerializeToString,
('tensorflow.grpc.MasterService', 'ListDevices'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesResponse.SerializeToString,
('tensorflow.grpc.MasterService', 'PartialRunSetup'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupResponse.SerializeToString,
('tensorflow.grpc.MasterService', 'Reset'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetResponse.SerializeToString,
('tensorflow.grpc.MasterService', 'RunStep'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepResponse.SerializeToString,
}
method_implementations = {
('tensorflow.grpc.MasterService', 'CloseSession'): face_utilities.unary_unary_inline(servicer.CloseSession),
('tensorflow.grpc.MasterService', 'CreateSession'): face_utilities.unary_unary_inline(servicer.CreateSession),
('tensorflow.grpc.MasterService', 'ExtendSession'): face_utilities.unary_unary_inline(servicer.ExtendSession),
('tensorflow.grpc.MasterService', 'ListDevices'): face_utilities.unary_unary_inline(servicer.ListDevices),
('tensorflow.grpc.MasterService', 'PartialRunSetup'): face_utilities.unary_unary_inline(servicer.PartialRunSetup),
('tensorflow.grpc.MasterService', 'Reset'): face_utilities.unary_unary_inline(servicer.Reset),
('tensorflow.grpc.MasterService', 'RunStep'): face_utilities.unary_unary_inline(servicer.RunStep),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_MasterService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('tensorflow.grpc.MasterService', 'CloseSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionRequest.SerializeToString,
('tensorflow.grpc.MasterService', 'CreateSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionRequest.SerializeToString,
('tensorflow.grpc.MasterService', 'ExtendSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionRequest.SerializeToString,
('tensorflow.grpc.MasterService', 'ListDevices'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesRequest.SerializeToString,
('tensorflow.grpc.MasterService', 'PartialRunSetup'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupRequest.SerializeToString,
('tensorflow.grpc.MasterService', 'Reset'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetRequest.SerializeToString,
('tensorflow.grpc.MasterService', 'RunStep'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepRequest.SerializeToString,
}
response_deserializers = {
('tensorflow.grpc.MasterService', 'CloseSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionResponse.FromString,
('tensorflow.grpc.MasterService', 'CreateSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionResponse.FromString,
('tensorflow.grpc.MasterService', 'ExtendSession'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ExtendSessionResponse.FromString,
('tensorflow.grpc.MasterService', 'ListDevices'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ListDevicesResponse.FromString,
('tensorflow.grpc.MasterService', 'PartialRunSetup'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.PartialRunSetupResponse.FromString,
('tensorflow.grpc.MasterService', 'Reset'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.ResetResponse.FromString,
('tensorflow.grpc.MasterService', 'RunStep'): tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepResponse.FromString,
}
cardinalities = {
'CloseSession': cardinality.Cardinality.UNARY_UNARY,
'CreateSession': cardinality.Cardinality.UNARY_UNARY,
'ExtendSession': cardinality.Cardinality.UNARY_UNARY,
'ListDevices': cardinality.Cardinality.UNARY_UNARY,
'PartialRunSetup': cardinality.Cardinality.UNARY_UNARY,
'Reset': cardinality.Cardinality.UNARY_UNARY,
'RunStep': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'tensorflow.grpc.MasterService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
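# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated code): a minimal client-side
# loop mirroring the pseudo-code in the MasterService docstrings above, built
# on the GA stub generated in this module. The target address, the stopping
# rule and the request field values are assumptions made only for this
# example; field names follow tensorflow/core/protobuf/master.proto.
def _example_master_client_loop(target='localhost:8470', graph_def=None,
                                fetches=(), max_steps=10):
  """Create a master session, run a few steps and close it (sketch only)."""
  channel = grpc.insecure_channel(target)
  stub = MasterServiceStub(channel)  # GA stub defined earlier in this module
  create_resp = stub.CreateSession(
      tensorflow_dot_core_dot_protobuf_dot_master__pb2.CreateSessionRequest(
          graph_def=graph_def))
  handle = create_resp.session_handle
  for _ in range(max_steps):
    # A real client would inspect the fetched tensors in the response and
    # decide here whether to keep iterating.
    stub.RunStep(
        tensorflow_dot_core_dot_protobuf_dot_master__pb2.RunStepRequest(
            session_handle=handle, fetch=list(fetches)))
  stub.CloseSession(
      tensorflow_dot_core_dot_protobuf_dot_master__pb2.CloseSessionRequest(
          session_handle=handle))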
|
|
# -*- coding: utf-8 -*-
#
# django-mfa documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 29 18:10:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-mfa'
copyright = u'2016, Micro Pyramid'
author = u'Micro Pyramid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
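# For example, a minimal sketch for the sphinx_rtd_theme selected above
# (option names follow that theme's documentation; adjust to taste):
#html_theme_options = {
#    'collapse_navigation': False,
#    'navigation_depth': 3,
#}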
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-mfa'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'djangomfa.tex', u'Django MFA Documentation',
u'Micro Pyramid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djangomfa', u'Django MFA Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'djangomfa', u'Django MFA Documentation',
author, 'djangomfa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from time import sleep
from requests import Timeout
from six import text_type, iteritems, string_types
from six.moves import map
from ...docker_api import HostConfig, NetworkingConfig, EndpointConfig
from ...functional import resolve_value
from ...utils import format_image_tag
from ..action import Action
from ..config.client import USE_HC_MERGE
from ..input import ItemType, NotSet
from ..policy.utils import init_options, extract_user
from . import AbstractRunner
from .attached import AttachedPreparationMixin
from .cmd import ExecMixin
from .image import ImageMixin
from .network import NetworkUtilMixin
from .script import ScriptMixin
from .signal_stop import SignalMixin
from .utils import update_kwargs, get_volumes, get_volumes_from, get_host_binds, get_port_bindings
log = logging.getLogger(__name__)
class DockerBaseRunnerMixin(object):
action_method_names = [
(ItemType.NETWORK, Action.CREATE, 'create_network'),
(ItemType.NETWORK, Action.REMOVE, 'remove_network'),
(ItemType.VOLUME, Action.CREATE, 'create_volume'),
(ItemType.VOLUME, Action.REMOVE, 'remove_volume'),
(ItemType.CONTAINER, Action.CREATE, 'create_container'),
(ItemType.CONTAINER, Action.START, 'start_container'),
(ItemType.CONTAINER, Action.RESTART, 'restart'),
(ItemType.CONTAINER, Action.STOP, 'stop'),
(ItemType.CONTAINER, Action.REMOVE, 'remove_container'),
(ItemType.CONTAINER, Action.UPDATE, 'update_container'),
(ItemType.CONTAINER, Action.KILL, 'kill'),
(ItemType.CONTAINER, Action.WAIT, 'wait'),
]
def create_network(self, action, n_name, **kwargs):
"""
Creates a configured network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
"""
c_kwargs = self.get_network_create_kwargs(action, n_name, **kwargs)
res = action.client.create_network(**c_kwargs)
self._policy.network_names[action.client_name][n_name] = res['Id']
return res
def remove_network(self, action, n_name, **kwargs):
"""
Removes a network.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param n_name: Network name or id.
:type n_name: unicode | str
:param kwargs: Additional keyword arguments.
:type kwargs: dict
"""
c_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
res = action.client.remove_network(**c_kwargs)
del self._policy.network_names[action.client_name][n_name]
return res
def create_volume(self, action, v_name, **kwargs):
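        """
        Creates a configured volume, or - for clients without volume support - an attached container sharing the
        volume path.
        :param action: Action configuration.
        :type action: dockermap.map.runner.ActionConfig
        :param v_name: Volume (or attached container) name.
        :type v_name: unicode | str
        :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
        :type kwargs: dict
        """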
if action.client_config.features['volumes']:
c_kwargs = self.get_volume_create_kwargs(action, v_name, kwargs=kwargs)
res = action.client.create_volume(**c_kwargs)
self._policy.volume_names[action.client_name].add(v_name)
return res
c_kwargs = self.get_attached_container_create_kwargs(action, v_name, kwargs=kwargs)
res = action.client.create_container(**c_kwargs)
self._policy.container_names[action.client_name][v_name] = res['Id']
if action.client_config.features['host_config']:
action.client.start(v_name)
else:
c_kwargs = self.get_attached_container_host_config_kwargs(action, v_name, kwargs=kwargs)
action.client.start(**c_kwargs)
return res
def remove_volume(self, action, v_name, **kwargs):
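        """
        Removes a volume, or - for clients without volume support - the attached container that was sharing the
        volume path.
        :param action: Action configuration.
        :type action: dockermap.map.runner.ActionConfig
        :param v_name: Volume (or attached container) name.
        :type v_name: unicode | str
        :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
        :type kwargs: dict
        """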
if action.client_config.features['volumes']:
            c_kwargs = self.get_volume_remove_kwargs(action, v_name, kwargs=kwargs)
res = action.client.remove_volume(**c_kwargs)
self._policy.volume_names[action.client_name].discard(v_name)
return res
c_kwargs = self.get_container_remove_kwargs(action, v_name, kwargs=kwargs)
res = action.client.remove_container(**c_kwargs)
del self._policy.container_names[action.client_name][v_name]
return res
def create_container(self, action, c_name, **kwargs):
c_kwargs = self.get_container_create_kwargs(action, c_name, kwargs=kwargs)
res = action.client.create_container(**c_kwargs)
self._policy.container_names[action.client_name][c_name] = res['Id']
return res
def start_container(self, action, c_name, **kwargs):
if action.client_config.features['host_config']:
res = action.client.start(c_name)
else:
c_kwargs = self.get_container_host_config_kwargs(action, c_name, kwargs=kwargs)
res = action.client.start(**c_kwargs)
start_delay = action.config.start_delay
if start_delay:
log.debug("Sleeping %s seconds after container %s start.", start_delay, c_name)
sleep(start_delay)
return res
def restart(self, action, c_name, **kwargs):
c_kwargs = self.get_container_restart_kwargs(action, c_name, kwargs=kwargs)
return action.client.restart(**c_kwargs)
def stop(self, action, c_name, **kwargs):
c_kwargs = self.get_container_stop_kwargs(action, c_name, kwargs=kwargs)
try:
return action.client.stop(**c_kwargs)
except Timeout:
log.warning("Container %s did not stop in time - sent SIGKILL.", c_name)
try:
action.client.wait(c_name, timeout=c_kwargs.get('timeout', 10))
except Timeout:
pass
return None
def remove_container(self, action, c_name, **kwargs):
c_kwargs = self.get_container_remove_kwargs(action, c_name, kwargs=kwargs)
res = action.client.remove_container(**c_kwargs)
del self._policy.container_names[action.client_name][c_name]
return res
def update_container(self, action, c_name, update_values, **kwargs):
c_kwargs = self.get_container_update_kwargs(action, c_name, update_values, kwargs=kwargs)
return action.client.update_container(**c_kwargs)
def kill(self, action, c_name, **kwargs):
c_kwargs = self.get_container_kill_kwargs(action, c_name, kwargs=kwargs)
return action.client.kill(**c_kwargs)
def wait(self, action, c_name, **kwargs):
c_kwargs = self.get_container_wait_kwargs(action, c_name, kwargs=kwargs)
return action.client.wait(**c_kwargs)
class DockerConfigMixin(object):
def get_container_create_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to create a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
policy = self._policy
client_config = action.client_config
container_map = action.container_map
container_config = action.config
image_tag = container_map.get_image(container_config.image or action.config_id.config_name)
default_paths = policy.default_volume_paths[action.config_id.map_name]
c_kwargs = dict(
name=container_name,
image=format_image_tag(image_tag),
volumes=get_volumes(container_map, container_config, default_paths,
client_config.features['volumes']),
user=extract_user(container_config.user),
ports=[resolve_value(port_binding.exposed_port)
for port_binding in container_config.exposes if port_binding.exposed_port],
domainname=resolve_value(client_config.get('domainname', container_map.default_domain)) or None,
)
if container_map.set_hostname or container_map.set_hostname is NotSet:
c_kwargs['hostname'] = policy.get_hostname(container_name, action.client_name)
if container_config.network_mode == 'none':
c_kwargs['network_disabled'] = True
elif client_config.features['networks'] and container_config.networks:
first_network = container_config.networks[0]
c_kwargs['networking_config'] = NetworkingConfig({
policy.nname(action.config_id.map_name, first_network.network_name): EndpointConfig(
client_config.version, **self.get_network_create_endpoint_kwargs(action, first_network)
)
})
if client_config.features['stop_signal'] and container_config.stop_signal:
c_kwargs['stop_signal'] = container_config.stop_signal
hc_extra_kwargs = kwargs.pop('host_config', None) if kwargs else None
use_host_config = client_config.features['host_config']
if use_host_config:
hc_kwargs = self.get_container_host_config_kwargs(action, None, kwargs=hc_extra_kwargs)
if hc_kwargs:
if use_host_config == USE_HC_MERGE:
c_kwargs.update(hc_kwargs)
else:
c_kwargs['host_config'] = HostConfig(version=client_config.version, **hc_kwargs)
if client_config.features['stop_timeout'] and container_config.stop_timeout:
c_kwargs['stop_timeout'] = container_config.stop_timeout
if client_config.features['healthcheck'] and container_config.healthcheck:
c_kwargs['healthcheck'] = container_config.healthcheck._asdict()
update_kwargs(c_kwargs, init_options(container_config.create_options), kwargs)
return c_kwargs
def get_container_host_config_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to set up the HostConfig or start a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
:type container_name: unicode | str | NoneType
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
container_map = action.container_map
container_config = action.config
client_config = action.client_config
config_id = action.config_id
map_name = config_id.map_name
policy = self._policy
cname = policy.cname
supports_volumes = client_config.features['volumes']
c_kwargs = dict(
links=[(cname(map_name, l_name), alias or policy.get_hostname(l_name))
for l_name, alias in container_config.links],
binds=get_host_binds(container_map, config_id.config_name, container_config, config_id.instance_name,
policy, supports_volumes),
volumes_from=get_volumes_from(container_map, config_id.config_name, container_config,
policy, not supports_volumes),
port_bindings=get_port_bindings(container_config, client_config),
)
network_mode = container_config.network_mode
if isinstance(network_mode, tuple):
c_kwargs['network_mode'] = 'container:{0}'.format(cname(map_name, *network_mode))
elif isinstance(network_mode, string_types):
c_kwargs['network_mode'] = network_mode
if container_name:
c_kwargs['container'] = container_name
update_kwargs(c_kwargs, init_options(container_config.host_config), kwargs)
return c_kwargs
def get_attached_container_create_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to create an attached container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
client_config = action.client_config
policy = self._policy
config_id = action.config_id
path = resolve_value(policy.default_volume_paths[config_id.map_name][config_id.instance_name])
user = extract_user(action.config.user)
c_kwargs = dict(
name=container_name,
image=self._policy.base_image,
volumes=[path],
user=user,
network_disabled=True,
)
hc_extra_kwargs = kwargs.pop('host_config', None) if kwargs else None
use_host_config = client_config.features['host_config']
if use_host_config:
hc_kwargs = self.get_attached_container_host_config_kwargs(action, None, kwargs=hc_extra_kwargs)
if hc_kwargs:
if use_host_config == USE_HC_MERGE:
c_kwargs.update(hc_kwargs)
else:
c_kwargs['host_config'] = HostConfig(version=client_config.version, **hc_kwargs)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_attached_container_host_config_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to set up the HostConfig or start an attached container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
:type container_name: unicode | str | NoneType
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
if container_name:
c_kwargs = {'container': container_name}
else:
c_kwargs = {}
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_container_update_kwargs(self, action, container_name, update_values, kwargs=None):
"""
Generates keyword arguments for the Docker client to update the HostConfig of an existing container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param update_values: Dictionary of values to update; i.e. keyword arguments to the Docker client.
:type update_values: dict[unicode | str, unicode | str | int | float | decimal.Decimal]
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(container=container_name)
update_kwargs(c_kwargs, update_values, kwargs)
return c_kwargs
def get_container_restart_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to restart a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(container=container_name)
stop_timeout = action.config.stop_timeout
if stop_timeout is NotSet:
timeout = action.client_config.get('stop_timeout')
if timeout is not None:
c_kwargs['timeout'] = timeout
elif stop_timeout is not None:
c_kwargs['timeout'] = stop_timeout
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_container_wait_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to wait for a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(container=container_name)
timeout = action.client_config.get('wait_timeout')
if timeout is not None:
c_kwargs['timeout'] = timeout
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_container_stop_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to stop a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(
container=container_name,
)
stop_timeout = action.config.stop_timeout
if stop_timeout is NotSet:
timeout = action.client_config.get('stop_timeout')
if timeout is not None:
c_kwargs['timeout'] = timeout
elif stop_timeout is not None:
c_kwargs['timeout'] = stop_timeout
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_container_remove_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to remove a container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id.
:type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(container=container_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_network_create_kwargs(self, action, network_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to create a network.
:param action: Action configuration.
:type action: ActionConfig
:param network_name: Network name or id.
:type network_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
config = action.config
c_kwargs = dict(
name=network_name,
driver=config.driver,
options=config.driver_options,
)
if config.internal:
c_kwargs['internal'] = True
driver_opts = init_options(config.driver_options)
if driver_opts:
c_kwargs['options'] = {option_name: resolve_value(option_value)
for option_name, option_value in iteritems(driver_opts)}
update_kwargs(c_kwargs, init_options(config.create_options), kwargs)
return c_kwargs
def get_network_remove_kwargs(self, action, network_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to remove a network.
:param action: Action configuration.
:type action: ActionConfig
:param network_name: Network name or id.
:type network_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(net_id=network_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_network_create_endpoint_kwargs(self, action, endpoint_config, kwargs=None):
"""
Generates keyword arguments for Docker's ``create_endpoint_config`` utility / ``EndpointConfig`` type as well
as for ``connect_container_to_network``.
:param action: Action configuration.
:type action: ActionConfig
:param endpoint_config: Network endpoint configuration.
:type endpoint_config: dockermap.map.input.NetworkEndpoint
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
map_name = action.config_id.map_name
policy = self._policy
c_kwargs = dict(
ipv4_address=resolve_value(endpoint_config.ipv4_address),
ipv6_address=resolve_value(endpoint_config.ipv6_address),
)
if endpoint_config.aliases:
c_kwargs['aliases'] = list(map(resolve_value, endpoint_config.aliases))
if endpoint_config.links:
c_kwargs['links'] = [(policy.cname(map_name, l_name), alias or policy.get_hostname(l_name))
for l_name, alias in endpoint_config.links]
if endpoint_config.link_local_ips:
c_kwargs['link_local_ips'] = list(map(resolve_value, endpoint_config.link_local_ips))
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_network_connect_kwargs(self, action, network_name, container_name, endpoint_config=None, kwargs=None):
"""
Generates keyword arguments for the Docker client to add a container to a network.
:param action: Action configuration.
:type action: ActionConfig
:param network_name: Network name or id.
:type network_name: unicode | str
:param container_name: Container name or id.
:type container_name: unicode | str
:param endpoint_config: Network endpoint configuration.
:type endpoint_config: dockermap.map.input.NetworkEndpoint
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(
container=container_name,
net_id=network_name,
)
if endpoint_config:
c_kwargs.update(self.get_network_create_endpoint_kwargs(action, endpoint_config))
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_network_disconnect_kwargs(self, action, network_name, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to remove a container from a network.
:param action: Action configuration.
:type action: ActionConfig
        :param network_name: Network name or id.
        :type network_name: unicode | str
        :param container_name: Container name or id.
        :type container_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(
container=container_name,
net_id=network_name,
)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_exec_create_kwargs(self, action, container_name, exec_cmd, exec_user, kwargs=None):
"""
        Generates keyword arguments for the Docker client to create an exec instance inside a running container.
        :param action: Action configuration.
        :type action: ActionConfig
        :param container_name: Container name or id.
        :type container_name: unicode | str
        :param exec_cmd: Command to be executed.
        :type exec_cmd: unicode | str
        :param exec_user: User to run the command.
        :type exec_user: unicode | str
        :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
        :type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(
container=container_name,
cmd=resolve_value(exec_cmd),
)
if exec_user is not None:
c_kwargs['user'] = text_type(resolve_value(exec_user))
elif action.config.user is not NotSet:
c_kwargs['user'] = extract_user(action.config.user)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_exec_start_kwargs(self, action, container_name, exec_id, kwargs=None):
"""
        Generates keyword arguments for the Docker client to start a previously created exec instance.
        :param action: Action configuration.
        :type action: ActionConfig
        :param container_name: Container name or id.
        :type container_name: unicode | str
        :param exec_id: Id of the exec instance.
        :type exec_id: long
        :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
        :type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(exec_id=exec_id)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_container_kill_kwargs(self, action, container_name, kwargs=None):
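        """
        Generates keyword arguments for the Docker client to kill a container.
        :param action: Action configuration.
        :type action: ActionConfig
        :param container_name: Container name or id.
        :type container_name: unicode | str
        :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
        :type kwargs: dict
        :return: Resulting keyword arguments.
        :rtype: dict
        """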
c_kwargs = dict(container=container_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_volume_create_kwargs(self, action, volume_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to create a volume.
:param action: Action configuration.
:type action: ActionConfig
:param volume_name: Volume name.
:type volume_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
config = action.config
c_kwargs = dict(name=volume_name)
if config:
c_kwargs['driver'] = config.driver
driver_opts = init_options(config.driver_options)
if driver_opts:
c_kwargs['driver_opts'] = {option_name: resolve_value(option_value)
for option_name, option_value in iteritems(driver_opts)}
update_kwargs(c_kwargs, init_options(config.create_options), kwargs)
else:
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_volume_remove_kwargs(self, action, volume_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to remove a volume.
:param action: Action configuration.
:type action: ActionConfig
:param volume_name: Volume name.
:type volume_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(name=volume_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
class DockerClientRunner(DockerBaseRunnerMixin, DockerConfigMixin, AttachedPreparationMixin, ExecMixin, SignalMixin,
ScriptMixin, NetworkUtilMixin, ImageMixin, AbstractRunner):
"""
Runs actions on a Docker client and returns results from the API.
"""
pass
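# ---------------------------------------------------------------------------
# Illustrative sketch (not part of dockermap): the get_*_kwargs methods above
# all follow the same layering convention - configuration-derived defaults
# first, then config-level options, then call-site ``kwargs`` with the highest
# precedence. The helper below shows that precedence with plain dict updates;
# it is a simplified stand-in for the real ``update_kwargs``, which also
# merges nested lists and dictionaries.
def _example_kwargs_precedence(config_defaults, config_options=None, call_kwargs=None):
    """Merge keyword argument layers, later layers overriding earlier ones (sketch only)."""
    merged = dict(config_defaults)
    for layer in (config_options, call_kwargs):
        if layer:
            merged.update(layer)
    return merged
# e.g. _example_kwargs_precedence({'container': 'web.1', 'timeout': 10}, {'timeout': 30}, {'timeout': 5})
# -> {'container': 'web.1', 'timeout': 5}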
|
|
#!/usr/bin/env python
# Copyright 2012 NEC Corporation.
# Based on ryu/openvswitch agents.
#
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
# @author: Akihiro MOTOKI
import socket
import time
import eventlet
eventlet.monkey_patch()
from neutron.agent.linux import ovs_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as logging_config
from neutron.common import constants as q_const
from neutron.common import rpc_compat
from neutron.common import topics
from neutron import context as q_context
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.rpc import dispatcher
from neutron.plugins.nec.common import config
LOG = logging.getLogger(__name__)
class NECPluginApi(agent_rpc.PluginApi):
BASE_RPC_API_VERSION = '1.0'
def update_ports(self, context, agent_id, datapath_id,
port_added, port_removed):
"""RPC to update information of ports on Neutron Server."""
LOG.info(_("Update ports: added=%(added)s, "
"removed=%(removed)s"),
{'added': port_added, 'removed': port_removed})
self.call(context,
self.make_msg('update_ports',
topic=topics.AGENT,
agent_id=agent_id,
datapath_id=datapath_id,
port_added=port_added,
port_removed=port_removed))
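# Example of the resulting RPC payload as built by the agent below (illustrative values only):
# update_ports(ctx, agent_id='nec-q-agent.host1', datapath_id='0x0000abcdef012345',
#              port_added=[{'id': '<port-uuid>', 'port_no': 5, 'mac': 'fa:16:3e:xx:xx:xx'}],
#              port_removed=['<port-uuid>'])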
class NECAgentRpcCallback(rpc_compat.RpcCallback):
RPC_API_VERSION = '1.0'
def __init__(self, context, agent, sg_agent):
super(NECAgentRpcCallback, self).__init__()
self.context = context
self.agent = agent
self.sg_agent = sg_agent
def port_update(self, context, **kwargs):
LOG.debug(_("port_update received: %s"), kwargs)
port = kwargs.get('port')
# Validate that port is on OVS
vif_port = self.agent.int_br.get_vif_port_by_id(port['id'])
if not vif_port:
return
if ext_sg.SECURITYGROUPS in port:
self.sg_agent.refresh_firewall()
class SecurityGroupServerRpcApi(rpc_compat.RpcProxy,
sg_rpc.SecurityGroupServerRpcApiMixin):
def __init__(self, topic):
super(SecurityGroupServerRpcApi, self).__init__(
topic=topic, default_version=sg_rpc.SG_RPC_VERSION)
class SecurityGroupAgentRpcCallback(
rpc_compat.RpcCallback,
sg_rpc.SecurityGroupAgentRpcCallbackMixin):
RPC_API_VERSION = sg_rpc.SG_RPC_VERSION
def __init__(self, context, sg_agent):
super(SecurityGroupAgentRpcCallback, self).__init__()
self.context = context
self.sg_agent = sg_agent
class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin):
def __init__(self, context):
self.context = context
self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN)
self.init_firewall()
class NECNeutronAgent(object):
def __init__(self, integ_br, root_helper, polling_interval):
'''Constructor.
:param integ_br: name of the integration bridge.
:param root_helper: utility to use when running shell cmds.
:param polling_interval: interval (secs) to check the bridge.
'''
self.int_br = ovs_lib.OVSBridge(integ_br, root_helper)
self.polling_interval = polling_interval
self.cur_ports = []
self.need_sync = True
self.datapath_id = "0x%s" % self.int_br.get_datapath_id()
self.agent_state = {
'binary': 'neutron-nec-agent',
'host': config.CONF.host,
'topic': q_const.L2_AGENT_TOPIC,
'configurations': {},
'agent_type': q_const.AGENT_TYPE_NEC,
'start_flag': True}
self.setup_rpc()
def setup_rpc(self):
self.host = socket.gethostname()
self.agent_id = 'nec-q-agent.%s' % self.host
LOG.info(_("RPC agent_id: %s"), self.agent_id)
self.topic = topics.AGENT
self.context = q_context.get_admin_context_without_session()
self.plugin_rpc = NECPluginApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.sg_agent = SecurityGroupAgentRpc(self.context)
# RPC network init
# Handle updates from service
self.callback_nec = NECAgentRpcCallback(self.context,
self, self.sg_agent)
self.callback_sg = SecurityGroupAgentRpcCallback(self.context,
self.sg_agent)
self.dispatcher = dispatcher.RpcDispatcher([self.callback_nec,
self.callback_sg])
# Define the listening consumer for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.dispatcher,
self.topic,
consumers)
report_interval = config.CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
# How many devices are likely used by a VM
num_devices = len(self.cur_ports)
self.agent_state['configurations']['devices'] = num_devices
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_("Failed reporting state!"))
def _vif_port_to_port_info(self, vif_port):
return dict(id=vif_port.vif_id, port_no=vif_port.ofport,
mac=vif_port.vif_mac)
def _process_security_group(self, port_added, port_removed):
if port_added:
devices_added = [p['id'] for p in port_added]
self.sg_agent.prepare_devices_filter(devices_added)
if port_removed:
self.sg_agent.remove_devices_filter(port_removed)
def loop_handler(self):
try:
# self.cur_ports will be kept until loop_handler succeeds.
cur_ports = [] if self.need_sync else self.cur_ports
new_ports = []
port_added = []
for vif_port in self.int_br.get_vif_ports():
port_id = vif_port.vif_id
new_ports.append(port_id)
if port_id not in cur_ports:
port_info = self._vif_port_to_port_info(vif_port)
port_added.append(port_info)
port_removed = []
for port_id in cur_ports:
if port_id not in new_ports:
port_removed.append(port_id)
if port_added or port_removed:
self.plugin_rpc.update_ports(self.context,
self.agent_id, self.datapath_id,
port_added, port_removed)
self._process_security_group(port_added, port_removed)
else:
LOG.debug(_("No port changed."))
self.cur_ports = new_ports
self.need_sync = False
except Exception:
LOG.exception(_("Error in agent event loop"))
self.need_sync = True
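# Illustrative, standalone sketch of the port diff computed in loop_handler above
# (toy version; the real loop also builds full port-info dicts for added ports):
# def _diff_ports(cur_ports, bridge_ports):
#     added = [p for p in bridge_ports if p not in cur_ports]
#     removed = [p for p in cur_ports if p not in bridge_ports]
#     return added, removed
# _diff_ports(['port-a', 'port-b'], ['port-b', 'port-c']) -> (['port-c'], ['port-a'])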
def daemon_loop(self):
"""Main processing loop for NEC Plugin Agent."""
while True:
self.loop_handler()
time.sleep(self.polling_interval)
def main():
config.CONF(project='neutron')
logging_config.setup_logging(config.CONF)
# Read the agent configuration options.
integ_br = config.OVS.integration_bridge
root_helper = config.AGENT.root_helper
polling_interval = config.AGENT.polling_interval
agent = NECNeutronAgent(integ_br, root_helper, polling_interval)
# Start everything.
agent.daemon_loop()
if __name__ == "__main__":
main()
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutronclient.v2_0 import client as neutron_client
from oslotest import mockpatch
from cloudferrylib.os.network import neutron
from cloudferrylib.utils import utils
from tests import test
FAKE_CONFIG = utils.ext_dict(
cloud=utils.ext_dict({'user': 'fake_user',
'password': 'fake_password',
'tenant': 'fake_tenant',
'auth_url': 'http://1.1.1.1:35357/v2.0/',
'service_tenant': 'services'}),
migrate=utils.ext_dict({'ext_net_map': 'fake_ext_net_map.yaml',
'speed_limit': '10MB',
'retry': '7',
'time_wait': 5}))
class NeutronTestCase(test.TestCase):
def setUp(self):
super(NeutronTestCase, self).setUp()
self.neutron_mock_client = mock.MagicMock()
self.neutron_client_patch = \
mockpatch.PatchObject(neutron_client,
'Client',
new=self.neutron_mock_client)
self.useFixture(self.neutron_client_patch)
self.identity_mock = mock.Mock()
self.network_mock = mock.Mock()
self.network_mock.neutron_client = self.neutron_mock_client
self.fake_cloud = mock.Mock()
self.fake_cloud.mysql_connector = mock.Mock()
self.fake_cloud.resources = dict(identity=self.identity_mock,
network=self.network_mock)
self.neutron_network_client = \
neutron.NeutronNetwork(FAKE_CONFIG, self.fake_cloud)
self.identity_mock.get_tenant_id_by_name = self.f_tenant_id_by_name
self.identity_mock.get_tenants_func = \
mock.Mock(return_value=self.f_mock)
self.net_1_info = {'name': 'fake_network_name_1',
'id': 'fake_network_id_1',
'admin_state_up': True,
'shared': False,
'tenant_id': 'fake_tenant_id_1',
'tenant_name': 'fake_tenant_name_1',
'subnet_names': ['fake_subnet_name_1'],
'router:external': False,
'provider:physical_network': None,
'provider:network_type': 'gre',
'provider:segmentation_id': 5,
'res_hash': 'fake_net_hash_1',
'meta': {}}
self.net_2_info = {'name': 'fake_network_name_2',
'id': 'fake_network_id_2',
'admin_state_up': True,
'shared': False,
'tenant_id': 'fake_tenant_id_2',
'tenant_name': 'fake_tenant_name_2',
'subnet_names': ['fake_subnet_name_2'],
'router:external': False,
'provider:physical_network': None,
'provider:network_type': 'gre',
'provider:segmentation_id': 10,
'res_hash': 'fake_net_hash_2',
'meta': {}}
self.subnet_1_info = {'name': 'fake_subnet_name_1',
'id': 'fake_subnet_id_1',
'enable_dhcp': True,
'allocation_pools': [{'start': 'fake_start_ip_1',
'end': 'fake_end_ip_1'}],
'gateway_ip': 'fake_gateway_ip_1',
'ip_version': 4,
'cidr': 'fake_cidr_1',
'network_name': 'fake_network_name_1',
'external': False,
'network_id': 'fake_network_id_1',
'tenant_name': 'fake_tenant_name_1',
'res_hash': 'fake_subnet_hash_1',
'meta': {}}
self.subnet_2_info = {'name': 'fake_subnet_name_2',
'id': 'fake_subnet_id_2',
'enable_dhcp': True,
'allocation_pools': [{'start': 'fake_start_ip_2',
'end': 'fake_end_ip_2'}],
'gateway_ip': 'fake_gateway_ip_2',
'ip_version': 4,
'cidr': 'fake_cidr_2',
'network_name': 'fake_network_name_2',
'external': False,
'network_id': 'fake_network_id_2',
'tenant_name': 'fake_tenant_name_2',
'res_hash': 'fake_subnet_hash_2',
'meta': {}}
def f_mock(self, tenant_id):
if tenant_id == 'fake_tenant_id_1':
return 'fake_tenant_name_1'
elif tenant_id == 'fake_tenant_id_2':
return 'fake_tenant_name_2'
def f_tenant_id_by_name(self, name):
if name == 'fake_tenant_name_1':
return 'fake_tenant_id_1'
elif name == 'fake_tenant_name_2':
return 'fake_tenant_id_2'
def test_get_client(self):
# Reset the mock so that only the client call made from this test method is checked
self.neutron_mock_client.reset_mock()
client = self.neutron_network_client.get_client()
self.neutron_mock_client.assert_called_once_with(
username='fake_user',
password='fake_password',
tenant_name='fake_tenant',
auth_url='http://1.1.1.1:35357/v2.0/'
)
self.assertEqual(self.neutron_mock_client(), client)
def test_get_networks(self):
fake_networks_list = {'networks': [{'status': 'ACTIVE',
'subnets': ['fake_subnet_id_1'],
'name': 'fake_network_name_1',
'provider:physical_network': None,
'admin_state_up': True,
'tenant_id': 'fake_tenant_id_1',
'provider:network_type': 'gre',
'router:external': False,
'shared': False,
'id': 'fake_network_id_1',
'provider:segmentation_id': 5}]}
self.neutron_mock_client().list_networks.return_value = \
fake_networks_list
self.neutron_mock_client.show_subnet.return_value = \
{'subnet': {'name': 'fake_subnet_name_1'}}
self.network_mock.get_resource_hash = \
mock.Mock(return_value='fake_net_hash_1')
networks_info = [self.net_1_info]
networks_info_result = self.neutron_network_client.get_networks()
self.assertEqual(networks_info, networks_info_result)
def test_get_subnets(self):
fake_subnets_list = {
'subnets': [{'name': 'fake_subnet_name_1',
'enable_dhcp': True,
'network_id': 'fake_network_id_1',
'tenant_id': 'fake_tenant_id_1',
'allocation_pools': [
{'start': 'fake_start_ip_1',
'end': 'fake_end_ip_1'}
],
'host_routes': [],
'ip_version': 4,
'gateway_ip': 'fake_gateway_ip_1',
'cidr': 'fake_cidr_1',
'id': 'fake_subnet_id_1'}]}
self.neutron_mock_client().list_subnets.return_value = \
fake_subnets_list
self.neutron_mock_client.show_network.return_value = \
{'network': {'name': 'fake_network_name_1',
'router:external': False}}
self.network_mock.get_resource_hash = \
mock.Mock(return_value='fake_subnet_hash_1')
subnets_info = [self.subnet_1_info]
subnets_info_result = self.neutron_network_client.get_subnets()
self.assertEqual(subnets_info, subnets_info_result)
def test_get_routers(self):
fake_routers_list = {
'routers': [{'status': 'ACTIVE',
'external_gateway_info': {
'network_id': 'fake_network_id_1',
'enable_snat': True
},
'name': 'fake_router_name_1',
'admin_state_up': True,
'tenant_id': 'fake_tenant_id_1',
'routes': [],
'id': 'fake_router_id_1'}]}
self.neutron_mock_client().list_routers.return_value = \
fake_routers_list
self.neutron_mock_client.show_network.return_value = \
{'network': {'name': 'fake_network_name_1',
'tenant_id': 'fake_tenant_id_1'}}
fake_ports_list = {
'ports': [{'fixed_ips': [{'subnet_id': 'fake_subnet_id_1',
'ip_address': 'fake_ipaddr_1'}],
'device_id': 'fake_router_id_1'}]}
self.neutron_mock_client.list_ports.return_value = fake_ports_list
self.network_mock.get_resource_hash = \
mock.Mock(return_value='fake_router_hash')
routers_info = [{'name': 'fake_router_name_1',
'id': 'fake_router_id_1',
'admin_state_up': True,
'routes': [],
'external_gateway_info': {
'network_id': 'fake_network_id_1',
'enable_snat': True
},
'ext_net_name': 'fake_network_name_1',
'ext_net_tenant_name': 'fake_tenant_name_1',
'ext_net_id': 'fake_network_id_1',
'tenant_name': 'fake_tenant_name_1',
'ips': ['fake_ipaddr_1'],
'subnet_ids': ['fake_subnet_id_1'],
'res_hash': 'fake_router_hash',
'meta': {}}]
routers_info_result = self.neutron_network_client.get_routers()
self.assertEqual(routers_info, routers_info_result)
def test_get_floatingips(self):
fake_floatingips_list = {
'floatingips': [{'router_id': None,
'tenant_id': 'fake_tenant_id_1',
'floating_network_id': 'fake_network_id_1',
'fixed_ip_address': None,
'floating_ip_address': 'fake_floatingip_1',
'port_id': None,
'id': 'fake_floating_ip_id_1'}]}
self.neutron_mock_client().list_floatingips.return_value = \
fake_floatingips_list
self.neutron_mock_client.show_network.return_value = \
{'network': {'name': 'fake_network_name_1',
'tenant_id': 'fake_tenant_id_1'}}
floatingips_info = [{'id': 'fake_floating_ip_id_1',
'tenant_id': 'fake_tenant_id_1',
'floating_network_id': 'fake_network_id_1',
'network_name': 'fake_network_name_1',
'ext_net_tenant_name': 'fake_tenant_name_1',
'tenant_name': 'fake_tenant_name_1',
'fixed_ip_address': None,
'floating_ip_address': 'fake_floatingip_1',
'meta': {}}]
floatings_info_result = self.neutron_network_client.get_floatingips()
self.assertEqual(floatingips_info, floatings_info_result)
def test_get_security_groups(self):
fake_secgroups_list = {
'security_groups': [
{'id': 'fake_secgr_id_1',
'tenant_id': 'fake_tenant_id_1',
'name': 'fake_secgr_name_1',
'security_group_rules': [
{'remote_group_id': None,
'direction': 'egress',
'remote_ip_prefix': None,
'protocol': 'fake_protocol',
'tenant_id': 'fake_tenant_id_1',
'port_range_max': 22,
'security_group_id': 'fake_secgr_id_1',
'port_range_min': 22,
'ethertype': 'IPv4',
'id': 'fake_secgr_rule_id_1'}
],
'description': 'fake_secgr_1_description'}
]
}
self.neutron_mock_client().list_security_groups.return_value = \
fake_secgroups_list
secgr_info = {'name': 'fake_secgr_name_1',
'id': 'fake_secgr_id_1',
'tenant_id': 'fake_tenant_id_1',
'tenant_name': 'fake_tenant_name_1',
'description': 'fake_secgr_1_description',
'meta': {}}
rule_info = {'remote_group_id': None,
'direction': 'egress',
'remote_ip_prefix': None,
'protocol': 'fake_protocol',
'port_range_min': 22,
'port_range_max': 22,
'ethertype': 'IPv4',
'security_group_id': 'fake_secgr_id_1',
'meta': {}}
rule_info['rule_hash'] = \
self.neutron_network_client.get_resource_hash(rule_info,
'direction',
'remote_ip_prefix',
'protocol',
'port_range_min',
'port_range_max',
'ethertype')
secgr_info['security_group_rules'] = [rule_info]
secgr_info['res_hash'] = \
self.neutron_network_client.get_resource_hash(secgr_info,
'name',
'tenant_name',
'description')
secgroups_info = [secgr_info]
self.network_mock.get_resource_hash.side_effect = [
rule_info['rule_hash'],
secgr_info['res_hash']
]
secgr_info_result = self.neutron_network_client.get_sec_gr_and_rules()
self.assertEqual(secgroups_info, secgr_info_result)
def test_upload_neutron_security_groups(self):
sg1_info = {'name': 'fake_secgr_name_1',
'tenant_name': 'fake_tenant_name_1',
'description': 'fake_secgr_1_description',
'res_hash': 'fake_secgr_1_hash',
'meta': {}}
sg2_info = {'name': 'fake_secgr_name_2',
'tenant_name': 'fake_tenant_name_1',
'description': 'fake_secgr_2_description',
'res_hash': 'fake_secgr_2_hash',
'meta': {}}
self.neutron_network_client.get_sec_gr_and_rules = \
mock.Mock(return_value=[sg1_info])
fake_secgs = [sg1_info, sg2_info]
self.neutron_network_client.upload_neutron_security_groups(fake_secgs)
sec_gr_info = {
'security_group': {'name': 'fake_secgr_name_2',
'tenant_id': 'fake_tenant_id_1',
'description': 'fake_secgr_2_description'}}
self.neutron_mock_client().create_security_group.\
assert_called_once_with(sec_gr_info)
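# In the scenario above the destination already reports sg1 (via the mocked
# get_sec_gr_and_rules), so only sg2 gets created. Illustrative "create what is missing"
# filter, assuming groups are matched by their res_hash (an assumption for illustration;
# see the production code for the actual matching rule):
# to_create = [sg for sg in fake_secgs
#              if sg['res_hash'] not in {ex['res_hash'] for ex in existing_groups}]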
def test_upload_sec_group_rules(self):
sg1_info = {
'name': 'fake_secgr_name_1',
'tenant_name': 'fake_tenant_name_1',
'description': 'fake_secgr_1_description',
'security_group_rules': [{'remote_group_id': None,
'direction': 'egress',
'remote_ip_prefix': None,
'protocol': 'tcp',
'port_range_min': 22,
'port_range_max': 22,
'ethertype': 'IPv4',
'security_group_id': 'fake_secgr_id_1',
'rule_hash': 'fake_rule_1_hash',
'meta': {}}],
'res_hash': 'fake_secgr_1_hash'}
sg2_info = {
'name': 'fake_secgr_name_2',
'tenant_name': 'fake_tenant_name_1',
'description': 'fake_secgr_2_description',
'security_group_rules': [{'remote_group_id': None,
'direction': 'egress',
'remote_ip_prefix': None,
'protocol': 'tcp',
'port_range_min': 80,
'port_range_max': 80,
'ethertype': 'IPv4',
'security_group_id': 'fake_secgr_id_2',
'rule_hash': 'fake_rule_2_hash',
'meta': {}}],
'res_hash': 'fake_secgr_2_hash'}
existing_sg2_info = {
'name': 'fake_secgr_name_2',
'tenant_id': 'fake_existing_tenant_id',
'tenant_name': 'fake_tenant_name_1',
'id': 'fake_existing_secgr_id_2',
'description': 'fake_secgr_2_description',
'security_group_rules': [
{'remote_group_id': None,
'direction': 'egress',
'remote_ip_prefix': None,
'protocol': None,
'port_range_min': None,
'port_range_max': None,
'ethertype': 'IPv4',
'security_group_id': 'fake_existing_secgr_id_2',
'rule_hash': 'fake_rule_2.1_hash'}
],
'res_hash': 'fake_secgr_2_hash'}
fake_existing_secgroups = [sg1_info, existing_sg2_info]
self.neutron_network_client.get_sec_gr_and_rules = \
mock.Mock(return_value=fake_existing_secgroups)
self.neutron_network_client.upload_sec_group_rules([sg1_info,
sg2_info])
rule_info = {
'security_group_rule': {
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 80,
'port_range_max': 80,
'ethertype': 'IPv4',
'remote_ip_prefix': None,
'security_group_id': 'fake_existing_secgr_id_2',
'tenant_id': 'fake_existing_tenant_id'}
}
self.neutron_mock_client().create_security_group_rule.\
assert_called_once_with(rule_info)
def test_upload_networks(self):
fake_existing_nets = [self.net_2_info]
self.neutron_network_client.get_networks = \
mock.Mock(return_value=fake_existing_nets)
network_info = {
'network': {'name': 'fake_network_name_1',
'admin_state_up': True,
'tenant_id': 'fake_tenant_id_1',
'shared': False,
'router:external': False,
'provider:physical_network': None,
'provider:network_type': 'gre'
}}
self.neutron_network_client.upload_networks([self.net_1_info])
if network_info['network']['provider:physical_network']:
self.neutron_mock_client().create_network.\
assert_called_once_with(network_info)
def test_upload_subnets(self):
src_net_info = copy.deepcopy(self.net_1_info)
src_net_info['subnet_names'].append('fake_subnet_name_2')
dst_net_info = self.net_1_info
subnet1_info = self.subnet_1_info
subnet2_info = copy.deepcopy(self.subnet_2_info)
subnet2_info['network_name'] = 'fake_network_name_1'
subnet2_info['network_id'] = 'fake_network_id_1'
subnet2_info['tenant_name'] = 'fake_tenant_name_1'
self.neutron_network_client.get_networks = \
mock.Mock(return_value=[dst_net_info])
self.neutron_network_client.get_subnets = \
mock.Mock(return_value=[{'res_hash': 'fake_subnet_hash_1'}])
subnet_info = {
'subnet': {'name': 'fake_subnet_name_2',
'enable_dhcp': True,
'network_id': 'fake_network_id_1',
'cidr': 'fake_cidr_2',
'allocation_pools': [{'start': 'fake_start_ip_2',
'end': 'fake_end_ip_2'}],
'gateway_ip': 'fake_gateway_ip_2',
'ip_version': 4,
'tenant_id': 'fake_tenant_id_1'}}
self.neutron_network_client.upload_subnets([src_net_info],
[subnet1_info,
subnet2_info])
self.neutron_mock_client().create_subnet.\
assert_called_once_with(subnet_info)
def test_upload_routers(self):
router1_info = {
'name': 'fake_router_name_1',
'id': 'fake_router_id_1',
'admin_state_up': True,
'routes': [],
'external_gateway_info': {'network_id': 'fake_network_id_1',
'enable_snat': True},
'ext_net_name': 'fake_network_name_1',
'ext_net_tenant_name': 'fake_tenant_name_1',
'ext_net_id': 'fake_network_id_1',
'tenant_name': 'fake_tenant_name_1',
'ips': ['fake_ipaddr_1'],
'subnet_ids': ['fake_subnet_id_1'],
'res_hash': 'fake_router_hash_1',
'meta': {}}
router2_info = {
'name': 'fake_router_name_2',
'id': 'fake_router_id_2',
'admin_state_up': True,
'routes': [],
'external_gateway_info': {'network_id': 'fake_network_id_2',
'enable_snat': True},
'ext_net_name': 'fake_network_name_2',
'ext_net_tenant_name': 'fake_tenant_name_2',
'ext_net_id': 'fake_network_id_2',
'tenant_name': 'fake_tenant_name_2',
'ips': ['fake_ipaddr_2'],
'subnet_ids': ['fake_subnet_id_2'],
'res_hash': 'fake_router_hash_2',
'meta': {}}
src_nets_info = [self.net_1_info, self.net_2_info]
src_subnets_info = [self.subnet_1_info, self.subnet_2_info]
src_routers_info = [router1_info, router2_info]
self.neutron_network_client.get_networks = \
mock.Mock(return_value=src_nets_info)
self.neutron_network_client.get_subnets = \
mock.Mock(return_value=src_subnets_info)
self.neutron_network_client.get_routers = \
mock.Mock(return_value=[router1_info])
self.neutron_network_client.add_router_interfaces = \
mock.Mock(return_value=None)
router_info = {
'router': {'name': 'fake_router_name_2',
'tenant_id': 'fake_tenant_id_2',
'external_gateway_info': {
'network_id': 'fake_network_id_2'
}}}
self.neutron_network_client.upload_routers(src_nets_info,
src_subnets_info,
src_routers_info)
self.neutron_mock_client().create_router.\
assert_called_once_with(router_info)
def test_add_router_interfaces(self):
src_router = {'id': 'fake_router_id_1',
'subnet_ids': ['fake_subnet_id_1'],
'external_gateway_info': None}
src_subnets = [{'id': 'fake_subnet_id_1',
'external': False,
'res_hash': 'fake_subnet_hash'}]
dst_router = {'id': 'fake_router_id_2',
'subnet_ids': ['fake_subnet_id_2'],
'external_gateway_info': None}
dst_subnets = [{'id': 'fake_subnet_id_2',
'external': False,
'res_hash': 'fake_subnet_hash'}]
self.neutron_network_client.add_router_interfaces(src_router,
dst_router,
src_subnets,
dst_subnets)
self.neutron_mock_client().add_interface_router.\
assert_called_once_with('fake_router_id_2',
{'subnet_id': 'fake_subnet_id_2'})
|
|
import numpy as np
import pytest
from sklearn.multioutput import MultiOutputRegressor
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from skopt import gp_minimize
from skopt import forest_minimize
from skopt.benchmarks import bench1, bench1_with_time
from skopt.benchmarks import branin
from skopt.learning import ExtraTreesRegressor, RandomForestRegressor
from skopt.learning import GradientBoostingQuantileRegressor
from skopt.optimizer import Optimizer
from scipy.optimize import OptimizeResult
TREE_REGRESSORS = (ExtraTreesRegressor(random_state=2),
RandomForestRegressor(random_state=2),
GradientBoostingQuantileRegressor(random_state=2))
ACQ_FUNCS_PS = ["EIps", "PIps"]
ACQ_FUNCS_MIXED = ["EI", "EIps"]
ESTIMATOR_STRINGS = ["GP", "RF", "ET", "GBRT", "DUMMY",
"gp", "rf", "et", "gbrt", "dummy"]
@pytest.mark.fast_test
def test_multiple_asks():
# calling ask() multiple times without a tell() in between should
# be a "no op"
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,
acq_optimizer="sampling")
opt.run(bench1, n_iter=3)
# tell() computes the next point ready for the next call to ask()
# hence there are three after three iterations
assert_equal(len(opt.models), 3)
assert_equal(len(opt.Xi), 3)
opt.ask()
assert_equal(len(opt.models), 3)
assert_equal(len(opt.Xi), 3)
assert_equal(opt.ask(), opt.ask())
opt.update_next()
assert_equal(opt.ask(), opt.ask())
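# For reference, opt.run(bench1, n_iter=3) used above is equivalent to an explicit
# ask/tell loop (illustrative sketch):
# for _ in range(3):
#     x = opt.ask()
#     opt.tell(x, bench1(x))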
@pytest.mark.fast_test
def test_model_queue_size():
# Check if model_queue_size limits the model queue size
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,
acq_optimizer="sampling", model_queue_size=2)
opt.run(bench1, n_iter=3)
# tell() computes the next point ready for the next call to ask()
# hence there are three after three iterations
assert_equal(len(opt.models), 2)
assert_equal(len(opt.Xi), 3)
opt.ask()
assert_equal(len(opt.models), 2)
assert_equal(len(opt.Xi), 3)
assert_equal(opt.ask(), opt.ask())
@pytest.mark.fast_test
def test_invalid_tell_arguments():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,
acq_optimizer="sampling")
# can't have single point and multiple values for y
assert_raises(ValueError, opt.tell, [1.], [1., 1.])
@pytest.mark.fast_test
def test_invalid_tell_arguments_list():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,
acq_optimizer="sampling")
assert_raises(ValueError, opt.tell, [[1.], [2.]], [1., None])
@pytest.mark.fast_test
def test_bounds_checking_1D():
low = -2.
high = 2.
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(low, high)], base_estimator, n_initial_points=1,
acq_optimizer="sampling")
assert_raises(ValueError, opt.tell, [high + 0.5], 2.)
assert_raises(ValueError, opt.tell, [low - 0.5], 2.)
# feed two points to tell() at once
assert_raises(ValueError, opt.tell, [high + 0.5, high], (2., 3.))
assert_raises(ValueError, opt.tell, [low - 0.5, high], (2., 3.))
@pytest.mark.fast_test
def test_bounds_checking_2D():
low = -2.
high = 2.
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(low, high), (low+4, high+4)], base_estimator,
n_initial_points=1, acq_optimizer="sampling")
assert_raises(ValueError, opt.tell, [high + 0.5, high + 4.5], 2.)
assert_raises(ValueError, opt.tell, [low - 0.5, low - 4.5], 2.)
# first out, second in
assert_raises(ValueError, opt.tell, [high + 0.5, high + 0.5], 2.)
assert_raises(ValueError, opt.tell, [low - 0.5, high + 0.5], 2.)
@pytest.mark.fast_test
def test_bounds_checking_2D_multiple_points():
low = -2.
high = 2.
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(low, high), (low+4, high+4)], base_estimator,
n_initial_points=1, acq_optimizer="sampling")
# first component out, second in
assert_raises(ValueError, opt.tell,
[(high + 0.5, high + 0.5), (high + 0.5, high + 0.5)],
[2., 3.])
assert_raises(ValueError, opt.tell,
[(low - 0.5, high + 0.5), (low - 0.5, high + 0.5)],
[2., 3.])
@pytest.mark.fast_test
def test_dimension_checking_1D():
low = -2
high = 2
opt = Optimizer([(low, high)])
with pytest.raises(ValueError) as e:
# within bounds but one dimension too many
opt.tell([low+1, low+1], 2.)
assert "Dimensions of point " in str(e.value)
@pytest.mark.fast_test
def test_dimension_checking_2D():
low = -2
high = 2
opt = Optimizer([(low, high), (low, high)])
# within bounds but one dimension too few
with pytest.raises(ValueError) as e:
opt.tell([low+1, ], 2.)
assert "Dimensions of point " in str(e.value)
# within bounds but one dimension too many
with pytest.raises(ValueError) as e:
opt.tell([low+1, low+1, low+1], 2.)
assert "Dimensions of point " in str(e.value)
@pytest.mark.fast_test
def test_dimension_checking_2D_multiple_points():
low = -2
high = 2
opt = Optimizer([(low, high), (low, high)])
# within bounds but one dimension too few
with pytest.raises(ValueError) as e:
opt.tell([[low+1, ], [low+1, low+2], [low+1, low+3]], 2.)
assert "dimensions as the space" in str(e.value)
# within bounds but one dimension too many
with pytest.raises(ValueError) as e:
opt.tell([[low + 1, low + 1, low + 1], [low + 1, low + 2],
[low + 1, low + 3]], 2.)
assert "dimensions as the space" in str(e.value)
@pytest.mark.fast_test
def test_returns_result_object():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,
acq_optimizer="sampling")
result = opt.tell([1.5], 2.)
assert isinstance(result, OptimizeResult)
assert_equal(len(result.x_iters), len(result.func_vals))
assert_equal(np.min(result.func_vals), result.fun)
@pytest.mark.fast_test
@pytest.mark.parametrize("base_estimator", TREE_REGRESSORS)
def test_acq_optimizer(base_estimator):
with pytest.raises(ValueError) as e:
Optimizer([(-2.0, 2.0)], base_estimator=base_estimator,
n_initial_points=1, acq_optimizer='lbfgs')
assert "should run with acq_optimizer='sampling'" in str(e.value)
@pytest.mark.parametrize("base_estimator", TREE_REGRESSORS)
@pytest.mark.parametrize("acq_func", ACQ_FUNCS_PS)
def test_acq_optimizer_with_time_api(base_estimator, acq_func):
opt = Optimizer([(-2.0, 2.0),], base_estimator=base_estimator,
acq_func=acq_func,
acq_optimizer="sampling", n_initial_points=2)
x1 = opt.ask()
opt.tell(x1, (bench1(x1), 1.0))
x2 = opt.ask()
res = opt.tell(x2, (bench1(x2), 2.0))
# x1 and x2 are random.
assert x1 != x2
assert len(res.models) == 1
assert_array_equal(res.func_vals.shape, (2,))
assert_array_equal(res.log_time.shape, (2,))
# x3 = opt.ask()
with pytest.raises(TypeError) as e:
opt.tell(x2, bench1(x2))
@pytest.mark.fast_test
@pytest.mark.parametrize("acq_func", ACQ_FUNCS_MIXED)
def test_optimizer_copy(acq_func):
# Checks that the base estimator, the objective and target values
# are copied correctly.
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, acq_func=acq_func,
n_initial_points=1, acq_optimizer="sampling")
# run three iterations so that we have some points and objective values
if "ps" in acq_func:
opt.run(bench1_with_time, n_iter=3)
else:
opt.run(bench1, n_iter=3)
opt_copy = opt.copy()
copied_estimator = opt_copy.base_estimator_
if "ps" in acq_func:
assert isinstance(copied_estimator, MultiOutputRegressor)
# check that the base_estimator is not wrapped multiple times
is_multi = isinstance(copied_estimator.estimator,
MultiOutputRegressor)
assert not is_multi
else:
assert not isinstance(copied_estimator, MultiOutputRegressor)
assert_array_equal(opt_copy.Xi, opt.Xi)
assert_array_equal(opt_copy.yi, opt.yi)
@pytest.mark.parametrize("base_estimator", ESTIMATOR_STRINGS)
def test_exhaust_initial_calls(base_estimator):
# check a model is fitted and used to make suggestions after we added
# at least n_initial_points via tell()
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=2,
acq_optimizer="sampling", random_state=1)
x0 = opt.ask() # random point
x1 = opt.ask() # random point
assert x0 != x1
# first call to tell()
r1 = opt.tell(x1, 3.)
assert len(r1.models) == 0
x2 = opt.ask() # random point
assert x1 != x2
# second call to tell()
r2 = opt.tell(x2, 4.)
if base_estimator.lower() == 'dummy':
assert len(r2.models) == 0
else:
assert len(r2.models) == 1
# this is the first non-random point
x3 = opt.ask()
assert x2 != x3
x4 = opt.ask()
r3 = opt.tell(x3, 1.)
# no new information was added, so the suggestion should be the same, unless we are
# using the dummy estimator, which keeps returning random points and never fits
# any models
if base_estimator.lower() == 'dummy':
assert x3 != x4
assert len(r3.models) == 0
else:
assert x3 == x4
assert len(r3.models) == 2
@pytest.mark.fast_test
def test_optimizer_base_estimator_string_invalid():
with pytest.raises(ValueError) as e:
Optimizer([(-2.0, 2.0)], base_estimator="rtr",
n_initial_points=1)
assert "'RF', 'ET', 'GP', 'GBRT' or 'DUMMY'" in str(e.value)
@pytest.mark.fast_test
@pytest.mark.parametrize("base_estimator", ESTIMATOR_STRINGS)
def test_optimizer_base_estimator_string_smoke(base_estimator):
opt = Optimizer([(-2.0, 2.0)], base_estimator=base_estimator,
n_initial_points=2, acq_func="EI")
opt.run(func=lambda x: x[0]**2, n_iter=3)
@pytest.mark.fast_test
def test_optimizer_base_estimator_string_smoke_njobs():
opt = Optimizer([(-2.0, 2.0)], base_estimator="GBRT",
n_initial_points=1, acq_func="EI", n_jobs=-1)
opt.run(func=lambda x: x[0]**2, n_iter=3)
def test_defaults_are_equivalent():
# check that the defaults of Optimizer reproduce the defaults of
# gp_minimize
space = [(-5., 10.), (0., 15.)]
#opt = Optimizer(space, 'ET', acq_func="EI", random_state=1)
opt = Optimizer(space, random_state=1)
for n in range(12):
x = opt.ask()
res_opt = opt.tell(x, branin(x))
#res_min = forest_minimize(branin, space, n_calls=12, random_state=1)
res_min = gp_minimize(branin, space, n_calls=12, random_state=1)
assert res_min.space == res_opt.space
# tolerate small differences in the points sampled
assert np.allclose(res_min.x_iters, res_opt.x_iters)#, atol=1e-5)
assert np.allclose(res_min.x, res_opt.x)#, atol=1e-5)
res_opt2 = opt.get_result()
assert np.allclose(res_min.x_iters, res_opt2.x_iters) # , atol=1e-5)
assert np.allclose(res_min.x, res_opt2.x) # , atol=1e-5)
@pytest.mark.fast_test
def test_dimensions_names():
from skopt.space import Real, Categorical, Integer
# create search space and optimizer
space = [Real(0, 1, name='real'),
Categorical(['a', 'b', 'c'], name='cat'),
Integer(0, 1, name='int')]
opt = Optimizer(space, n_initial_points=2)
# the result returned by the optimizer should preserve the dimension names
result = opt.tell([(0.5, 'a', 0.5)], [3])
names = []
for d in result.space.dimensions:
names.append(d.name)
assert len(names) == 3
assert "real" in names
assert "cat" in names
assert "int" in names
assert None not in names
@pytest.mark.fast_test
def test_categorical_only():
from skopt.space import Categorical
cat1 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
cat2 = Categorical([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
opt = Optimizer([cat1, cat2])
for n in range(15):
x = opt.ask()
res = opt.tell(x, 12 * n)
assert len(res.x_iters) == 15
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
cat3 = Categorical(["2", "3", "4", "5", "6", "7", "8", "9", "10", "11"])
cat4 = Categorical(["2", "3", "4", "5", "6", "7", "8", "9", "10", "11"])
opt = Optimizer([cat3, cat4])
for n in range(15):
x = opt.ask()
res = opt.tell(x, 12 * n)
assert len(res.x_iters) == 15
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
def test_categorical_only2():
from numpy import linalg
from skopt.space import Categorical
from skopt.learning import GaussianProcessRegressor
space = [Categorical([1, 2, 3]), Categorical([4, 5, 6])]
opt = Optimizer(space,
base_estimator=GaussianProcessRegressor(alpha=1e-7),
acq_optimizer='lbfgs',
n_initial_points=10,
n_jobs=2)
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
opt.tell(next_x, [linalg.norm(x) for x in next_x])
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
opt.tell(next_x, [linalg.norm(x) for x in next_x])
next_x = opt.ask(n_points=4)
assert len(next_x) == 4
|
|
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import itertools
import random
import mock
import mox
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import network
from nova.network import api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.objects import fixed_ip as fixed_ip_obj
from nova import policy
from nova import test
from nova.tests.objects import test_fixed_ip
from nova import utils
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
super(NetworkPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(NetworkPolicyTestCase, self).tearDown()
policy.reset()
def test_check_policy(self):
self.mox.StubOutWithMock(policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
policy.enforce(self.context, 'network:get_all', target)
self.mox.ReplayAll()
api.check_policy(self.context, 'get_all')
class ApiTestCase(test.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.network_api = network.API()
self.context = context.RequestContext('fake-user',
'fake-project')
def test_allocate_for_instance_handles_macs_passed(self):
# If a macs argument is supplied to the 'nova-network' API, it is just
# ignored. This test checks that the call down to the rpcapi layer
# doesn't pass macs down: nova-network doesn't support hypervisor
# mac address limits (today anyhow).
macs = set(['ab:cd:ef:01:23:34'])
self.mox.StubOutWithMock(
self.network_api.network_rpcapi, "allocate_for_instance")
kwargs = dict(zip(['host', 'instance_id', 'project_id',
'requested_networks', 'rxtx_factor', 'vpn', 'macs',
'dhcp_options'],
itertools.repeat(mox.IgnoreArg())))
self.network_api.network_rpcapi.allocate_for_instance(
mox.IgnoreArg(), **kwargs).AndReturn([])
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 0
sys_meta = flavors.save_flavor_info({}, flavor)
instance = dict(id='id', uuid='uuid', project_id='project_id',
host='host', system_metadata=utils.dict_to_metadata(sys_meta))
self.network_api.allocate_for_instance(
self.context, instance, 'vpn', 'requested_networks', macs=macs)
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
new_instance = {'uuid': 'new-uuid'}
def fake_associate(*args, **kwargs):
return orig_instance_uuid
self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
fake_associate)
def fake_instance_get_by_uuid(context, instance_uuid):
return {'uuid': instance_uuid}
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
def fake_get_nw_info(ctxt, instance):
class FakeNWInfo(object):
def json(self):
pass
return FakeNWInfo()
self.stubs.Set(self.network_api, '_get_instance_nw_info',
fake_get_nw_info)
if orig_instance_uuid:
expected_updated_instances = [new_instance['uuid'],
orig_instance_uuid]
else:
expected_updated_instances = [new_instance['uuid']]
def fake_instance_info_cache_update(context, instance_uuid, cache):
self.assertEqual(instance_uuid,
expected_updated_instances.pop())
self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
fake_instance_info_cache_update)
def fake_update_instance_cache_with_nw_info(api, context, instance,
nw_info=None,
update_cells=True):
return
self.stubs.Set(api, "update_instance_cache_with_nw_info",
fake_update_instance_cache_with_nw_info)
self.network_api.associate_floating_ip(self.context,
new_instance,
'172.24.4.225',
'10.0.0.2')
def test_associate_preassociated_floating_ip(self):
self._do_test_associate_floating_ip('orig-uuid')
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
def test_get_floating_ip_invalid_id(self):
self.assertRaises(exception.InvalidID,
self.network_api.get_floating_ip,
self.context, '123zzz')
def _stub_migrate_instance_calls(self, method, multi_host, info):
fake_flavor = flavors.get_default_flavor()
fake_flavor['rxtx_factor'] = 1.21
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, fake_flavor))
fake_instance = {'uuid': 'fake_uuid',
'instance_type_id': fake_flavor['id'],
'project_id': 'fake_project_id',
'system_metadata': sys_meta}
fake_migration = {'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest'}
def fake_mig_inst_method(*args, **kwargs):
info['kwargs'] = kwargs
def fake_is_multi_host(*args, **kwargs):
return multi_host
def fake_get_floaters(*args, **kwargs):
return ['fake_float1', 'fake_float2']
self.stubs.Set(network_rpcapi.NetworkAPI, method,
fake_mig_inst_method)
self.stubs.Set(self.network_api, '_is_multi_host',
fake_is_multi_host)
self.stubs.Set(self.network_api, '_get_floating_ip_addresses',
fake_get_floaters)
expected = {'instance_uuid': 'fake_uuid',
'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest',
'rxtx_factor': 1.21,
'project_id': 'fake_project_id',
'floating_addresses': None}
if multi_host:
expected['floating_addresses'] = ['fake_float1', 'fake_float2']
return fake_instance, fake_migration, expected
def test_migrate_instance_start_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', True, info)
expected['host'] = 'fake_compute_source'
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_start_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', False, info)
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', True, info)
expected['host'] = 'fake_compute_dest'
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', False, info)
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_is_multi_host_instance_has_no_fixed_ip(self):
def fake_fixed_ip_get_by_instance(ctxt, uuid):
raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
self.assertFalse(self.network_api._is_multi_host(self.context,
instance))
def test_is_multi_host_network_has_no_project_id(self):
is_multi_host = random.choice([True, False])
network = {'project_id': None,
'multi_host': is_multi_host, }
network_ref = self.network_api.db.network_create_safe(
self.context.elevated(),
network)
def fake_fixed_ip_get_by_instance(ctxt, uuid):
fixed_ip = [{'network_id': network_ref['id'],
'instance_uuid': FAKE_UUID, }]
return fixed_ip
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
result = self.network_api._is_multi_host(self.context, instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_project_id(self):
is_multi_host = random.choice([True, False])
network = {'project_id': self.context.project_id,
'multi_host': is_multi_host, }
network_ref = self.network_api.db.network_create_safe(
self.context.elevated(),
network)
def fake_fixed_ip_get_by_instance(ctxt, uuid):
fixed_ip = [{'network_id': network_ref['id'],
'instance_uuid': FAKE_UUID, }]
return fixed_ip
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
result = self.network_api._is_multi_host(self.context, instance)
self.assertEqual(is_multi_host, result)
def test_network_disassociate_project(self):
def fake_network_disassociate(ctx, network_id, disassociate_host,
disassociate_project):
self.assertEqual(network_id, 1)
self.assertEqual(disassociate_host, False)
self.assertEqual(disassociate_project, True)
def fake_get(context, network_uuid):
return {'id': 1}
self.stubs.Set(self.network_api.db, 'network_disassociate',
fake_network_disassociate)
self.stubs.Set(self.network_api, 'get', fake_get)
self.network_api.associate(self.context, FAKE_UUID, project=None)
@mock.patch('nova.db.fixed_ip_get_by_address')
def test_get_fixed_ip_by_address(self, fip_get):
fip_get.return_value = test_fixed_ip.fake_fixed_ip
fip = self.network_api.get_fixed_ip_by_address(self.context,
'fake-addr')
self.assertIsInstance(fip, fixed_ip_obj.FixedIP)
class TestUpdateInstanceCache(test.TestCase):
def setUp(self):
super(TestUpdateInstanceCache, self).setUp()
self.context = context.get_admin_context()
self.instance = {'uuid': FAKE_UUID}
self.impl = self.mox.CreateMock(api.API)
vifs = [network_model.VIF(id='super_vif')]
self.nw_info = network_model.NetworkInfo(vifs)
self.is_nw_info = mox.Func(lambda d: 'super_vif' in d['network_info'])
def expect_cache_update(self, nw_info):
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_info_cache_update(self.context,
self.instance['uuid'],
nw_info)
def test_update_nw_info_none(self):
self.impl._get_instance_nw_info(self.context, self.instance)\
.AndReturn(self.nw_info)
self.expect_cache_update(self.is_nw_info)
self.mox.ReplayAll()
api.update_instance_cache_with_nw_info(self.impl, self.context,
self.instance, None)
def test_update_nw_info_one_network(self):
self.expect_cache_update(self.is_nw_info)
self.mox.ReplayAll()
api.update_instance_cache_with_nw_info(self.impl, self.context,
self.instance, self.nw_info)
def test_update_nw_info_empty_list(self):
self.expect_cache_update({'network_info': '[]'})
self.mox.ReplayAll()
api.update_instance_cache_with_nw_info(self.impl, self.context,
self.instance,
network_model.NetworkInfo([]))
def test_decorator_return_object(self):
@api.refresh_cache
def func(self, context, instance):
return network_model.NetworkInfo([])
self.expect_cache_update({'network_info': '[]'})
self.mox.ReplayAll()
func(self.impl, self.context, self.instance)
def test_decorator_return_none(self):
@api.refresh_cache
def func(self, context, instance):
pass
self.impl._get_instance_nw_info(self.context, self.instance)\
.AndReturn(self.nw_info)
self.expect_cache_update(self.is_nw_info)
self.mox.ReplayAll()
func(self.impl, self.context, self.instance)
|
|
# -*- coding: utf-8 -*-
import furl
from rest_framework import status as http_status
from future.moves.urllib.parse import urlencode
import markupsafe
from django.core.exceptions import ValidationError
from django.utils import timezone
from flask import request
from addons.osfstorage.models import Region
from framework import forms, sentry, status
from framework import auth as framework_auth
from framework.auth import exceptions
from framework.auth import cas, campaigns
from framework.auth import logout as osf_logout
from framework.auth import get_user
from framework.auth.exceptions import DuplicateEmailError, ExpiredTokenError, InvalidTokenError
from framework.auth.core import generate_verification_key
from framework.auth.decorators import block_bing_preview, collect_auth, must_be_logged_in
from framework.auth.forms import ResendConfirmationForm, ForgotPasswordForm, ResetPasswordForm
from framework.auth.utils import ensure_external_identity_uniqueness, validate_recaptcha
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions.utils import remove_sessions_for_user, remove_session
from framework.sessions import get_session
from framework.utils import throttle_period_expired
from osf.models import OSFUser
from osf.utils.sanitize import strip_html
from website import settings, mails, language
from api.waffle.utils import storage_i18n_flag_active
from website.util import web_url_for
from osf.exceptions import ValidationValueError, BlockedEmailError
from osf.models.provider import PreprintProvider
from osf.models.tag import Tag
from osf.utils.requests import check_select_for_update
from website.util.metrics import CampaignClaimedTags, CampaignSourceTags
from website.ember_osf_web.decorators import ember_flag_is_active
from osf import features
#from osf.models import PreprintProvider
@block_bing_preview
@collect_auth
def reset_password_get(auth, uid=None, token=None):
"""Identical to ``reset_password_institution_get`` b/c ``website/routes.py`` framework requires
unique view methods for routes using the same renderer/template."""
return _reset_password_get(auth, uid=uid, token=token)
@block_bing_preview
@collect_auth
def reset_password_institution_get(auth, uid=None, token=None):
"""Identical to ``reset_password_get`` b/c ``website/routes.py`` framework requires unique view
methods for routes using the same renderer/template."""
return _reset_password_get(auth, uid=uid, token=token, institutional=True)
def _reset_password_get(auth, uid=None, token=None, institutional=False):
"""
View for user to land on the reset password page. Takes a unique token generated by the
forgot-password page and (if valid) grants the user a new temporary token to allow them to
reset their password. User is redirected to the reset password page. If token is not valid,
returns an error page and 400 Bad Request status code.
HTTP Method: GET
:param auth: the authentication state
:param uid: the user id
:param token: the token in verification key
:param institutional: is this the institutional reset password page?
:return
:raises: HTTPError(http_status.HTTP_400_BAD_REQUEST) if verification key for the user is invalid, has expired or was used
"""
# if users are logged in, log them out and redirect back to this page
if auth.logged_in:
return auth_logout(redirect_url=request.url)
# Check if request bears a valid pair of `uid` and `token`
user_obj = OSFUser.load(uid)
if not (user_obj and user_obj.verify_password_token(token=token)):
error_data = {
'message_short': 'Invalid Request.',
'message_long': 'The requested URL is invalid, has expired, or was already used',
}
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=error_data)
# refresh the verification key (v2)
user_obj.verification_key_v2 = generate_verification_key(verification_type='password')
user_obj.save()
# override routes.py login_url to redirect to dashboard
service_url = web_url_for('dashboard', _absolute=True)
return {
'uid': user_obj._id,
'token': user_obj.verification_key_v2['token'],
'login_url': service_url,
'isInstitutional': institutional,
# view knows paths better than template
'resetPath': 'resetpassword-institution' if institutional else 'resetpassword',
}
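# Illustrative summary of the reset-password token flow implemented by the views in this
# module (comments only; the view bodies are authoritative):
#   1. the forgot-password email carries a link to the reset route built from the user's
#      verification_key_v2 token (see _forgot_password_post below)
#   2. GET  -> _reset_password_get validates the token and issues a fresh verification_key_v2
#   3. POST -> reset_password_post validates again, sets the new password, clears
#      verification_key_v2 and issues a one-time verification_key for the CAS login redirect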
def reset_password_post(uid=None, token=None):
"""
View for user to submit reset password form. Accepts a temporary password token. If password
token is valid, reset user's password to the value from the submitted form.
HTTP Method: POST
:param uid: the user id
:param token: the token in verification key
:return:
:raises: HTTPError(http_status.HTTP_400_BAD_REQUEST) if verification key for the user is invalid, has expired or was used
"""
form = ResetPasswordForm(request.form)
# Check if request bears a valid pair of `uid` and `token`
user_obj = OSFUser.load(uid)
if not (user_obj and user_obj.verify_password_token(token=token)):
error_data = {
'message_short': 'Invalid Request.',
'message_long': 'The requested URL is invalid, has expired, or was already used',
}
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=error_data)
if not form.validate():
# Don't go anywhere
forms.push_errors_to_status(form.errors)
else:
# clear verification key (v2)
user_obj.verification_key_v2 = {}
# new verification key (v1) for CAS
user_obj.verification_key = generate_verification_key(verification_type=None)
try:
user_obj.set_password(form.password.data)
osf4m_source_tag, created = Tag.all_tags.get_or_create(name=CampaignSourceTags.Osf4m.value, system=True)
osf4m_claimed_tag, created = Tag.all_tags.get_or_create(name=CampaignClaimedTags.Osf4m.value, system=True)
if user_obj.all_tags.filter(id=osf4m_source_tag.id, system=True).exists():
user_obj.add_system_tag(osf4m_claimed_tag)
user_obj.save()
except exceptions.ChangePasswordError as error:
for message in error.messages:
status.push_status_message(message, kind='warning', trust=False)
else:
status.push_status_message('Password reset', kind='success', trust=False)
# redirect to CAS and authenticate the user automatically with one-time verification key.
return redirect(cas.get_login_url(
web_url_for('user_account', _absolute=True),
username=user_obj.username,
verification_key=user_obj.verification_key
))
return {
'uid': user_obj._id,
'token': user_obj.verification_key_v2['token'],
}
def reset_password_institution_post(uid=None, token=None):
"""Behaves identically to ``reset_password_post``, so just dispatch to that method"""
return reset_password_post(uid=uid, token=token)
@collect_auth
def forgot_password_get(auth):
"""
View for user to land on the forgot password page. Logs user out if they are already logged
in.
HTTP Method: GET
:param auth: the authentication context
:return
"""
# if users are logged in, log them out and redirect back to this page
if auth.logged_in:
return auth_logout(redirect_url=request.url)
# override the routes.py sign-in url to redirect to the dashboard after login
context = {}
context['login_url'] = web_url_for('dashboard', _absolute=True)
return context
@collect_auth
def redirect_unsupported_institution(auth):
"""
Sends user back to the "Unsupported Institution" page on CAS. Logs user out if they are
already logged in.
HTTP Method: GET
:param auth: the authentication context
:return
"""
cas_unsupp_inst_url = cas.get_login_url('', campaign='unsupportedinstitution')
# if users are logged in, log them out and redirect back to this page
if auth.logged_in:
return auth_logout(redirect_url=cas_unsupp_inst_url)
return redirect(cas_unsupp_inst_url)
def forgot_password_post():
"""Dispatches to ``_forgot_password_post`` passing non-institutional user mail template
and reset action."""
return _forgot_password_post(mail_template=mails.FORGOT_PASSWORD,
reset_route='reset_password_get')
def forgot_password_institution_post():
"""Dispatches to `_forgot_password_post` passing institutional user mail template, reset
action, and setting the ``institutional`` flag."""
return _forgot_password_post(mail_template=mails.FORGOT_PASSWORD_INSTITUTION,
reset_route='reset_password_institution_get',
institutional=True)
def _forgot_password_post(mail_template, reset_route, institutional=False):
"""
View for user to submit forgot password form (standard or institutional). Validates submitted
form and sends reset-password link via email if valid. If user has submitted another password
reset request recently, declines to create a new one and asks the user not to submit again for
a while.
Standard and institutional forgot-password requests behave similarly but use slightly different
language and interfaces. When an institution is deactivated, the user should be given the
opportunity to reclaim their account. CAS co-opts the forgot-password functionality to send a
"set a new password" email link to the institutional user. The language of the email is
adjusted from the standard context, in the response html the status message from the reset
action is displayed as regular text, and the password form is not shown.
HTTP Method: POST
:return {}
"""
form = ForgotPasswordForm(request.form, prefix='forgot_password')
if not form.validate():
# Don't go anywhere
forms.push_errors_to_status(form.errors)
else:
email = form.email.data
status_message = ('If there is an OSF account associated with {0}, an email with instructions on how to '
'reset the OSF password has been sent to {0}. If you do not receive an email and believe '
'you should have, please contact OSF Support. ').format(email)
kind = 'success'
# check if the user exists
user_obj = get_user(email=email)
if user_obj:
# rate limit forgot_password_post
if not throttle_period_expired(user_obj.email_last_sent, settings.SEND_EMAIL_THROTTLE):
status_message = 'You have recently requested to change your password. Please wait a few minutes ' \
'before trying again.'
kind = 'error'
# TODO [OSF-6673]: Use the feature in [OSF-6998] for user to resend claim email.
elif user_obj.is_active:
# new random verification key (v2)
user_obj.verification_key_v2 = generate_verification_key(verification_type='password')
user_obj.email_last_sent = timezone.now()
user_obj.save()
reset_link = furl.urljoin(
settings.DOMAIN,
web_url_for(
reset_route,
uid=user_obj._id,
token=user_obj.verification_key_v2['token']
)
)
mails.send_mail(
to_addr=email,
mail=mail_template,
reset_link=reset_link,
can_change_preferences=False,
)
# institutional forgot password page displays the message as main text, not as an alert
if institutional:
# pass isError instead of kind to template to decouple python error flag from template's
# css class
return {'message': status_message, 'isError': (kind == 'error'),
'institutional': institutional}
status.push_status_message(status_message, kind=kind, trust=False)
return {}
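# Minimal sketch of the throttle check used above, assuming email_last_sent is a timezone-aware
# datetime (or None) and SEND_EMAIL_THROTTLE is a number of seconds. Illustrative stand-in only,
# not the real framework.utils.throttle_period_expired:
def _throttle_period_expired_sketch(last_sent, throttle_seconds):
    # no email sent yet: nothing to throttle
    if not last_sent:
        return True
    # enough time has passed since the last email
    return (timezone.now() - last_sent).total_seconds() > throttle_seconds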
def login_and_register_handler(auth, login=True, campaign=None, next_url=None, logout=None):
"""
Non-view helper to handle `login` and `register` requests.
:param auth: the auth context
:param login: `True` if `GET /login`, `False` if `GET /register`
:param campaign: a target campaign defined in `auth.campaigns`
:param next_url: the service url for CAS login or redirect url for OSF
:param logout: used only for `claim_user_registered`
:return: data object that contains actions for `auth_register` and `auth_login`
:raises: http_status.HTTP_400_BAD_REQUEST
"""
# Only allow redirects which are relative root or full domain. Disallows external redirects.
if next_url and not validate_next_url(next_url):
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
data = {
'status_code': http_status.HTTP_302_FOUND if login else http_status.HTTP_200_OK,
'next_url': next_url,
'campaign': None,
'must_login_warning': False,
}
# login or register with campaign parameter
if campaign:
if validate_campaign(campaign):
# GET `/register` or `/login` with `campaign=institution`
# unlike other campaigns, institution login serves as an alternative for authentication
if campaign == 'institution':
if next_url is None:
next_url = web_url_for('dashboard', _absolute=True)
data['status_code'] = http_status.HTTP_302_FOUND
if auth.logged_in:
data['next_url'] = next_url
else:
data['next_url'] = cas.get_login_url(next_url, campaign='institution')
# for non-institution campaigns
else:
destination = next_url if next_url else campaigns.campaign_url_for(campaign)
if auth.logged_in:
# if user is already logged in, go to the campaign landing page
data['status_code'] = http_status.HTTP_302_FOUND
data['next_url'] = destination
else:
# if user is logged out, go to the osf register page with campaign context
if login:
# `GET /login?campaign=...`
data['next_url'] = web_url_for('auth_register', campaign=campaign, next=destination)
else:
# `GET /register?campaign=...`
data['campaign'] = campaign
if campaigns.is_proxy_login(campaign):
data['next_url'] = web_url_for(
'auth_login',
next=destination,
_absolute=True
)
else:
data['next_url'] = destination
else:
# invalid campaign, inform sentry and redirect to non-campaign sign up or sign in
redirect_view = 'auth_login' if login else 'auth_register'
data['status_code'] = http_status.HTTP_302_FOUND
data['next_url'] = web_url_for(redirect_view, campaigns=None, next=next_url)
data['campaign'] = None
sentry.log_message(
'{} is not a valid campaign. Please add it if this is a new one'.format(campaign)
)
# login or register with next parameter
elif next_url:
# TODO - logout is no longer used by claim_user_registered, see [#PLAT-1151]
if logout:
# handle `claim_user_registered`
data['next_url'] = next_url
if auth.logged_in:
# log user out and come back
data['status_code'] = 'auth_logout'
else:
# after logout, land on the register page with "must_login" warning
data['status_code'] = http_status.HTTP_200_OK
data['must_login_warning'] = True
elif auth.logged_in:
# if user is already logged in, redirect to `next_url`
data['status_code'] = http_status.HTTP_302_FOUND
data['next_url'] = next_url
elif login:
# `/login?next=next_url`: go to CAS login page with current request url as service url
data['status_code'] = http_status.HTTP_302_FOUND
data['next_url'] = cas.get_login_url(request.url)
else:
# `/register?next=next_url`: land on OSF register page with request url as next url
data['status_code'] = http_status.HTTP_200_OK
data['next_url'] = request.url
else:
# `/login/` or `/register/` without any parameter
if auth.logged_in:
data['status_code'] = http_status.HTTP_302_FOUND
data['next_url'] = web_url_for('dashboard', _absolute=True)
return data
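# Illustrative example (campaign name and urls are examples only): for a logged-out user hitting
# `GET /login?campaign=osf-preprints`, the handler above returns roughly
#
#     {
#         'status_code': 302,
#         'next_url': web_url_for('auth_register', campaign='osf-preprints', next=<campaign landing url>),
#         'campaign': None,
#         'must_login_warning': False,
#     }
#
# i.e. logged-out campaign logins are bounced to the register page with the campaign context preserved.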
@collect_auth
def auth_login(auth):
"""
View (no template) for OSF Login.
Redirect user based on `data` returned from `login_and_register_handler`.
`/login` only takes a valid campaign, a valid next, or no query parameter
`login_and_register_handler()` handles the following cases:
if campaign and logged in, go to campaign landing page (or valid next_url if present)
if campaign and logged out, go to campaign register page (with next_url if present)
if next_url and logged in, go to next url
if next_url and logged out, go to cas login page with current request url as service parameter
if none, go to `/dashboard` which is decorated by `@must_be_logged_in`
:param auth: the auth context
:return: redirects
"""
campaign = request.args.get('campaign')
next_url = request.args.get('next')
data = login_and_register_handler(auth, login=True, campaign=campaign, next_url=next_url)
if data['status_code'] == http_status.HTTP_302_FOUND:
return redirect(data['next_url'])
@collect_auth
@ember_flag_is_active(features.EMBER_AUTH_REGISTER)
def auth_register(auth):
"""
View for OSF register. Land on the register page, redirect or go to `auth_logout`
depending on `data` returned by `login_and_register_handler`.
`/register` only takes a valid campaign, a valid next, the logout flag or no query parameter
`login_and_register_handler()` handles the following cases:
if campaign and logged in, go to campaign landing page (or valid next_url if present)
if campaign and logged out, go to campaign register page (with next_url if present)
if next_url and logged in, go to next url
if next_url and logged out, go to cas login page with current request url as service parameter
if next_url and logout flag, log user out first and then go to the next_url
if none, go to `/dashboard` which is decorated by `@must_be_logged_in`
:param auth: the auth context
:return: land, redirect or `auth_logout`
:raise: http_status.HTTP_400_BAD_REQUEST
"""
context = {}
# a target campaign in `auth.campaigns`
campaign = request.args.get('campaign')
# the service url for CAS login or redirect url for OSF
next_url = request.args.get('next')
# TODO: no longer used for `claim_user_registered`, see [#PLAT-1151]
logout = request.args.get('logout')
# logout must have next_url
if logout and not next_url:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
data = login_and_register_handler(auth, login=False, campaign=campaign, next_url=next_url, logout=logout)
# land on register page
if data['status_code'] == http_status.HTTP_200_OK:
if data['must_login_warning']:
status.push_status_message(language.MUST_LOGIN, trust=False)
destination = cas.get_login_url(data['next_url'])
# "Already have and account?" link
context['non_institution_login_url'] = destination
# "Sign In" button in navigation bar, overwrite the default value set in routes.py
context['login_url'] = destination
# "Login through your institution" link
context['institution_login_url'] = cas.get_login_url(data['next_url'], campaign='institution')
context['preprint_campaigns'] = {k._id + '-preprints': {
'id': k._id,
'name': k.name,
'logo_path': k.get_asset_url('square_color_no_transparent')
} for k in PreprintProvider.objects.all() if k._id != 'osf'}
context['campaign'] = data['campaign']
return context, http_status.HTTP_200_OK
# redirect to url
elif data['status_code'] == http_status.HTTP_302_FOUND:
return redirect(data['next_url'])
# go to other views
elif data['status_code'] == 'auth_logout':
return auth_logout(redirect_url=data['next_url'])
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
@collect_auth
def auth_logout(auth, redirect_url=None, next_url=None):
"""
Log out, delete current session and remove OSF cookie.
If next url is valid and auth is logged in, redirect to CAS logout endpoint with the current request url as service.
If next url is valid and auth is logged out, redirect directly to the next url.
Otherwise, redirect to CAS logout or login endpoint with redirect url as service.
The CAS logout endpoint clears sessions and cookies for CAS and Shibboleth.
HTTP Method: GET
Note 1: OSF tells CAS where it wants to be redirected back after successful logout. However, the CAS logout flow may
not respect this url if the user is authenticated through a remote identity provider.
Note 2: The query parameter is named `next`; `next_url` is used in code to avoid shadowing the Python builtin `next`.
:param auth: the authentication context
:param redirect_url: url to DIRECTLY redirect after CAS logout, default is `OSF/goodbye`
:param next_url: url to redirect after OSF logout, which is after CAS logout
:return: the response
"""
# For `?next=`:
#   takes priority over `redirect_url`
#   the url must be a valid OSF next url
#   the full request url is set as the CAS service url
#   does not support `reauth`
# For `?redirect_url=`:
#   the url must be a valid CAS service url
#   the redirect url is set as the CAS service url
#   supports `reauth`
# logout/?next=<an OSF verified next url>
next_url = next_url or request.args.get('next', None)
if next_url and validate_next_url(next_url):
cas_logout_endpoint = cas.get_logout_url(request.url)
if auth.logged_in:
resp = redirect(cas_logout_endpoint)
else:
resp = redirect(next_url)
# logout/ or logout/?redirect_url=<a CAS verified redirect url>
else:
redirect_url = redirect_url or request.args.get('redirect_url') or web_url_for('goodbye', _absolute=True)
# set redirection to CAS log out (or log in if `reauth` is present)
if 'reauth' in request.args:
cas_endpoint = cas.get_login_url(redirect_url)
else:
cas_endpoint = cas.get_logout_url(redirect_url)
resp = redirect(cas_endpoint)
# perform OSF logout
osf_logout()
# set response to delete OSF cookie
resp.delete_cookie(settings.COOKIE_NAME, domain=settings.OSF_COOKIE_DOMAIN)
return resp
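# Illustrative request forms handled above (urls are examples only):
#   GET /logout/?next=/myprojects/            -> OSF-validated next url; CAS logout with the full request url
#                                                as service (or a direct redirect when already logged out)
#   GET /logout/?redirect_url=<cas service>   -> CAS logout, or CAS login when `reauth` is present
#   GET /logout/                              -> CAS logout back to the OSF goodbye page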
def auth_email_logout(token, user):
"""
When a user is adding an email or merging an account, add the email to the user and log them out.
"""
redirect_url = cas.get_logout_url(service_url=cas.get_login_url(service_url=web_url_for('index', _absolute=True)))
try:
unconfirmed_email = user.get_unconfirmed_email_for_token(token)
except InvalidTokenError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Bad token',
'message_long': 'The provided token is invalid.'
})
except ExpiredTokenError:
status.push_status_message('The private link you used is expired.')
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Expired link',
'message_long': 'The private link you used is expired.'
})
try:
user_merge = OSFUser.objects.get(emails__address=unconfirmed_email)
except OSFUser.DoesNotExist:
user_merge = False
if user_merge:
remove_sessions_for_user(user_merge)
user.email_verifications[token]['confirmed'] = True
user.save()
remove_sessions_for_user(user)
resp = redirect(redirect_url)
resp.delete_cookie(settings.COOKIE_NAME, domain=settings.OSF_COOKIE_DOMAIN)
return resp
@block_bing_preview
@collect_auth
def external_login_confirm_email_get(auth, uid, token):
"""
View for email confirmation links when a user first logs in through an external identity provider.
HTTP Method: GET
When users click the confirm link, they are expected not to be logged in. If they are, they will be logged out first
and redirected back to this view. After OSF verifies the link and performs all actions, they will be automatically
logged in through CAS and redirected back to this view again, this time authenticated.
:param auth: the auth context
:param uid: the user's primary key
:param token: the verification token
"""
user = OSFUser.load(uid)
if not user:
sentry.log_message('external_login_confirm_email_get::400 - Cannot find user')
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
destination = request.args.get('destination')
if not destination:
sentry.log_message('external_login_confirm_email_get::400 - bad destination')
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
# if user is already logged in
if auth and auth.user:
# if it is a wrong user
if auth.user._id != user._id:
return auth_logout(redirect_url=request.url)
# if it is the expected user
new = request.args.get('new', None)
if destination in campaigns.get_campaigns():
# external domain takes priority
campaign_url = campaigns.external_campaign_url_for(destination)
if not campaign_url:
campaign_url = campaigns.campaign_url_for(destination)
return redirect(campaign_url)
if new:
status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True, id='welcome_message')
return redirect(web_url_for('dashboard'))
# token is invalid
if token not in user.email_verifications:
sentry.log_message('external_login_confirm_email_get::400 - bad token')
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
verification = user.email_verifications[token]
email = verification['email']
provider = list(verification['external_identity'].keys())[0]
provider_id = list(verification['external_identity'][provider].keys())[0]
# wrong provider
if provider not in user.external_identity:
sentry.log_message('external_login_confirm_email_get::400 - Auth error...wrong provider')
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
external_status = user.external_identity[provider][provider_id]
try:
ensure_external_identity_uniqueness(provider, provider_id, user)
except ValidationError as e:
sentry.log_message('external_login_confirm_email_get::403 - Validation Error')
raise HTTPError(http_status.HTTP_403_FORBIDDEN, e.message)
if not user.is_registered:
user.register(email)
if not user.emails.filter(address=email.lower()):
user.emails.create(address=email.lower())
user.date_last_logged_in = timezone.now()
user.external_identity[provider][provider_id] = 'VERIFIED'
user.social[provider.lower()] = provider_id
del user.email_verifications[token]
user.verification_key = generate_verification_key()
user.save()
service_url = request.url
if external_status == 'CREATE':
mails.send_mail(
to_addr=user.username,
mail=mails.WELCOME,
user=user,
domain=settings.DOMAIN,
osf_support_email=settings.OSF_SUPPORT_EMAIL,
storage_flag_is_active=storage_i18n_flag_active(),
)
service_url += '&{}'.format(urlencode({'new': 'true'}))
elif external_status == 'LINK':
mails.send_mail(
user=user,
to_addr=user.username,
mail=mails.EXTERNAL_LOGIN_LINK_SUCCESS,
external_id_provider=provider,
can_change_preferences=False,
)
# redirect to CAS and authenticate the user with the verification key
return redirect(cas.get_login_url(
service_url,
username=user.username,
verification_key=user.verification_key
))
@block_bing_preview
@collect_auth
def confirm_email_get(token, auth=None, **kwargs):
"""
View for email confirmation links. Authenticates and redirects to user settings page if confirmation is successful,
otherwise shows an "Expired Link" error.
HTTP Method: GET
"""
is_merge = 'confirm_merge' in request.args
try:
if not is_merge or not check_select_for_update():
user = OSFUser.objects.get(guids___id=kwargs['uid'], guids___id__isnull=False)
else:
user = OSFUser.objects.filter(guids___id=kwargs['uid'], guids___id__isnull=False).select_for_update().get()
except OSFUser.DoesNotExist:
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
is_initial_confirmation = not user.date_confirmed
log_out = request.args.get('logout', None)
# if the user is merging or adding an email (they already are an osf user)
if log_out:
return auth_email_logout(token, user)
if auth and auth.user and (auth.user._id == user._id or auth.user._id == getattr(user.merged_by, '_id', False)):
if not is_merge:
# determine if the user registered through a campaign
campaign = campaigns.campaign_for_user(user)
if campaign:
return redirect(campaigns.campaign_url_for(campaign))
# go to home page with push notification
if auth.user.emails.count() == 1 and len(auth.user.email_verifications) == 0:
status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True, id='welcome_message')
if token in auth.user.email_verifications:
status.push_status_message(language.CONFIRM_ALTERNATE_EMAIL_ERROR, kind='danger', trust=True, id='alternate_email_error')
return redirect(web_url_for('index'))
status.push_status_message(language.MERGE_COMPLETE, kind='success', trust=False)
return redirect(web_url_for('user_account'))
try:
user.confirm_email(token, merge=is_merge)
except exceptions.EmailConfirmTokenError as e:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': e.message_short,
'message_long': str(e)
})
if is_initial_confirmation:
user.update_date_last_login()
user.save()
# send out our welcome message
mails.send_mail(
to_addr=user.username,
mail=mails.WELCOME,
user=user,
domain=settings.DOMAIN,
osf_support_email=settings.OSF_SUPPORT_EMAIL,
storage_flag_is_active=storage_i18n_flag_active(),
)
# new random verification key: allows CAS to authenticate the user once without a password
user.verification_key = generate_verification_key()
user.save()
# redirect to CAS and authenticate the user with a verification key.
return redirect(cas.get_login_url(
request.url,
username=user.username,
verification_key=user.verification_key
))
@must_be_logged_in
def unconfirmed_email_remove(auth=None):
"""
Called at login if user cancels their merge or email add.
HTTP Method: DELETE
"""
user = auth.user
json_body = request.get_json()
try:
given_token = json_body['token']
except KeyError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Missing token',
'message_long': 'Must provide a token'
})
user.clean_email_verifications(given_token=given_token)
user.save()
return {
'status': 'success',
'removed_email': json_body['address']
}, 200
@must_be_logged_in
def unconfirmed_email_add(auth=None):
"""
Called at login if user confirms their merge or email add.
HTTP Method: PUT
"""
user = auth.user
json_body = request.get_json()
try:
token = json_body['token']
except KeyError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Missing token',
'message_long': 'Must provide a token'
})
try:
user.confirm_email(token, merge=True)
except exceptions.InvalidTokenError:
raise InvalidTokenError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Invalid user token',
'message_long': 'The user token is invalid'
})
except exceptions.EmailConfirmTokenError as e:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': e.message_short,
'message_long': str(e)
})
user.save()
return {
'status': 'success',
'removed_email': json_body['address']
}, 200
def send_confirm_email(user, email, renew=False, external_id_provider=None, external_id=None, destination=None):
"""
Sends `user` a confirmation to the given `email`.
:param user: the user
:param email: the email
:param renew: refresh the token
:param external_id_provider: user's external id provider
:param external_id: user's external id
:param destination: the destination page to redirect after confirmation
:return:
:raises: KeyError if user does not have a confirmation token for the given email.
"""
confirmation_url = user.get_confirmation_url(
email,
external=True,
force=True,
renew=renew,
external_id_provider=external_id_provider,
destination=destination
)
try:
merge_target = OSFUser.objects.get(emails__address=email)
except OSFUser.DoesNotExist:
merge_target = None
campaign = campaigns.campaign_for_user(user)
branded_preprints_provider = None
logo = None
# Choose the appropriate email template; for existing-user flows (merge or email add) the logout flag is appended to the confirmation url.
if external_id_provider and external_id:
# First-time login through an external identity provider: confirmation for linking to or creating an OSF account
if user.external_identity[external_id_provider][external_id] == 'CREATE':
mail_template = mails.EXTERNAL_LOGIN_CONFIRM_EMAIL_CREATE
elif user.external_identity[external_id_provider][external_id] == 'LINK':
mail_template = mails.EXTERNAL_LOGIN_CONFIRM_EMAIL_LINK
elif merge_target:
# Merge account confirmation
mail_template = mails.CONFIRM_MERGE
confirmation_url = '{}?logout=1'.format(confirmation_url)
elif user.is_active:
# Add email confirmation
mail_template = mails.CONFIRM_EMAIL
confirmation_url = '{}?logout=1'.format(confirmation_url)
elif campaign:
# Account creation confirmation: from campaign
mail_template = campaigns.email_template_for_campaign(campaign)
if campaigns.is_proxy_login(campaign) and campaigns.get_service_provider(campaign) != 'OSF':
branded_preprints_provider = campaigns.get_service_provider(campaign)
logo = campaigns.get_campaign_logo(campaign)
else:
# Account creation confirmation: from OSF
mail_template = mails.INITIAL_CONFIRM_EMAIL
mails.send_mail(
email,
mail_template,
user=user,
confirmation_url=confirmation_url,
email=email,
merge_target=merge_target,
external_id_provider=external_id_provider,
branded_preprints_provider=branded_preprints_provider,
osf_support_email=settings.OSF_SUPPORT_EMAIL,
can_change_preferences=False,
logo=logo if logo else settings.OSF_LOGO
)
def register_user(**kwargs):
"""
Register new user account.
HTTP Method: POST
:param-json str email1:
:param-json str email2:
:param-json str password:
:param-json str fullName:
:param-json str campaign:
:raises: HTTPError(http_status.HTTP_400_BAD_REQUEST) if validation fails or user already exists
"""
# Verify that the email addresses match.
# Note: Both `landing.mako` and `register.mako` already have this check on the form. Users cannot submit the form
# if the emails do not match. However, this check should not be removed because the raw api call may be used directly.
json_data = request.get_json()
if str(json_data['email1']).lower() != str(json_data['email2']).lower():
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long='Email addresses must match.')
)
# Verify that captcha is valid
if settings.RECAPTCHA_SITE_KEY and not validate_recaptcha(json_data.get('g-recaptcha-response'), remote_ip=request.remote_addr):
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long='Invalid Captcha')
)
try:
full_name = request.json['fullName']
full_name = strip_html(full_name)
campaign = json_data.get('campaign')
if campaign and campaign not in campaigns.get_campaigns():
campaign = None
accepted_terms_of_service = timezone.now() if json_data.get('acceptedTermsOfService') else None
user = framework_auth.register_unconfirmed(
request.json['email1'],
request.json['password'],
full_name,
campaign=campaign,
accepted_terms_of_service=accepted_terms_of_service
)
framework_auth.signals.user_registered.send(user)
except (ValidationValueError, DuplicateEmailError):
raise HTTPError(
http_status.HTTP_409_CONFLICT,
data=dict(
message_long=language.ALREADY_REGISTERED.format(
email=markupsafe.escape(request.json['email1'])
)
)
)
except BlockedEmailError:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=language.BLOCKED_EMAIL)
)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
send_confirm_email(user, email=user.username)
message = language.REGISTRATION_SUCCESS.format(email=user.username)
return {'message': message}
else:
return {'message': 'You may now log in.'}
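# Illustrative JSON payload for `register_user` (all values are examples only):
#
#     {
#         "fullName": "Ada Lovelace",
#         "email1": "[email protected]",
#         "email2": "[email protected]",
#         "password": "correct-horse-battery-staple",
#         "campaign": null,
#         "acceptedTermsOfService": true,
#         "g-recaptcha-response": "<recaptcha token>"
#     }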
@collect_auth
def resend_confirmation_get(auth):
"""
View for user to land on resend confirmation page.
HTTP Method: GET
"""
# If user is already logged in, log user out
if auth.logged_in:
return auth_logout(redirect_url=request.url)
form = ResendConfirmationForm(request.form)
return {
'form': form,
}
@collect_auth
def resend_confirmation_post(auth):
"""
View for user to submit resend confirmation form.
HTTP Method: POST
"""
# If user is already logged in, log user out
if auth.logged_in:
return auth_logout(redirect_url=request.url)
form = ResendConfirmationForm(request.form)
if form.validate():
clean_email = form.email.data
user = get_user(email=clean_email)
status_message = ('If there is an OSF account associated with this unconfirmed email address {0}, '
'a confirmation email has been resent to it. If you do not receive an email and believe '
'you should have, please contact OSF Support.').format(clean_email)
kind = 'success'
if user:
if throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
try:
send_confirm_email(user, clean_email, renew=True)
except KeyError:
# email already confirmed: show a warning instead of resending
status_message = 'This email {0} has already been confirmed.'.format(clean_email)
kind = 'warning'
user.email_last_sent = timezone.now()
user.save()
else:
status_message = ('You have recently requested to resend your confirmation email. '
'Please wait a few minutes before trying again.')
kind = 'error'
status.push_status_message(status_message, kind=kind, trust=False)
else:
forms.push_errors_to_status(form.errors)
# Don't go anywhere
return {'form': form}
def external_login_email_get():
"""
Landing view for first-time oauth-login user to enter their email address.
HTTP Method: GET
"""
form = ResendConfirmationForm(request.form)
session = get_session()
if not session.is_external_first_login:
raise HTTPError(http_status.HTTP_401_UNAUTHORIZED)
external_id_provider = session.data['auth_user_external_id_provider']
auth_user_fullname = session.data.get('auth_user_fullname')
return {
'form': form,
'external_id_provider': external_id_provider,
'auth_user_fullname': auth_user_fullname,
}
def external_login_email_post():
"""
View to handle email submission for first-time oauth-login user.
HTTP Method: POST
"""
form = ResendConfirmationForm(request.form)
session = get_session()
if not session.is_external_first_login:
raise HTTPError(http_status.HTTP_401_UNAUTHORIZED)
external_id_provider = session.data['auth_user_external_id_provider']
external_id = session.data['auth_user_external_id']
fullname = session.data.get('auth_user_fullname') or form.name.data
service_url = session.data['service_url']
# TODO: @cslzchen use user tags instead of destination
destination = 'dashboard'
for campaign in campaigns.get_campaigns():
if campaign != 'institution':
# Handle different url encoding schemes between `furl` and `urlparse/urllib`.
# OSF uses `furl` to parse the service url during service validation with CAS. However, `web_url_for()` uses
# `urlparse/urllib` to generate the service url. `furl` handles `urlparse/urllib`-generated urls but
# not vice versa.
campaign_url = furl.furl(campaigns.campaign_url_for(campaign)).url
external_campaign_url = furl.furl(campaigns.external_campaign_url_for(campaign)).url
if campaigns.is_proxy_login(campaign):
# proxy campaigns: OSF Preprints and branded ones
if check_service_url_with_proxy_campaign(str(service_url), campaign_url, external_campaign_url):
destination = campaign
# continue to check branded preprints even service url matches osf preprints
if campaign != 'osf-preprints':
break
elif service_url.startswith(campaign_url):
# osf campaigns: ERPC
destination = campaign
break
if form.validate():
clean_email = form.email.data
user = get_user(email=clean_email)
external_identity = {
external_id_provider: {
external_id: None,
},
}
try:
ensure_external_identity_uniqueness(external_id_provider, external_id, user)
except ValidationError as e:
raise HTTPError(http_status.HTTP_403_FORBIDDEN, e.message)
if user:
# 1. update user oauth, with pending status
external_identity[external_id_provider][external_id] = 'LINK'
if external_id_provider in user.external_identity:
user.external_identity[external_id_provider].update(external_identity[external_id_provider])
else:
user.external_identity.update(external_identity)
if not user.accepted_terms_of_service and form.accepted_terms_of_service.data:
user.accepted_terms_of_service = timezone.now()
# 2. add unconfirmed email and send confirmation email
user.add_unconfirmed_email(clean_email, external_identity=external_identity)
user.save()
send_confirm_email(
user,
clean_email,
external_id_provider=external_id_provider,
external_id=external_id,
destination=destination
)
# 3. notify user
message = language.EXTERNAL_LOGIN_EMAIL_LINK_SUCCESS.format(
external_id_provider=external_id_provider,
email=user.username
)
kind = 'success'
# 4. remove session and osf cookie
remove_session(session)
else:
# 1. create unconfirmed user with pending status
external_identity[external_id_provider][external_id] = 'CREATE'
accepted_terms_of_service = timezone.now() if form.accepted_terms_of_service.data else None
user = OSFUser.create_unconfirmed(
username=clean_email,
password=None,
fullname=fullname,
external_identity=external_identity,
campaign=None,
accepted_terms_of_service=accepted_terms_of_service
)
# TODO: [#OSF-6934] update social fields, verified social fields cannot be modified
user.save()
# 3. send confirmation email
send_confirm_email(
user,
user.username,
external_id_provider=external_id_provider,
external_id=external_id,
destination=destination
)
# 4. notify user
message = language.EXTERNAL_LOGIN_EMAIL_CREATE_SUCCESS.format(
external_id_provider=external_id_provider,
email=user.username
)
kind = 'success'
# 5. remove session
remove_session(session)
status.push_status_message(message, kind=kind, trust=False)
else:
forms.push_errors_to_status(form.errors)
# Don't go anywhere
return {
'form': form,
'external_id_provider': external_id_provider,
'auth_user_fullname': fullname
}
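# Illustrative shape of the CAS-populated session consumed above (values are examples only):
#
#     session.data = {
#         'auth_user_external_id_provider': 'ORCID',
#         'auth_user_external_id': '0000-0001-2345-6789',
#         'auth_user_fullname': 'Ada Lovelace',
#         'service_url': 'https://osf.io/login/?next=...',
#     }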
def validate_campaign(campaign):
"""
Non-view helper function that validates `campaign`.
:param campaign: the campaign to validate
:return: True if valid, False otherwise
"""
return campaign and campaign in campaigns.get_campaigns()
def validate_next_url(next_url):
"""
Non-view helper function that checks `next_url`.
Only allow redirects which are relative root or full domain (CAS, OSF and MFR).
Disallows external redirects.
:param next_url: the next url to check
:return: True if valid, False otherwise
"""
# disallow external domains via `//`: browsers treat `//` as a protocol-relative shortcut that inherits
# http:// or https:// from the page already in use.
if next_url.startswith('//'):
return False
# only OSF, MFR, CAS and Branded Preprints domains are allowed
if next_url[0] == '/' or next_url.startswith(settings.DOMAIN):
# OSF
return True
if next_url.startswith(settings.CAS_SERVER_URL) or next_url.startswith(settings.MFR_SERVER_URL):
# CAS or MFR
return True
for url in Region.objects.values_list('mfr_url', flat=True):
if next_url.startswith(url):
return True
for url in campaigns.get_external_domains():
# Branded Preprints Phase 2
if next_url.startswith(url):
return True
return False
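# Illustrative behaviour (assuming settings.DOMAIN == 'https://osf.io/' and that the rejected urls
# are not registered CAS, MFR, or campaign domains):
#   validate_next_url('/myprojects/')               -> True   (relative root)
#   validate_next_url('https://osf.io/prereg/')     -> True   (OSF domain)
#   validate_next_url('//evil.example.com/')        -> False  (protocol-relative, external)
#   validate_next_url('https://evil.example.com/')  -> False  (external domain)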
def check_service_url_with_proxy_campaign(service_url, campaign_url, external_campaign_url=None):
"""
Check if service url belongs to proxy campaigns: OSF Preprints and branded ones.
Both service_url and campaign_url are parsed using `furl` encoding scheme.
:param service_url: the `furl` formatted service url
:param campaign_url: the `furl` formatted campaign url
:param external_campaign_url: the `furl` formatted external campaign url
:return: True if the service url matches the campaign, False otherwise
"""
prefix_1 = settings.DOMAIN + 'login/?next=' + campaign_url
prefix_2 = settings.DOMAIN + 'login?next=' + campaign_url
valid = service_url.startswith(prefix_1) or service_url.startswith(prefix_2)
valid_external = False
if external_campaign_url:
prefix_3 = settings.DOMAIN + 'login/?next=' + external_campaign_url
prefix_4 = settings.DOMAIN + 'login?next=' + external_campaign_url
valid_external = service_url.startswith(prefix_3) or service_url.startswith(prefix_4)
return valid or valid_external
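# Illustrative example (assuming settings.DOMAIN == 'https://osf.io/' and a campaign url of
# 'https://osf.io/preprints/'): a service url such as
#     'https://osf.io/login/?next=https://osf.io/preprints/discover'
# matches, because the check is a plain prefix match on the two accepted login forms
# ('login/?next=' and 'login?next=') followed by the campaign url.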
|
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import os, sys, unittest, shutil
from threading import RLock
import test_PyLucene
from lucene import \
PythonLock, PythonLockFactory, \
PythonIndexInput, PythonIndexOutput, PythonDirectory, \
JavaError, IOException, JArray, String
"""
The Directory implementation here is for testing purposes only and is not meant
as an example of how to write one. It lacks safety when dealing with concurrent
modifications because it does away with the file locking found in the default
Lucene FSDirectory implementation.
"""
DEBUG = False
class DebugWrapper(object):
def __init__(self, obj):
self.obj = obj
def __getattr__(self, name):
print self.obj.__class__.__name__, self.obj.name, name
sys.stdout.flush()
return getattr(self.obj, name)
class DebugFactory(object):
def __init__(self, klass):
self.klass = klass
def __call__(self, *args, **kw):
instance = self.klass(*args, **kw)
return DebugWrapper(instance)
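# DebugWrapper/DebugFactory trace every attribute access on the wrapped instances; they are only
# wired in when DEBUG is True (see the bottom of this module), e.g. (illustrative only):
#
#     PythonFileDirectory = DebugFactory(PythonFileDirectory)
#     store = PythonFileDirectory('/tmp/test-index')  # attribute lookups are now printed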
class PythonDirLock(PythonLock):
# only safe for a single process
def __init__(self, name, path, lock):
super(PythonDirLock, self).__init__()
self.name = name
self.lock_file = path
self.lock = lock
def isLocked(self):
return self.lock.locked()
def obtain(self):
return self.lock.acquire()
def release(self):
return self.lock.release()
class PythonDirLockFactory(PythonLockFactory):
def __init__(self, path):
super(PythonDirLockFactory, self).__init__()
self.path = path
self._locks = {}
def makeLock(self, name):
lock = self._locks.get(name)
if lock is None:
lock = PythonDirLock(name, os.path.join(self.path, name), RLock())
self._locks[name] = lock
return lock
def clearLock(self, name):
lock = self._locks.pop(name, None)
if lock is not None:
lock.release()
class PythonFileStreamInput(PythonIndexInput):
def __init__(self, name, fh, size, clone=False):
if not clone:
super(PythonFileStreamInput, self).__init__()
self.name = name
self.fh = fh
self._length = size
self.isOpen = True
self.isClone = clone
def length(self):
return long(self._length)
def clone(self):
clone = PythonFileStreamInput(self.name, self.fh, self._length, True)
return super(PythonFileStreamInput, self).clone(clone)
def close(self):
if self.isOpen:
self.isOpen = False
if not self.isClone:
self.fh.close()
def readInternal(self, length, pos):
self.fh.seek(pos)
return JArray('byte')(self.fh.read(length))
def seekInternal(self, pos):
self.fh.seek(pos)
class PythonFileStreamOutput(PythonIndexOutput):
def __init__(self, name, fh):
super(PythonFileStreamOutput, self).__init__()
self.name = name
self.fh = fh
self.isOpen = True
self._length = 0
def close(self):
if self.isOpen:
super(PythonFileStreamOutput, self).close()
self.isOpen = False
self.fh.close()
def length(self):
return long(self._length)
def seekInternal(self, pos):
self.fh.seek(pos)
def flushBuffer(self, bytes):
self.fh.write(bytes.string_)
self.fh.flush()
self._length += len(bytes)
class PythonFileDirectory(PythonDirectory):
def __init__(self, path):
super(PythonFileDirectory, self).__init__(PythonDirLockFactory(path))
self.name = path
assert os.path.isdir(path)
self.path = path
self._streams = []
def close(self):
for stream in self._streams:
stream.close()
del self._streams[:]
def createOutput(self, name):
file_path = os.path.join(self.path, name)
fh = open(file_path, "wb")
stream = PythonFileStreamOutput(name, fh)
self._streams.append(stream)
return stream
def deleteFile(self, name):
if self.fileExists(name):
os.unlink(os.path.join(self.path, name))
def fileExists(self, name):
return os.path.exists(os.path.join(self.path, name))
def fileLength(self, name):
file_path = os.path.join(self.path, name)
return long(os.path.getsize(file_path))
def fileModified(self, name):
file_path = os.path.join(self.path, name)
return os.path.getmtime(file_path)
def listAll(self):
return os.listdir(self.path)
def sync(self, name):
pass
def openInput(self, name, bufferSize=0):
file_path = os.path.join(self.path, name)
try:
fh = open(file_path, "rb")
except IOError:
raise JavaError, IOException(name)
stream = PythonFileStreamInput(name, fh, os.path.getsize(file_path))
self._streams.append(stream)
return stream
def touchFile(self, name):
file_path = os.path.join(self.path, name)
os.utime(file_path, None)
if DEBUG:
_globals = globals()
_globals['PythonFileDirectory'] = DebugFactory(PythonFileDirectory)
_globals['PythonFileStreamInput'] = DebugFactory(PythonFileStreamInput)
_globals['PythonFileStreamOutput'] = DebugFactory(PythonFileStreamOutput)
_globals['PythonDirLock'] = DebugFactory(PythonDirLock)
del _globals
class PythonDirectoryTests(unittest.TestCase, test_PyLucene.Test_PyLuceneBase):
STORE_DIR = "testpyrepo"
def setUp(self):
if not os.path.exists(self.STORE_DIR):
os.mkdir(self.STORE_DIR)
def tearDown(self):
if os.path.exists(self.STORE_DIR):
shutil.rmtree(self.STORE_DIR)
def openStore(self):
return PythonFileDirectory(self.STORE_DIR)
def closeStore(self, store, *args):
for arg in args:
if arg is not None:
arg.close()
store.close()
def test_IncrementalLoop(self):
print "Testing Indexing Incremental Looping"
for i in range(100):
print "indexing ", i
sys.stdout.flush()
self.test_indexDocument()
if __name__ == "__main__":
import sys, lucene
env = lucene.initVM()
if '-loop' in sys.argv:
sys.argv.remove('-loop')
while True:
try:
unittest.main()
except:
pass
print 'inputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexInput', 0)
print 'outputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexOutput', 0)
print 'locks', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0)
print 'dirs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0)
else:
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Object Services
--------------------
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.errors import ExecutionError, InvalidParameterDatatype
from bacpypes.primitivedata import CharacterString
from bacpypes.constructeddata import ArrayOf
from bacpypes.object import register_object_type, ReadableProperty, \
WritableProperty, Object
from bacpypes.local.object import CurrentPropertyListMixIn
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
class TestBasic(unittest.TestCase):
def test_basic(self):
"""Test basic configuration of a network."""
if _debug: TestBasic._debug("test_basic")
# create an object, no properties
Object()
@bacpypes_debugging
@register_object_type(vendor_id=999)
class SampleReadableLocation(Object):
objectType = 'sampleReadableLocation'
properties = [
ReadableProperty('location', CharacterString),
]
def __init__(self, **kwargs):
if _debug: SampleReadableLocation._debug("__init__ %r", kwargs)
Object.__init__(self, **kwargs)
@bacpypes_debugging
class TestReadableLocation(unittest.TestCase):
def test_sample(self):
"""Test basic configuration of a network."""
if _debug: TestReadableLocation._debug("test_sample")
# create an object, default property value is None
obj = SampleReadableLocation()
assert obj.location == None
# create an object with a location
obj = SampleReadableLocation(location="home")
assert obj.ReadProperty('location') == "home"
# not an array, write access denied
with self.assertRaises(ExecutionError):
obj.ReadProperty('location', 0)
with self.assertRaises(ExecutionError):
obj.WriteProperty('location', "work")
@bacpypes_debugging
@register_object_type(vendor_id=999)
class SampleWritableLocation(Object):
objectType = 'sampleWritableLocation'
properties = [
WritableProperty('location', CharacterString),
]
def __init__(self, **kwargs):
if _debug: SampleWritableLocation._debug("__init__ %r", kwargs)
Object.__init__(self, **kwargs)
@bacpypes_debugging
class TestWritableLocation(unittest.TestCase):
def test_sample(self):
"""Test basic configuration of a network."""
if _debug: TestWritableLocation._debug("test_sample")
# create an object with a location
obj = SampleWritableLocation(location="home")
assert obj.ReadProperty('location') == "home"
# not an array, write access denied
with self.assertRaises(ExecutionError):
obj.ReadProperty('location', 0)
# write access successful
obj.WriteProperty('location', "work")
assert obj.location == "work"
# wrong data type
with self.assertRaises(InvalidParameterDatatype):
obj.WriteProperty('location', 12)
# array of character strings
ArrayOfCharacterString = ArrayOf(CharacterString)
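# Note: BACnet array properties are 1-indexed and index 0 reads back the array length, which is
# why the tests below expect ReadProperty('location', 0) to return the element count.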
@bacpypes_debugging
@register_object_type(vendor_id=999)
class SampleWritableArray(Object):
objectType = 'sampleWritableArray'
properties = [
WritableProperty('location', ArrayOfCharacterString),
]
def __init__(self, **kwargs):
if _debug: SampleWritableArray._debug("__init__ %r", kwargs)
Object.__init__(self, **kwargs)
@bacpypes_debugging
class TestWritableArray(unittest.TestCase):
def test_empty_array(self):
"""Test basic configuration of a network."""
if _debug: TestWritableArray._debug("test_empty_array")
# create an object with a location
obj = SampleWritableArray(location=ArrayOfCharacterString())
if _debug: TestWritableArray._debug(" - obj.location: %r", obj.location)
assert len(obj.location) == 0
assert obj.location[0] == 0
def test_short_array(self):
if _debug: TestWritableArray._debug("test_short_array")
# create an object with a location
obj = SampleWritableArray(location=ArrayOfCharacterString(["home"]))
if _debug: TestWritableArray._debug(" - obj.location: %r", obj.location)
assert obj.ReadProperty('location', 0) == 1
assert obj.ReadProperty('location', 1) == "home"
def test_changing_length(self):
if _debug: TestWritableArray._debug("test_changing_length")
# create an object with a location
obj = SampleWritableArray(location=ArrayOfCharacterString(["home"]))
if _debug: TestWritableArray._debug(" - obj.location: %r", obj.location)
# change the length of the array
obj.WriteProperty('location', 2, arrayIndex=0)
assert obj.ReadProperty('location', 0) == 2
# array extended with none, should get property default value
assert obj.ReadProperty('location', 2) == ""
# wrong datatype
with self.assertRaises(InvalidParameterDatatype):
obj.WriteProperty('location', "nope", arrayIndex=0)
def test_changing_item(self):
if _debug: TestWritableArray._debug("test_changing_item")
# create an object with a location
obj = SampleWritableArray(location=ArrayOfCharacterString(["home"]))
if _debug: TestWritableArray._debug(" - obj.location: %r", obj.location)
# change the element
obj.WriteProperty('location', "work", arrayIndex=1)
assert obj.ReadProperty('location', 1) == "work"
# wrong datatype
with self.assertRaises(InvalidParameterDatatype):
obj.WriteProperty('location', 12, arrayIndex=1)
def test_replacing_array(self):
if _debug: TestWritableArray._debug("test_replacing_array")
# create an object with a location
obj = SampleWritableArray()
if _debug: TestWritableArray._debug(" - obj.location: %r", obj.location)
# replace the array
obj.WriteProperty('location', ArrayOfCharacterString(["home", "work"]))
assert obj.ReadProperty('location', 0) == 2
assert obj.ReadProperty('location', 1) == "home"
assert obj.ReadProperty('location', 2) == "work"
@bacpypes_debugging
@register_object_type(vendor_id=999)
class SampleLocationObject(CurrentPropertyListMixIn, Object):
objectType = 'sampleLocationObject'
properties = [
WritableProperty('location', CharacterString),
]
def __init__(self, **kwargs):
if _debug: SampleLocationObject._debug("__init__ %r", kwargs)
Object.__init__(self, **kwargs)
@bacpypes_debugging
class TestCurrentPropertyListMixIn(unittest.TestCase):
def test_with_location(self):
if _debug: TestCurrentPropertyListMixIn._debug("test_with_location")
# create an object without a location
obj = SampleLocationObject(location="home")
if _debug: TestCurrentPropertyListMixIn._debug(" - obj.location: %r", obj.location)
assert obj.propertyList.value == [1, "location"]
def test_without_location(self):
if _debug: TestCurrentPropertyListMixIn._debug("test_property_list_1")
# create an object without a location
obj = SampleLocationObject()
if _debug: TestCurrentPropertyListMixIn._debug(" - obj.location: %r", obj.location)
assert obj.propertyList.value == [0]
def test_location_appears(self):
if _debug: TestCurrentPropertyListMixIn._debug("test_location_appears")
# create an object without a location
obj = SampleLocationObject()
if _debug: TestCurrentPropertyListMixIn._debug(" - obj.location: %r", obj.location)
# give it a location
obj.location = "away"
assert obj.propertyList.value == [1, "location"]
def test_location_disappears(self):
if _debug: TestCurrentPropertyListMixIn._debug("test_location_disappears")
# create an object without a location
obj = SampleLocationObject(location="home")
if _debug: TestCurrentPropertyListMixIn._debug(" - obj.location: %r", obj.location)
# location 'removed'
obj.location = None
assert obj.propertyList.value == [0]
|
|
#!/usr/bin/python
# Copyright (C) 2015, WSID
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from gi.repository import GObject
from gi.repository import CrankBase
class TestAdvMat(unittest.TestCase):
def assertFloat (self, a, b, delta=0.0001):
"""A simple custom assert that given values are same.
It takes into delta values into account, so that test can endure little
errors.
"""
try: #if they are both of list type.
if (len(a) != len(b)):
raise AssertionError ("array length: %d != %d" % (len(a), len(b)))
for i in range (0, len(a)):
if ((a[i] < b[i] - delta) or (b[i] + delta < a[i])):
raise AssertionError ("%g != %g (diff=%g)" % (a[i], b[i], b[i]-a[i]))
except TypeError: #then they are numeric type.
if ((a < b - delta) or (b + delta < a)):
raise AssertionError ("%g != %g (diff=%g)" % (a, b, b-a))
def test_lu (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
3, 12, 21,
8, 39, 84,
2, 16, 49 ])
(r, l, u) = CrankBase.lu_mat_float_n (a)
assert (r)
self.assertFloat (l.get (0, 0), 3)
self.assertFloat (l.get (0, 1), 0)
self.assertFloat (l.get (0, 2), 0)
self.assertFloat (l.get (1, 0), 8)
self.assertFloat (l.get (1, 1), 7)
self.assertFloat (l.get (1, 2), 0)
self.assertFloat (l.get (2, 0), 2)
self.assertFloat (l.get (2, 1), 8)
self.assertFloat (l.get (2, 2), 3)
self.assertFloat (u.get (0, 0), 1)
self.assertFloat (u.get (0, 1), 4)
self.assertFloat (u.get (0, 2), 7)
self.assertFloat (u.get (1, 0), 0)
self.assertFloat (u.get (1, 1), 1)
self.assertFloat (u.get (1, 2), 4)
self.assertFloat (u.get (2, 0), 0)
self.assertFloat (u.get (2, 1), 0)
self.assertFloat (u.get (2, 2), 1)
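# Factorization verified by the asserts above, a = l * u (written out for reference):
#   [ 3 12 21 ]   [ 3 0 0 ]   [ 1 4 7 ]
#   [ 8 39 84 ] = [ 8 7 0 ] * [ 0 1 4 ]
#   [ 2 16 49 ]   [ 2 8 3 ]   [ 0 0 1 ]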
def test_lu_p (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
0, 4, 3,
3, 6, 6,
2, 20, 8 ])
(r, p, l, u) = CrankBase.lu_p_mat_float_n (a)
assert (r)
self.assertEqual (p.get (0), 1)
self.assertEqual (p.get (1), 2)
self.assertEqual (p.get (2), 0)
self.assertFloat (l.get (0, 0), 3)
self.assertFloat (l.get (0, 1), 0)
self.assertFloat (l.get (0, 2), 0)
self.assertFloat (l.get (1, 0), 2)
self.assertFloat (l.get (1, 1), 16)
self.assertFloat (l.get (1, 2), 0)
self.assertFloat (l.get (2, 0), 0)
self.assertFloat (l.get (2, 1), 4)
self.assertFloat (l.get (2, 2), 2)
self.assertFloat (u.get (0, 0), 1)
self.assertFloat (u.get (0, 1), 2)
self.assertFloat (u.get (0, 2), 2)
self.assertFloat (u.get (1, 0), 0)
self.assertFloat (u.get (1, 1), 1)
self.assertFloat (u.get (1, 2), 0.25)
self.assertFloat (u.get (2, 0), 0)
self.assertFloat (u.get (2, 1), 0)
self.assertFloat (u.get (2, 2), 1)
def test_ch (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
4, 12, -16,
12, 37, -43,
-16, -43, 98] )
(r, l) = CrankBase.ch_mat_float_n (a)
self.assertFloat (l.get (0, 0), 2)
self.assertFloat (l.get (0, 1), 0)
self.assertFloat (l.get (0, 2), 0)
self.assertFloat (l.get (1, 0), 6)
self.assertFloat (l.get (1, 1), 1)
self.assertFloat (l.get (1, 2), 0)
self.assertFloat (l.get (2, 0),-8)
self.assertFloat (l.get (2, 1), 5)
self.assertFloat (l.get (2, 2), 3)
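# Cholesky factorization verified above, a = l * l^T (written out for reference):
#   [   4  12 -16 ]   [  2 0 0 ]   [ 2 6 -8 ]
#   [  12  37 -43 ] = [  6 1 0 ] * [ 0 1  5 ]
#   [ -16 -43  98 ]   [ -8 5 3 ]   [ 0 0  3 ]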
def test_ldl (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
4, 12, -16,
12, 37, -43,
-16, -43, 98] )
(r, l, d) = CrankBase.ldl_mat_float_n (a)
self.assertFloat (l.get (0, 0), 1)
self.assertFloat (l.get (0, 1), 0)
self.assertFloat (l.get (0, 2), 0)
self.assertFloat (l.get (1, 0), 3)
self.assertFloat (l.get (1, 1), 1)
self.assertFloat (l.get (1, 2), 0)
self.assertFloat (l.get (2, 0),-4)
self.assertFloat (l.get (2, 1), 5)
self.assertFloat (l.get (2, 2), 1)
self.assertFloat (d.get (0), 4)
self.assertFloat (d.get (1), 1)
self.assertFloat (d.get (2), 9)
def test_gram_schmidt (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
3, 4, 1,
2, 2, 1,
4, 2, 1] )
(res, q, r) = CrankBase.gram_schmidt_mat_float_n (a)
assert (res)
self.assertFloat (q.get (0, 0), 0.5571)
self.assertFloat (q.get (0, 1), 0.7459)
self.assertFloat (q.get (0, 2), -0.3651)
self.assertFloat (q.get (1, 0), 0.3714)
self.assertFloat (q.get (1, 1), 0.1695)
self.assertFloat (q.get (1, 2), 0.9129)
self.assertFloat (q.get (2, 0), 0.7428)
self.assertFloat (q.get (2, 1), -0.6442)
self.assertFloat (q.get (2, 2), -0.1826)
self.assertFloat (r.get (0, 0), 5.3852)
self.assertFloat (r.get (0, 1), 4.4567)
self.assertFloat (r.get (0, 2), 1.6713)
self.assertFloat (r.get (1, 0), 0.0)
self.assertFloat (r.get (1, 1), 2.0342)
self.assertFloat (r.get (1, 2), 0.2712)
self.assertFloat (r.get (2, 0), 0)
self.assertFloat (r.get (2, 1), 0)
self.assertFloat (r.get (2, 2), 0.3651)
def test_qr_household (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
3, 4, 1,
2, 2, 1,
4, 2, 1] )
(res, r) = CrankBase.qr_householder_mat_float_n (a)
assert (res)
self.assertFloat (r.get (0, 0), 5.3852)
self.assertFloat (r.get (0, 1), 4.4567)
self.assertFloat (r.get (0, 2), 1.6713)
self.assertFloat (r.get (1, 0), 0.0)
self.assertFloat (r.get (1, 1), 2.0342)
self.assertFloat (r.get (1, 2), 0.2712)
self.assertFloat (r.get (2, 0), 0)
self.assertFloat (r.get (2, 1), 0)
self.assertFloat (r.get (2, 2), 0.3651)
def test_qr_givens (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
3, 4, 1,
2, 2, 1,
4, 2, 1] )
(res, r) = CrankBase.qr_givens_mat_float_n (a)
assert (res)
self.assertFloat (r.get (0, 0), 5.3852)
self.assertFloat (r.get (0, 1), 4.4567)
self.assertFloat (r.get (0, 2), 1.6713)
self.assertFloat (r.get (1, 0), 0.0)
self.assertFloat (r.get (1, 1), 2.0342)
self.assertFloat (r.get (1, 2), 0.2712)
self.assertFloat (r.get (2, 0), 0)
self.assertFloat (r.get (2, 1), 0)
self.assertFloat (r.get (2, 2), 0.3651)
def test_eval_power (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
1, 2, 3,
2, 4, 9,
3, 9, 16 ] );
(e, evec) = CrankBase.eval_power_mat_float_n (a, None)
self.assertFloat (e, 21.4467);
self.assertFloat (evec.get(0), 0.1729);
self.assertFloat (evec.get(1), 0.4671);
self.assertFloat (evec.get(2), 0.8671);
@unittest.skip ("Function signiture needs to be changed.")
def test_eval_qr (self):
a = CrankBase.MatFloatN.init_arr (3, 3, [
1, 2, 3,
2, 3, 9,
3, 9, 16 ])
if __name__ == '__main__':
unittest.main ()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from wildcard import api
from wildcard.test import helpers as test
USERS_INDEX_URL = reverse('horizon:admin:users:index')
USER_CREATE_URL = reverse('horizon:admin:users:create')
USER_UPDATE_URL = reverse('horizon:admin:users:update', args=[1])
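# These tests follow mox's record/replay pattern: each stubbed keystone call is first recorded as
# an expectation, `self.mox.ReplayAll()` switches to replay mode, and the expectations are expected
# to be verified by the test base class on teardown.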
class UsersViewTests(test.BaseAdminViewTests):
def _get_default_domain(self):
domain = {"id": self.request.session.get('domain_context',
None),
"name": self.request.session.get('domain_context_name',
None)}
return api.base.APIDictWrapper(domain)
def _get_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
@test.create_stubs({api.keystone: ('user_list',)})
def test_index(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
api.keystone.user_list(
IgnoreArg(), domain=domain_id).AndReturn(users)
self.mox.ReplayAll()
res = self.client.get(USERS_INDEX_URL)
self.assertTemplateUsed(res, 'admin/users/index.html')
self.assertItemsEqual(res.context['table'].data, users)
if domain_id:
for user in res.context['table'].data:
self.assertItemsEqual(user.domain_id, domain_id)
def test_index_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_index()
@test.create_stubs({api.keystone: ('user_create',
'get_default_domain',
'tenant_list',
'add_tenant_user_role',
'get_default_role',
'role_list')})
def test_create(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
def test_create_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_create()
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_with_password_mismatch(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': "doesntmatch"}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(res, "form", None, ['Passwords do not match.'])
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_short(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
# check password min-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'four',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'four'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_long(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
        # check password max-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'MoreThanEighteenChars',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'MoreThanEighteenChars'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'user_update_password',
'user_update',
'roles_for_user', )})
def test_update(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
test_password = 'normalpwd'
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=u'[email protected]',
name=u'test_user',
password=test_password,
project=self.tenant.id).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': test_password,
'project': self.tenant.id,
'confirm_password': test_password}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'keystone_can_edit_user',
'roles_for_user', )})
def test_update_with_keystone_can_edit_user_false(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(
IsA(http.HttpRequest), '1', admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \
.AndReturn([self.tenants.list(), False])
api.keystone.keystone_can_edit_user().AndReturn(False)
api.keystone.keystone_can_edit_user().AndReturn(False)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'project': self.tenant.id, }
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
@test.create_stubs({api.keystone: ('domain_get',
'user_get',
'tenant_list')})
def test_update_validation_for_password_too_short(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': 't',
'project': self.tenant.id,
'confirm_password': 't'}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('domain_get',
'user_get',
'tenant_list')})
def test_update_validation_for_password_too_long(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': 'ThisIsASuperLongPassword',
'project': self.tenant.id,
'confirm_password': 'ThisIsASuperLongPassword'}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_enable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id).AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
True).AndReturn(user)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_disable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
self.assertTrue(user.enabled)
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
False).AndReturn(user)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_enable_disable_user_exception(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(), user.id, True) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_list',)})
def test_disabling_current_user(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You cannot disable the user you are currently '
u'logged in as.')
@test.create_stubs({api.keystone: ('user_list',)})
def test_delete_user_with_improper_permissions(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
formData = {'action': 'users__delete__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You do not have permission to delete user: %s'
% self.request.user.username)
class SeleniumTests(test.SeleniumAdminTestCase):
def _get_default_domain(self):
domain = {"id": None, "name": None}
return api.base.APIDictWrapper(domain)
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'get_default_role',
'role_list',
'user_list')})
def test_modal_create_user_with_passwords_not_matching(self):
domain = self._get_default_domain()
api.keystone.get_default_domain(IgnoreArg()) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=None, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.user_list(IgnoreArg(), domain=None) \
.AndReturn(self.users.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, USERS_INDEX_URL))
# Open the modal menu
self.selenium.find_element_by_id("users__action_create") \
.send_keys("\n")
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_id("id_name"))
body = self.selenium.find_element_by_tag_name("body")
self.assertFalse("Passwords do not match" in body.text,
"Error message should not be visible at loading time")
self.selenium.find_element_by_id("id_name").send_keys("Test User")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").send_keys("[email protected]")
body = self.selenium.find_element_by_tag_name("body")
self.assertTrue("Passwords do not match" in body.text,
"Error message not found in body")
@test.create_stubs({api.keystone: ('tenant_list',
'user_get',
'domain_get')})
def test_update_user_with_passwords_not_matching(self):
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(self.user)
api.keystone.domain_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.domain)
api.keystone.tenant_list(IgnoreArg(),
domain=self.user.domain_id,
user=self.user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, USER_UPDATE_URL))
body = self.selenium.find_element_by_tag_name("body")
self.assertFalse("Passwords do not match" in body.text,
"Error message should not be visible at loading time")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").clear()
body = self.selenium.find_element_by_tag_name("body")
self.assertTrue("Passwords do not match" in body.text,
"Error message not found in body")
|
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2013 [email protected].
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2013-11-01
#
"""workflow-build [options] <workflow-dir>
Build Alfred Workflows.
Compile contents of <workflow-dir> to a ZIP file (with extension
`.alfred3workflow`).
The name of the output file is generated from the workflow name,
which is extracted from the workflow's `info.plist`. If a `version`
file is contained within the workflow directory, its contents
will be appended to the compiled workflow's filename.
Usage:
workflow-build [-v|-q|-d] [-f] [-o <outputdir>] <workflow-dir>...
workflow-build (-h|--version)
Options:
-o, --output=<outputdir> Directory to save workflow(s) to.
Default is current working directory.
-f, --force Overwrite existing files.
-h, --help Show this message and exit.
-V, --version Show version number and exit.
-q, --quiet Only show errors and above.
-v, --verbose Show info messages and above.
-d, --debug Show debug messages.
"""
from __future__ import print_function
import sys
import os
import logging
import logging.handlers
import plistlib
from subprocess import check_call, CalledProcessError
from docopt import docopt
__version__ = "0.4"
__author__ = "[email protected]"
DEFAULT_LOG_LEVEL = logging.WARNING
LOGPATH = os.path.expanduser('~/Library/Logs/MyScripts.log')
LOGSIZE = 1024 * 1024 * 5 # 5 megabytes
EXCLUDE_PATTERNS = [
'*.pyc*',
'*.log*',
'.DS_Store',
'*.acorn*',
'*.swp*',
'*.sublime-project*',
'*.sublime-workflow*',
'*.git*',
'*.dist-info*',
'*.egg-info*',
'*.gif*',
'README.md',
'workflow-build.py',
'requirements.txt',
'*.idea*'
]
class TechnicolorFormatter(logging.Formatter):
"""
Prepend level name to any message not level logging.INFO.
Also, colour!
"""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET = "\033[0m"
COLOUR_BASE = "\033[1;{:d}m"
BOLD = "\033[1m"
LEVEL_COLOURS = {
logging.DEBUG: BLUE,
logging.INFO: WHITE,
logging.WARNING: YELLOW,
logging.ERROR: MAGENTA,
logging.CRITICAL: RED
}
def __init__(self, fmt=None, datefmt=None, technicolor=True):
logging.Formatter.__init__(self, fmt, datefmt)
self.technicolor = technicolor
self._isatty = sys.stderr.isatty()
def format(self, record):
if record.levelno == logging.INFO:
msg = logging.Formatter.format(self, record)
return msg
if self.technicolor and self._isatty:
colour = self.LEVEL_COLOURS[record.levelno]
bold = (False, True)[record.levelno > logging.INFO]
levelname = self.colourise('{:9s}'.format(record.levelname),
colour, bold)
else:
levelname = '{:9s}'.format(record.levelname)
return (levelname + logging.Formatter.format(self, record))
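    # For non-INFO records the output is the level name padded to 9 characters
    # (coloured when stderr is a TTY) followed by the formatted message;
    # INFO records pass through unprefixed.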
def colourise(self, text, colour, bold=False):
colour = self.COLOUR_BASE.format(colour + 30)
output = []
if bold:
output.append(self.BOLD)
output.append(colour)
output.append(text)
output.append(self.RESET)
return ''.join(output)
# logfile
logfile = logging.handlers.RotatingFileHandler(LOGPATH, maxBytes=LOGSIZE,
backupCount=5)
formatter = logging.Formatter(
'%(asctime)s %(levelname)-8s [%(name)-12s] %(message)s',
datefmt="%d/%m %H:%M:%S")
logfile.setFormatter(formatter)
logfile.setLevel(logging.DEBUG)
# console output
console = logging.StreamHandler()
formatter = TechnicolorFormatter('%(message)s')
console.setFormatter(formatter)
console.setLevel(logging.DEBUG)
log = logging.getLogger('')
log.addHandler(logfile)
log.addHandler(console)
def safename(name):
"""Make name filesystem-safe."""
name = name.replace(u'/', u'-')
name = name.replace(u':', u'-')
name = name.replace(u' ', u'-')
return name
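# e.g. safename(u'My Workflow: 2/3') -> u'My-Workflow--2-3'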
def build_workflow(workflow_dir, outputdir, overwrite=False, verbose=False):
"""Create an .alfred3workflow file from the contents of `workflow_dir`."""
    curdir = os.getcwd()
    os.chdir(workflow_dir)
    version = None
    if not os.path.exists(u'info.plist'):
        log.error(u'info.plist not found')
        os.chdir(curdir)
        return False
    info = plistlib.readPlist(u'info.plist')
if 'version' in info and info.get('version'):
version = info['version']
elif os.path.exists(u'version'):
with open('version') as fp:
version = fp.read().strip().decode('utf-8')
name = safename(info[u'name'])
zippath = os.path.join(outputdir, name)
if version:
zippath += u'-' + version
zippath += u'.alfred3workflow'
if os.path.exists(zippath):
if overwrite:
log.info(u'Overwriting existing workflow')
os.unlink(zippath)
else:
log.error(u"File '{}' already exists. Use -f to overwrite".format(
zippath))
return False
# build workflow
command = [u'zip']
if not verbose:
command.append(u'-q')
command.append(zippath)
for root, dirnames, filenames in os.walk(u'.'):
for filename in filenames:
path = os.path.join(root, filename)
command.append(path)
command.append(u'-x')
command.extend(EXCLUDE_PATTERNS)
log.debug(u'command : {}'.format(u' '.join(command)))
try:
check_call(command)
except CalledProcessError as err:
log.error(u'zip returned : {}'.format(err.returncode))
os.chdir(curdir)
return False
log.info(u'Wrote {}'.format(zippath))
os.chdir(curdir)
return True
def main(args=None):
"""Run CLI."""
args = docopt(__doc__, version=__version__)
if args.get('--verbose'):
log.setLevel(logging.INFO)
elif args.get('--quiet'):
log.setLevel(logging.ERROR)
elif args.get('--debug'):
log.setLevel(logging.DEBUG)
else:
log.setLevel(DEFAULT_LOG_LEVEL)
log.debug("Set log level to %s" %
logging.getLevelName(log.level))
log.debug('args :\n{}'.format(args))
# Build options
outputdir = os.path.abspath(args.get(u'--output') or os.curdir)
workflow_dirs = [os.path.abspath(p) for p in args.get(u'<workflow-dir>')]
log.debug(u'outputdir : {}, workflow_dirs : {}'.format(outputdir,
workflow_dirs))
errors = False
verbose = False
if log.level == logging.DEBUG:
verbose = True
# Build workflow(s)
for path in workflow_dirs:
ok = build_workflow(path, outputdir, args.get(u'--force'), verbose)
if not ok:
errors = True
if errors:
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
from __future__ import absolute_import
import warnings
from importlib import import_module
from django import forms
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.sites.shortcuts import get_current_site
from django.core import exceptions, validators
from django.urls import reverse
from django.utils.translation import pgettext
from allauth.compat import ugettext, ugettext_lazy as _
from ..utils import (
build_absolute_uri,
get_username_max_length,
set_form_field_order,
)
from . import app_settings
from .adapter import get_adapter
from .app_settings import AuthenticationMethod
from .models import EmailAddress
from .utils import (
filter_users_by_email,
get_user_model,
perform_login,
setup_user_email,
sync_user_email_addresses,
url_str_to_user_pk,
user_email,
user_pk_to_url_str,
user_username,
)
class EmailAwarePasswordResetTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
ret = super(
EmailAwarePasswordResetTokenGenerator, self)._make_hash_value(
user, timestamp)
sync_user_email_addresses(user)
emails = set([user.email] if user.email else [])
emails.update(
EmailAddress.objects
.filter(user=user)
.values_list('email', flat=True))
ret += '|'.join(sorted(emails))
return ret
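# Because every address linked to the account is folded into the hash value,
# adding or removing an EmailAddress invalidates any outstanding reset tokens.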
default_token_generator = EmailAwarePasswordResetTokenGenerator()
class PasswordVerificationMixin(object):
def clean(self):
cleaned_data = super(PasswordVerificationMixin, self).clean()
password1 = cleaned_data.get('password1')
password2 = cleaned_data.get('password2')
if (password1 and password2) and password1 != password2:
self.add_error(
'password2', _("You must type the same password each time.")
)
return cleaned_data
class PasswordField(forms.CharField):
def __init__(self, *args, **kwargs):
render_value = kwargs.pop('render_value',
app_settings.PASSWORD_INPUT_RENDER_VALUE)
kwargs['widget'] = forms.PasswordInput(render_value=render_value,
attrs={'placeholder':
kwargs.get("label")})
super(PasswordField, self).__init__(*args, **kwargs)
class SetPasswordField(PasswordField):
def __init__(self, *args, **kwargs):
super(SetPasswordField, self).__init__(*args, **kwargs)
self.user = None
def clean(self, value):
value = super(SetPasswordField, self).clean(value)
value = get_adapter().clean_password(value, user=self.user)
return value
class LoginForm(forms.Form):
password = PasswordField(label=_("Password"))
remember = forms.BooleanField(label=_("Remember Me"),
required=False)
user = None
error_messages = {
'account_inactive':
_("This account is currently inactive."),
'email_password_mismatch':
_("The e-mail address and/or password you specified are not correct."),
'username_password_mismatch':
_("The username and/or password you specified are not correct."),
}
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(LoginForm, self).__init__(*args, **kwargs)
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
login_widget = forms.TextInput(attrs={'type': 'email',
'placeholder':
_('E-mail address'),
'autofocus': 'autofocus'})
login_field = forms.EmailField(label=_("E-mail"),
widget=login_widget)
elif app_settings.AUTHENTICATION_METHOD \
== AuthenticationMethod.USERNAME:
login_widget = forms.TextInput(attrs={'placeholder':
_('Username'),
'autofocus': 'autofocus'})
login_field = forms.CharField(
label=_("Username"),
widget=login_widget,
max_length=get_username_max_length())
else:
assert app_settings.AUTHENTICATION_METHOD \
== AuthenticationMethod.USERNAME_EMAIL
login_widget = forms.TextInput(attrs={'placeholder':
_('Username or e-mail'),
'autofocus': 'autofocus'})
login_field = forms.CharField(label=pgettext("field label",
"Login"),
widget=login_widget)
self.fields["login"] = login_field
set_form_field_order(self, ["login", "password", "remember"])
if app_settings.SESSION_REMEMBER is not None:
del self.fields['remember']
def user_credentials(self):
"""
Provides the credentials required to authenticate the user for
login.
"""
credentials = {}
login = self.cleaned_data["login"]
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
credentials["email"] = login
elif (
app_settings.AUTHENTICATION_METHOD ==
AuthenticationMethod.USERNAME):
credentials["username"] = login
else:
if self._is_login_email(login):
credentials["email"] = login
credentials["username"] = login
credentials["password"] = self.cleaned_data["password"]
return credentials
def clean_login(self):
login = self.cleaned_data['login']
return login.strip()
def _is_login_email(self, login):
try:
validators.validate_email(login)
ret = True
except exceptions.ValidationError:
ret = False
return ret
def clean(self):
super(LoginForm, self).clean()
if self._errors:
return
credentials = self.user_credentials()
user = get_adapter(self.request).authenticate(
self.request,
**credentials)
if user:
self.user = user
else:
auth_method = app_settings.AUTHENTICATION_METHOD
if auth_method == app_settings.AuthenticationMethod.USERNAME_EMAIL:
login = self.cleaned_data['login']
if self._is_login_email(login):
auth_method = app_settings.AuthenticationMethod.EMAIL
else:
auth_method = app_settings.AuthenticationMethod.USERNAME
raise forms.ValidationError(
self.error_messages['%s_password_mismatch' % auth_method])
return self.cleaned_data
def login(self, request, redirect_url=None):
ret = perform_login(request, self.user,
email_verification=app_settings.EMAIL_VERIFICATION,
redirect_url=redirect_url)
remember = app_settings.SESSION_REMEMBER
if remember is None:
remember = self.cleaned_data['remember']
if remember:
request.session.set_expiry(app_settings.SESSION_COOKIE_AGE)
else:
request.session.set_expiry(0)
return ret
class _DummyCustomSignupForm(forms.Form):
def signup(self, request, user):
"""
Invoked at signup time to complete the signup of the user.
"""
pass
def _base_signup_form_class():
"""
Currently, we inherit from the custom form, if any. This is all
not very elegant, though it serves a purpose:
- There are two signup forms: one for local accounts, and one for
social accounts
- Both share a common base (BaseSignupForm)
- Given the above, how to put in a custom signup form? Which form
would your custom form derive from, the local or the social one?
"""
if not app_settings.SIGNUP_FORM_CLASS:
return _DummyCustomSignupForm
try:
fc_module, fc_classname = app_settings.SIGNUP_FORM_CLASS.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s does not point to a form'
' class'
% app_settings.SIGNUP_FORM_CLASS)
try:
mod = import_module(fc_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured('Error importing form class %s:'
' "%s"' % (fc_module, e))
try:
fc_class = getattr(mod, fc_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Module "%s" does not define a'
' "%s" class' % (fc_module,
fc_classname))
if not hasattr(fc_class, 'signup'):
if hasattr(fc_class, 'save'):
warnings.warn("The custom signup form must offer"
" a `def signup(self, request, user)` method",
DeprecationWarning)
else:
raise exceptions.ImproperlyConfigured(
'The custom signup form must implement a "signup" method')
return fc_class
class BaseSignupForm(_base_signup_form_class()):
username = forms.CharField(label=_("Username"),
min_length=app_settings.USERNAME_MIN_LENGTH,
widget=forms.TextInput(
attrs={'placeholder':
_('Username'),
'autofocus': 'autofocus'}))
email = forms.EmailField(widget=forms.TextInput(
attrs={'type': 'email',
'placeholder': _('E-mail address')}))
def __init__(self, *args, **kwargs):
email_required = kwargs.pop('email_required',
app_settings.EMAIL_REQUIRED)
self.username_required = kwargs.pop('username_required',
app_settings.USERNAME_REQUIRED)
super(BaseSignupForm, self).__init__(*args, **kwargs)
username_field = self.fields['username']
username_field.max_length = get_username_max_length()
username_field.validators.append(
validators.MaxLengthValidator(username_field.max_length))
username_field.widget.attrs['maxlength'] = str(
username_field.max_length)
default_field_order = [
'email',
'email2', # ignored when not present
'username',
'password1',
'password2' # ignored when not present
]
if app_settings.SIGNUP_EMAIL_ENTER_TWICE:
self.fields["email2"] = forms.EmailField(
label=_("E-mail (again)"),
widget=forms.TextInput(
attrs={
'type': 'email',
'placeholder': _('E-mail address confirmation')
}
)
)
if email_required:
self.fields['email'].label = ugettext("E-mail")
self.fields['email'].required = True
else:
self.fields['email'].label = ugettext("E-mail (optional)")
self.fields['email'].required = False
self.fields['email'].widget.is_required = False
if self.username_required:
default_field_order = [
'username',
'email',
'email2', # ignored when not present
'password1',
'password2' # ignored when not present
]
if not self.username_required:
del self.fields["username"]
set_form_field_order(
self,
getattr(self, 'field_order', None) or default_field_order)
def clean_username(self):
value = self.cleaned_data["username"]
value = get_adapter().clean_username(value)
return value
def clean_email(self):
value = self.cleaned_data['email']
value = get_adapter().clean_email(value)
if value and app_settings.UNIQUE_EMAIL:
value = self.validate_unique_email(value)
return value
def validate_unique_email(self, value):
return get_adapter().validate_unique_email(value)
def clean(self):
cleaned_data = super(BaseSignupForm, self).clean()
if app_settings.SIGNUP_EMAIL_ENTER_TWICE:
email = cleaned_data.get('email')
email2 = cleaned_data.get('email2')
if (email and email2) and email != email2:
self.add_error(
'email2', _("You must type the same email each time.")
)
return cleaned_data
def custom_signup(self, request, user):
custom_form = super(BaseSignupForm, self)
if hasattr(custom_form, 'signup') and callable(custom_form.signup):
custom_form.signup(request, user)
else:
warnings.warn("The custom signup form must offer"
" a `def signup(self, request, user)` method",
DeprecationWarning)
# Historically, it was called .save, but this is confusing
# in case of ModelForm
custom_form.save(user)
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields['password1'] = PasswordField(label=_("Password"))
if app_settings.SIGNUP_PASSWORD_ENTER_TWICE:
self.fields['password2'] = PasswordField(
label=_("Password (again)"))
if hasattr(self, 'field_order'):
set_form_field_order(self, self.field_order)
def clean(self):
super(SignupForm, self).clean()
# `password` cannot be of type `SetPasswordField`, as we don't
# have a `User` yet. So, let's populate a dummy user to be used
        # for password validation.
dummy_user = get_user_model()
user_username(dummy_user, self.cleaned_data.get("username"))
user_email(dummy_user, self.cleaned_data.get("email"))
password = self.cleaned_data.get('password1')
if password:
try:
get_adapter().clean_password(
password,
user=dummy_user)
except forms.ValidationError as e:
self.add_error('password1', e)
if app_settings.SIGNUP_PASSWORD_ENTER_TWICE \
and "password1" in self.cleaned_data \
and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] \
!= self.cleaned_data["password2"]:
self.add_error(
'password2',
_("You must type the same password each time."))
return self.cleaned_data
def save(self, request):
adapter = get_adapter(request)
user = adapter.new_user(request)
adapter.save_user(request, user, self)
self.custom_signup(request, user)
# TODO: Move into adapter `save_user` ?
setup_user_email(request, user, [])
return user
class UserForm(forms.Form):
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(UserForm, self).__init__(*args, **kwargs)
class AddEmailForm(UserForm):
email = forms.EmailField(
label=_("E-mail"),
required=True,
widget=forms.TextInput(
attrs={"type": "email",
"size": "30",
"placeholder": _('E-mail address')}))
def clean_email(self):
value = self.cleaned_data["email"]
value = get_adapter().clean_email(value)
errors = {
"this_account": _("This e-mail address is already associated"
" with this account."),
"different_account": _("This e-mail address is already associated"
" with another account."),
}
users = filter_users_by_email(value)
on_this_account = [u for u in users if u.pk == self.user.pk]
on_diff_account = [u for u in users if u.pk != self.user.pk]
if on_this_account:
raise forms.ValidationError(errors["this_account"])
if on_diff_account and app_settings.UNIQUE_EMAIL:
raise forms.ValidationError(errors["different_account"])
return value
def save(self, request):
return EmailAddress.objects.add_email(request,
self.user,
self.cleaned_data["email"],
confirm=True)
class ChangePasswordForm(PasswordVerificationMixin, UserForm):
oldpassword = PasswordField(label=_("Current Password"))
password1 = SetPasswordField(label=_("New Password"))
password2 = PasswordField(label=_("New Password (again)"))
def __init__(self, *args, **kwargs):
super(ChangePasswordForm, self).__init__(*args, **kwargs)
self.fields['password1'].user = self.user
def clean_oldpassword(self):
if not self.user.check_password(self.cleaned_data.get("oldpassword")):
raise forms.ValidationError(_("Please type your current"
" password."))
return self.cleaned_data["oldpassword"]
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class SetPasswordForm(PasswordVerificationMixin, UserForm):
password1 = SetPasswordField(label=_("Password"))
password2 = PasswordField(label=_("Password (again)"))
def __init__(self, *args, **kwargs):
super(SetPasswordForm, self).__init__(*args, **kwargs)
self.fields['password1'].user = self.user
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class ResetPasswordForm(forms.Form):
email = forms.EmailField(
label=_("E-mail"),
required=True,
widget=forms.TextInput(attrs={
"type": "email",
"size": "30",
"placeholder": _("E-mail address"),
})
)
def clean_email(self):
email = self.cleaned_data["email"]
email = get_adapter().clean_email(email)
self.users = filter_users_by_email(email)
if not self.users:
raise forms.ValidationError(_("The e-mail address is not assigned"
" to any user account"))
return self.cleaned_data["email"]
def save(self, request, **kwargs):
current_site = get_current_site(request)
email = self.cleaned_data["email"]
token_generator = kwargs.get("token_generator",
default_token_generator)
for user in self.users:
temp_key = token_generator.make_token(user)
# save it to the password reset model
# password_reset = PasswordReset(user=user, temp_key=temp_key)
# password_reset.save()
# send the password reset email
path = reverse("account_reset_password_from_key",
kwargs=dict(uidb36=user_pk_to_url_str(user),
key=temp_key))
url = build_absolute_uri(
request, path)
context = {"current_site": current_site,
"user": user,
"password_reset_url": url,
"request": request}
if app_settings.AUTHENTICATION_METHOD \
!= AuthenticationMethod.EMAIL:
context['username'] = user_username(user)
get_adapter(request).send_mail(
'account/email/password_reset_key',
email,
context)
return self.cleaned_data["email"]
class ResetPasswordKeyForm(PasswordVerificationMixin, forms.Form):
password1 = SetPasswordField(label=_("New Password"))
password2 = PasswordField(label=_("New Password (again)"))
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user", None)
self.temp_key = kwargs.pop("temp_key", None)
super(ResetPasswordKeyForm, self).__init__(*args, **kwargs)
self.fields['password1'].user = self.user
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class UserTokenForm(forms.Form):
uidb36 = forms.CharField()
key = forms.CharField()
reset_user = None
token_generator = default_token_generator
error_messages = {
'token_invalid': _('The password reset token was invalid.'),
}
def _get_user(self, uidb36):
User = get_user_model()
try:
pk = url_str_to_user_pk(uidb36)
return User.objects.get(pk=pk)
except (ValueError, User.DoesNotExist):
return None
def clean(self):
cleaned_data = super(UserTokenForm, self).clean()
uidb36 = cleaned_data.get('uidb36', None)
key = cleaned_data.get('key', None)
if not key:
raise forms.ValidationError(self.error_messages['token_invalid'])
self.reset_user = self._get_user(uidb36)
if (self.reset_user is None or
not self.token_generator.check_token(self.reset_user, key)):
raise forms.ValidationError(self.error_messages['token_invalid'])
return cleaned_data
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import hashlib
import operator
from c7n.actions import Action
from c7n.exceptions import PolicyValidationError
from c7n.filters import Filter
from c7n.query import QueryResourceManager, TypeInfo
from c7n.manager import resources
from c7n.tags import universal_augment
from c7n.utils import chunks, get_retry, local_session, type_schema, filter_empty
from c7n.version import version
from .aws import shape_validate
from .ec2 import EC2
@resources.register('ssm-parameter')
class SSMParameter(QueryResourceManager):
class resource_type(TypeInfo):
service = 'ssm'
enum_spec = ('describe_parameters', 'Parameters', None)
name = "Name"
id = "Name"
universal_taggable = True
arn_type = "parameter"
retry = staticmethod(get_retry(('Throttled',)))
permissions = ('ssm:GetParameters',
'ssm:DescribeParameters')
augment = universal_augment
@resources.register('ssm-managed-instance')
class ManagedInstance(QueryResourceManager):
class resource_type(TypeInfo):
service = 'ssm'
enum_spec = ('describe_instance_information', 'InstanceInformationList', None)
id = 'InstanceId'
name = 'Name'
date = 'RegistrationDate'
arn_type = "managed-instance"
permissions = ('ssm:DescribeInstanceInformation',)
@EC2.action_registry.register('send-command')
@ManagedInstance.action_registry.register('send-command')
class SendCommand(Action):
"""Run an SSM Automation Document on an instance.
:Example:
    Find ubuntu 18.04 instances that are active with ssm.
.. code-block:: yaml
policies:
- name: ec2-osquery-install
resource: ec2
filters:
- type: ssm
key: PingStatus
value: Online
- type: ssm
key: PlatformName
value: Ubuntu
- type: ssm
key: PlatformVersion
value: 18.04
actions:
- type: send-command
command:
DocumentName: AWS-RunShellScript
Parameters:
commands:
- wget https://pkg.osquery.io/deb/osquery_3.3.0_1.linux.amd64.deb
- dpkg -i osquery_3.3.0_1.linux.amd64.deb
"""
schema = type_schema(
'send-command',
command={'type': 'object'},
required=('command',))
permissions = ('ssm:SendCommand',)
shape = "SendCommandRequest"
annotation = 'c7n:SendCommand'
def validate(self):
shape_validate(self.data['command'], self.shape, 'ssm')
# If used against an ec2 resource, require an ssm status filter
# to ensure that we're not trying to send commands to instances
# that aren't in ssm.
if self.manager.type != 'ec2':
return
found = False
for f in self.manager.iter_filters():
if f.type == 'ssm':
found = True
break
if not found:
raise PolicyValidationError(
"send-command requires use of ssm filter on ec2 resources")
def process(self, resources):
client = local_session(self.manager.session_factory).client('ssm')
for resource_set in chunks(resources, 50):
self.process_resource_set(client, resource_set)
def process_resource_set(self, client, resources):
command = dict(self.data['command'])
command['InstanceIds'] = [
r['InstanceId'] for r in resources]
result = client.send_command(**command).get('Command')
for r in resources:
r.setdefault('c7n:SendCommand', []).append(result['CommandId'])
@resources.register('ssm-activation')
class SSMActivation(QueryResourceManager):
class resource_type(TypeInfo):
service = 'ssm'
enum_spec = ('describe_activations', 'ActivationList', None)
id = 'ActivationId'
name = 'Description'
date = 'CreatedDate'
arn = False
permissions = ('ssm:DescribeActivations',)
@SSMActivation.action_registry.register('delete')
class DeleteSSMActivation(Action):
schema = type_schema('delete')
permissions = ('ssm:DeleteActivation',)
def process(self, resources):
client = local_session(self.manager.session_factory).client('ssm')
for a in resources:
client.delete_activation(ActivationId=a["ActivationId"])
@resources.register('ops-item')
class OpsItem(QueryResourceManager):
"""Resource for OpsItems in SSM OpsCenter
https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html
"""
class resource_type(TypeInfo):
enum_spec = ('describe_ops_items', 'OpsItemSummaries', None)
service = 'ssm'
arn_type = 'opsitem'
id = 'OpsItemId'
name = 'Title'
default_report_fields = (
'Status', 'Title', 'LastModifiedTime',
'CreatedBy', 'CreatedTime')
QueryKeys = set((
'Status',
'CreatedBy',
'Source',
'Priority',
'Title',
'OpsItemId',
'CreatedTime',
'LastModifiedTime',
'OperationalData',
'OperationalDataKey',
'OperationalDataValue',
'ResourceId',
'AutomationId'))
QueryOperators = set(('Equal', 'LessThan', 'GreaterThan', 'Contains'))
def validate(self):
self.query = self.resource_query()
return super(OpsItem, self).validate()
def get_resources(self, ids, cache=True, augment=True):
if isinstance(ids, str):
ids = [ids]
return self.resources({
'OpsItemFilters': [{
'Key': 'OpsItemId',
'Values': [i],
'Operator': 'Equal'} for i in ids]})
def resources(self, query=None):
q = self.resource_query()
if q and query and 'OpsItemFilters' in query:
q['OpsItemFilters'].extend(query['OpsItemFilters'])
return super(OpsItem, self).resources(query=q)
def resource_query(self):
filters = []
for q in self.data.get('query', ()):
if (not isinstance(q, dict) or
not set(q.keys()) == set(('Key', 'Values', 'Operator')) or
q['Key'] not in self.QueryKeys or
q['Operator'] not in self.QueryOperators):
raise PolicyValidationError(
"invalid ops-item query %s" % self.data['query'])
filters.append(q)
return {'OpsItemFilters': filters}
@OpsItem.action_registry.register('update')
class UpdateOpsItem(Action):
"""Update an ops item.
    :example:
Close out open ops items older than 30 days for a given issue.
.. code-block:: yaml
policies:
- name: issue-items
resource: aws.ops-item
filters:
- Status: Open
- Title: checking-lambdas
- type: value
key: CreatedTime
value_type: age
op: greater-than
value: 30
actions:
- type: update
status: Resolved
"""
schema = type_schema(
'update',
description={'type': 'string'},
priority={'enum': list(range(1, 6))},
title={'type': 'string'},
topics={'type': 'array', 'items': {'type': 'string'}},
status={'enum': ['Open', 'In Progress', 'Resolved']},
)
permissions = ('ssm:UpdateOpsItem',)
def process(self, resources):
attrs = dict(self.data)
attrs = filter_empty({
'Description': attrs.get('description'),
'Title': attrs.get('title'),
'Priority': attrs.get('priority'),
'Status': attrs.get('status'),
'Notifications': [{'Arn': a} for a in attrs.get('topics', ())]})
modified = []
for r in resources:
for k, v in attrs.items():
if k not in r or r[k] != v:
modified.append(r)
self.log.debug("Updating %d of %d ops items", len(modified), len(resources))
client = local_session(self.manager.session_factory).client('ssm')
for m in modified:
client.update_ops_item(OpsItemId=m['OpsItemId'], **attrs)
class OpsItemFilter(Filter):
"""Filter resources associated to extant OpsCenter operational items.
:example:
Find ec2 instances with open ops items.
.. code-block:: yaml
policies:
- name: ec2-instances-ops-items
resource: ec2
filters:
- type: ops-item
# we can filter on source, title, priority
priority: [1, 2]
"""
schema = type_schema(
'ops-item',
status={'type': 'array',
'default': ['Open'],
'items': {'enum': ['Open', 'In progress', 'Resolved']}},
priority={'type': 'array', 'items': {'enum': list(range(1, 6))}},
title={'type': 'string'},
source={'type': 'string'})
schema_alias = True
permissions = ('ssm:DescribeOpsItems',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ssm')
results = []
for resource_set in chunks(resources, 10):
qf = self.get_query_filter(resource_set)
items = client.describe_ops_items(**qf).get('OpsItemSummaries')
arn_item_map = {}
for i in items:
for arn in json.loads(
i['OperationalData']['/aws/resources']['Value']):
arn_item_map.setdefault(arn['arn'], []).append(i['OpsItemId'])
for arn, r in zip(self.manager.get_arns(resource_set), resource_set):
if arn in arn_item_map:
r['c7n:opsitems'] = arn_item_map[arn]
results.append(r)
return results
def get_query_filter(self, resources):
q = []
q.append({'Key': 'Status', 'Operator': 'Equal',
'Values': self.data.get('status', ('Open',))})
if self.data.get('priority'):
q.append({'Key': 'Priority', 'Operator': 'Equal',
'Values': list(map(str, self.data['priority']))})
if self.data.get('title'):
q.append({'Key': 'Title', 'Operator': 'Contains',
'Values': [self.data['title']]})
if self.data.get('source'):
q.append({'Key': 'Source', 'Operator': 'Equal',
'Values': [self.data['source']]})
q.append({'Key': 'ResourceId', 'Operator': 'Contains',
'Values': [r[self.manager.resource_type.id] for r in resources]})
return {'OpsItemFilters': q}
@classmethod
def register(cls, registry, _):
for resource in registry.keys():
klass = registry.get(resource)
klass.filter_registry.register('ops-item', cls)
resources.subscribe(resources.EVENT_FINAL, OpsItemFilter.register)
class PostItem(Action):
"""Post an OpsItem to AWS Systems Manager OpsCenter Dashboard.
https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html
    Each ops item supports up to 100 associated resources. This
action supports the builtin OpsCenter dedup logic with additional
support for associating new resources to existing Open ops items.
    :example:
Create an ops item for ec2 instances with Create User permissions
.. code-block:: yaml
policies:
- name: over-privileged-ec2
resource: aws.ec2
filters:
- type: check-permissions
match: allowed
actions:
- iam:CreateUser
actions:
- type: post-item
priority: 3
The builtin OpsCenter dedup logic will kick in if the same
resource set (ec2 instances in this case) is posted for the same
policy.
    :example:
Create an ops item for sqs queues with cross account access as ops items.
.. code-block:: yaml
policies:
- name: sqs-cross-account-access
resource: aws.sqs
filters:
- type: cross-account
actions:
- type: mark-for-op
days: 5
op: delete
- type: post-item
title: SQS Cross Account Access
description: |
Cross Account Access detected in SQS resource IAM Policy.
tags:
Topic: Security
"""
schema = type_schema(
'post-item',
description={'type': 'string'},
tags={'type': 'object'},
priority={'enum': list(range(1, 6))},
title={'type': 'string'},
topics={'type': 'string'},
)
schema_alias = True
permissions = ('ssm:CreateOpsItem',)
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client('ssm')
item_template = self.get_item_template()
resources = list(sorted(resources, key=operator.itemgetter(
self.manager.resource_type.id)))
items = self.get_items(client, item_template)
if items:
# - Use a copy of the template as we'll be passing in status changes on updates.
            # - The returned resources will be those that we couldn't fit into updates
# to existing resources.
resources = self.update_items(client, items, dict(item_template), resources)
item_ids = [i['OpsItemId'] for i in items[:5]]
for resource_set in chunks(resources, 100):
resource_arns = json.dumps(
[{'arn': arn} for arn in sorted(self.manager.get_arns(resource_set))])
item_template['OperationalData']['/aws/resources'] = {
'Type': 'SearchableString', 'Value': resource_arns}
if items:
item_template['RelatedOpsItems'] = [
{'OpsItemId': item_ids[:5]}]
try:
oid = client.create_ops_item(**item_template).get('OpsItemId')
item_ids.insert(0, oid)
except client.exceptions.OpsItemAlreadyExistsException:
pass
for r in resource_set:
r['c7n:opsitem'] = oid
def get_items(self, client, item_template):
qf = [
{'Key': 'OperationalDataValue',
'Operator': 'Contains',
'Values': [item_template['OperationalData'][
'/custodian/dedup']['Value']]},
{'Key': 'OperationalDataKey',
'Operator': 'Equal',
'Values': ['/custodian/dedup']},
{'Key': 'Status',
'Operator': 'Equal',
# In progress could imply activity/executions underway, we don't want to update
# the resource set out from underneath that so only look at Open state.
'Values': ['Open']},
{'Key': 'Source',
'Operator': 'Equal',
'Values': ['Cloud Custodian']}]
items = client.describe_ops_items(OpsItemFilters=qf)['OpsItemSummaries']
return list(sorted(items, key=operator.itemgetter('CreatedTime'), reverse=True))
def update_items(self, client, items, item_template, resources):
"""Update existing Open OpsItems with new resources.
        Originally this tried to support attribute updates as well, but
        the reasoning there gets complicated by partial-state evaluation
        on any given execution, so for now it's restricted to updating
        associated resources. For management of ops items, use a policy
        on the ops-item resource.
        Rationale: typically a custodian policy evaluates only a partial
        set of resources on any given execution (e.g. a lambda looking at
        newly created resources), whereas a collection of ops center items
        represents the total set. Custodian can multiplex the partial set
        of resources over a set of ops items (100 resources per item),
        which minimizes the item count. When updating the state of an ops
        item, though, we have to contend with the possibility that we're
        doing so with only partial state, which could be confusing if we
        set the Status to Resolved while evaluating only a handful of the
        resources associated with an ops item.
"""
arn_item_map = {}
item_arn_map = {}
for i in items:
item_arn_map[i['OpsItemId']] = arns = json.loads(
i['OperationalData']['/aws/resources']['Value'])
for arn in arns:
arn_item_map[arn['arn']] = i['OpsItemId']
arn_resource_map = dict(zip(self.manager.get_arns(resources), resources))
added = set(arn_resource_map).difference(arn_item_map)
updated = set()
remainder = []
# Check for resource additions
for a in added:
handled = False
for i in items:
if len(item_arn_map[i['OpsItemId']]) >= 100:
continue
item_arn_map[i['OpsItemId']].append({'arn': a})
updated.add(i['OpsItemId'])
arn_resource_map[a]['c7n:opsitem'] = i['OpsItemId']
handled = True
break
if not handled:
remainder.append(a)
for i in items:
if not i['OpsItemId'] in updated:
continue
i = dict(i)
for k in ('CreatedBy', 'CreatedTime', 'Source', 'LastModifiedBy',
'LastModifiedTime'):
i.pop(k, None)
i['OperationalData']['/aws/resources']['Value'] = json.dumps(
item_arn_map[i['OpsItemId']])
i['OperationalData'].pop('/aws/dedup', None)
client.update_ops_item(**i)
return remainder
def get_item_template(self):
title = self.data.get('title', self.manager.data['name']).strip()
dedup = ("%s %s %s %s" % (
title,
self.manager.type,
self.manager.config.region,
self.manager.config.account_id)).encode('utf8')
        # the size restriction on this value is 4-20 chars; the md5 hexdigest is 32
dedup = hashlib.md5(dedup).hexdigest()[:20]
i = dict(
Title=title,
Description=self.data.get(
'description',
self.manager.data.get(
'description',
self.manager.data.get('name'))),
Priority=self.data.get('priority'),
Source="Cloud Custodian",
Tags=[{'Key': k, 'Value': v} for k, v in self.data.get(
'tags', self.manager.data.get('tags', {})).items()],
Notifications=[{'Arn': a} for a in self.data.get('topics', ())],
OperationalData={
'/aws/dedup': {
'Type': 'SearchableString',
'Value': json.dumps({'dedupString': dedup})},
'/custodian/execution-id': {
'Type': 'String',
'Value': self.manager.ctx.execution_id},
# We need our own dedup string to be able to filter
# search on it.
'/custodian/dedup': {
'Type': 'SearchableString',
'Value': dedup},
'/custodian/policy': {
'Type': 'String',
'Value': json.dumps(self.manager.data)},
'/custodian/version': {
'Type': 'String',
'Value': version},
'/custodian/policy-name': {
'Type': 'SearchableString',
'Value': self.manager.data['name']},
'/custodian/resource': {
'Type': 'SearchableString',
'Value': self.manager.type},
}
)
return filter_empty(i)
@classmethod
def register(cls, registry, _):
for resource in registry.keys():
klass = registry.get(resource)
klass.action_registry.register('post-item', cls)
resources.subscribe(resources.EVENT_FINAL, PostItem.register)
|
|
'''
Evaluation code for the SICK dataset (SemEval 2014 Task 1)
'''
import numpy as np
import os.path
from util import *
from sklearn.metrics import mean_squared_error as mse
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from sklearn.utils import shuffle
from utils.score import report_score
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score
from keras.utils.np_utils import to_categorical
import pandas as pd
from sklearn.model_selection import train_test_split
import random
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, Merge, Embedding
from keras import regularizers
from keras.layers import Merge, Input, Multiply, Layer
from sklearn.preprocessing import StandardScaler
def split(train_l,train_r,label,ratio):
total = train_l.shape[0]
train_samples = int(total*(1-ratio))
test_samples = total-train_samples
tr_l,tst_l,tr_r,tst_r,l_tr,l_tst=[],[],[],[],[],[]
dat=random.sample(range(total),train_samples)
for a in dat:
tr_l.append(train_l[a])
tr_r.append(train_r[a])
l_tr.append(label[a])
print 'splitting - validation samples ',test_samples
for i in range(total):
if i not in dat:
tst_l.append(train_l[i])
tst_r.append(train_r[i])
l_tst.append(label[i])
print 'splitting - train samples ',len(dat)
tr_l = np.array(tr_l)
tr_r = np.array(tr_r)
tst_l = np.array(tst_l)
tst_r = np.array(tst_r)
l_tr = np.array(l_tr)
l_tst = np.array(l_tst)
return tr_l,tst_l,tr_r,tst_r,l_tr,l_tst
def load_dataset(file_trhead, file_trbody, file_tshead, file_tsbody):
trhead = pd.read_csv(file_trhead)
trbody = pd.read_csv(file_trbody)
tshead = pd.read_csv(file_tshead)
tsbody = pd.read_csv(file_tsbody)
tr_head_array = trhead.values
tr_body_array = trbody.values
ts_head_array = tshead.values
ts_body_array = tsbody.values
tr_labels = tr_head_array[:,2]
ts_labels = ts_head_array[:,2]
tr_body_id = tr_head_array[:,1]
train_dh = tr_head_array[:,0] ##########
train_db = []
for i in range(len(tr_head_array)):
for j in range(len(tr_body_array)):
if tr_body_array[j][0] == tr_body_id[i]:
train_db.append(tr_body_array[j][1])
break
tr_lab = []
for i in tr_labels:
if i == 'unrelated':
tr_lab.append(3)
if i == 'agree':
tr_lab.append(0)
if i == 'discuss':
tr_lab.append(2)
if i == 'disagree':
tr_lab.append(1)
train_db = np.array(train_db) ##############
ts_body_id = ts_head_array[:,1]
test_dh = ts_head_array[:,0] ##########
test_db = []
for i in range(len(ts_head_array)):
for j in range(len(ts_body_array)):
if ts_body_array[j][0] == ts_body_id[i]:
test_db.append(ts_body_array[j][1])
break
ts_lab = []
for i in ts_labels:
if i == 'unrelated':
ts_lab.append(3)
if i == 'agree':
ts_lab.append(0)
if i == 'discuss':
ts_lab.append(2)
if i == 'disagree':
ts_lab.append(1)
test_db= np.array(test_db) #############
#signs=['?','.',]
print("Refining train datset")
train_rdh = []
for i in range(len(train_dh)):
sentence = ""
for char in train_dh[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
train_rdh.append(sentence)
train_rdb = []
for i in range(len(train_db)):
sentence = ""
for char in train_db[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
train_rdb.append(sentence)
print("Refining test datset")
test_rdh = []
for i in range(len(test_dh)):
sentence = ""
for char in test_dh[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
test_rdh.append(sentence)
test_rdb = []
for i in range(len(test_db)):
sentence = ""
for char in test_db[i]:
if char.isalpha() or char == ' ':
sentence+=char.lower()
else:
sentence+=' '
test_rdb.append(sentence)
dic = pd.read_pickle('stop_dic')
train_new_rdb = []
test_new_rdb = []
word_limit = 250
    print 'removing stop words and using a', word_limit, 'word limit .....'
for i in train_rdb:
temp=[]
for j in i.split():
try:
a=dic[j]
except:
temp.append(j)
train_new_rdb.append(' '.join(temp[0:min(len(temp),word_limit)]))
for i in test_rdb:
temp=[]
for j in i.split():
try:
a=dic[j]
except:
temp.append(j)
test_new_rdb.append(' '.join(temp[0:min(len(temp),word_limit)]))
train_rdh = np.array(train_rdh)
test_rdh = np.array(test_rdh)
train_new_rdb = np.array(train_new_rdb)
test_new_rdb = np.array(test_new_rdb)
return train_rdh, train_new_rdb, test_rdh, test_new_rdb
#tr_h, dev_h, tr_b, dev_b, tr_s, dev_s = split(np.array(train_rdh), np.array(train_rdb), tr_lab, 0.2)
#return [tr_h, tr_b], [dev_h, dev_b], [tr_s, dev_s]
def evaluate(encoder=None, seed=1234, evaltest=False, loc='./data/'):
"""
Run experiment
"""
print 'Preparing data for fnc...'
#train, dev, test, scores = load_data(loc)
#train[0], train[1], scores[0] = shuffle(train[0], train[1], scores[0], random_state=seed)
'''
trh, trb, tsh, tsb =\
load_dataset("/fnc_data/train_stances.csv", "/fnc_data/train_bodies.csv",\
"/fnc_data/competition_test_stances.csv", "/fnc_data/test_bodies.csv")
'''
train_h = np.load('/fncdata2/encode_train_head.npy')
train_b = np.load('/fncdata2/encode_train_body.npy')
test_h = np.load('/fncdata2/encode_test_head.npy')
test_b = np.load('/fncdata2/encode_test_body.npy')
score_train = np.load('/fncdata2/score_train.npy')
score_test = np.load('/fncdata2/score_test.npy')
#train_b = big_mat
#train_h, dev_h, train_b, dev_b, score_train, dev_score = split(np.array(train_h), train_b, score_train, 0.2)
print 'loading training skipthoughts...'
#trainA = encoder.encode(train_h, verbose=False, use_eos=True)
#trainB = encoder.encode(train_b, verbose=False, use_eos=True)
trainA = train_h
trainB = train_b
print 'Computing development skipthoughts...'
#devA = encoder.encode(dev_h, verbose=False, use_eos=True)
#devB = encoder.encode(dev_b, verbose=False, use_eos=True)
# devA = dev_h
# devB = dev_b
devA = test_h
devB = test_b
dev_score = score_test
print 'Computing feature combinations...'
trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
devF = np.c_[np.abs(devA - devB), devA * devB]
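    # Each sentence pair is summarised by the usual SICK-style combination:
    # the element-wise absolute difference and the element-wise product of
    # the two encodings, concatenated column-wise.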
print 'Encoding labels...'
#trainY = encode_labels(train_labels)
#devY = encode_labels(holdout_labels)
trainY = to_categorical(score_train, 4)
devY = to_categorical(dev_score, 4)
train_Fx, test_Fx = load_features()
#fmodel = generate_feature_model(train_Fx, score_train, test_Fx, dev_score, ninputs=len(train_Fx[0]))
train_tfidf, test_tfidf = generate_tfidf()
print 'Compiling model...'
lrmodel = prepare_model(ninputs=trainF.shape[1],n_feats=train_Fx.shape[1],n_tfidf=train_tfidf.shape[1])
print 'Training...'
bestlrmodel = train_model(lrmodel, trainF, trainY, devF, devY, dev_score, train_Fx, test_Fx, train_tfidf, test_tfidf)
if evaltest:
print 'Loading test skipthoughts...'
testA = test_h
testB = test_b
print 'Computing feature combinations...'
testF = np.c_[np.abs(testA - testB), testA * testB]
yhat = bestlrmodel.predict(testF, verbose=2)
        yhat = [i.argmax() for i in yhat]
string_predicted,test_stances = [],[]
for i,j in zip(yhat,score_test):
if i == 3:
string_predicted.append('unrelated')
if i == 0:
string_predicted.append('agree')
if i == 2:
string_predicted.append('discuss')
if i == 1:
string_predicted.append('disagree')
if j == 3:
test_stances.append('unrelated')
if j == 0:
test_stances.append('agree')
if j == 2:
test_stances.append('discuss')
if j == 1:
test_stances.append('disagree')
report_score(test_stances,string_predicted)
score = accuracy_score(score_test, yhat)
print 'accuracy is ..',score
#print 'Evaluating...'
def generate_tfidf():
file_train_instances = "/fncdata/train_stances.csv"
file_train_bodies = "/fncdata/train_bodies.csv"
file_test_instances = "/fncdata/competition_test_stances.csv"
file_test_bodies = "/fncdata/test_bodies.csv"
raw_train = FNCData(file_train_instances, file_train_bodies)
raw_test = FNCData(file_test_instances, file_test_bodies)
n_train = len(raw_train.instances)
lim_unigram = 5000
train_set, train_stances, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer = \
pipeline_train(raw_train, raw_test, lim_unigram=lim_unigram)
#feature_size = len(train_set[0])
test_set = pipeline_test(raw_test, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer)
return np.array(train_set), np.array(test_set)
def prepare_model(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
inp1 = Input(shape=(ninputs,))
inp2 = Input(shape=(n_feats,))
inp3 = Input(shape=(n_tfidf,))
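# The three inputs match the feature blocks built by the driver code above:
# inp1 takes the skip-thought combination [|head - body|, head * body],
# inp2 the hand-crafted features from load_features(), and inp3 the TF-IDF
# features from generate_tfidf().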
reg = 0.00005
out_neurons1 = 500
#out_neurons2 = 20
#out_neurons2 = 10
m1 = Dense(input_dim=ninputs, output_dim=out_neurons1, activation='sigmoid',
kernel_regularizer=regularizers.l2(0.00000001))(inp1)
m1 = Dropout(0.2)(m1)
m1 = Dense(100,activation='sigmoid')(m1)
#m1 = Dropout(0.2)(m1)
#m1 = Dense(4, activation='sigmoid')(m1)
#m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
m2 = Dense(50,activation='relu')(inp2)
#m2=Dense(4,activation='relu')(m2)
m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
kernel_regularizer=regularizers.l2(reg))(inp3)
m3 = Dropout(0.4)(m3)
m3 = Dense(50, activation='relu')(m3)
#m3 = Dropout(0.4)(m3)
#m3 = Dense(4, activation='softmax')(m3)
#m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
#m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
m = Merge(mode='concat')([m1,m2,m3])
#mul = Multiply()([m1,m2])
#add = Abs()([m1,m2])
#m = Merge(mode='concat')([mul,add])
score = Dense(output_dim=nclass,activation='softmax')(m)
model = Model([inp1,inp2,inp3],score)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
def prepare_model2(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
inp1 = Input(shape=(ninputs,))
inp2 = Input(shape=(n_feats,))
inp3 = Input(shape=(n_tfidf,))
reg = 0.00005
out_neurons1 = 500
#out_neurons2 = 20
#out_neurons2 = 10
m1 = Dense(input_dim=ninputs, output_dim=out_neurons1, activation='sigmoid',
kernel_regularizer=regularizers.l2(0.00000001))(inp1)
m1 = Dropout(0.2)(m1)
m1 = Dense(100,activation='sigmoid')(m1)
#m1 = Dropout(0.2)(m1)
#m1 = Dense(4, activation='sigmoid')(m1)
m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
m2 = Dense(4,activation='relu')(inp2)
#m2=Dense(4,activation='relu')(m2)
m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
kernel_regularizer=regularizers.l2(reg))(inp3)
m3 = Dropout(0.4)(m3)
m3 = Dense(50, activation='relu')(m3)
#m3 = Dropout(0.4)(m3)
#m3 = Dense(4, activation='softmax')(m3)
#m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
#m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
m = Merge(mode='concat')([m1,m2,m3])
#mul = Multiply()([m1,m2])
#add = Abs()([m1,m2])
#m = Merge(mode='concat')([mul,add])
score = Dense(output_dim=nclass,activation='softmax')(m)
model = Model([inp1,inp2,inp3],score)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
def prepare_model1(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
inp1 = Input(shape=(ninputs,))
inp2 = Input(shape=(n_feats,))
inp3 = Input(shape=(n_tfidf,))
reg = 0.00005
out_neurons1 = 500
#out_neurons2 = 20
#out_neurons2 = 10
m1 = Dense(input_dim=ninputs, output_dim=out_neurons1, activation='sigmoid',
kernel_regularizer=regularizers.l2(0.00000001))(inp1)
m1 = Dropout(0.5)(m1)
m1 = Dense(100,activation='sigmoid')(m1)
m1 = Dropout(0.5)(m1)
m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
m2 = Dense(30,activation='relu')(m2)
m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
kernel_regularizer=regularizers.l2(reg))(inp3)
m3 = Dropout(0.6)(m3)
m3 = Dense(100, activation='relu')(m3)
m3 = Dropout(0.4)(m3)
m3 = Dense(4, activation='softmax')(m3)
#m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
#m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
m = Merge(mode='concat')([m1,m2,m3])
#mul = Multiply()([m1,m2])
#add = Abs()([m1,m2])
#m = Merge(mode='concat')([mul,add])
score = Dense(output_dim=nclass,activation='softmax')(m)
model = Model([inp1,inp2,inp3],score)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
"""
Set up and compile the model architecture (Logistic regression)
print 'changed'
out_neurons1 = 500
lrmodel = Sequential()
lrmodel.add(Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
,kernel_regularizer=regularizers.l2(0.00000001)))
lrmodel.add(Dropout(0.5))
#lrmodel.add(Dense(out_neurons2))
#lrmodel.add(Dropout(0.5))
lrmodel.add(Dense(output_dim=nclass))
#lrmodel.add(Dense(input_dim=ninputs, output_dim=nclass))
#lrmodel.add(Dropout(0.3))
lrmodel.add(Activation('softmax'))
lrmodel.compile(loss='categorical_crossentropy', optimizer='adam')
return lrmodel
"""
def train_model(lrmodel, X, Y, devX, devY, devscores, feat_train, feat_dev, train_tfidf, test_tfidf):
"""
Train the model, early-stopping on the FNC score computed on the dev set
"""
done = False
best = -1.0
#r = np.arange(1,5)
while not done:
# Train a few epochs at a time, then check the FNC score on the development set
lrmodel.fit([X, feat_train, train_tfidf], Y, verbose=2, shuffle=False, nb_epoch=3, validation_data=([devX, feat_dev, test_tfidf], devY))
#yhat = np.dot(lrmodel.predict(devX, verbose=2), r)
yhat = lrmodel.predict([devX,feat_dev,test_tfidf], verbose=2)
yhat = [i.argmax() for i in yhat]
label_map = {0: 'agree', 1: 'disagree', 2: 'discuss', 3: 'unrelated'}
string_predicted = [label_map[i] for i in yhat]
test_stances = [label_map[j] for j in devscores]
print 'using new limit value....'
#score = accuracy_score(devscores, yhat)
score = report_score(test_stances,string_predicted,val=True)
#return lrmodel
if score > best:
print score
best = score
bestlrmodel = prepare_model(ninputs=X.shape[1],n_feats=feat_train.shape[1],n_tfidf=train_tfidf.shape[1])
bestlrmodel.set_weights(lrmodel.get_weights())
else:
done = True
print '***** best model obtained with score',best,'******'
yhat = bestlrmodel.predict([devX, feat_dev, test_tfidf], verbose=2)
yhat = [i.argmax() for i in yhat]
label_map = {0: 'agree', 1: 'disagree', 2: 'discuss', 3: 'unrelated'}
string_predicted = [label_map[i] for i in yhat]
test_stances = [label_map[j] for j in devscores]
report_score(test_stances,string_predicted)
return bestlrmodel
import math
def load_features():
train_hand = np.load('/fncdata3/hand.train.npy')
#train_overlap = np.load('/fncdata3/overlap.train.npy')
#train_refuting = np.load('/fncdata3/refuting.train.npy')
#train_polarity = np.load('/fncdata3/polarity.train.npy')
test_hand = np.load('/fncdata3/hand.test.npy')
#test_overlap = np.load('/fncdata3/overlap.test.npy')
#test_refuting = np.load('/fncdata3/refuting.test.npy')
#test_polarity = np.load('/fncdata3/polarity.test.npy')
'''
train_other = np.load('/fncdata4/x_train.npy')
test_other = np.load('/fncdata4/x_test.npy')
train_other = train_other[:,16]
test_other = test_other[:,16]
#train_X = np.c_[train_polarity, train_refuting, train_overlap]
#test_X = np.c_[test_polarity, test_refuting, test_overlap]
for k,i in enumerate(test_other):
if math.isnan(i):
#print 'here',k
test_other[k] = 0.0
train_X = np.c_[train_hand, train_other]
test_X = np.c_[test_hand, test_other]
train_feat = np.load('/fncdata3/feat_train.npy')
train_other = np.load('/fncdata3/x_train.npy')
test_feat = np.load('/fncdata3/feat_test.npy')
test_other = np.load('/fncdata3/x_test.npy')
train_X = np.c_[train_feat, train_other]
test_X = np.c_[test_feat, test_other]
for k,i in enumerate(test_X):
for ind,j in enumerate(i):
if math.isnan(j):
#print 'here',k
test_X[k][ind] = 0.0
ss = StandardScaler()
ss.fit(np.vstack((train_X, test_X)))
feat1_train = ss.transform(train_X)
feat1_test = ss.transform(test_X)
#feat_dev = feat1_train[len(trainF):]
#feat1_train = feat1_train[0:len(trainF)]
#feat_dev = feat1_test
'''
return train_hand, test_hand
|
|
# http://inamidst.com/saxo/
# Created by Sean B. Palmer
import html.entities
import re
import urllib.parse
import urllib.request
# NOTE: the (?i) inline flag also works in bytes patterns
_regex_charset = re.compile(b"(?i)<meta[^>]+charset=[\"']?([^\"'> \r\n\t]+)")
_regex_entity = re.compile(r"&([^;\s]+);")
_regex_key = re.compile(r'([^=]+)')
_regex_value = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*"|[^;]+)')
user_agent = "Mozilla/5.0 (Services)"
# modern_user_agent = " ".join([
# "Mozilla/5.0",
# "(Macintosh; Intel Mac OS X 10.9; rv:26.0)"
# "Gecko/20100101 Firefox/26.0"])
modern_user_agent = " ".join([
"Mozilla/5.0",
"(Macintosh; Intel Mac OS X 10.10; rv:37.0)",
"Gecko/20100101 Firefox/37.0"])
def content_type(info):
mime = None
encoding = None
def parse(parameters):
while parameters:
match = _regex_key.match(parameters)
if not match:
break
key = match.group(1)
parameters = parameters[len(key):]
if parameters.startswith("="):
parameters = parameters[1:]
match = _regex_value.match(parameters)
if not match:
break
value = match.group(1)
if value.startswith('"'):
value = value[1:-1].replace('\\"', '"')
parameters = parameters[len(value):]
if parameters.startswith(";"):
parameters = parameters[1:]
key = key.lower().strip(" \t")
value = value.lower().strip(" \t")
yield key, value
if "Content-Type" in info:
header = info["Content-Type"]
if ";" in header:
mime, parameters = header.split(";", 1)
else:
mime, parameters = header, ""
for key, value in parse(parameters):
if key == "charset":
encoding = value
break
return mime, encoding
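# Example (illustrative): for a response header
#   Content-Type: text/html; charset=UTF-8
# content_type(info) returns ("text/html", "utf-8").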
def decode_entities(hypertext):
def entity(match):
name = match.group(1).lower()
if name.startswith("#x"):
return chr(int(name[2:], 16))
elif name.startswith("#"):
return chr(int(name[1:]))
elif name in html.entities.name2codepoint:
return chr(html.entities.name2codepoint[name])
return "[" + name + "]"
def default(match):
try: return entity(match)
except Exception: return match.group(1)
return _regex_entity.sub(default, hypertext)
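# Example (illustrative):
#   decode_entities("Tom &amp; Jerry &#169; 2014 &bogus;")
#   -> 'Tom & Jerry © 2014 [bogus]'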
def construct(url, query=None):
safe = "".join(chr(i) for i in range(0x01, 0x80))
base = urllib.parse.quote(url, safe=safe)
if query:
query = urllib.parse.urlencode(query)
return "?".join((base, query))
return base
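# Example (illustrative):
#   construct("http://example.org/search", {"q": "fish and chips"})
#   -> 'http://example.org/search?q=fish+and+chips'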
def request(url, query=None, data=None, method="GET",
limit=None, follow=True, headers=None, modern=False):
if url.startswith("file:"):
raise ValueError("file: scheme is not allowed")
headers = {} if (headers is None) else headers
response = {}
if "User-Agent" not in headers:
modern = modern is True
headers["User-Agent"] = modern_user_agent if modern else user_agent
response["request-headers"] = headers
parts = list(urllib.parse.urlparse(url))
try: parts[1].encode("ascii")
except UnicodeEncodeError:
parts[1] = parts[1].encode("idna").decode("ascii")
url = urllib.parse.urlunparse(tuple(parts))
safe = "".join(chr(i) for i in range(0x01, 0x80))
base = urllib.parse.quote(url, safe=safe)
if query:
query = urllib.parse.urlencode(query)
response["request-url"] = "?".join((base, query))
else:
response["request-url"] = base
class ErrorHandler(urllib.request.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
return fp
handlers = [ErrorHandler()]
if follow:
handlers.append(urllib.request.HTTPRedirectHandler())
opener = urllib.request.build_opener(*handlers)
urllib.request.install_opener(opener)
params = {
"url": response["request-url"],
"headers": response["request-headers"]
}
if data is not None:
if isinstance(data, dict):
data = urllib.parse.urlencode(data)
params["data"] = data.encode("utf-8", "replace")
elif isinstance(data, bytes):
params["data"] = data
elif isinstance(data, str):
params["data"] = data.encode("utf-8", "replace")
else:
raise Exception("Unknown data type: %s" % type(data))
# print("PARAMS:", params)
req = urllib.request.Request(**params)
with urllib.request.urlopen(req) as res:
response["url"] = res.url
response["status"] = res.status # int
response["info"] = res.info()
response["headers"] = {
a.lower(): b for (a, b) in response["info"].items()
}
if method in {"GET", "POST"}:
if limit is None:
response["octets"] = res.read()
else:
response["octets"] = res.read(limit)
if "Content-Encoding" in response["info"]:
if response["info"]["Content-Encoding"] == "gzip":
from gzip import GzipFile
from io import BytesIO
sio = BytesIO(response["octets"])
gz = GzipFile(fileobj=sio)
try: response["octets"] = gz.read()
except OSError:
# e.g. not gzip encoded, despite the site saying it is
...
mime, encoding = content_type(response["info"])
if mime:
response["mime"] = mime
if encoding:
response["encoding"] = encoding
response["encoding-source"] = "Content-Type"
if mime and ("octets" in response):
if ("/html" in mime) or ("/xhtml" in mime):
search = _regex_charset.search(response["octets"])
if search:
html_encoding = search.group(1).lower()
html_encoding = html_encoding.decode("ascii", "replace")
if encoding and (encoding == html_encoding):
response["encoding-source"] += ", HTML"
else: # TODO: Precedence check
response["encoding"] = html_encoding
response["encoding-source"] = "HTML"
if "octets" in response:
def guess_encoding(response):
try: response["text"] = response["octets"].decode("utf-8")
except UnicodeDecodeError:
response["text"] = response["octets"].decode("iso-8859-1")
response["encoding"] = "iso-8859-1"
else:
response["encoding"] = "utf-8"
response["encoding-source"] = "heuristic"
if "encoding" in response:
encoding = response["encoding"]
try: response["text"] = response["octets"].decode(encoding)
except (UnicodeDecodeError, LookupError):
guess_encoding(response)
else:
guess_encoding(response)
if mime and ("text" in response):
if ("/html" in mime) or ("/xhtml" in mime):
response["html"] = response["text"]
response["text"] = decode_entities(response["text"])
response["decoded-entities"] = True
return response
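# Typical usage (illustrative sketch; example.org is a placeholder URL):
#   page = request("http://example.org/", limit=1048576)
#   if "text" in page:
#       print(page["status"], page.get("mime"), page.get("encoding"))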
|
|
from __future__ import absolute_import
import numpy as np
from keras import backend as K
from keras import activations, initializations, regularizers
from keras.engine import Layer, InputSpec
from keras.layers import Recurrent
from keras.layers import time_distributed_dense
import memory as EM
import head
class NTM(Recurrent):
'''Neural Turing Machine layer. The recurrent step currently implements a
standard Long Short-Term Memory (LSTM) unit (Hochreiter & Schmidhuber, 1997).
For a step-by-step description of the algorithm, see
[this tutorial](http://deeplearning.net/tutorial/lstm.html).
# Arguments
output_dim: dimension of the internal projections and the final output.
init: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
inner_init: initialization function of the inner cells.
forget_bias_init: initialization function for the bias of the forget gate.
[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recommend initializing with ones.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
inner_activation: activation function for the inner cells.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_W: float between 0 and 1. Fraction of the input units to drop for input gates.
dropout_U: float between 0 and 1. Fraction of the input units to drop for recurrent connections.
# References
- [Long short-term memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) (original 1997 paper)
- [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
- [Supervised sequence labelling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
'''
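# Minimal usage sketch (illustrative; assumes the Keras 1.x Sequential API this
# module targets; `timesteps`, `input_dim` and `nb_classes` are placeholders):
#
#     from keras.models import Sequential
#     from keras.layers import Dense
#     model = Sequential()
#     model.add(NTM(output_dim=64, input_shape=(timesteps, input_dim)))
#     model.add(Dense(nb_classes, activation='softmax'))
#     model.compile(loss='categorical_crossentropy', optimizer='adam')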
def __init__(self, output_dim, memory_dim=128, memory_size=20,
controller_output_dim=100, location_shift_range=1,
num_read_head=1, num_write_head=1,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, R_regularizer=None,
b_regularizer=None, W_y_regularizer=None,
W_xi_regularizer=None, W_r_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
# NTM-specific hyperparameters (not yet used by the LSTM step below).
self.memory_dim = memory_dim
self.memory_size = memory_size
self.controller_output_dim = controller_output_dim
self.location_shift_range = location_shift_range
self.num_read_head = num_read_head
self.num_write_head = num_write_head
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(NTM, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape[2]
if self.stateful:
self.reset_states()
else:
# initial states: 2 all-zero tensors of shape (output_dim)
self.states = [None, None]
if self.consume_less == 'gpu':
self.W = self.init((self.input_dim, 4 * self.output_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.output_dim, 4 * self.output_dim),
name='{}_U'.format(self.name))
self.b = K.variable(np.hstack((np.zeros(self.output_dim),
K.get_value(self.forget_bias_init((self.output_dim,))),
np.zeros(self.output_dim),
np.zeros(self.output_dim))),
name='{}_b'.format(self.name))
self.trainable_weights = [self.W, self.U, self.b]
else:
self.W_i = self.init((self.input_dim, self.output_dim),
name='{}_W_i'.format(self.name))
self.U_i = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_i'.format(self.name))
self.b_i = K.zeros((self.output_dim,), name='{}_b_i'.format(self.name))
self.W_f = self.init((self.input_dim, self.output_dim),
name='{}_W_f'.format(self.name))
self.U_f = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_f'.format(self.name))
self.b_f = self.forget_bias_init((self.output_dim,),
name='{}_b_f'.format(self.name))
self.W_c = self.init((self.input_dim, self.output_dim),
name='{}_W_c'.format(self.name))
self.U_c = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_c'.format(self.name))
self.b_c = K.zeros((self.output_dim,), name='{}_b_c'.format(self.name))
self.W_o = self.init((self.input_dim, self.output_dim),
name='{}_W_o'.format(self.name))
self.U_o = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_o'.format(self.name))
self.b_o = K.zeros((self.output_dim,), name='{}_b_o'.format(self.name))
self.trainable_weights = [self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o]
self.W = K.concatenate([self.W_i, self.W_f, self.W_c, self.W_o])
self.U = K.concatenate([self.U_i, self.U_f, self.U_c, self.U_o])
self.b = K.concatenate([self.b_i, self.b_f, self.b_c, self.b_o])
self.regularizers = []
if self.W_regularizer:
self.W_regularizer.set_param(self.W)
self.regularizers.append(self.W_regularizer)
if self.U_regularizer:
self.U_regularizer.set_param(self.U)
self.regularizers.append(self.U_regularizer)
if self.b_regularizer:
self.b_regularizer.set_param(self.b)
self.regularizers.append(self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise Exception('If a RNN is stateful, a complete ' +
'input_shape must be provided (including batch size).')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.output_dim)))
K.set_value(self.states[1],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.output_dim)),
K.zeros((input_shape[0], self.output_dim))]
def preprocess_input(self, x):
if self.consume_less == 'cpu':
if 0 < self.dropout_W < 1:
dropout = self.dropout_W
else:
dropout = 0
input_shape = self.input_spec[0].shape
input_dim = input_shape[2]
timesteps = input_shape[1]
x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
input_dim, self.output_dim, timesteps)
x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
input_dim, self.output_dim, timesteps)
x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
input_dim, self.output_dim, timesteps)
x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
input_dim, self.output_dim, timesteps)
return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
else:
return x
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
if self.consume_less == 'gpu':
z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim: 2 * self.output_dim]
z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
else:
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
elif self.consume_less == 'mem':
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
else:
raise Exception('Unknown `consume_less` mode.')
i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))
h = o * self.activation(c)
return h, [h, c]
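# With the default activations (inner_activation='hard_sigmoid', activation='tanh'),
# the step above is the standard LSTM update:
#   i = sigma(x_i + U_i . h_{t-1})        f = sigma(x_f + U_f . h_{t-1})
#   c = f * c_{t-1} + i * tanh(x_c + U_c . h_{t-1})
#   o = sigma(x_o + U_o . h_{t-1})        h = o * tanh(c)
# where the x_* terms already include the input projections W_* x_t and biases b_*.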
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.output_dim))
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'forget_bias_init': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'inner_activation': self.inner_activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(NTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from django.db import connection
from django.db.models import Prefetch
from django.db.models.query import get_prefetcher
from django.test import TestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .models import (Author, Book, Reader, Qualification, Teacher, Department,
TaggedItem, Bookmark, AuthorAddress, FavoriteAuthors, AuthorWithAge,
BookWithYear, BookReview, Person, House, Room, Employee, Comment,
LessonEntry, WordEntry, Author2)
class PrefetchRelatedTests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author.objects.create(name="Charlotte",
first_book=self.book1)
self.author2 = Author.objects.create(name="Anne",
first_book=self.book1)
self.author3 = Author.objects.create(name="Emily",
first_book=self.book1)
self.author4 = Author.objects.create(name="Jane",
first_book=self.book4)
self.book1.authors.add(self.author1, self.author2, self.author3)
self.book2.authors.add(self.author1)
self.book3.authors.add(self.author3)
self.book4.authors.add(self.author4)
self.reader1 = Reader.objects.create(name="Amy")
self.reader2 = Reader.objects.create(name="Belinda")
self.reader1.books_read.add(self.book1, self.book4)
self.reader2.books_read.add(self.book2, self.book4)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertTrue(qs[0] in qs)
def test_clear(self):
"""
Test that we can clear the prefetch lookups by calling prefetch_related(None)
"""
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""
Test we can follow a m2m and another m2m
"""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sensibility
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sensibility
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sensibility
])
def test_get(self):
"""
Test that objects retrieved with .get() get the prefetch behavior.
"""
# Needs a double (nested) lookup, books__read_by, to exercise prefetching via .get()
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[six.text_type(r) for r in b.read_by.all()]
for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
Test we can follow an m2m relation after a relation like ForeignKey
that doesn't have many objects
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[six.text_type(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"],
["Amy"],
["Amy"],
["Amy", "Belinda"]])
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
with self.assertRaises(AttributeError) as cm:
list(qs)
self.assertTrue('prefetch_related' in str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
with self.assertRaises(ValueError) as cm:
list(qs)
self.assertTrue('prefetch_related' in str(cm.exception))
self.assertTrue("name" in str(cm.exception))
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
Helper method that returns a list of (object, related objects) pairs for the
objects in obj_iter. For each object, the given path is traversed recursively
and the objects found along it are collected as the second item of the pair.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
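# Example (illustrative): traverse_qs(Author.objects.all(), [['books', 'read_by']])
# returns a nested structure of (object, related) pairs, e.g.
# [(author, [(book, [(reader, []), ...]), ...]), ...].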
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
self.house1 = House.objects.create(name='House 1', address="123 Main St", owner=self.person1)
self.house2 = House.objects.create(name='House 2', address="45 Side St", owner=self.person1)
self.house3 = House.objects.create(name='House 3', address="6 Downing St", owner=self.person2)
self.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=self.person2)
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.room2_3 = Room.objects.create(name="Kitchen", house=self.house2)
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.room4_3 = Room.objects.create(name="Kitchen", house=self.house4)
self.person1.houses.add(self.house1, self.house2)
self.person2.houses.add(self.house3, self.house4)
self.house1.main_room = self.room1_1
self.house1.save()
self.house2.main_room = self.room2_1
self.house2.save()
self.house3.main_room = self.room3_1
self.house3.save()
self.house4.main_room = self.room4_1
self.house4.save()
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs]
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, related_objs_from_traverse)
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
with self.assertRaises(ValueError):
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
[['houses', 'rooms']]
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
with self.assertRaises(AttributeError):
self.traverse_qs(
Person.objects.prefetch_related('houses_lst__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]), to_attr='houses_lst')))
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst')))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ReverseSingleRelatedObjectDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.filter(address='DoesNotExist'))).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test SingleRelatedObjectDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')))
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')).first()
self.assertIsNone(room.main_room_of_attr)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join(["%s department: %s\n" %
(dept.name, ", ".join(six.text_type(t) for t in dept.teachers.all()))
for dept in qs])
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
def setUp(self):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
self.book1, self.book2, self.book3 = book1, book2, book3
self.reader1, self.reader2, self.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="stupid", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_traverse_GFK(self):
"""
Test that we can traverse a 'content_object' with prefetch_related() and
get to related objects on the other side (assuming it is suitably
filtered)
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = set([r.name for tag in qs
for r in tag.content_object.read_by.all()])
self.assertEqual(readers_of_awesome_books, set(["me", "you", "someone"]))
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted([i.tag for i in bookmark.tags.all()]), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
class MultiTableInheritanceTest(TestCase):
def setUp(self):
self.book1 = BookWithYear.objects.create(
title="Poems", published_year=2010)
self.book2 = BookWithYear.objects.create(
title="More poems", published_year=2011)
self.author1 = AuthorWithAge.objects.create(
name='Jane', first_book=self.book1, age=50)
self.author2 = AuthorWithAge.objects.create(
name='Tom', first_book=self.book1, age=49)
self.author3 = AuthorWithAge.objects.create(
name='Robert', first_book=self.book2, age=48)
self.authorAddress = AuthorAddress.objects.create(
author=self.author1, address='SomeStreet 1')
self.book2.aged_authors.add(self.author2, self.author3)
self.br1 = BookReview.objects.create(
book=self.book1, notes="review book1")
self.br2 = BookReview.objects.create(
book=self.book2, notes="review book2")
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[six.text_type(book) for book in author.books_with_year.all()]
for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[six.text_type(book) for book in author.books_with_year.all()]
for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[six.text_type(author) for author in book.aged_authors.all()]
for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[six.text_type(author) for author in book.aged_authors.all()]
for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
l = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(l, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
def setUp(self):
self.book = Book.objects.create(title="Poems")
self.author1 = Author.objects.create(name='Jane', first_book=self.book)
self.author2 = Author.objects.create(name='Tom', first_book=self.book)
self.author3 = Author.objects.create(name='Robert', first_book=self.book)
self.authorAddress = AuthorAddress.objects.create(
author=self.author1, address='SomeStreet 1'
)
FavoriteAuthors.objects.create(author=self.author1,
likes_author=self.author2)
FavoriteAuthors.objects.create(author=self.author2,
likes_author=self.author3)
FavoriteAuthors.objects.create(author=self.author3,
likes_author=self.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[six.text_type(i_like) for i_like in author.favorite_authors.all()],
[six.text_type(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([six.text_type(self.author2)], [six.text_type(self.author3)]),
([six.text_type(self.author3)], [six.text_type(self.author1)]),
([six.text_type(self.author1)], [six.text_type(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
self.house1 = House.objects.create(address="123 Main St")
self.house2 = House.objects.create(address="45 Side St")
self.house3 = House.objects.create(address="6 Downing St")
self.house4 = House.objects.create(address="7 Regents St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.person1.houses.add(self.house1, self.house2)
self.person2.houses.add(self.house3, self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
def setUp(self):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
in_bulk() prefetches related objects correctly because it does not use
.iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Check that prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
multi_db = True
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join(["%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1])
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join(["%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2])
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte",
first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne",
first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily",
first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane",
first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', force_text(queryset.query))
|
|
from django.conf.urls import url
from django.conf import settings
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.views import APIView
from onadata.apps.api.viewsets.charts_viewset import ChartsViewSet
from onadata.apps.api.viewsets.connect_viewset import ConnectViewSet
from onadata.apps.api.viewsets.data_viewset import DataViewSet
from onadata.apps.api.viewsets.metadata_viewset import MetaDataViewSet
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.organization_profile_viewset import\
OrganizationProfileViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.apps.api.viewsets.stats_viewset import StatsViewSet
from onadata.apps.api.viewsets.team_viewset import TeamViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.api.viewsets.user_profile_viewset import UserProfileViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
from onadata.apps.api.viewsets.submissionstats_viewset import\
SubmissionStatsViewSet
from onadata.apps.api.viewsets.attachment_viewset import AttachmentViewSet
from onadata.apps.api.viewsets.xform_list_viewset import XFormListViewSet
from onadata.apps.api.viewsets.xform_submission_viewset import\
XFormSubmissionViewSet
from onadata.apps.api.viewsets.briefcase_viewset import BriefcaseViewset
from onadata.apps.api.viewsets.osm_viewset import OsmViewSet
from onadata.apps.restservice.viewsets.restservices_viewset import \
RestServicesViewSet
from onadata.apps.api.viewsets.media_viewset import MediaViewSet
def make_routes(template_text):
return routers.Route(
url=r'^{prefix}/{%s}{trailing_slash}$' % template_text,
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'})
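# Illustrative note (not part of the original module): make_routes('lookups')
# produces a list/create route whose URL template is
# r'^{prefix}/{lookups}{trailing_slash}$'; the '{lookups}' placeholder is later
# filled in by MultiLookupRouter.get_urls() using the per-field regexes built
# by get_lookup_regexes().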
class MultiLookupRouter(routers.DefaultRouter):
def __init__(self, *args, **kwargs):
super(MultiLookupRouter, self).__init__(*args, **kwargs)
self.lookups_routes = []
self.lookups_routes.append(routers.Route(
url=r'^{prefix}/{lookups}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
))
self.lookups_routes.append(make_routes('lookup'))
self.lookups_routes.append(make_routes('lookups'))
# Dynamically generated routes.
# Generated using @action or @link decorators on methods of the viewset
self.lookups_routes.append(routers.Route(
url=[
r'^{prefix}/{lookups}/{methodname}{trailing_slash}$',
r'^{prefix}/{lookups}/{methodname}/{extra}{trailing_slash}$'],
mapping={
'{httpmethod}': '{methodname}',
},
name='{basename}-{methodnamehyphen}',
initkwargs={}
))
def get_extra_lookup_regexes(self, route):
ret = []
base_regex = '(?P<{lookup_field}>[^/]+)'
if 'extra_lookup_fields' in route.initkwargs:
for lookup_field in route.initkwargs['extra_lookup_fields']:
ret.append(base_regex.format(lookup_field=lookup_field))
return '/'.join(ret)
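    # Illustration (hypothetical initkwargs, for example only): a route with
    # initkwargs={'extra_lookup_fields': ['pk', 'dataid']} yields the extra
    # regex fragment '(?P<pk>[^/]+)/(?P<dataid>[^/]+)'.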
def get_lookup_regexes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
for i in range(1, len(lookup_fields)):
tmp = []
for lookup_field in lookup_fields[:i + 1]:
if lookup_field == lookup_fields[i]:
base_regex = '(?P<{lookup_field}>[^/.]+)'
else:
base_regex = '(?P<{lookup_field}>[^/]+)'
tmp.append(base_regex.format(lookup_field=lookup_field))
ret.append(tmp)
return ret
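    # Illustration (assumed viewset, for example only): a viewset with
    # lookup_fields = ('owner', 'pk') yields
    # [['(?P<owner>[^/]+)', '(?P<pk>[^/.]+)']]; only the final field excludes
    # dots so that format suffixes can still be matched after it.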
def get_lookup_routes(self, viewset):
ret = [self.routes[0]]
# Determine any `@action` or `@link` decorated methods on the viewset
dynamic_routes = []
for methodname in dir(viewset):
attr = getattr(viewset, methodname)
httpmethods = getattr(attr, 'bind_to_methods', None)
if httpmethods:
httpmethods = [method.lower() for method in httpmethods]
dynamic_routes.append((httpmethods, methodname))
for route in self.lookups_routes:
if route.mapping == {'{httpmethod}': '{methodname}'}:
# Dynamic routes (@link or @action decorator)
for httpmethods, methodname in dynamic_routes:
initkwargs = route.initkwargs.copy()
initkwargs.update(getattr(viewset, methodname).kwargs)
mapping = dict(
(httpmethod, methodname) for httpmethod in httpmethods)
name = routers.replace_methodname(route.name, methodname)
if 'extra_lookup_fields' in initkwargs:
uri = route.url[1]
uri = routers.replace_methodname(uri, methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name='%s-extra' % name,
initkwargs=initkwargs,
))
uri = routers.replace_methodname(route.url[0], methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name=name,
initkwargs=initkwargs,
))
else:
# Standard route
ret.append(route)
return ret
def get_routes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
ret = self.get_lookup_routes(viewset)
else:
ret = super(MultiLookupRouter, self).get_routes(viewset)
return ret
def get_api_root_view(self):
"""
Return a view to use as the API root.
"""
api_root_dict = {}
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
class OnaApi(APIView):
"""
## Ona JSON Rest API endpoints:
"""
def get(self, request, format=None):
ret = {}
for key, url_name in api_root_dict.items():
ret[key] = reverse(
url_name, request=request, format=format)
# Adding for static documentation
ret['api-docs'] = \
request.build_absolute_uri(settings.STATIC_DOC)
return Response(ret)
return OnaApi.as_view()
def get_urls(self):
ret = []
if self.include_root_view:
root_url = url(r'^$', self.get_api_root_view(),
name=self.root_view_name)
ret.append(root_url)
for prefix, viewset, basename in self.registry:
lookup = self.get_lookup_regex(viewset)
lookup_list = self.get_lookup_regexes(viewset)
if lookup_list:
# lookup = lookups[0]
lookup_list = [u'/'.join(k) for k in lookup_list]
else:
lookup_list = [u'']
routes = self.get_routes(viewset)
for route in routes:
mapping = self.get_method_map(viewset, route.mapping)
if not mapping:
continue
for lookups in lookup_list:
regex = route.url.format(
prefix=prefix,
lookup=lookup,
lookups=lookups,
trailing_slash=self.trailing_slash,
extra=self.get_extra_lookup_regexes(route)
)
view = viewset.as_view(mapping, **route.initkwargs)
name = route.name.format(basename=basename)
ret.append(url(regex, view, name=name))
if self.include_format_suffixes:
ret = format_suffix_patterns(ret, allowed=['[a-z]+[0-9]*'])
return ret
router = MultiLookupRouter(trailing_slash=False)
router.register(r'users', UserViewSet)
router.register(r'user', ConnectViewSet)
router.register(r'profiles', UserProfileViewSet)
router.register(r'orgs', OrganizationProfileViewSet)
router.register(r'forms', XFormViewSet)
router.register(r'projects', ProjectViewSet)
router.register(r'teams', TeamViewSet)
router.register(r'notes', NoteViewSet)
router.register(r'data', DataViewSet, base_name='data')
router.register(r'stats', StatsViewSet, base_name='stats')
router.register(r'stats/submissions', SubmissionStatsViewSet,
base_name='submissionstats')
router.register(r'charts', ChartsViewSet, base_name='chart')
router.register(r'metadata', MetaDataViewSet, base_name='metadata')
router.register(r'media', AttachmentViewSet, base_name='attachment')
router.register(r'formlist', XFormListViewSet, base_name='formlist')
router.register(r'submissions', XFormSubmissionViewSet,
base_name='submissions')
router.register(r'briefcase', BriefcaseViewset, base_name='briefcase')
router.register(r'osm', OsmViewSet, base_name='osm')
router.register(r'restservices', RestServicesViewSet, base_name='restservices')
router.register(r'files', MediaViewSet, base_name='files')
|
|
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
TEST FILE 2
"""
from zope.interface import implements, Interface, Attribute
from twisted.python.reflect import namedAny
from twisted.python import components
from twisted.internet import defer
from twisted.persisted import sob
from twisted.plugin import IPlugin
class IServiceMaker(Interface):
"""
An object which can be used to construct services in a flexible
way.
This interface should most often be implemented along with
L{twisted.plugin.IPlugin}, and will most often be used by the
'twistd' command.
"""
tapname = Attribute(
"A short string naming this Twisted plugin, for example 'web' or "
"'pencil'. This name will be used as the subcommand of 'twistd'.")
description = Attribute(
"A brief summary of the features provided by this "
"Twisted application plugin.")
options = Attribute(
"A C{twisted.python.usage.Options} subclass defining the"
"configuration options for this application.")
def makeService(options):
"""
Create and return an object providing
L{twisted.application.service.IService}.
@param options: A mapping (typically a C{dict} or
C{twisted.python.usage.Options} instance) of configuration
options to desired configuration values.
"""
class ServiceMaker(object):
"""
Utility class to simplify the definition of L{IServiceMaker} plugins.
"""
implements(IPlugin, IServiceMaker)
def __init__(self, name, module, description, tapname):
self.name = name
self.module = module
self.description = description
self.tapname = tapname
def options():
def get(self):
return namedAny(self.module).Options
return get,
options = property(*options())
def makeService():
def get(self):
return namedAny(self.module).makeService
return get,
makeService = property(*makeService())
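    # Note: the two properties above resolve self.module with namedAny() only
    # on first access, so merely enumerating ServiceMaker plugins does not
    # import their implementation modules.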
class IService(Interface):
"""
A service.
Run start-up and shut-down code at the appropriate times.
@type name: C{string}
@ivar name: The name of the service (or None)
@type running: C{boolean}
@ivar running: Whether the service is running.
"""
def setName(name):
"""
Set the name of the service.
@type name: C{str}
@raise RuntimeError: Raised if the service already has a parent.
"""
def setServiceParent(parent):
"""
Set the parent of the service.
@type parent: L{IServiceCollection}
@raise RuntimeError: Raised if the service already has a parent
or if the service has a name and the parent already has a child
by that name.
"""
def disownServiceParent():
"""
Use this API to remove an L{IService} from an L{IServiceCollection}.
This method is used symmetrically with L{setServiceParent} in that it
sets the C{parent} attribute on the child.
@rtype: L{Deferred}
@return: a L{Deferred} which is triggered when the service has
finished shutting down. If shutting down is immediate,
a value can be returned (usually, C{None}).
"""
def startService():
"""
Start the service.
"""
def stopService():
"""
Stop the service.
@rtype: L{Deferred}
@return: a L{Deferred} which is triggered when the service has
finished shutting down. If shutting down is immediate, a
value can be returned (usually, C{None}).
"""
def privilegedStartService():
"""
Do preparation work for starting the service.
Here things which should be done before changing directory,
root or shedding privileges are done.
"""
class Service:
"""
Base class for services.
Most services should inherit from this class. It handles the
    book-keeping responsibilities of starting and stopping, as well
as not serializing this book-keeping information.
"""
implements(IService)
running = 0
name = None
parent = None
def __getstate__(self):
dict = self.__dict__.copy()
        if "running" in dict:
del dict['running']
return dict
def setName(self, name):
if self.parent is not None:
raise RuntimeError("cannot change name when parent exists")
self.name = name
def setServiceParent(self, parent):
if self.parent is not None:
self.disownServiceParent()
parent = IServiceCollection(parent, parent)
self.parent = parent
self.parent.addService(self)
def disownServiceParent(self):
d = self.parent.removeService(self)
self.parent = None
return d
def privilegedStartService(self):
pass
def startService(self):
self.running = 1
def stopService(self):
self.running = 0
class IServiceCollection(Interface):
"""
Collection of services.
Contain several services, and manage their start-up/shut-down.
Services can be accessed by name if they have a name, and it
is always possible to iterate over them.
"""
def getServiceNamed(name):
"""
Get the child service with a given name.
@type name: C{str}
@rtype: L{IService}
@raise KeyError: Raised if the service has no child with the
given name.
"""
def __iter__():
"""
Get an iterator over all child services.
"""
def addService(service):
"""
Add a child service.
@type service: L{IService}
@raise RuntimeError: Raised if the service has a child with
the given name.
"""
def removeService(service):
"""
Remove a child service.
Only implementations of L{IService.disownServiceParent} should
use this method.
@type service: L{IService}
@raise ValueError: Raised if the given service is not a child.
@rtype: L{Deferred}
@return: a L{Deferred} which is triggered when the service has
finished shutting down. If shutting down is immediate, a
value can be returned (usually, C{None}).
"""
class MultiService(Service):
"""
Straightforward Service Container.
Hold a collection of services, and manage them in a simplistic
way. No service will wait for another, but this object itself
will not finish shutting down until all of its child services
will finish.
"""
implements(IServiceCollection)
def __init__(self):
self.services = []
self.namedServices = {}
self.parent = None
def privilegedStartService(self):
Service.privilegedStartService(self)
for service in self:
service.privilegedStartService()
def startService(self):
Service.startService(self)
for service in self:
service.startService()
def stopService(self):
Service.stopService(self)
l = []
services = list(self)
services.reverse()
for service in services:
l.append(defer.maybeDeferred(service.stopService))
return defer.DeferredList(l)
def getServiceNamed(self, name):
return self.namedServices[name]
def __iter__(self):
return iter(self.services)
def addService(self, service):
if service.name is not None:
            if service.name in self.namedServices:
raise RuntimeError("cannot have two services with same name"
" '%s'" % service.name)
self.namedServices[service.name] = service
self.services.append(service)
if self.running:
# It may be too late for that, but we will do our best
service.privilegedStartService()
service.startService()
def removeService(self, service):
if service.name:
del self.namedServices[service.name]
self.services.remove(service)
if self.running:
# Returning this so as not to lose information from the
# MultiService.stopService deferred.
return service.stopService()
else:
return None
class IProcess(Interface):
"""
Process running parameters.
Represents parameters for how processes should be run.
"""
processName = Attribute(
"""
A C{str} giving the name the process should have in ps (or C{None}
to leave the name alone).
""")
uid = Attribute(
"""
An C{int} giving the user id as which the process should run (or
C{None} to leave the UID alone).
""")
gid = Attribute(
"""
An C{int} giving the group id as which the process should run (or
C{None} to leave the GID alone).
""")
class Process:
"""
Process running parameters.
Sets up uid/gid in the constructor, and has a default
of C{None} as C{processName}.
"""
implements(IProcess)
processName = None
def __init__(self, uid=None, gid=None):
"""
Set uid and gid.
@param uid: The user ID as whom to execute the process. If
this is C{None}, no attempt will be made to change the UID.
@param gid: The group ID as whom to execute the process. If
this is C{None}, no attempt will be made to change the GID.
"""
self.uid = uid
self.gid = gid
def Application(name, uid=None, gid=None):
"""
Return a compound class.
Return an object supporting the L{IService}, L{IServiceCollection},
L{IProcess} and L{sob.IPersistable} interfaces, with the given
parameters. Always access the return value by explicit casting to
one of the interfaces.
"""
ret = components.Componentized()
for comp in (MultiService(), sob.Persistent(ret, name), Process(uid, gid)):
ret.addComponent(comp, ignoreClass=1)
IService(ret).setName(name)
return ret
def loadApplication(filename, kind, passphrase=None):
"""
Load Application from a given file.
The serialization format it was saved in should be given as
C{kind}, and is one of C{pickle}, C{source}, C{xml} or C{python}. If
C{passphrase} is given, the application was encrypted with the
given passphrase.
@type filename: C{str}
@type kind: C{str}
@type passphrase: C{str}
"""
if kind == 'python':
application = sob.loadValueFromFile(filename, 'application', passphrase)
else:
application = sob.load(filename, kind, passphrase)
return application
__all__ = ['IServiceMaker', 'IService', 'Service',
'IServiceCollection', 'MultiService',
'IProcess', 'Process', 'Application', 'loadApplication']
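# Minimal usage sketch (illustrative only, not part of this module): services
# are composed into a tree and started or stopped through the parent.
#
#     parent = MultiService()
#     child = Service()
#     child.setName("worker")
#     child.setServiceParent(parent)   # registers the child under its name
#     parent.startService()            # starts every child service
#     assert parent.getServiceNamed("worker") is child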
|
|
from __future__ import print_function
import enum
import re
import struct
import sys
import threading
import time
import serial
from serial.tools.list_ports import comports
from common import *
def multichr(ords):
if sys.version_info[0] >= 3:
return bytes(ords)
else:
return ''.join(map(chr, ords))
def multiord(b):
if sys.version_info[0] >= 3:
return list(b)
else:
return map(ord, b)
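# For example, multichr([0x01, 0xff]) gives b'\x01\xff' on Python 3 (the str
# '\x01\xff' on Python 2), and multiord(b'\x01\xff') gives [1, 255]; the pair
# papers over the bytes/str split between the two Python versions.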
class Arm(enum.Enum):
UNKNOWN = 0
RIGHT = 1
LEFT = 2
class XDirection(enum.Enum):
UNKNOWN = 0
X_TOWARD_WRIST = 1
X_TOWARD_ELBOW = 2
class Pose(enum.Enum):
REST = 0
FIST = 1
WAVE_IN = 2
WAVE_OUT = 3
FINGERS_SPREAD = 4
THUMB_TO_PINKY = 5
UNKNOWN = 255
class Packet(object):
def __init__(self, ords):
self.typ = ords[0]
self.cls = ords[2]
self.cmd = ords[3]
self.payload = multichr(ords[4:])
def __repr__(self):
return 'Packet(%02X, %02X, %02X, [%s])' % \
(self.typ, self.cls, self.cmd,
' '.join('%02X' % b for b in multiord(self.payload)))
class BT(object):
'''Implements the non-Myo-specific details of the Bluetooth protocol.'''
def __init__(self, tty):
self.ser = serial.Serial(port=tty, baudrate=9600, dsrdtr=1)
self.buf = []
self.lock = threading.Lock()
self.handlers = []
## internal data-handling methods
def recv_packet(self, timeout=None):
t0 = time.time()
self.ser.timeout = None
while timeout is None or time.time() < t0 + timeout:
if timeout is not None: self.ser.timeout = t0 + timeout - time.time()
c = self.ser.read()
if not c: return None
ret = self.proc_byte(ord(c))
if ret:
if ret.typ == 0x80:
self.handle_event(ret)
return ret
def recv_packets(self, timeout=.5):
res = []
t0 = time.time()
while time.time() < t0 + timeout:
p = self.recv_packet(t0 + timeout - time.time())
if not p: return res
res.append(p)
return res
def proc_byte(self, c):
if not self.buf:
if c in [0x00, 0x80, 0x08, 0x88]:
self.buf.append(c)
return None
elif len(self.buf) == 1:
self.buf.append(c)
self.packet_len = 4 + (self.buf[0] & 0x07) + self.buf[1]
return None
else:
self.buf.append(c)
if self.packet_len and len(self.buf) == self.packet_len:
p = Packet(self.buf)
self.buf = []
return p
return None
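    # As framed above, a packet is a 4-byte header (type, length, class,
    # command) followed by a payload whose length is the low three bits of the
    # first byte plus the value of the second byte.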
def handle_event(self, p):
for h in self.handlers:
h(p)
def add_handler(self, h):
self.handlers.append(h)
def remove_handler(self, h):
try: self.handlers.remove(h)
except ValueError: pass
def wait_event(self, cls, cmd):
res = [None]
def h(p):
if p.cls == cls and p.cmd == cmd:
res[0] = p
self.add_handler(h)
while res[0] is None:
self.recv_packet()
self.remove_handler(h)
return res[0]
## specific BLE commands
def connect(self, addr):
return self.send_command(6, 3, pack('6sBHHHH', multichr(addr), 0, 6, 6, 64, 0))
def get_connections(self):
return self.send_command(0, 6)
def discover(self):
return self.send_command(6, 2, b'\x01')
def end_scan(self):
return self.send_command(6, 4)
def disconnect(self, h):
return self.send_command(3, 0, pack('B', h))
def read_attr(self, con, attr):
self.send_command(4, 4, pack('BH', con, attr))
return self.wait_event(4, 5)
def write_attr(self, con, attr, val):
self.send_command(4, 5, pack('BHB', con, attr, len(val)) + val)
return self.wait_event(4, 1)
def send_command(self, cls, cmd, payload=b'', wait_resp=True):
s = pack('4B', 0, len(payload), cls, cmd) + payload
self.ser.write(s)
while True:
p = self.recv_packet()
## no timeout, so p won't be None
if p.typ == 0: return p
## not a response: must be an event
self.handle_event(p)
class MyoRaw(object):
'''Implements the Myo-specific communication protocol.'''
def __init__(self, tty=None):
if tty is None:
tty = self.detect_tty()
if tty is None:
raise ValueError('Myo dongle not found!')
self.bt = BT(tty)
self.conn = None
self.emg_handlers = []
self.imu_handlers = []
self.arm_handlers = []
self.pose_handlers = []
def detect_tty(self):
for p in comports():
if re.search(r'PID=2458:0*1', p[2]):
print('using device:', p[0])
return p[0]
return None
def run(self, timeout=None):
self.bt.recv_packet(timeout)
def connect(self):
## stop everything from before
self.bt.end_scan()
self.bt.disconnect(0)
self.bt.disconnect(1)
self.bt.disconnect(2)
## start scanning
print('scanning...')
self.bt.discover()
while True:
p = self.bt.recv_packet()
print('scan response:', p)
if p.payload.endswith(b'\x06\x42\x48\x12\x4A\x7F\x2C\x48\x47\xB9\xDE\x04\xA9\x01\x00\x06\xD5'):
addr = list(multiord(p.payload[2:8]))
break
self.bt.end_scan()
## connect and wait for status event
conn_pkt = self.bt.connect(addr)
self.conn = multiord(conn_pkt.payload)[-1]
self.bt.wait_event(3, 0)
## get firmware version
fw = self.read_attr(0x17)
_, _, _, _, v0, v1, v2, v3 = unpack('BHBBHHHH', fw.payload)
print('firmware version: %d.%d.%d.%d' % (v0, v1, v2, v3))
self.old = (v0 == 0)
if self.old:
## don't know what these do; Myo Connect sends them, though we get data
## fine without them
self.write_attr(0x19, b'\x01\x02\x00\x00')
self.write_attr(0x2f, b'\x01\x00')
self.write_attr(0x2c, b'\x01\x00')
self.write_attr(0x32, b'\x01\x00')
self.write_attr(0x35, b'\x01\x00')
## enable EMG data
self.write_attr(0x28, b'\x01\x00')
## enable IMU data
self.write_attr(0x1d, b'\x01\x00')
## Sampling rate of the underlying EMG sensor, capped to 1000. If it's
## less than 1000, emg_hz is correct. If it is greater, the actual
## framerate starts dropping inversely. Also, if this is much less than
## 1000, EMG data becomes slower to respond to changes. In conclusion,
## 1000 is probably a good value.
C = 1000
emg_hz = 50
## strength of low-pass filtering of EMG data
emg_smooth = 100
imu_hz = 50
## send sensor parameters, or we don't get any data
self.write_attr(0x19, pack('BBBBHBBBBB', 2, 9, 2, 1, C, emg_smooth, C // emg_hz, imu_hz, 0, 0))
else:
name = self.read_attr(0x03)
print('device name: %s' % name.payload)
## enable IMU data
self.write_attr(0x1d, b'\x01\x00')
## enable on/off arm notifications
self.write_attr(0x24, b'\x02\x00')
# self.write_attr(0x19, b'\x01\x03\x00\x01\x01')
self.start_raw()
## add data handlers
def handle_data(p):
if (p.cls, p.cmd) != (4, 5): return
c, attr, typ = unpack('BHB', p.payload[:4])
pay = p.payload[5:]
if attr == 0x27:
vals = unpack('8HB', pay)
## not entirely sure what the last byte is, but it's a bitmask that
## seems to indicate which sensors think they're being moved around or
## something
emg = vals[:8]
moving = vals[8]
self.on_emg(emg, moving)
elif attr == 0x1c:
vals = unpack('10h', pay)
quat = vals[:4]
acc = vals[4:7]
gyro = vals[7:10]
self.on_imu(quat, acc, gyro)
elif attr == 0x23:
typ, val, xdir, _, _, _ = unpack('6B', pay)
if typ == 1: # on arm
self.on_arm(Arm(val), XDirection(xdir))
elif typ == 2: # removed from arm
self.on_arm(Arm.UNKNOWN, XDirection.UNKNOWN)
elif typ == 3: # pose
self.on_pose(Pose(val))
else:
print('data with unknown attr: %02X %s' % (attr, p))
self.bt.add_handler(handle_data)
def write_attr(self, attr, val):
if self.conn is not None:
self.bt.write_attr(self.conn, attr, val)
def read_attr(self, attr):
if self.conn is not None:
return self.bt.read_attr(self.conn, attr)
return None
def disconnect(self):
if self.conn is not None:
self.bt.disconnect(self.conn)
def start_raw(self):
'''Sending this sequence for v1.0 firmware seems to enable both raw data and
pose notifications.
'''
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
def mc_start_collection(self):
'''Myo Connect sends this sequence (or a reordering) when starting data
collection for v1.0 firmware; this enables raw data but disables arm and
pose notifications.
'''
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x19, b'\x09\x01\x01\x00\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x19, b'\x01\x03\x00\x01\x00')
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x00')
def mc_end_collection(self):
'''Myo Connect sends this sequence (or a reordering) when ending data collection
for v1.0 firmware; this reenables arm and pose notifications, but
doesn't disable raw data.
'''
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
self.write_attr(0x19, b'\x09\x01\x00\x00\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x00\x01\x01')
self.write_attr(0x28, b'\x01\x00')
self.write_attr(0x1d, b'\x01\x00')
self.write_attr(0x24, b'\x02\x00')
self.write_attr(0x19, b'\x01\x03\x01\x01\x01')
def vibrate(self, length):
        if length in range(1, 4):
## first byte tells it to vibrate; purpose of second byte is unknown
self.write_attr(0x19, pack('3B', 3, 1, length))
def add_emg_handler(self, h):
self.emg_handlers.append(h)
def add_imu_handler(self, h):
self.imu_handlers.append(h)
def add_pose_handler(self, h):
self.pose_handlers.append(h)
def add_arm_handler(self, h):
self.arm_handlers.append(h)
def on_emg(self, emg, moving):
for h in self.emg_handlers:
h(emg, moving)
def on_imu(self, quat, acc, gyro):
for h in self.imu_handlers:
h(quat, acc, gyro)
def on_pose(self, p):
for h in self.pose_handlers:
h(p)
def on_arm(self, arm, xdir):
for h in self.arm_handlers:
h(arm, xdir)
if __name__ == '__main__':
try:
import pygame
from pygame.locals import *
HAVE_PYGAME = True
except ImportError:
HAVE_PYGAME = False
if HAVE_PYGAME:
w, h = 1200, 400
scr = pygame.display.set_mode((w, h))
last_vals = None
def plot(scr, vals):
DRAW_LINES = False
global last_vals
if last_vals is None:
last_vals = vals
return
D = 5
scr.scroll(-D)
scr.fill((0,0,0), (w - D, 0, w, h))
for i, (u, v) in enumerate(zip(last_vals, vals)):
if DRAW_LINES:
pygame.draw.line(scr, (0,255,0),
(w - D, int(h/8 * (i+1 - u))),
(w, int(h/8 * (i+1 - v))))
pygame.draw.line(scr, (255,255,255),
(w - D, int(h/8 * (i+1))),
(w, int(h/8 * (i+1))))
else:
c = int(255 * max(0, min(1, v)))
scr.fill((c, c, c), (w - D, i * h / 8, D, (i + 1) * h / 8 - i * h / 8));
pygame.display.flip()
last_vals = vals
m = MyoRaw(sys.argv[1] if len(sys.argv) >= 2 else None)
def proc_emg(emg, moving, times=[]):
if HAVE_PYGAME:
## update pygame display
plot(scr, [e / 2000. for e in emg])
else:
print(emg)
## print framerate of received data
times.append(time.time())
if len(times) > 20:
#print((len(times) - 1) / (times[-1] - times[0]))
times.pop(0)
m.add_emg_handler(proc_emg)
m.connect()
m.add_arm_handler(lambda arm, xdir: print('arm', arm, 'xdir', xdir))
m.add_pose_handler(lambda p: print('pose', p))
try:
while True:
m.run(1)
if HAVE_PYGAME:
for ev in pygame.event.get():
if ev.type == QUIT or (ev.type == KEYDOWN and ev.unicode == 'q'):
raise KeyboardInterrupt()
elif ev.type == KEYDOWN:
if K_1 <= ev.key <= K_3:
m.vibrate(ev.key - K_0)
if K_KP1 <= ev.key <= K_KP3:
m.vibrate(ev.key - K_KP0)
except KeyboardInterrupt:
pass
finally:
m.disconnect()
print()
|
|
import re
import json
import hearthbreaker
from hearthbreaker.cards.heroes import hero_from_name
import hearthbreaker.constants
from hearthbreaker.engine import Game, card_lookup, Deck
import hearthbreaker.game_objects
import hearthbreaker.cards
import hearthbreaker.proxies
from hearthbreaker.serialization.move import Move, AttackMove, PowerMove, TurnEndMove, \
TurnStartMove, ConcedeMove, PlayMove, GameEndMove
from pprint import pprint
__doc__ = """
Responsible for reading and writing replays in either the compact or complete replay format (see the `replay format
<https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_ for details).
Recording a game
~~~~~~~~~~~~~~~~
Recording a game is a matter of creating a game, calling :meth:`record` on that game, playing the game, and then saving
the replay. For example: ::
game = create_a_game() # Create a game somehow
replay = record(game) # Create a replay that will track the game's moves
game.start() # Play the game
replay.write_json("my_replay.hsreplay") # Save the replay to a file
Playing back a game
~~~~~~~~~~~~~~~~~~~
Playing back a game is a matter of loading the replay, getting a game for playing it back, and then starting the game.
For example: ::
replay = Replay() # create a new replay object
replay.read_json("my_replay.hsreplay") # load the replay (this can be combined with the previous line)
game = playback(replay) # create a game associated with the replay
game.start() # play the recorded game
"""
class Replay:
"""
Encapsulates the data stored in a replay, along with functions to read and write replays. The data
stored in this class can be used for either recording or playing back replays.
"""
def __init__(self, filename=None):
"""
Create a new Replay. This replay can be used for recording or playing back a game.
If the `filename` string is present, then this will also load the file located at `filename` for playback
:param string filename: A string representing a filename for a replay file to load or None (the default).
If present, it will load the selected replay and prepare it for playback.
The replay file must be in the complete format
"""
self._moves = []
self.__next_target = None
self.__next_index = -1
self.decks = []
self.keeps = []
self.random = []
        with open("replay.schema.json", "r") as schema_file:
            self.schema = json.load(schema_file)
if filename is not None:
self.read_json(filename)
def _save_decks(self, deck1, deck2):
"""
Save the decks specified by the parameters
:param hearthbreaker.game_objects.Deck deck1: The deck for player 1
:param hearthbreaker.game_objects.Deck deck2: The deck for player 2
"""
self.decks = [deck1, deck2]
def _record_random(self, result):
"""
Record a random number that has been generated by the system.
        This random number will be added to the header if the game hasn't started, or to the most recent
move if it has.
"""
if len(self._moves) > 0:
if self._moves[-1].__class__.__name__ != 'GameEndMove':
self._moves[-1].random_numbers.append(result)
else:
self._moves[-2].random_numbers.append(result)
else:
self.random.append(result)
def _record_card_played(self, card, index):
"""
Record that a card has been played. This will add a new PlayMove to the moves array
"""
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(index), target=card.target))
if self.__next_index >= 0:
self._moves[-1].index = self.__next_index
self.__next_index = -1
def _record_option_chosen(self, option):
"""
Record that an option was chosen. This will update whichever is the most recent move
"""
self._moves[-1].card.set_option(option)
def _record_attack(self, attacker, target):
"""
Record that an attack occurred. This will create a new AttackMove in the moves array
"""
self._moves.append(AttackMove(attacker, target))
self.__next_target = None
def _record_power(self):
"""
        Record that the current player used their hero power
"""
self._moves.append(PowerMove(self.__next_target))
self.__next_target = None
def _record_target(self, target):
"""
Record that a target was chosen. This affects PlayMoves and PowerMoves. AttackMoves have
their target passed in as an argument
"""
self.__next_target = target
def _record_index(self, index):
"""
Records the index that a minion is played at. Will update the most recent move with this index
"""
self.__next_index = index
def _record_kept_index(self, cards, card_index):
"""
Records the index of the cards that a player kept.
"""
k_arr = []
for index in range(0, len(cards)):
if card_index[index]:
k_arr.append(index)
self.keeps.append(k_arr)
def _record_game_end(self, winner):
"""
Record the end of the game
"""
self._moves.append(GameEndMove(winner))
def __shorten_deck(self, cards):
"""
Mostly for testing, this function will check if the deck is made up of a repeating pattern and if so, shorten
the output, since the parser will generate the pattern from a shorter sample
:param cards: The deck of cards to replace
:return: an array of cards that represents the deck if repeated until 30 cards are found
"""
for pattern_length in range(1, 15):
matched = True
for index in range(pattern_length, 30):
if not isinstance(cards[index % pattern_length], type(cards[index])):
matched = False
break
if matched:
return cards[0:pattern_length]
return cards
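    # For example, a 30-card deck built by alternating two cards is written out
    # as just those two cards; read() and read_json() rebuild the full deck by
    # cycling the shortened list back up to 30 cards.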
def write(self, file):
"""
Write a replay in the compact format. This format is a series of directives, and isn't as flexible
        or well structured as the json format (in :meth:`write_json`). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
was_filename = False
writer = file
for deck in self.decks:
writer.write("deck(")
writer.write(deck.hero.short_name)
writer.write(",")
writer.write(",".join([card.name for card in self.__shorten_deck(deck.cards)]))
writer.write(")\n")
found_random = False
if self.random.count(0) == len(self.random):
for move in self._moves:
if move.random_numbers.count(0) != len(move.random_numbers):
found_random = True
break
else:
found_random = True
if not found_random:
writer.write("random()\n")
else:
writer.write("random(")
writer.write(",".join([str(num) for num in self.random]))
writer.write(")\n")
for keep in self.keeps:
writer.write("keep(")
writer.write(",".join([str(k) for k in keep]))
writer.write(")\n")
for move in self._moves:
writer.write(move.to_output_string() + "\n")
if len(move.random_numbers) > 0:
writer.write("random(")
writer.write(",".join([str(num) for num in move.random_numbers]))
writer.write(")\n")
if was_filename:
writer.close()
def write_json(self, file):
"""
Write a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file should be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
writer = file
header_cards = [{"cards": [card.name for card in self.__shorten_deck(deck.cards)],
"hero": deck.hero.short_name} for deck in self.decks]
header = {
'decks': header_cards,
'keep': self.keeps,
'random': self.random,
}
json.dump({'header': header, 'moves': self._moves}, writer, default=lambda o: o.__to_json__(), indent=2,
sort_keys=True)
if was_filename:
writer.close()
def read_json(self, file):
"""
Read a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
from jsonschema import validate
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
jd = json.load(file)
validate(jd, self.schema)
self.decks = []
for deck in jd['header']['decks']:
deck_size = len(deck['cards'])
cards = [card_lookup(deck['cards'][index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(deck['hero'])))
self.random = jd['header']['random']
self.keeps = jd['header']['keep']
if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
self._moves = [Move.from_json(**js) for js in jd['moves']]
if was_filename:
file.close()
def read(self, file):
"""
Read a replay in the compact format. This format is a series of directives, and isn't as flexible
        or well structured as the json format (in :meth:`write_json`). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
        line_pattern = re.compile(r"\s*(\w*)\s*\(([^)]*)\)\s*(;.*)?$")
for line in file:
(move, args) = line_pattern.match(line).group(1, 2)
args = [arg.strip() for arg in args.split(",")]
if move == 'play':
card = args[0]
if len(args) > 1:
target = args[1]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), target=target))
elif move == 'summon':
card = args[0]
index = int(args[1])
if len(args) > 2:
target = args[2]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), index, target))
elif move == 'attack':
self._moves.append(AttackMove(args[0], args[1]))
elif move == 'power':
if len(args) > 0 and args[0] != '':
self._moves.append(PowerMove(args[0]))
else:
self._moves.append(PowerMove())
elif move == 'end':
self._moves.append(TurnEndMove())
elif move == 'start':
self._moves.append(TurnStartMove())
elif move == 'random':
if len(self._moves) == 0:
if len(args[0]) > 0:
for num in args:
self.random.append(int(num))
else:
for num in args:
if num.isdigit():
self._moves[-1].random_numbers.append(int(num))
else:
self._moves[-1].random_numbers.append(hearthbreaker.proxies.ProxyCharacter(num))
elif move == 'deck':
if len(self.decks) > 1:
raise Exception("Maximum of two decks per file")
deck_size = len(args) - 1
cards = [card_lookup(args[1 + index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(args[0])))
elif move == 'keep':
if len(self.keeps) > 1:
raise Exception("Maximum of two keep directives per file")
self.keeps.append([int(a) for a in args])
elif move == 'concede':
self._moves.append(ConcedeMove())
elif move == 'game_end':
pass # currently we are not putting in game end because it will end anyways
if was_filename:
file.close()
        if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
def record(game):
"""
Ready a game for recording. This function must be called before the game is played.
Several methods of the game and its agents are modified. These modifications will not affect the operation
of the game or its agents, although any further modifications to these methods will not be recorded.
:param game: A game which has not been started
:type game: :class:`Game <hearthbreaker.game_objects.Game>`
:return: A replay that will track the actions of the game as it is played. Once the game is complete,
this replay can be written to a file to remember the state of this game.
:rtype: :class:`Replay`
"""
class RecordingAgent:
__slots__ = ['agent']
def __init__(self, proxied_agent):
object.__setattr__(self, "agent", proxied_agent)
def choose_index(self, card, player):
index = self.agent.choose_index(card, player)
replay._record_index(index)
return index
def choose_target(self, targets):
target = self.agent.choose_target(targets)
replay._record_target(target)
return target
def choose_option(self, options, player):
option = self.agent.choose_option(options, player)
replay._record_option_chosen(options.index(option))
return option
def __getattr__(self, item):
return self.agent.__getattribute__(item)
def __setattr__(self, key, value):
setattr(self.__getattribute__("agent"), key, value)
replay = hearthbreaker.replay.Replay()
replay.random.append(game.first_player)
game.players[0].agent = RecordingAgent(game.players[0].agent)
game.players[1].agent = RecordingAgent(game.players[1].agent)
if game.first_player == 0:
replay._save_decks(game.players[0].deck, game.players[1].deck)
else:
replay._save_decks(game.players[1].deck, game.players[0].deck)
game.bind("kept_cards", replay._record_kept_index)
game.bind("game_ended", replay._record_game_end)
for player in game.players:
player.bind("used_power", replay._record_power)
player.hero.bind("found_power_target", replay._record_target)
player.bind("card_played", replay._record_card_played)
player.bind("character_attack", replay._record_attack)
_old_random_choice = game.random_choice
_old_generate_random_between = game._generate_random_between
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
def random_choice(choice):
result = _old_random_choice(choice)
if isinstance(result, hearthbreaker.game_objects.Character):
replay._moves[-1].random_numbers[-1] = hearthbreaker.proxies.ProxyCharacter(result)
return result
def _generate_random_between(lowest, highest):
result = _old_generate_random_between(lowest, highest)
replay._record_random(result)
return result
def _end_turn():
replay._moves.append(TurnEndMove())
_old_end_turn()
def _start_turn():
replay._moves.append(TurnStartMove())
_old_start_turn()
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
return replay
def playback(replay):
"""
Create a game which can be replayed back out of a replay.
:param replay: The replay to load the game out of
:type replay: :class:`Replay`
:return: A game which when played will perform all of the actions in the replay.
:rtype: :class:`Game <hearthbreaker.game_objects.Game>`
"""
move_index = -1
k_index = 0
random_index = 0
game = None
class ReplayAgent:
def __init__(self):
self.next_target = None
self.next_index = -1
self.next_option = None
def do_card_check(self, cards):
nonlocal k_index
keep_arr = [False] * len(cards)
for index in replay.keeps[k_index]:
keep_arr[int(index)] = True
k_index += 1
return keep_arr
def do_turn(self, player):
nonlocal move_index, random_index
while move_index < len(replay._moves) and not player.hero.dead and type(
replay._moves[move_index]) is not hearthbreaker.serialization.move.TurnEndMove:
random_index = 0
print(replay._moves[move_index].to_output_string())
replay._moves[move_index].play(game)
move_index += 1
if move_index == len(replay._moves):
player.game.game_ended = True
def set_game(self, game):
pass
def choose_target(self, targets):
return self.next_target
def choose_index(self, card, player):
return self.next_index
def choose_option(self, options, player):
return options[self.next_option]
game = Game.__new__(Game)
_old_random_choice = game.random_choice
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
_old_pre_game = game.pre_game
def _generate_random_between(lowest, highest):
nonlocal random_index
if len(replay.random) == 0:
return 0
else:
random_index += 1
if move_index == -1:
return replay.random[random_index - 1]
return replay._moves[move_index].random_numbers[random_index - 1]
def random_choice(choice):
nonlocal move_index, random_index
if isinstance(replay._moves[move_index].random_numbers[random_index], hearthbreaker.proxies.ProxyCharacter):
result = replay._moves[move_index].random_numbers[random_index].resolve(game)
random_index += 1
return result
return _old_random_choice(choice)
def _start_turn():
nonlocal move_index, random_index
random_index = 0
_old_start_turn()
move_index += 1
def _end_turn():
nonlocal move_index, random_index
random_index = 0
_old_end_turn()
move_index += 1
def pre_game():
nonlocal move_index
_old_pre_game()
move_index = 0
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
game.pre_game = pre_game
game.__init__(replay.decks, [ReplayAgent(), ReplayAgent()])
return game
|
|
#!/usr/bin/env python2
"""
obstartup
~~~~~~~~~
A user-friendly Openbox autostart.sh configurator.
:copyright: Copyright 2010 David Gidwani.
:license: BSD style, see LICENSE
"""
import sys
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gobject
except:
sys.exit(1)
import os
__version__ = "0.0.1"
class StartupEntry(list):
"""Represents a startup (shell) command."""
def __init__(self, command, enabled=True):
super(StartupEntry, self).__init__()
self.extend([enabled, command])
@property
def command(self):
return self[1]
@property
def enabled(self):
return self[0]
@staticmethod
def from_string(string):
command = string.strip()
assert command, "Empty string"
enabled = True
if command.startswith("#"):
enabled = False
command = command[1:].strip()
if command.endswith("&"):
command = command[:-1].strip()
return StartupEntry(command, enabled)
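    # For example, from_string("nm-applet &") gives an enabled entry with
    # command "nm-applet", while from_string("# tint2 &") gives a disabled
    # entry with command "tint2".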
def to_string(self):
command = self.command
if not self.enabled:
command = "# {0}".format(command)
return "{0} &".format(command)
    def enable(self):
        # the enabled flag lives at index 0 (see the `enabled` property above)
        if not self.enabled:
            self[0] = True
        return True
    def disable(self):
        if self.enabled:
            self[0] = False
        return True
    def set_command(self, string):
        # store the bare command at index 1; to_string() re-appends the " &"
        string = string.strip()
        if string.endswith("&"):
            string = string[:-1].strip()
        self[1] = string
def __repr__(self):
return "<{0}(command='{1}', enabled={2})>".format(
self.__class__.__name__, self.command, self.enabled)
class StartupList(list):
"""A collection of commands."""
def write_to_file(self, filename="autostart.sh"):
list_with_newlines = map(lambda e: e.to_string() + "\n", self)
with open(filename, "w") as f:
f.writelines(list_with_newlines)
@staticmethod
def load_from_file(filename):
startup_list = StartupList()
with open(filename, "r") as f:
for line in f.readlines():
try:
startup_list.append(StartupEntry.from_string(line))
except AssertionError:
continue
return startup_list
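# Usage sketch (file name assumed for illustration):
#
#     entries = StartupList.load_from_file("autostart.sh")
#     entries.append(StartupEntry("nm-applet"))
#     entries.write_to_file("autostart.sh")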
class ObStartup(object):
openbox_config_directory = os.path.expanduser("~/.config/openbox")
autostart_file = os.path.join(openbox_config_directory, "autostart.sh")
def __init__(self):
self.sorted = self.unsaved = False
self.builder = gtk.Builder()
self.builder.add_from_file(os.path.join(
os.path.dirname(os.path.realpath(__file__)), "obstartup.glade"))
self.window = self.builder.get_object("main_window")
self.builder.connect_signals(self)
self.currently_opened = self.original_list = None
self.load_autostart_file()
self.window.show()
self.builder.get_object("about_dialog").set_version(__version__)
@property
def startup_list(self):
return self.builder.get_object("startup_list")
@property
def startup_list_store(self):
return self.builder.get_object("startup_list_store")
@property
def row_count(self):
return self.startup_list_store.iter_n_children(None)
def calculate_unsaved(self):
"""Expensive way of checking if the startup list is unsaved or not. """
if not hasattr(self, "original_list"): return False
startup_list = StartupList(self.iterate_items())
        self.unsaved = startup_list != self.original_list
return self.unsaved
def message(self, parent=None, flags=0, type_=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_NONE, message_format=None):
dialog = gtk.MessageDialog(parent, flags, type_,
buttons, message_format)
result = dialog.run()
dialog.destroy()
return result
def question(self, message):
result = self.message(type_=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_YES_NO, message_format=message)
return result == gtk.RESPONSE_YES
def load_autostart_file(self):
if not os.path.exists(self.autostart_file):
            if self.question("No autostart.sh found. Create one?"):
open(self.autostart_file, "w").close()
self.load_from_file(self.autostart_file)
def load_from_file(self, filename):
self.startup_list_store.clear()
        startup_list = StartupList.load_from_file(filename)
map(lambda i: self.startup_list_store.append(i), startup_list)
self.original_list = startup_list
self.currently_opened = filename
self.builder.get_object("file_save_as").set_sensitive(True)
self._set_unsaved()
return startup_list
def file_select(self, action=gtk.FILE_CHOOSER_ACTION_OPEN,
initial_path=openbox_config_directory, suggested_file="autostart.sh"):
if action == gtk.FILE_CHOOSER_ACTION_OPEN:
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK)
title = "Open"
elif action == gtk.FILE_CHOOSER_ACTION_SAVE:
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK)
title = "Save as"
else:
raise ValueError("Invalid action")
chooser = gtk.FileChooserDialog(title=title, action=action,
buttons=buttons)
sh_filter = gtk.FileFilter()
sh_filter.set_name("Shell scripts")
sh_filter.add_pattern("*.sh")
all_files_filter = gtk.FileFilter()
all_files_filter.set_name("All Files")
all_files_filter.add_pattern("*")
map(chooser.add_filter, (sh_filter, all_files_filter))
if os.path.exists(self.autostart_file):
chooser.set_filename(self.autostart_file)
elif os.path.exists(self.openbox_config_directory):
chooser.set_current_folder(self.openbox_config_directory)
result = ""
if chooser.run() == gtk.RESPONSE_OK:
result = chooser.get_filename()
chooser.destroy()
return result
def iterate_items(self):
iterator = self.startup_list_store.get_iter_root()
while iterator:
item = StartupEntry(*reversed([
self.startup_list_store.get_value(iterator, i)
for i in range(2)]))
yield item
iterator = self.startup_list_store.iter_next(iterator)
def move_selected_up(self):
selected_iter = self.startup_list.get_selection().get_selected()[1]
selected_path = self.startup_list_store.get_path(selected_iter)
position = selected_path[-1]
if position == 0: return False
previous_path = list(selected_path)[:-1]
previous_path.append(position - 1)
previous_iter = self.startup_list_store.get_iter(tuple(previous_path))
# TODO: handle GtkWarning when sorted
self.startup_list_store.swap(selected_iter, previous_iter)
self._set_unsaved()
self._check_sensitive()
def move_selected_down(self):
selected_iter = self.startup_list.get_selection().get_selected()[1]
next_iter = self.startup_list_store.iter_next(selected_iter)
# TODO: handle GtkWarning when sorted
self.startup_list_store.swap(selected_iter, next_iter)
selected_iter_path = self.startup_list_store.get_path(selected_iter)[0]
self._set_unsaved()
self._check_sensitive()
def add_item(self):
selected_iter = self.startup_list.get_selection().get_selected()[1]
if selected_iter:
path = self.startup_list_store.get_path(selected_iter)[0]
else:
path = self.row_count
self.startup_list_store.insert(path, [True, "Enter a command"])
self.startup_list.set_cursor(path, self.startup_list.get_column(1),
True)
if self.row_count > 1: return
self._set_unsaved()
self._check_sensitive()
def remove_selected_item(self):
selected_iter = self.startup_list.get_selection().get_selected()[1]
if not selected_iter: return
self.startup_list_store.remove(selected_iter)
self._set_unsaved()
self._check_sensitive()
def save(self, show_dialog=False):
if not self.calculate_unsaved():
self.message(message_format="Data is identical. Skipping save.",
buttons=gtk.BUTTONS_OK)
return
if self.sorted and not self.question("You have sorted one or more "\
"columns which is why you will be unable to move any " \
"entries up or down. File contents will be in the same " \
"order as currently sorted. Are you sure you want to save?"):
return
if self.currently_opened and not show_dialog:
filename = self.currently_opened
else:
filename = self.file_select(action=gtk.FILE_CHOOSER_ACTION_SAVE)
if not filename: return
startup_list = StartupList(self.iterate_items())
startup_list.write_to_file(filename)
self._set_unsaved(False)
self.original_list = startup_list
def save_and_quit(self):
if (not self.calculate_unsaved() or self.unsaved and self.question(
"Startup list has been modified but not saved. Really quit?")):
gtk.main_quit()
return True
def _check_sensitive(self, row=None):
if not row:
selected = self.startup_list.get_selection().get_selected_rows()[1]
for item in ("remove_button", "edit_remove"):
self.builder.get_object(item).set_sensitive(
selected and True or False)
if not selected or self.sorted:
for item in ("edit_up", "up_button", "edit_down",
"down_button"):
self.builder.get_object(item).set_sensitive(False)
return
row = selected[0][0]
for item in ("up_button", "edit_up"):
self.builder.get_object(item).set_sensitive(
True if row > 0 else False)
for item in ("down_button", "edit_down"):
self.builder.get_object(item).set_sensitive(
True if row < self.row_count - 1 else False)
if self.row_count <= 1:
for item in ("file_save", "file_save_as", "edit_up", "edit_down",
"up_button", "down_button"):
self.builder.get_object(item).set_sensitive(False)
self.builder.get_object("file_save").set_sensitive(
True if self.unsaved else False)
def _set_unsaved(self, value=True):
if not hasattr(self, "unsaved"): return
self.unsaved = value
self.builder.get_object("file_save").set_sensitive(value)
def on_file_open_activate(self, widget):
filename = self.file_select(action=gtk.FILE_CHOOSER_ACTION_OPEN)
if filename:
self.load_from_file(filename)
def on_file_save_activate(self, widget):
self.save()
def on_file_save_as_activate(self, widget):
self.save(show_dialog=True)
def on_file_quit_activate(self, widget):
self.save_and_quit()
def on_edit_down_activate(self, widget):
self.move_selected_down()
def on_edit_up_activate(self, widget):
self.move_selected_up()
def on_edit_add_activate(self, widget):
self.add_item()
def on_edit_remove_activate(self, widget):
self.remove_selected_item()
def on_help_about_activate(self, widget):
about_dialog = self.builder.get_object("about_dialog")
about_dialog.show()
def on_enabled_cell_toggled(self, widget, path, *user_data):
iterator = self.startup_list_store.get_iter_from_string(path)
enabled = not self.startup_list_store.get_value(iterator, 0)
self.startup_list_store.set(iterator, 0, enabled)
self._set_unsaved()
def on_command_cell_edited(self, widget, path, new_text, *user_data):
iterator = self.startup_list_store.get_iter_from_string(path)
command = self.startup_list_store.get_value(iterator, 1)
if command != new_text and new_text != "":
self.startup_list_store.set(iterator, 1, new_text)
self._set_unsaved()
def on_startup_list_cursor_changed(self, widget, *user_data):
self._check_sensitive()
def on_startup_list_store_row_inserted(self, model, path, iterator,
*user_data):
self._set_unsaved()
def on_startup_list_store_row_deleted(self, model, path, *user_data):
self._set_unsaved()
def on_enabled_col_clicked(self, column, *user_data):
# TODO: permanent fix for resorting columns for saving and further
# reordering via Up/Down buttons or menu items.
self.sorted = True
def on_add_button_clicked(self, widget):
self.add_item()
def on_remove_button_clicked(self, widget):
self.remove_selected_item()
def on_down_button_clicked(self, widget):
self.move_selected_down()
def on_up_button_clicked(self, widget):
self.move_selected_up()
def on_save_button_clicked(self, widget):
self.save()
def on_main_window_delete_event(self, widget, event, *user_data):
return self.save_and_quit()
def on_main_window_destroy(self, widget):
self.save_and_quit()
def on_about_dialog_response(self, widget, response_id):
if response_id in (gtk.RESPONSE_CANCEL, gtk.RESPONSE_CLOSE,
gtk.RESPONSE_DELETE_EVENT):
widget.hide()
def on_about_dialog_delete_event(self, widget, event, *user_data):
widget.hide()
return True
if __name__ == "__main__":
ObStartup()
gtk.main()
|
|
"""
This file contains the automatic test generation logic that generates test cases
with exactly one basic attribute containing a long string. It is used to test
the overflow resistant capability of the other end. The main criteria for
passing this set of test cases is that the other end does not crash. The minor
criteria is that the other end aborts the connection.
@author: Calvin Jia Liang
Created on Oct 11, 2014
"""
from src.TestGroups import *
class TestOverflow:
NAME_TABLE = ["SignatureAlgorithm", "SubjectAltName",
"BasicConstraint", "KeyUsage",
"ExtendedKeyUsage", "IssuerC", "IssuerST",
"IssuerL", "IssuerO", "IssuerOU", "IssuerCN",
"IssuerEmail", "SubjectC", "SubjectST",
"SubjectL", "SubjectO", "SubjectOU", "SubjectCN",
"SubjectEmail"]
NAME_TABLE_EXT = ["LongChain", "LongExtension", "LongOID"]
"""
TestOverflow constructor
:param fqdn: fully quantifiable domain name
:type fqdn: string
:param info: other information for the test session
:type info: Information object
:param length: byte length of the overflow filler
:type length: integer
:param validCA: asssert if the CA of this test set is valid
:type validCA: boolean
:returns: TestOverflow object
"""
def __init__(self, fqdn, info, overflowLen=DEFAULT_OVERFLOW_LENGTH):
self.fqdn = fqdn
self.info = copy.copy(info)
self.info.metadata = None
self.overflowLen = overflowLen
self.validCA = OVERFLOW_VALID_CA
self.step = 0
self.filler = None
self.cases = []
"""
Build a list of overflow test cases based on basic attributes of the
    certificate; they are mostly negative test cases that have exactly one
    attribute containing a long string
:returns: TestOverflow object
"""
def build(self):
baseCase = self.newSubstrate("TestOverflowBaseCase")
baseCase.getServCert().modifier.hasPreSign = False
baseCase.testBuild(replace=True)
cert = baseCase.getServCert().getCert()
substrate = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
cert = decoder.decode(substrate, asn1Spec=rfc2459.Certificate())[0]
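        # Count the primitive attributes of a freshly built certificate; the count
        # must match NAME_TABLE so that every generated case gets a descriptive name.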
cnt = self.countBasicAttr(cert)
if (cnt != len(TestOverflow.NAME_TABLE)):
raise Exception("Attribute count and name table length mismatch")
tempCases = []
for i in range(cnt):
tempCases.append(self.newSubstrate(self.getName(i)))
tempCases.append(self.getLongChain())
tempCases.append(self.getLongExtension())
# tempCases.append(self.getLongAttribute())
tempCases.append(self.getLongOID())
for c in tempCases:
if (not self.validCA):
c.getFirstCA().selfSign()
self.cases.append(c)
return self
def getName(self, idx):
return "Long" + TestOverflow.NAME_TABLE[idx]
"""
Get a new test case substrate
:param name: name of the test case
:type name: string
:returns: Certificate object
"""
def newSubstrate(self, name):
metadata = TestMetadata(name, "", None, None, False, False,
overflow=True)
substrate = TestCaseChained(self.fqdn, metadata, self.info, 2)
substrate.includeAltName()
substrate.getServCert().subject.commonName = self.fqdn
substrate.getServCert().addExtension(BasicConstraint(False))
substrate.getServCert().addExtension(KeyUsage(keyEncipherment=True))
substrate.getServCert().addExtension(ExtendedKeyUsage(serverAuth=True))
substrate.getServCert().modifier.hasPreSign = True
substrate.getServCert().modifier.preSign = self.preSignSubstrate
return substrate
"""
Callback function to be executed before signature
:param cert: certificate to be altered in asn1 format
:type cert: pyasn1 object
:returns: pyasn1 object
"""
def preSignSubstrate(self, cert):
parent, idx = self.getState(cert, queue.Queue(), 0)
comp = parent.getComponentByPosition(idx)
if (comp._value == b'\x05\x00'):
comp._value = b'\x07\x01'
string = self.getFiller(self.overflowLen)
comp._value = comp._value[0:1] + string
self.step += 1
return cert
"""
Get a long chained test case
:returns: Certificate object
"""
def getLongChain(self):
name = TestOverflow.NAME_TABLE_EXT[0]
metadata = TestMetadata(name, "", None, None, False, False,
overflow=True)
substrate = TestCaseChained(self.fqdn, metadata, self.info,
OVERFLOW_CHAIN_LEN)
substrate.includeAltName()
return substrate
"""
Get a long extension test case
:returns: Certificate object
"""
def getLongExtension(self):
name = TestOverflow.NAME_TABLE_EXT[1]
metadata = TestMetadata(name, "", None, None, False, False,
overflow=True)
substrate = TestCaseChained(self.fqdn, metadata, self.info, 2)
substrate.includeAltName(critical=False)
base = substrate.getServCert().extensions[0]
for _ in range(OVERFLOW_EXT_LEN):
substrate.getServCert().extensions.append(base)
return substrate
# """
# Get a long attribute test case
# :returns: Certificate object
# """
#
# def getLongAttribute(self):
# name = "Overflow_LongAttribute"
# metadata = TestMetadata(name, "", None, None, False, False,
# overflow=True)
#
# substrate = TestCaseChained(self.fqdn, metadata, self.info, 2)
# substrate.includeAltName()
# substrate.getServCert().subject.commonName = self.fqdn
#
# substrate.getServCert().modifier.hasPreSign = True
# substrate.getServCert().modifier.preSign = self.preSignAttribute
#
# return substrate
#
# """
# Callback function to be executed before signature
# :param cert: certificate to be altered in asn1 format
# :type cert: pyasn1 object
# :returns: pyasn1 object
# """
#
# def preSignAttribute(self, cert):
# comp = cert.getComponentByPosition(0).getComponentByName('extensions').\
# getComponentByPosition(0).getComponentByName('extnValue')
# string = self.getFiller(self.overflowLen*OVERFLOW_MEGA_MUL)
# comp._value = comp._value[0:1] + string
#
# return cert
"""
    Get a long OID test case
:returns: Certificate object
"""
def getLongOID(self):
name = TestOverflow.NAME_TABLE_EXT[2]
metadata = TestMetadata(name, "", None, None, False, False,
overflow=True)
substrate = TestCaseChained(self.fqdn, metadata, self.info, 2)
substrate.includeAltName(critical=False)
substrate.getServCert().modifier.hasPreSign = True
substrate.getServCert().modifier.preSign = self.preSignOID
return substrate
"""
Callback function to be executed before signature
:param cert: certificate to be altered in asn1 format
:type cert: pyasn1 object
:returns: pyasn1 object
"""
def preSignOID(self, cert):
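        # Repeat NONSTANDARD_OID enough times to form an oversized dotted OID and
        # substitute it for the first extension's extnID before the cert is signed.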
oid = ((NONSTANDARD_OID + '.') * OVERFLOW_OID_MUL)[:-1]
(cert
.getComponentByPosition(0)
.getComponentByName('extensions')
.getComponentByPosition(0)
.setComponentByName('extnID',
rfc2459.univ.ObjectIdentifier(oid)))
return cert
def getFiller(self, size):
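        # Build a DER-style long-form length prefix: bLen is the number of bytes
        # needed to encode `size`, 0x80 + bLen marks the long form, followed by
        # `size` in big-endian and then `size` filler bytes of 'a'.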
bLen = math.ceil((math.log(size+1)/math.log(2))/8)
filler = bytes([128+bLen]) + \
size.to_bytes(bLen, 'big') + b'a'*size
return filler
"""
    Get the number of basic attributes in the certificate
:param cert: certificate to be counted
:type cert: pyasn1 object
:returns: integer
"""
def countBasicAttr(self, cert):
cnt = 0
self.step = 0
while (True):
parent, idx = self.getState(cert, queue.Queue(), 0)
if (parent is None and idx is None):
break
self.step += 1
cnt += 1
self.step = 0
return cnt-2 # exclude attr that only exist after signature
"""
Get the component and index of the certificate to be altered
:param cert: certificate to be altered
:type cert: pyasn1 object
:param q: current queue
:type q: Queue object
:param s: current step
:type s: integer
:returns: asn1 object, integer
"""
def getState(self, cert, q, s):
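        # Breadth-first walk over the certificate's ASN.1 tree: returns the parent
        # component and child index of the self.step-th primitive (hex-printable)
        # leaf, or (None, None) once the queue is exhausted.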
if (q.empty()):
q.put((cert, None, None))
basic = False
comp, parent, idx = q.get()
if (comp.prettyPrint()[0:2] == '0x'):
basic = True
if (hasattr(comp, 'getComponentByPosition')):
for i in range(len(comp)):
sub = comp.getComponentByPosition(i)
if (sub):
q.put((sub, comp, i))
if (not basic or s != self.step):
if (q.empty()):
parent = idx = None
elif (not basic):
parent, idx = self.getState(cert, q, s)
elif (s != self.step):
parent, idx = self.getState(cert, q, s+1)
return parent, idx
def getAllNames(self, tbl, exclude):
for i in range(len(TestOverflow.NAME_TABLE)):
name = self.getName(i)
if (name not in exclude):
tbl[name] = name
for name in TestOverflow.NAME_TABLE_EXT:
if (name not in exclude):
tbl[name] = name
return tbl
"""
Get the list of test cases created from this object
:returns: list of TestCase object
"""
def getTestCases(self):
return self.cases
|
|
"""
Copyright 2017 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import json
import logging
from os import environ
from pathlib import Path
from shutil import rmtree
from tempfile import gettempdir
import pytest
from indy import wallet, pool, signus, ledger
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("indy").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture
def seed_trustee1():
logger = logging.getLogger(__name__)
logger.debug("seed_trustee1: >>>")
res = "000000000000000000000000Trustee1"
logger.debug("seed_trustee1: <<< res: %r", res)
return res
@pytest.fixture
def seed_steward1():
logger = logging.getLogger(__name__)
logger.debug("seed_trustee1: >>>")
res = "000000000000000000000000Steward1"
logger.debug("seed_trustee1: <<< res: %r", res)
return res
@pytest.fixture
def seed_my1():
logger = logging.getLogger(__name__)
logger.debug("seed_my1: >>>")
res = "00000000000000000000000000000My1"
logger.debug("seed_my1: <<< res: %r", res)
return res
@pytest.fixture
def endpoint():
return "127.0.0.1:9700"
@pytest.fixture
def path_temp():
logger = logging.getLogger(__name__)
logger.debug("path_temp: >>>")
path = Path(gettempdir()).joinpath("indy_client")
if path.exists():
logger.debug("path_temp: Cleanup tmp path: %s", path)
# rmtree(str(path))
logger.debug("path_temp: yield: %r", path)
yield path
if path.exists():
logger.debug("path_temp: Cleanup tmp path: %s", path)
# rmtree(str(path))
logger.debug("path_temp: <<<")
@pytest.fixture
def path_home() -> Path:
logger = logging.getLogger(__name__)
logger.debug("path_home: >>>")
path = Path.home().joinpath(".indy_client")
if path.exists():
logger.debug("path_home: Cleanup home path: %r", path)
# rmtree(str(path))
logger.debug("path_home: yield: %r", path)
yield path
if path.exists():
logger.debug("path_home: Cleanup home path: %r", path)
# rmtree(str(path))
logger.debug("path_home: <<<")
@pytest.fixture
def wallet_name():
logger = logging.getLogger(__name__)
logger.debug("wallet_name: >>>")
res = "wallet1"
logger.debug("wallet_name: <<< res: %r", res)
return res
@pytest.fixture
def wallet_type():
logger = logging.getLogger(__name__)
logger.debug("wallet_type: >>>")
res = "default"
logger.debug("wallet_type: <<< res: %r", res)
return res
@pytest.fixture
def wallet_config():
logger = logging.getLogger(__name__)
logger.debug("wallet_config: >>>")
res = None
logger.debug("wallet_config: <<< res: %r", res)
return res
@pytest.fixture
def xwallet_cleanup():
logger = logging.getLogger(__name__)
logger.debug("wallet_cleanup: >>>")
res = True
logger.debug("wallet_cleanup: <<< res: %r", res)
return res
# noinspection PyUnusedLocal
@pytest.fixture
def xwallet(event_loop, pool_name, wallet_name, wallet_type, xwallet_cleanup, path_home):
logger = logging.getLogger(__name__)
logger.debug("xwallet: >>> pool_name: %r, wallet_type: %r, xwallet_cleanup: %r, path_home: %r",
pool_name,
wallet_type,
xwallet,
path_home)
logger.debug("xwallet: Creating wallet")
event_loop.run_until_complete(wallet.create_wallet(pool_name, wallet_name, wallet_type, None, None))
logger.debug("xwallet: yield")
yield
logger.debug("xwallet: Deleting wallet")
event_loop.run_until_complete(wallet.delete_wallet(wallet_name, None)) if xwallet_cleanup else None
logger.debug("xwallet: <<<")
@pytest.fixture
def wallet_runtime_config():
logger = logging.getLogger(__name__)
logger.debug("wallet_runtime_config: >>>")
res = None
logger.debug("wallet_runtime_config: <<< res: %r", res)
return res
@pytest.fixture
def wallet_handle_cleanup():
logger = logging.getLogger(__name__)
logger.debug("wallet_handle_cleanup: >>>")
res = True
logger.debug("wallet_handle_cleanup: <<< res: %r", res)
return res
@pytest.fixture
def wallet_handle(event_loop, wallet_name, xwallet, wallet_runtime_config, wallet_handle_cleanup):
logger = logging.getLogger(__name__)
logger.debug(
"wallet_handle: >>> wallet_name: %r, xwallet: %r, wallet_runtime_config: %r, wallet_handle_cleanup: %r",
wallet_name,
xwallet,
wallet_runtime_config,
wallet_handle_cleanup)
logger.debug("wallet_handle: Opening wallet")
wallet_handle = event_loop.run_until_complete(wallet.open_wallet(wallet_name, wallet_runtime_config, None))
assert type(wallet_handle) is int
logger.debug("wallet_handle: yield %r", wallet_handle)
yield wallet_handle
logger.debug("wallet_handle: Closing wallet")
event_loop.run_until_complete(wallet.close_wallet(wallet_handle)) if wallet_handle_cleanup else None
logger.debug("wallet_handle: <<<")
@pytest.fixture
def pool_name():
logger = logging.getLogger(__name__)
logger.debug("pool_name: >>>")
res = "pool1"
logger.debug("pool_name: <<< res: %r", res)
return res
@pytest.fixture
def pool_ip():
logger = logging.getLogger(__name__)
logger.debug("pool_ip: >>>")
res = environ.get("TEST_POOL_IP", "127.0.0.1")
logger.debug("pool_ip: <<< res: %r", res)
return res
@pytest.fixture
def pool_genesis_txn_count():
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_count: >>>")
res = 4
logger.debug("pool_genesis_txn_count: <<< res: %r", res)
return res
@pytest.fixture
def pool_genesis_txn_data(pool_genesis_txn_count, pool_ip):
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_data: >>> pool_genesis_txn_count: %r, pool_ip: %r",
pool_genesis_txn_count,
pool_ip)
assert 0 < pool_genesis_txn_count <= 4
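    # The node entries below use doubled braces so that str.format only fills in
    # the client_ip/node_ip placeholders and leaves the JSON braces intact.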
res = "\n".join([
'{{"data":{{"alias":"Node1","blskey":"4N8aUNHSgjQVgkpm8nhNEfDf6txHznoYREg9kirmJrkivgL4oSEimFF6nsQ6M41QvhM2Z33nves5vfSn9n1UwNFJBYtWVnHYMATn76vLuL3zU88KyeAYcHfsih3He6UHcXDxcaecHVz6jhCYz1P2UZn2bDVruL5wXpehgBfBaLKm3Ba","client_ip":"{}","client_port":9702,"node_ip":"{}","node_port":9701,"services":["VALIDATOR"]}},"dest":"Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv","identifier":"Th7MpTaRZVRYnPiabds81Y","txnId":"fea82e10e894419fe2bea7d96296a6d46f50f93f9eeda954ec461b2ed2950b62","type":"0"}}'.format(
pool_ip, pool_ip),
'{{"data":{{"alias":"Node2","blskey":"37rAPpXVoxzKhz7d9gkUe52XuXryuLXoM6P6LbWDB7LSbG62Lsb33sfG7zqS8TK1MXwuCHj1FKNzVpsnafmqLG1vXN88rt38mNFs9TENzm4QHdBzsvCuoBnPH7rpYYDo9DZNJePaDvRvqJKByCabubJz3XXKbEeshzpz4Ma5QYpJqjk","client_ip":"{}","client_port":9704,"node_ip":"{}","node_port":9703,"services":["VALIDATOR"]}},"dest":"8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb","identifier":"EbP4aYNeTHL6q385GuVpRV","txnId":"1ac8aece2a18ced660fef8694b61aac3af08ba875ce3026a160acbc3a3af35fc","type":"0"}}'.format(
pool_ip, pool_ip),
'{{"data":{{"alias":"Node3","blskey":"3WFpdbg7C5cnLYZwFZevJqhubkFALBfCBBok15GdrKMUhUjGsk3jV6QKj6MZgEubF7oqCafxNdkm7eswgA4sdKTRc82tLGzZBd6vNqU8dupzup6uYUf32KTHTPQbuUM8Yk4QFXjEf2Usu2TJcNkdgpyeUSX42u5LqdDDpNSWUK5deC5","client_ip":"{}","client_port":9706,"node_ip":"{}","node_port":9705,"services":["VALIDATOR"]}},"dest":"DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya","identifier":"4cU41vWW82ArfxJxHkzXPG","txnId":"7e9f355dffa78ed24668f0e0e369fd8c224076571c51e2ea8be5f26479edebe4","type":"0"}}'.format(
pool_ip, pool_ip),
'{{"data":{{"alias":"Node4","blskey":"2zN3bHM1m4rLz54MJHYSwvqzPchYp8jkHswveCLAEJVcX6Mm1wHQD1SkPYMzUDTZvWvhuE6VNAkK3KxVeEmsanSmvjVkReDeBEMxeDaayjcZjFGPydyey1qxBHmTvAnBKoPydvuTAqx5f7YNNRAdeLmUi99gERUU7TD8KfAa6MpQ9bw","client_ip":"{}","client_port":9708,"node_ip":"{}","node_port":9707,"services":["VALIDATOR"]}},"dest":"4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA","identifier":"TWwCRQRZ2ZHMJFn9TzLp7W","txnId":"aa5e817d7cc626170eca175822029339a444eb0ee8f0bd20d3b0b76e566fb008","type":"0"}}'.format(
pool_ip, pool_ip)
][0:pool_genesis_txn_count])
logger.debug("pool_genesis_txn_data: <<< res: %r", res)
return res
@pytest.fixture
def pool_genesis_txn_path(pool_name, path_temp):
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_path: >>> pool_name: %r",
pool_name)
res = path_temp.joinpath("{}.txn".format(pool_name))
logger.debug("pool_genesis_txn_path: <<< res: %r", res)
return res
# noinspection PyUnusedLocal
@pytest.fixture
def pool_genesis_txn_file(pool_genesis_txn_path, pool_genesis_txn_data):
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_file: >>> pool_genesis_txn_path: %r, pool_genesis_txn_data: %r",
pool_genesis_txn_path,
pool_genesis_txn_data)
pool_genesis_txn_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(pool_genesis_txn_path), "w+") as f:
f.writelines(pool_genesis_txn_data)
logger.debug("pool_genesis_txn_file: <<<")
@pytest.fixture
def pool_ledger_config_cleanup():
return True
# noinspection PyUnusedLocal
@pytest.fixture
def pool_ledger_config(event_loop, pool_name, pool_genesis_txn_path, pool_genesis_txn_file,
pool_ledger_config_cleanup, path_home):
logger = logging.getLogger(__name__)
logger.debug("pool_ledger_config: >>> pool_name: %r, pool_genesis_txn_path: %r, pool_genesis_txn_file: %r,"
" pool_ledger_config_cleanup: %r, path_home: %r",
pool_name,
pool_genesis_txn_path,
pool_genesis_txn_file,
pool_ledger_config_cleanup,
path_home)
logger.debug("pool_ledger_config: Creating pool ledger config")
event_loop.run_until_complete(pool.create_pool_ledger_config(
pool_name,
json.dumps({
"genesis_txn": str(pool_genesis_txn_path)
})))
logger.debug("pool_ledger_config: yield")
yield
logger.debug("pool_ledger_config: Deleting pool ledger config")
event_loop.run_until_complete(pool.delete_pool_ledger_config(pool_name)) if pool_ledger_config_cleanup else None
logger.debug("pool_ledger_config: <<<")
@pytest.fixture
def pool_handle_cleanup():
logger = logging.getLogger(__name__)
logger.debug("pool_handle_cleanup: >>>")
res = True
logger.debug("pool_handle_cleanup: <<< res: %r", res)
return res
@pytest.fixture
def pool_config():
logger = logging.getLogger(__name__)
logger.debug("pool_config: >>>")
res = None
logger.debug("pool_config: <<< res: %r", res)
return res
# noinspection PyUnusedLocal
@pytest.fixture
def pool_handle(event_loop, pool_name, pool_ledger_config, pool_config, pool_handle_cleanup):
logger = logging.getLogger(__name__)
logger.debug("pool_handle: >>> pool_name: %r, pool_ledger_config: %r, pool_config: %r, pool_handle_cleanup: %r",
pool_name,
pool_ledger_config,
pool_config,
pool_handle_cleanup)
logger.debug("pool_handle: Opening pool ledger")
pool_handle = event_loop.run_until_complete(pool.open_pool_ledger(pool_name, pool_config))
assert type(pool_handle) is int
logger.debug("pool_handle: yield: %r", pool_handle)
yield pool_handle
logger.debug("pool_handle: Closing pool ledger")
event_loop.run_until_complete(pool.close_pool_ledger(pool_handle)) if pool_handle_cleanup else None
logger.debug("pool_handle: <<<")
@pytest.fixture
async def identity_trustee1(wallet_handle, seed_trustee1):
(trustee_did, trustee_verkey, _) = await signus.create_and_store_my_did(wallet_handle,
json.dumps({"seed": seed_trustee1}))
return (trustee_did, trustee_verkey)
@pytest.fixture
async def identity_steward1(wallet_handle, seed_steward1):
(steward_did, steward_verkey, _) = await signus.create_and_store_my_did(wallet_handle,
json.dumps({"seed": seed_steward1}))
return (steward_did, steward_verkey)
@pytest.fixture
async def identity_my1(wallet_handle, pool_handle, identity_trustee1, seed_my1, ):
(trustee_did, trustee_verkey) = identity_trustee1
(my_did, my_verkey, _) = await signus.create_and_store_my_did(wallet_handle,
json.dumps({"seed": seed_my1}))
nym_request = await ledger.build_nym_request(trustee_did, my_did, my_verkey, None, None)
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did, nym_request)
return (my_did, my_verkey)
|
|
import os
import time
import datetime as dt
import numpy as np
from netCDF4 import Dataset
from scipy.interpolate import interp1d
import scipy.ndimage as ndimage
from utils.c_wrapper import cvort, cvort4
from utils.utils import cfind_extrema, upscale_field
from load_settings import settings
import setup_logging
C20_DATA_DIR = os.path.join(settings.DATA_DIR, 'c20_full')
EARTH_RADIUS = 6371000
EARTH_CIRC = EARTH_RADIUS * 2 * np.pi
NUM_ENSEMBLE_MEMBERS = 56
log = setup_logging.get_logger('st.find_vortmax')
class C20Data(object):
'''Class used for accessing data from C20 Reanalysis project.
This acts as a wrapper around netCDF4.Datasets and makes it easy to view data.
Typically it exposes the prmsl and vort850/vort9950 fields for all ensemble members.
It will load these fields, along with corresponding maxima (vorticity) and minima (pressure)
each time a new date is set.
:param year: Year from which to take data
:param fields: List of C20 fields that are to be loaded, or use 'all' for complete set
:param version: Version of C20 data to use
'''
def __init__(self, year, fields='all', version=settings.C20_VERSION):
self._year = year
self.dx = None
self.date = None
self.version = version
log.info('C20Data: year={}, version={}'.format(year, version))
if fields == 'all':
# rh995 has been removed.
self.fields = ['u9950', 'v9950', 'u850', 'v850', 'prmsl', 't9950', 't850', 'cape', 'pwat']
else:
self.fields = fields
if 'u9950' in self.fields and 'v9950' in self.fields:
self.calc_9950_vorticity = True
else:
self.calc_9950_vorticity = False
if 'u850' in self.fields and 'v850' in self.fields:
self.calc_850_vorticity = True
else:
self.calc_850_vorticity = False
fields = ', '.join(self.fields)
log.info('Using: {}'.format(fields))
self._load_datasets(self._year)
def set_year(self, year):
'''Sets a year and loads the relevant dataset'''
self._year = year
self.close_datasets()
self._load_datasets(self._year)
def close_datasets(self):
'''Closes all open datasets'''
for dataset in self.nc_datasets.values():
dataset.close()
def _load_datasets(self, year):
'''Loads datasets for a given year
Just sets up the NetCDF4 objects, doesn't actually load any data apart from
lons/lats and dates.
'''
# All datasets have lon/lat/time info in them, so any will do.
any_dataset = None
dataset_fieldname = None
self.nc_datasets = {}
for field in self.fields:
# e.g. ~/stormtracks_data/data/c20_full/2005/prmsl_2005.nc
path = os.path.join(C20_DATA_DIR, self.version, str(year), '{}_{}.nc'.format(field, year))
if not os.path.exists(path):
msg = 'File does not exist: {}'.format(path)
log.error(msg)
raise RuntimeError(msg)
log.debug('Loading {} from {}'.format(field, path))
dataset = Dataset(path)
dataset_fieldname = field
any_dataset = dataset
self.nc_datasets[field] = dataset
start_date = dt.datetime(1, 1, 1)
hours_since_JC = any_dataset.variables['time'][:]
self.number_enseble_members = any_dataset.variables[dataset_fieldname].shape[1]
self.lons = any_dataset.variables['lon'][:]
self.lats = any_dataset.variables['lat'][:]
self.dates = np.array([start_date + dt.timedelta(hs / 24.) -
dt.timedelta(2) for hs in hours_since_JC])
dlon = self.lons[2] - self.lons[0]
# N.B. array as dx varies with lat.
        # lons, lats are in degrees.
self.dx = (dlon * np.cos(self.lats * np.pi / 180) * EARTH_CIRC) / 360.
self.dy = (self.lats[0] - self.lats[2]) * EARTH_CIRC / 360.
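        # dx/dy are centred-difference grid spacings (two grid cells) in metres,
        # used later by the finite-difference vorticity calculations.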
# Interpolation functions.
self.f_lon = interp1d(np.arange(0, 180), self.lons)
self.f_lat = interp1d(np.arange(0, 91), self.lats)
self.first_date()
def first_date(self):
'''Sets date to the first date of the year (i.e. Jan the 1st)'''
return self.set_date(self.dates[0])
def next_date(self):
'''Moves date on by one timestep (6hr)'''
index = np.where(self.dates == self.date)[0][0]
        if index < len(self.dates) - 1:
date = self.dates[index + 1]
return self.set_date(date)
else:
log.warn('Trying to set date beyond date range')
return None
def prev_date(self):
'''Moves date back by one timestep (6hr)'''
index = np.where(self.dates == self.date)[0][0]
if index > 0:
date = self.dates[index - 1]
return self.set_date(date)
else:
log.warn('Trying to set date beyond date range')
return None
def set_date(self, date):
'''Sets date and loads all data for that date
Will have no effect if there is no difference in date.
:param date: date to load
:returns: date if successful, otherwise None
'''
if date != self.date:
try:
log.debug("Setting date to {0}".format(date))
index = np.where(self.dates == date)[0][0]
self.date = date
self._process_ensemble_data(index)
except:
self.date = None
log.exception('Problem loading date {}'.format(date))
raise
return date
def _cvorticity(self, u, v):
'''Calculates the (2nd order) vorticity by calling into a c function'''
vort = np.zeros_like(u)
cvort(u, v, u.shape[0], u.shape[1], self.dx, self.dy, vort)
return vort
def _cvorticity4(self, u, v):
'''Calculates the (4th order) vorticity by calling into a c function
Algorithm was taken from Walsh's code'''
vort = np.zeros_like(u)
cvort4(u, v, u.shape[0], u.shape[1], self.dx, self.dy, vort)
return vort
def _process_ensemble_data(self, index):
'''
        Processes data for all ensemble members at one timestep
Loads the relevant data and then performs a variety of calculations on it.
At a minimum, prmsl, vort and vort4 will be calculated for the current date, as well
as their maxima/minima as appropriate. Additionally (depending on how class is configured),
smoothed_vort and up_vort (upscaled_vorticity) can be calculated.
Rough times for each step are recorded.
:param index: index of timestep in C20 data
'''
start = time.time()
self._load_ensemble_data(index)
end = time.time()
fields = ', '.join(self.fields)
log.debug(' Loaded {0} in {1}'.format(fields, end - start))
if self.calc_9950_vorticity:
start = time.time()
self._calculate_vorticities('9950')
end = time.time()
log.debug(' Calculated 9950 vorticity in {0}'.format(end - start))
if self.calc_850_vorticity:
start = time.time()
self._calculate_vorticities('850')
end = time.time()
log.debug(' Calculated 850 vorticity in {0}'.format(end - start))
start = time.time()
self._find_min_max_from_fields()
end = time.time()
log.debug(' Found maxima/minima in {0}'.format(end - start))
def _load_ensemble_data(self, index):
'''Loads the raw data from the NetCDF4 files'''
# N.B. it is very important how the data is loaded. The data is stored in NetCDF4 files,
# which in turn uses HDF5 as a storage medium. HDF5 allows for compression of particular
# subsets of data ('chunks'). If you access the data in terms of these chunks, it will be
# **much** faster, which is why all data for one date is loaded at a time, i.e. 56x91x180
# cells, or num_ensemble_members x lat x lon.
# This can be seen by looking at e.g. c20data.prmsl.shape, which will be (56, 91, 180).
for field in self.fields:
if field in ['u9950', 'u850', 'u250']:
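                # N.B. the u components are stored negated here; this is assumed to
                # match the sign convention expected by the vorticity calculation.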
setattr(self, field, - self.nc_datasets[field].variables[field][index])
else:
setattr(self, field, self.nc_datasets[field].variables[field][index])
def _calculate_vorticities(self, pressure_level):
'''Calculates vort (2nd order) and vort4 (4th order)
Uses c functions for speed.'''
vort = []
# self.vort4 = []
if pressure_level == '9950':
for em in range(NUM_ENSEMBLE_MEMBERS):
vort.append(self._cvorticity(self.u9950[em], self.v9950[em]))
# vort4.append(self._cvorticity4(self.u[em], self.v[em]))
elif pressure_level == '850':
for em in range(NUM_ENSEMBLE_MEMBERS):
vort.append(self._cvorticity(self.u850[em], self.v850[em]))
# vort4.append(self._cvorticity4(self.u[em], self.v[em]))
setattr(self, 'vort{}'.format(pressure_level), vort)
def _find_min_max_from_fields(self):
'''Finds the minima (prmsl) and maxima (vort/vort4)'''
if 'prmsl' in self.fields:
self.pmins, self.pmaxs = [], []
for ensemble_member in range(NUM_ENSEMBLE_MEMBERS):
e, index_pmaxs, index_pmins = cfind_extrema(self.prmsl[ensemble_member])
self.pmins.append([(self.prmsl[ensemble_member][pmin[0], pmin[1]], (self.lons[pmin[1]], self.lats[pmin[0]]))
for pmin in index_pmins])
if 'u9950' in self.fields and 'v9950' in self.fields:
self.vmaxs9950 = []
for ensemble_member in range(NUM_ENSEMBLE_MEMBERS):
e, index_vmaxs, index_vmins = cfind_extrema(self.vort9950[ensemble_member])
self.vmaxs9950.append([
(self.vort9950[ensemble_member][vmax[0], vmax[1]], (self.lons[vmax[1]], self.lats[vmax[0]]))
for vmax in index_vmaxs])
if 'u850' in self.fields and 'v850' in self.fields:
self.vmaxs850 = []
for ensemble_member in range(NUM_ENSEMBLE_MEMBERS):
e, index_vmaxs, index_vmins = cfind_extrema(self.vort850[ensemble_member])
self.vmaxs850.append([
(self.vort850[ensemble_member][vmax[0], vmax[1]], (self.lons[vmax[1]], self.lats[vmax[0]]))
for vmax in index_vmaxs])
|
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''Non-relativistic RKS analytical nuclear gradients'''
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.dft import numint, radi, gen_grid
from pyscf import __config__
def get_veff(ks_grad, mol=None, dm=None):
'''
First order derivative of DFT effective potential matrix (wrt electron coordinates)
Args:
        ks_grad : grad.rhf.Gradients or grad.rks.Gradients object
'''
if mol is None: mol = ks_grad.mol
if dm is None: dm = ks_grad.base.make_rdm1()
t0 = (logger.process_clock(), logger.perf_counter())
mf = ks_grad.base
ni = mf._numint
if ks_grad.grids is not None:
grids = ks_grad.grids
else:
grids = mf.grids
if grids.coords is None:
grids.build(with_non0tab=True)
if mf.nlc != '':
raise NotImplementedError
#enabling range-separated hybrids
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
mem_now = lib.current_memory()[0]
max_memory = max(2000, ks_grad.max_memory*.9-mem_now)
if ks_grad.grid_response:
exc, vxc = get_vxc_full_response(ni, mol, grids, mf.xc, dm,
max_memory=max_memory,
verbose=ks_grad.verbose)
logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0))
else:
exc, vxc = get_vxc(ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
t0 = logger.timer(ks_grad, 'vxc', *t0)
if abs(hyb) < 1e-10 and abs(alpha) < 1e-10:
vj = ks_grad.get_j(mol, dm)
vxc += vj
else:
vj, vk = ks_grad.get_jk(mol, dm)
vk *= hyb
if abs(omega) > 1e-10: # For range separated Coulomb operator
with mol.with_range_coulomb(omega):
vk += ks_grad.get_k(mol, dm) * (alpha - hyb)
vxc += vj - vk * .5
return lib.tag_array(vxc, exc1_grid=exc)
def get_vxc(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((nset,3,nao,nao))
if xctype == 'LDA':
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
for idm in range(nset):
rho = make_rho(idm, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1,
verbose=verbose)[1]
vrho = vxc[0]
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho)
_d1_dot_(vmat[idm], mol, ao[1:4], aow, mask, ao_loc, True)
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
for idm in range(nset):
rho = make_rho(idm, ao[:4], mask, 'GGA')
vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1,
verbose=verbose)[1]
wv = numint._rks_gga_wv0(rho, vxc, weight)
_gga_grad_sum_(vmat[idm], mol, ao, wv, mask, ao_loc)
rho = vxc = vrho = wv = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
elif xctype == 'MGGA':
raise NotImplementedError('meta-GGA')
exc = None
if nset == 1:
vmat = vmat.reshape(3,nao,nao)
# - sign because nabla_X = -nabla_x
return exc, -vmat
def _make_dR_dao_w(ao, wv):
aow = numpy.einsum('npi,p->npi', ao[1:4], wv[0])
# XX, XY, XZ = 4, 5, 6
# YX, YY, YZ = 5, 7, 8
# ZX, ZY, ZZ = 6, 8, 9
aow[0] += numpy.einsum('pi,p->pi', ao[4], wv[1]) # dX nabla_x
aow[0] += numpy.einsum('pi,p->pi', ao[5], wv[2]) # dX nabla_y
aow[0] += numpy.einsum('pi,p->pi', ao[6], wv[3]) # dX nabla_z
aow[1] += numpy.einsum('pi,p->pi', ao[5], wv[1]) # dY nabla_x
aow[1] += numpy.einsum('pi,p->pi', ao[7], wv[2]) # dY nabla_y
aow[1] += numpy.einsum('pi,p->pi', ao[8], wv[3]) # dY nabla_z
aow[2] += numpy.einsum('pi,p->pi', ao[6], wv[1]) # dZ nabla_x
aow[2] += numpy.einsum('pi,p->pi', ao[8], wv[2]) # dZ nabla_y
aow[2] += numpy.einsum('pi,p->pi', ao[9], wv[3]) # dZ nabla_z
return aow
def _d1_dot_(vmat, mol, ao1, ao2, mask, ao_loc, dR1_on_bra=True):
shls_slice = (0, mol.nbas)
if dR1_on_bra:
vmat[0] += numint._dot_ao_ao(mol, ao1[0], ao2, mask, shls_slice, ao_loc)
vmat[1] += numint._dot_ao_ao(mol, ao1[1], ao2, mask, shls_slice, ao_loc)
vmat[2] += numint._dot_ao_ao(mol, ao1[2], ao2, mask, shls_slice, ao_loc)
else:
vmat[0] += numint._dot_ao_ao(mol, ao1, ao2[0], mask, shls_slice, ao_loc)
vmat[1] += numint._dot_ao_ao(mol, ao1, ao2[1], mask, shls_slice, ao_loc)
vmat[2] += numint._dot_ao_ao(mol, ao1, ao2[2], mask, shls_slice, ao_loc)
def _gga_grad_sum_(vmat, mol, ao, wv, mask, ao_loc):
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
_d1_dot_(vmat, mol, ao[1:4], aow, mask, ao_loc, True)
aow = _make_dR_dao_w(ao, wv)
_d1_dot_(vmat, mol, aow, ao[0], mask, ao_loc, True)
return vmat
def get_vxc_full_response(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
'''Full response including the response of the grids'''
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
excsum = 0
vmat = numpy.zeros((3,nao,nao))
if xctype == 'LDA':
ao_deriv = 1
vtmp = numpy.empty((3,nao,nao))
for atm_id, (coords, weight, weight1) in enumerate(grids_response_cc(grids)):
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho = make_rho(0, ao[0], mask, 'LDA')
exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1,
verbose=verbose)[:2]
vrho = vxc[0]
vtmp = numpy.zeros((3,nao,nao))
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho)
_d1_dot_(vtmp, mol, ao[1:4], aow, mask, ao_loc, True)
vmat += vtmp
# response of weights
excsum += numpy.einsum('r,r,nxr->nx', exc, rho, weight1)
# response of grids coordinates
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms) * 2
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 2
for atm_id, (coords, weight, weight1) in enumerate(grids_response_cc(grids)):
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho = make_rho(0, ao[:4], mask, 'GGA')
exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1,
verbose=verbose)[:2]
vtmp = numpy.zeros((3,nao,nao))
wv = numint._rks_gga_wv0(rho, vxc, weight)
_gga_grad_sum_(vtmp, mol, ao, wv, mask, ao_loc)
vmat += vtmp
# response of weights
excsum += numpy.einsum('r,r,nxr->nx', exc, rho[0], weight1)
# response of grids coordinates
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms) * 2
rho = vxc = vrho = wv = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
elif xctype == 'MGGA':
raise NotImplementedError('meta-GGA')
# - sign because nabla_X = -nabla_x
return excsum, -vmat
# JCP 98, 5612 (1993); DOI:10.1063/1.464906
def grids_response_cc(grids):
mol = grids.mol
atom_grids_tab = grids.gen_atomic_grids(mol, grids.atom_grid,
grids.radi_method,
grids.level, grids.prune)
atm_coords = numpy.asarray(mol.atom_coords() , order='C')
atm_dist = gto.inter_distance(mol, atm_coords)
def _radii_adjust(mol, atomic_radii):
charges = mol.atom_charges()
if grids.radii_adjust == radi.treutler_atomic_radii_adjust:
rad = numpy.sqrt(atomic_radii[charges]) + 1e-200
elif grids.radii_adjust == radi.becke_atomic_radii_adjust:
rad = atomic_radii[charges] + 1e-200
else:
fadjust = lambda i, j, g: g
gadjust = lambda *args: 1
return fadjust, gadjust
rr = rad.reshape(-1,1) * (1./rad)
a = .25 * (rr.T - rr)
a[a<-.5] = -.5
a[a>0.5] = 0.5
def fadjust(i, j, g):
return g + a[i,j]*(1-g**2)
#: d[g + a[i,j]*(1-g**2)] /dg = 1 - 2*a[i,j]*g
def gadjust(i, j, g):
return 1 - 2*a[i,j]*g
return fadjust, gadjust
fadjust, gadjust = _radii_adjust(mol, grids.atomic_radii)
def gen_grid_partition(coords, atom_id):
ngrids = coords.shape[0]
grid_dist = []
grid_norm_vec = []
for ia in range(mol.natm):
v = (atm_coords[ia] - coords).T
normv = numpy.linalg.norm(v,axis=0) + 1e-200
v /= normv
grid_dist.append(normv)
grid_norm_vec.append(v)
def get_du(ia, ib): # JCP 98, 5612 (1993); (B10)
uab = atm_coords[ia] - atm_coords[ib]
duab = 1./atm_dist[ia,ib] * grid_norm_vec[ia]
duab-= uab[:,None]/atm_dist[ia,ib]**3 * (grid_dist[ia]-grid_dist[ib])
return duab
pbecke = numpy.ones((mol.natm,ngrids))
dpbecke = numpy.zeros((mol.natm,mol.natm,3,ngrids))
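        # pbecke[i] is the unnormalised Becke cell function P_i evaluated on this
        # atom's grid; dpbecke[a, i] accumulates dP_i/dR_a, its response to
        # moving atom a.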
for ia in range(mol.natm):
for ib in range(ia):
g = 1/atm_dist[ia,ib] * (grid_dist[ia]-grid_dist[ib])
p0 = fadjust(ia, ib, g)
p1 = (3 - p0**2) * p0 * .5
p2 = (3 - p1**2) * p1 * .5
p3 = (3 - p2**2) * p2 * .5
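                # p3 is Becke's thrice-iterated switching polynomial f(f(f(g)));
                # t_uab (next line) is |ds/dg| for the step function s = 0.5*(1 - p3):
                # the 27/16 factor is (3/2)**3 * 1/2 from the chain rule through the
                # three iterations, and gadjust is the derivative of the radius
                # adjustment.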
t_uab = 27./16 * (1-p2**2) * (1-p1**2) * (1-p0**2) * gadjust(ia, ib, g)
s_uab = .5 * (1 - p3 + 1e-200)
s_uba = .5 * (1 + p3 + 1e-200)
pbecke[ia] *= s_uab
pbecke[ib] *= s_uba
pt_uab =-t_uab / s_uab
pt_uba = t_uab / s_uba
# * When grid is on atom ia/ib, ua/ub == 0, d_uba/d_uab may have huge error
# How to remove this error?
duab = get_du(ia, ib)
duba = get_du(ib, ia)
if ia == atom_id:
dpbecke[ia,ia] += pt_uab * duba
dpbecke[ia,ib] += pt_uba * duba
else:
dpbecke[ia,ia] += pt_uab * duab
dpbecke[ia,ib] += pt_uba * duab
if ib == atom_id:
dpbecke[ib,ib] -= pt_uba * duab
dpbecke[ib,ia] -= pt_uab * duab
else:
dpbecke[ib,ib] -= pt_uba * duba
dpbecke[ib,ia] -= pt_uab * duba
# * JCP 98, 5612 (1993); (B8) (B10) miss many terms
if ia != atom_id and ib != atom_id:
ua_ub = grid_norm_vec[ia] - grid_norm_vec[ib]
ua_ub /= atm_dist[ia,ib]
dpbecke[atom_id,ia] -= pt_uab * ua_ub
dpbecke[atom_id,ib] -= pt_uba * ua_ub
for ia in range(mol.natm):
dpbecke[:,ia] *= pbecke[ia]
return pbecke, dpbecke
natm = mol.natm
for ia in range(natm):
coords, vol = atom_grids_tab[mol.atom_symbol(ia)]
coords = coords + atm_coords[ia]
pbecke, dpbecke = gen_grid_partition(coords, ia)
z = 1./pbecke.sum(axis=0)
w1 = dpbecke[:,ia] * z
w1 -= pbecke[ia] * z**2 * dpbecke.sum(axis=1)
w1 *= vol
w0 = vol * pbecke[ia] * z
yield coords, w0, w1
class Gradients(rhf_grad.Gradients):
    # This parameter has no effect for HF gradients. Add this attribute so that
    # the kernel function can be reused in the DFT gradients code.
grid_response = getattr(__config__, 'grad_rks_Gradients_grid_response', False)
def __init__(self, mf):
rhf_grad.Gradients.__init__(self, mf)
self.grids = None
        # This parameter has no effect for HF gradients. Add this attribute so that
        # the kernel function can be reused in the DFT gradients code.
self.grid_response = False
self._keys = self._keys.union(['grid_response', 'grids'])
def dump_flags(self, verbose=None):
rhf_grad.Gradients.dump_flags(self, verbose)
logger.info(self, 'grid_response = %s', self.grid_response)
#if callable(self.base.grids.prune):
# logger.info(self, 'Grid pruning %s may affect DFT gradients accuracy.'
# 'Call mf.grids.run(prune=False) to mute grid pruning',
# self.base.grids.prune)
return self
get_veff = get_veff
def extra_force(self, atom_id, envs):
'''Hook for extra contributions in analytical gradients.
Contributions like the response of auxiliary basis in density fitting
method, the grid response in DFT numerical integration can be put in
this function.
'''
if self.grid_response:
vhf = envs['vhf']
log = envs['log']
log.debug('grids response for atom %d %s',
atom_id, vhf.exc1_grid[atom_id])
return vhf.exc1_grid[atom_id]
else:
return 0
Grad = Gradients
from pyscf import dft
dft.rks.RKS.Gradients = dft.rks_symm.RKS.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
from pyscf import dft
mol = gto.Mole()
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = '631g'
mol.build()
mf = dft.RKS(mol)
mf.conv_tol = 1e-14
#mf.grids.atom_grid = (20,86)
e0 = mf.scf()
g = mf.Gradients()
print(lib.finger(g.kernel()) - -0.049887865971659243)
#[[ -4.20040265e-16 -6.59462771e-16 2.10150467e-02]
# [ 1.42178271e-16 2.81979579e-02 -1.05137653e-02]
# [ 6.34069238e-17 -2.81979579e-02 -1.05137653e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.049891265876709084)
# O 0.0000000000 -0.0000000000 0.0210225191
# H 0.0000000000 0.0281984036 -0.0105112595
# H -0.0000000000 -0.0281984036 -0.0105112595
mf.xc = 'b88,p86'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.050382923259300716)
#[[ -8.20194970e-16 -2.04319288e-15 2.44405835e-02]
# [ 4.36709255e-18 2.73690416e-02 -1.22232039e-02]
# [ 3.44483899e-17 -2.73690416e-02 -1.22232039e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.05036316927480719)
mf.xc = 'b3lypg'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.035613964330885352)
#[[ -3.59411142e-16 -2.68753987e-16 1.21557501e-02]
# [ 4.04977877e-17 2.11112794e-02 -6.08181640e-03]
# [ 1.52600378e-16 -2.11112794e-02 -6.08181640e-03]]
mol = gto.Mole()
mol.atom = [
['H' , (0. , 0. , 1.804)],
['F' , (0. , 0. , 0. )], ]
mol.unit = 'B'
mol.basis = '631g'
mol.build()
mf = dft.RKS(mol)
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.0018831588319051444)
# sum over z direction non-zero, due to meshgrid response
#[[ 0 0 -2.68934738e-03]
# [ 0 0 2.69333577e-03]]
mf = dft.RKS(mol)
mf.grids.prune = None
mf.grids.level = 6
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.0018819497229394144)
#[[ 0 0 -2.68931547e-03]
# [ 0 0 2.68911282e-03]]
|
|
# -*- coding: utf-8 -*-
"""
gspread.models
~~~~~~~~~~~~~~
This module contains common spreadsheets' models
"""
import re
from collections import defaultdict
from itertools import chain
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from .ns import _ns, _ns1, ATOM_NS, BATCH_NS, SPREADSHEET_NS
from .urls import construct_url
from .utils import finditem, numericise_all
from .exceptions import IncorrectCellLabel, WorksheetNotFound, CellNotFound
try:
unicode
except NameError:
basestring = unicode = str
# Patch ElementTree._escape_attrib
_elementtree_escape_attrib = ElementTree._escape_attrib
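# The stdlib helper escapes XML special characters but leaves literal newlines,
# carriage returns and tabs alone; the wrapper below also converts those to
# character references so whitespace inside cell values survives the round-trip.
# The TypeError fallback keeps compatibility with older ElementTree versions
# whose _escape_attrib still requires an encoding argument.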
def _escape_attrib(text, encoding=None, replace=None):
try:
text = _elementtree_escape_attrib(text)
except TypeError as e:
if str(e) == '_escape_attrib() takes exactly 2 arguments (1 given)':
text = _elementtree_escape_attrib(text, encoding)
entities = {'\n': ' ', '\r': ' ', '\t': '	'}
for key, value in entities.items():
text = text.replace(key, value)
return text
ElementTree._escape_attrib = _escape_attrib
class Spreadsheet(object):
""" A class for a spreadsheet object."""
def __init__(self, client, feed_entry):
self.client = client
id_parts = feed_entry.find(_ns('id')).text.split('/')
self.id = id_parts[-1]
self._sheet_list = []
def get_id_fields(self):
return {'spreadsheet_id': self.id}
def _fetch_sheets(self):
feed = self.client.get_worksheets_feed(self)
for elem in feed.findall(_ns('entry')):
self._sheet_list.append(Worksheet(self, elem))
def add_worksheet(self, title, rows, cols):
"""Adds a new worksheet to a spreadsheet.
:param title: A title of a new worksheet.
:param rows: Number of rows.
:param cols: Number of columns.
        Returns a newly created :class:`worksheet <Worksheet>`.
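        Example (the sheet title here is illustrative):
        >>> wks = sht.add_worksheet('New sheet', rows=100, cols=20)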
"""
feed = Element('entry', {'xmlns': ATOM_NS,
'xmlns:gs': SPREADSHEET_NS})
SubElement(feed, 'title').text = title
SubElement(feed, 'gs:rowCount').text = str(rows)
SubElement(feed, 'gs:colCount').text = str(cols)
url = construct_url('worksheets', self)
elem = self.client.post_feed(url, ElementTree.tostring(feed))
worksheet = Worksheet(self, elem)
self._sheet_list.append(worksheet)
return worksheet
def del_worksheet(self, worksheet):
"""Deletes a worksheet from a spreadsheet.
:param worksheet: The worksheet to be deleted.
"""
self.client.del_worksheet(worksheet)
self._sheet_list.remove(worksheet)
def worksheets(self):
"""Returns a list of all :class:`worksheets <Worksheet>`
in a spreadsheet.
"""
if not self._sheet_list:
self._fetch_sheets()
return self._sheet_list[:]
def worksheet(self, title):
"""Returns a worksheet with specified `title`.
        The returned object is an instance of :class:`Worksheet`.
        :param title: A title of a worksheet. If there are multiple
                      worksheets with the same title, the first one will
                      be returned.
Example. Getting worksheet named 'Annual bonuses'
>>> sht = client.open('Sample one')
>>> worksheet = sht.worksheet('Annual bonuses')
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return finditem(lambda x: x.title == title, self._sheet_list)
except StopIteration:
raise WorksheetNotFound(title)
def get_worksheet(self, index):
"""Returns a worksheet with specified `index`.
        The returned object is an instance of :class:`Worksheet`.
:param index: An index of a worksheet. Indexes start from zero.
Example. To get first worksheet of a spreadsheet:
>>> sht = client.open('My fancy spreadsheet')
>>> worksheet = sht.get_worksheet(0)
Returns `None` if the worksheet is not found.
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return self._sheet_list[index]
except IndexError:
return None
@property
def sheet1(self):
"""Shortcut property for getting the first worksheet."""
return self.get_worksheet(0)
class Worksheet(object):
"""A class for worksheet object."""
def __init__(self, spreadsheet, element):
self.spreadsheet = spreadsheet
self.client = spreadsheet.client
self._id = element.find(_ns('id')).text.split('/')[-1]
self._title = element.find(_ns('title')).text
self._element = element
try:
self.version = self._get_link(
'edit', element).get('href').split('/')[-1]
except:
# not relevant for read-only spreadsheets
self.version = None
def __repr__(self):
return '<%s %s id:%s>' % (self.__class__.__name__,
repr(self.title),
self.id)
@property
def id(self):
"""Id of a worksheet."""
return self._id
@property
def title(self):
"""Title of a worksheet."""
return self._title
@property
def row_count(self):
"""Number of rows"""
return int(self._element.find(_ns1('rowCount')).text)
@property
def col_count(self):
"""Number of columns"""
return int(self._element.find(_ns1('colCount')).text)
@property
def updated(self):
"""Updated time in RFC 3339 format"""
return self._element.find(_ns('updated')).text
def get_id_fields(self):
return {'spreadsheet_id': self.spreadsheet.id,
'worksheet_id': self.id}
def _cell_addr(self, row, col):
return 'R%sC%s' % (row, col)
def _get_link(self, link_type, feed):
return finditem(lambda x: x.get('rel') == link_type,
feed.findall(_ns('link')))
def _fetch_cells(self):
feed = self.client.get_cells_feed(self)
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
_MAGIC_NUMBER = 64
_cell_addr_re = re.compile(r'([A-Za-z]+)(\d+)')
def get_int_addr(self, label):
"""Translates cell's label address to a tuple of integers.
The result is a tuple containing `row` and `column` numbers.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.get_int_addr('A1')
(1, 1)
"""
m = self._cell_addr_re.match(label)
if m:
column_label = m.group(1).upper()
row = int(m.group(2))
col = 0
for i, c in enumerate(reversed(column_label)):
col += (ord(c) - self._MAGIC_NUMBER) * (26 ** i)
else:
raise IncorrectCellLabel(label)
return (row, col)
def get_addr_int(self, row, col):
"""Translates cell's tuple of integers to a cell label.
The result is a string containing the cell's coordinates in label form.
:param row: The row of the cell to be converted.
Rows start at index 1.
:param col: The column of the cell to be converted.
Columns start at index 1.
Example:
>>> wks.get_addr_int(1, 1)
A1
"""
row = int(row)
col = int(col)
if row < 1 or col < 1:
raise IncorrectCellLabel('(%s, %s)' % (row, col))
div = col
column_label = ''
while div:
(div, mod) = divmod(div, 26)
if mod == 0:
mod = 26
div -= 1
column_label = chr(mod + self._MAGIC_NUMBER) + column_label
label = '%s%s' % (column_label, row)
return label
def acell(self, label):
"""Returns an instance of a :class:`Cell`.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.acell('A1') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.cell(*(self.get_int_addr(label)))
def cell(self, row, col):
"""Returns an instance of a :class:`Cell` positioned in `row`
and `col` column.
:param row: Integer row number.
:param col: Integer column number.
Example:
>>> wks.cell(1, 1)
<Cell R1C1 "I'm cell A1">
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
return Cell(self, feed)
def range(self, alphanum):
"""Returns a list of :class:`Cell` objects from specified range.
:param alphanum: A string with range value in common format,
e.g. 'A1:A5'.
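        Example:
        >>> cell_list = wks.range('A1:B7')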
"""
feed = self.client.get_cells_feed(self, params={'range': alphanum,
'return-empty': 'true'})
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
def get_all_values(self):
"""Returns a list of lists containing all cells' values as strings."""
cells = self._fetch_cells()
# defaultdicts fill in gaps for empty rows/cells not returned by gdocs
rows = defaultdict(lambda: defaultdict(str))
for cell in cells:
row = rows.setdefault(int(cell.row), defaultdict(str))
row[cell.col] = cell.value
# we return a whole rectangular region worth of cells, including
# empties
if not rows:
return []
all_row_keys = chain.from_iterable(row.keys() for row in rows.values())
rect_cols = range(1, max(all_row_keys) + 1)
rect_rows = range(1, max(rows.keys()) + 1)
return [[rows[i][j] for j in rect_cols] for i in rect_rows]
def get_all_records(self, empty2zero=False):
"""Returns a list of dictionaries, all of them having:
- the contents of the spreadsheet's first row of cells as keys,
And each of these dictionaries holding
- the contents of subsequent rows of cells as values.
Cell values are numericised (strings that can be read as ints
or floats are converted).
:param empty2zero: determines whether empty cells are converted to zeros."""
data = self.get_all_values()
keys = data[0]
values = [numericise_all(row, empty2zero) for row in data[1:]]
return [dict(zip(keys, row)) for row in values]
def _list_values(self, index, cell_tuple, position):
cells_list = self._fetch_cells()
cells = dict(map(cell_tuple, filter(position, cells_list)))
try:
last_index = max(cells.keys())
except ValueError:
return []
vals = []
for i in range(1, last_index + 1):
c = cells.get(i)
vals.append(c.value if c else None)
return vals
def row_values(self, row):
"""Returns a list of all values in a `row`.
Empty cells in this list will be rendered as :const:`None`.
"""
return self._list_values(row,
lambda cell: (cell.col, cell),
lambda cell: cell.row == row)
def col_values(self, col):
"""Returns a list of all values in column `col`.
Empty cells in this list will be rendered as :const:`None`.
"""
return self._list_values(col,
lambda cell: (cell.row, cell),
lambda cell: cell.col == col)
def update_acell(self, label, val):
"""Sets the new value to a cell.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
:param val: New value.
Example:
>>> wks.update_acell('A1', '42') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.update_cell(*(self.get_int_addr(label)), val=val)
def update_cell(self, row, col, val):
"""Sets the new value to a cell.
:param row: Row number.
:param col: Column number.
:param val: New value.
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
cell_elem = feed.find(_ns1('cell'))
cell_elem.set('inputValue', unicode(val))
uri = self._get_link('edit', feed).get('href')
self.client.put_feed(uri, ElementTree.tostring(feed))
def _create_update_feed(self, cell_list):
feed = Element('feed', {'xmlns': ATOM_NS,
'xmlns:batch': BATCH_NS,
'xmlns:gs': SPREADSHEET_NS})
id_elem = SubElement(feed, 'id')
id_elem.text = construct_url('cells', self)
for cell in cell_list:
entry = SubElement(feed, 'entry')
SubElement(entry, 'batch:id').text = cell.element.find(
_ns('title')).text
SubElement(entry, 'batch:operation', {'type': 'update'})
SubElement(entry, 'id').text = cell.element.find(_ns('id')).text
edit_link = finditem(lambda x: x.get('rel') == 'edit',
cell.element.findall(_ns('link')))
SubElement(entry, 'link', {'rel': 'edit',
'type': edit_link.get('type'),
'href': edit_link.get('href')})
SubElement(entry, 'gs:cell', {'row': str(cell.row),
'col': str(cell.col),
'inputValue': unicode(cell.value)})
return feed
def update_cells(self, cell_list):
"""Updates cells in batch.
        :param cell_list: List of :class:`Cell` objects to update.
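        Example. A typical batch update: fetch a range, change the values
        locally, then write them back in a single request:
        >>> cell_list = wks.range('A1:A5')
        >>> for cell in cell_list:
        ...     cell.value = 'new value'
        >>> wks.update_cells(cell_list)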
"""
feed = self._create_update_feed(cell_list)
self.client.post_cells(self, ElementTree.tostring(feed))
def resize(self, rows=None, cols=None):
"""Resizes the worksheet.
:param rows: New rows number.
:param cols: New columns number.
"""
if rows is None and cols is None:
raise TypeError("Either 'rows' or 'cols' should be specified.")
self_uri = self._get_link('self', self._element).get('href')
feed = self.client.get_feed(self_uri)
uri = self._get_link('edit', feed).get('href')
if rows:
elem = feed.find(_ns1('rowCount'))
elem.text = str(rows)
if cols:
elem = feed.find(_ns1('colCount'))
elem.text = str(cols)
# Send request and store result
self._element = self.client.put_feed(uri, ElementTree.tostring(feed))
def add_rows(self, rows):
"""Adds rows to worksheet.
:param rows: Rows number to add.
"""
self.resize(rows=self.row_count + rows)
def add_cols(self, cols):
"""Adds colums to worksheet.
:param cols: Columns number to add.
"""
self.resize(cols=self.col_count + cols)
def append_row(self, values):
""""Adds a row to the worksheet and populates it with values.
Widens the worksheet if there are more values than columns.
:param values: List of values for the new row.
"""
self.add_rows(1)
new_row = self.row_count
data_width = len(values)
if self.col_count < data_width:
self.resize(cols=data_width)
cell_list = []
for i, value in enumerate(values, start=1):
cell = self.cell(new_row, i)
cell.value = value
cell_list.append(cell)
self.update_cells(cell_list)
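    # Hedged usage sketch; `wks` is an assumed Worksheet. When the new row is
    # wider than the sheet, append_row() resizes the sheet before writing:
    #
    #     wks.append_row(['2016-01-01', 'alice', 42, 'ok'])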
def _finder(self, func, query):
cells = self._fetch_cells()
if isinstance(query, basestring):
match = lambda x: x.value == query
else:
match = lambda x: query.search(x.value)
return func(match, cells)
def find(self, query):
"""Finds first cell matching query.
:param query: A text string or compiled regular expression.
"""
try:
return self._finder(finditem, query)
except StopIteration:
raise CellNotFound(query)
def findall(self, query):
"""Finds all cells matching query.
:param query: A text string or compiled regular expression.
"""
return self._finder(filter, query)
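    # Hedged examples of the `query` argument accepted by find()/findall();
    # `wks` is an assumed Worksheet instance:
    #
    #     import re
    #     cell = wks.find('Total')                    # exact string match
    #     cells = wks.findall(re.compile(r'^\d+$'))   # cells holding only digits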
class Cell(object):
"""An instance of this class represents a single cell
in a :class:`worksheet <Worksheet>`.
"""
def __init__(self, worksheet, element):
self.element = element
cell_elem = element.find(_ns1('cell'))
self._row = int(cell_elem.get('row'))
self._col = int(cell_elem.get('col'))
self.input_value = cell_elem.get('inputValue')
#: Value of the cell.
self.value = cell_elem.text or ''
@property
def row(self):
"""Row number of the cell."""
return self._row
@property
def col(self):
"""Column number of the cell."""
return self._col
def __repr__(self):
return '<%s R%sC%s %s>' % (self.__class__.__name__,
self.row,
self.col,
repr(self.value))
|
|
import json
import logging
from lxml import etree
from django.core.urlresolvers import reverse
from django.db.models import Q
from django_datatables_view.base_datatable_view import BaseDatatableView
from core.models import PublishedRecords, Job, Record, IndexMappingFailure, JobValidation,\
CombineBackgroundTask
from core.mongo import mongoengine, ObjectId
from .job import job_details
from .record import record, record_document
from .core_background_tasks import bg_task, bg_task_cancel, bg_task_delete
LOGGER = logging.getLogger(__name__)
####################################################################
# Datatables endpoints #
# https://bitbucket.org/pigletto/django-datatables-view/overview #
####################################################################
class DTPublishedJson(BaseDatatableView):
"""
Prepare and return Datatables JSON for Published records
"""
# define the columns that will be returned
columns = [
'_id',
'record_id',
'job_id',
'publish_set_id',
# 'oai_set',
# 'unique_published',
'document'
]
# define column names that will be used in sorting
# order is important and should be same as order of columns
# displayed by datatables. For non sortable columns use empty
# value like ''
order_columns = [
'_id',
'record_id',
'job_id',
'publish_set_id',
# 'oai_set',
# 'unique_published',
'document'
]
    # set max limit of records returned; this protects the site from requests
    # that try to pull back a huge amount of data
max_display_length = 1000
def get_initial_queryset(self):
# return queryset used as base for further sorting/filtering
# get PublishedRecords instance
pub_records = PublishedRecords(subset=self.kwargs.get('subset', None))
# return queryset
return pub_records.records
def render_column(self, row, column):
# handle document metadata
if column == '_id':
return '<a href="%s">%s</a>' % (reverse(record, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id, 'record_id': str(row.id)
}), str(row.id))
if column == 'record_id':
return '<a href="%s">%s</a>' % (reverse(record, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id, 'record_id': str(row.id)
}), row.record_id)
if column == 'job_id':
return '<a href="%s">%s</a>' % (reverse(job_details, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id
}), row.job.name)
if column == 'document':
# attempt to parse as XML and return if valid or not
try:
etree.fromstring(row.document.encode('utf-8'))
return '<a target="_blank" href="%s">Valid XML</a>' % (reverse(record_document, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id, 'record_id': str(row.id)
}))
            except Exception:
return '<span style="color: red;">Invalid XML</span>'
# # handle associated job
# if column == 'unique_published':
# if row.unique_published:
# return '<span style="color:green;">True</span>'
# else:
# return '<span style="color:red;">False</span>'
else:
return super(DTPublishedJson, self).render_column(row, column)
def filter_queryset(self, qs):
# use parameters passed in GET request to filter queryset
# handle search
search = self.request.GET.get(u'search[value]', None)
if search:
# sniff out ObjectId if present
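            # (a MongoDB ObjectId serialises to exactly 24 hex characters, so a
            # value such as '507f1f77bcf86cd799439011' -- illustrative only --
            # is first tried as an _id lookup; anything else falls through to
            # the plain-text filters below)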
if len(search) == 24:
try:
oid = ObjectId(search)
qs = qs.filter(mongoengine.Q(id=oid))
                except Exception:
LOGGER.debug('received 24 chars, but not ObjectId')
else:
qs = qs.filter(mongoengine.Q(record_id=search) |
mongoengine.Q(publish_set_id=search))
# return
return qs
class DTRecordsJson(BaseDatatableView):
"""
Prepare and return Datatables JSON for Records table in Job Details
"""
# define the columns that will be returned
columns = [
'_id',
'record_id',
'job_id',
'oai_set',
'unique',
'document',
'error',
'valid'
]
# define column names that will be used in sorting
# order is important and should be same as order of columns
# displayed by datatables. For non sortable columns use empty
# value like ''
# order_columns = ['number', 'user', 'state', '', '']
order_columns = [
'_id',
'record_id',
'job_id',
'oai_set',
'unique',
'document',
'error',
'valid'
]
    # set max limit of records returned; this protects the site from requests
    # that try to pull back a huge amount of data
max_display_length = 1000
def get_initial_queryset(self):
# return queryset used as base for further sorting/filtering
# if job present, filter by job
if 'job_id' in self.kwargs.keys():
# get job self.kwargs['job_id']
job = Job.objects.get(pk=self.kwargs['job_id'])
# return filtered queryset
if 'success_filter' in self.kwargs.keys():
success_filter = bool(int(self.kwargs['success_filter']))
else:
success_filter = None
return job.get_records(success=success_filter)
# else, return all records
job_ids = list(map(lambda j: j.id, Job.objects.all()))
records = Record.objects.filter(job_id__in=job_ids)
return records
def render_column(self, row, column):
# construct record link
record_link = reverse(record, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id,
'record_id': str(row.id)
})
# handle db_id
if column == '_id':
return '<a href="%s"><code>%s</code></a>' % (record_link, str(row.id))
# handle record_id
if column == 'record_id':
return '<a href="%s"><code>%s</code></a>' % (record_link, row.record_id)
# handle document
if column == 'document':
# attempt to parse as XML and return if valid or not
try:
etree.fromstring(row.document.encode('utf-8'))
return '<a target="_blank" href="%s">Valid XML</a>' % (reverse(record_document, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id, 'record_id': str(row.id)
}))
            except Exception:
return '<span style="color: red;">Invalid XML</span>'
# handle associated job
if column == 'job':
return '<a href="%s"><code>%s</code></a>' % (reverse(job_details, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id
}), row.job.name)
# handle unique
if column == 'unique':
if row.unique:
return '<span style="color:green;">Unique in Job</span>'
return '<span style="color:red;">Duplicate in Job</span>'
# handle validation_results
if column == 'valid':
if row.valid:
return '<span style="color:green;">Valid</span>'
return '<span style="color:red;">Invalid</span>'
return super(DTRecordsJson, self).render_column(row, column)
def filter_queryset(self, qs):
# use parameters passed in GET request to filter queryset
# handle search
search = self.request.GET.get(u'search[value]', None)
if search:
# sniff out ObjectId if present
if len(search) == 24:
try:
oid = ObjectId(search)
qs = qs.filter(mongoengine.Q(id=oid))
                except Exception:
LOGGER.debug('received 24 chars, but not ObjectId')
else:
qs = qs.filter(mongoengine.Q(record_id=search))
# return
return qs
class DTIndexingFailuresJson(BaseDatatableView):
"""
Datatables JSON response for Indexing Failures
"""
# define the columns that will be returned
columns = ['_id', 'record_id', 'mapping_error']
# define column names that will be used in sorting
order_columns = ['_id', 'record_id', 'mapping_error']
    # set max limit of records returned; this protects the site from requests
    # that try to pull back a huge amount of data
max_display_length = 1000
def get_initial_queryset(self):
# return queryset used as base for further sorting/filtering
# get job
job = Job.objects.get(pk=self.kwargs['job_id'])
# return filtered queryset
return IndexMappingFailure.objects(job_id=job.id)
def render_column(self, row, column):
# determine record link
target_record = row.record
record_link = reverse(record, kwargs={
'org_id': target_record.job.record_group.organization.id,
'record_group_id': target_record.job.record_group.id,
'job_id': target_record.job.id,
'record_id': target_record.id
})
if column == '_id':
return '<a href="%s">%s</a>' % (record_link, target_record.id)
if column == 'record_id':
return '<a href="%s">%s</a>' % (record_link, target_record.record_id)
# handle associated job
if column == 'job':
return row.job.name
return super(DTIndexingFailuresJson, self).render_column(row, column)
class DTJobValidationScenarioFailuresJson(BaseDatatableView):
"""
Prepare and return Datatables JSON for RecordValidation failures from Job, per Validation Scenario
"""
# define the columns that will be returned
columns = [
'id',
'record',
'results_payload',
'fail_count'
]
# define column names that will be used in sorting
# order is important and should be same as order of columns
# displayed by datatables. For non sortable columns use empty
# value like ''
# order_columns = ['number', 'user', 'state', '', '']
order_columns = [
'id',
'record',
'results_payload',
'fail_count'
]
    # set max limit of records returned; this protects the site from requests
    # that try to pull back a huge amount of data
max_display_length = 1000
def get_initial_queryset(self):
# return queryset used as base for further sorting/filtering
# get job
job_validation = JobValidation.objects.get(
pk=self.kwargs['job_validation_id'])
# return filtered queryset
return job_validation.get_record_validation_failures()
def render_column(self, row, column):
# determine record link
target_record = row.record
record_link = "%s#validation_tab" % reverse(record, kwargs={
'org_id': target_record.job.record_group.organization.id,
'record_group_id': target_record.job.record_group.id,
'job_id': target_record.job.id,
'record_id': target_record.id
})
# handle record id
if column == 'id':
# get target record from row
target_record = row.record
return '<a href="%s">%s</a>' % (record_link, target_record.id)
# handle record record_id
if column == 'record':
# get target record from row
target_record = row.record
return '<a href="%s">%s</a>' % (record_link, target_record.record_id)
# handle results_payload
if column == 'results_payload':
result_payload = json.loads(row.results_payload)['failed']
return ', '.join(result_payload)
# handle all else
return super(DTJobValidationScenarioFailuresJson, self).render_column(row, column)
def filter_queryset(self, qs):
# use parameters passed in GET request to filter queryset
# handle search
search = self.request.GET.get(u'search[value]', None)
if search:
# sniff out ObjectId if present
if len(search) == 24:
try:
oid = ObjectId(search)
qs = qs.filter(mongoengine.Q(record_id=oid))
                except Exception:
LOGGER.debug('received 24 chars, but not ObjectId')
# return
return qs
class DTDPLABulkDataMatches(BaseDatatableView):
"""
    Prepare and return Datatables JSON for DPLA Bulk Data matches and misses for a Job
"""
# define the columns that will be returned
columns = [
'id',
'record_id'
]
# define column names that will be used in sorting
# order is important and should be same as order of columns
# displayed by datatables. For non sortable columns use empty
# value like ''
# order_columns = ['number', 'user', 'state', '', '']
order_columns = [
'id',
'record_id'
]
    # set max limit of records returned; this protects the site from requests
    # that try to pull back a huge amount of data
max_display_length = 1000
def get_initial_queryset(self):
# return queryset used as base for further sorting/filtering
# get job and records
job = Job.objects.get(pk=self.kwargs['job_id'])
# return queryset filtered for match/miss
if self.kwargs['match_type'] == 'matches':
return job.get_records().filter(dbdm=True)
if self.kwargs['match_type'] == 'misses':
return job.get_records().filter(dbdm=False)
def render_column(self, row, column):
# determine record link
target_record = row
record_link = reverse(record, kwargs={
'org_id': target_record.job.record_group.organization.id,
'record_group_id': target_record.job.record_group.id,
'job_id': target_record.job.id,
'record_id': target_record.id
})
# handle record id
if column == 'id':
# get target record from row
target_record = row
return '<a href="%s">%s</a>' % (record_link, target_record.id)
# handle record record_id
if column == 'record_id':
# get target record from row
target_record = row
return '<a href="%s">%s</a>' % (record_link, target_record.record_id)
# handle all else
return super(DTDPLABulkDataMatches, self).render_column(row, column)
def filter_queryset(self, qs):
# use parameters passed in GET request to filter queryset
# handle search
search = self.request.GET.get(u'search[value]', None)
if search:
# sniff out ObjectId if present
if len(search) == 24:
try:
oid = ObjectId(search)
qs = qs.filter(mongoengine.Q(id=oid))
                except Exception:
LOGGER.debug('received 24 chars, but not ObjectId')
else:
qs = qs.filter(mongoengine.Q(record_id=search))
# return
return qs
class JobRecordDiffs(BaseDatatableView):
"""
Prepare and return Datatables JSON for Records that were
transformed during a Transformation Job
"""
# define the columns that will be returned
columns = [
'id',
'record_id',
]
# define column names that will be used in sorting
# order is important and should be same as order of columns
# displayed by datatables. For non sortable columns use empty
# value like ''
order_columns = [
'id',
'record_id'
]
    # set max limit of records returned; this protects the site from requests
    # that try to pull back a huge amount of data
max_display_length = 1000
def get_initial_queryset(self):
# return queryset used as base for further sorting/filtering
# get job
job = Job.objects.get(pk=self.kwargs['job_id'])
job_records = job.get_records()
# filter for records that were transformed
return job_records.filter(transformed=True)
def render_column(self, row, column):
# record link
record_link = "%s#job_type_specific_tab" % reverse(record, kwargs={
'org_id': row.job.record_group.organization.id,
'record_group_id': row.job.record_group.id,
'job_id': row.job.id, 'record_id': row.id
})
# handle db_id
if column == 'id':
return '<a href="%s"><code>%s</code></a>' % (record_link, row.id)
# handle record_id
if column == 'record_id':
return '<a href="%s"><code>%s</code></a>' % (record_link, row.record_id)
return super(JobRecordDiffs, self).render_column(row, column)
def filter_queryset(self, qs):
# use parameters passed in GET request to filter queryset
# handle search
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(Q(id__contains=search) | Q(
record_id__contains=search) | Q(document__contains=search))
# return
return qs
class CombineBackgroundTasksDT(BaseDatatableView):
"""
    Prepare and return Datatables JSON for Combine Background Tasks
"""
# define the columns that will be returned
columns = [
'id',
'start_timestamp',
'name',
'task_type',
'celery_task_id',
'completed',
'duration',
'actions'
]
# define column names that will be used in sorting
# order is important and should be same as order of columns
# displayed by datatables. For non sortable columns use empty
# value like ''
# order_columns = ['number', 'user', 'state', '', '']
order_columns = [
'id',
'start_timestamp',
'name',
'task_type',
'celery_task_id',
'completed',
'duration',
'actions'
]
    # set max limit of records returned; this protects the site from requests
    # that try to pull back a huge amount of data
max_display_length = 1000
def get_initial_queryset(self):
# return queryset used as base for further sorting/filtering
return CombineBackgroundTask.objects
def render_column(self, row, column):
if column == 'task_type':
return row.get_task_type_display()
if column == 'celery_task_id':
return '<code>%s</code>' % row.celery_task_id
if column == 'completed':
if row.completed:
if row.celery_status in ['STOPPED', 'REVOKED']:
row_color = 'danger'
else:
row_color = 'success'
else:
row_color = 'warning'
return '''<div class="progress progress-bar bg-{}" role="progressbar" aria-valuenow="100" aria-valuemin="0" aria-valuemax="100" style="width: 100%">{}</div>'''.format(row_color, row.celery_status)
if column == 'duration':
return row.calc_elapsed_as_string()
if column == 'actions':
return '<a href="%s"><button type="button" class="btn btn-success btn-sm">Results <i class="la la-info-circle"></i></button></a> <a href="%s"><button type="button" class="btn btn-danger btn-sm" onclick="return confirm(\'Are you sure you want to cancel this task?\');">Stop <i class="la la-stop"></i></button></a> <a href="%s"><button type="button" class="btn btn-danger btn-sm" onclick="return confirm(\'Are you sure you want to remove this task?\');">Delete <i class="la la-close"></i></button></a>' % (
reverse(bg_task, kwargs={'task_id': row.id}),
reverse(bg_task_cancel, kwargs={'task_id': row.id}),
reverse(bg_task_delete, kwargs={'task_id': row.id}),
)
return super(CombineBackgroundTasksDT, self).render_column(row, column)
def filter_queryset(self, qs):
# use parameters passed in GET request to filter queryset
# handle search
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(Q(id__contains=search) | Q(
name__contains=search) | Q(verbose_name__contains=search))
# return
return qs
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import ceil
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Pack")
def _PackGrad(op, grad):
"""Gradient for pack op."""
return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis"))
@ops.RegisterGradient("Unpack")
def _UnpackGrad(op, *grads):
"""Gradient for unpack op."""
return array_ops.stack(grads, axis=op.get_attr("axis"))
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
"""Gradient for concat op.
Args:
op: An operation.
grad: `Tensor` or `IndexedSlices` representing the gradients with respect
to each output of the op.
start_value_index: An integer index of the first value in the op.inputs.
end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of the concat_dim or axis parameter in op.inputs.
Returns:
Tensors representing the partial gradients with respect to each input
of the op.
Raises:
ValueError: if concat_dim/axis is not statically known.
"""
def _CreateDenseMaskAndBegin(sizes, concat_dim):
"""Create variables for iteratively slicing a dense gradients tensor."""
# Since shape is 1-D, shape_of_shape = [rank-of-inputs]
shape_of_shape = array_ops.shape(sizes[0])
# Make a vector of length equal to the input's dimensions,
# with 0's everywhere and 1 in the concat dim position.
# Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
mask = array_ops.concat([
array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
array_ops.fill(shape_of_shape - concat_dim - 1, 0)
], 0)
begin = array_ops.fill(shape_of_shape, 0)
return mask, begin
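  # A small worked example of the helper above (values derived from the code,
  # not from external docs): for rank-3 inputs and concat_dim == 1,
  #   mask  = [0, 1, 0]
  #   begin = [0, 0, 0]
  # so the IndexedSlices branch below can advance `begin` along the concat
  # dimension only, via begin = begin + size * mask.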
def _ExtractInputShapes(inputs):
"""Extract the shapes of a set of input tensors."""
if not context.in_graph_mode():
return array_ops.shape_n(inputs)
sizes = []
fully_known = True
for x in inputs:
input_shape = array_ops.shape(x)
if not isinstance(input_shape,
ops.Tensor) or input_shape.op.type != "Const":
fully_known = False
break
sizes.append(input_shape)
if fully_known:
return sizes
else:
return array_ops.shape_n(inputs)
# Degenerate concatenation, just return grad.
if len(op.inputs) == 2:
return grad + [None] if end_value_index <= dim_index else [None] + grad
concat_dim = op.inputs[dim_index]
input_values = op.inputs[start_value_index:end_value_index]
out_grads = []
if isinstance(grad, ops.Tensor):
if context.in_eager_mode():
# Using mod here for convenience since concat_dim is already verified
# in concat implementation to be within the allowed [-rank, rank) range.
non_neg_concat_dim = (
concat_dim._numpy().item(0) % input_values[0]._rank()) # pylint: disable=protected-access
# All inputs are guaranteed to be EagerTensors in eager mode
sizes = pywrap_tensorflow.TFE_Py_TensorShapeSlice(input_values,
non_neg_concat_dim)
out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
else:
# Using mod here for convenience since concat_dim is already verified
# in concat implementation to be within the allowed [-rank, rank) range.
non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
# Get the inputs' tensor shapes
sizes = _ExtractInputShapes(input_values)
# The magic number of 16 was found through benchmarking a range of sizes
# on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of
# cases when switching implementations at N=16, but it is possible that
# there will be a small number of performance regressions.
# pylint: disable=protected-access
if len(sizes) > 16:
# extract the size of each input along the concat dimension
sizes = array_ops.squeeze(
array_ops.slice(
array_ops.stack(sizes, axis=1), [non_neg_concat_dim, 0],
[1, -1]))
out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
else:
offset = gen_array_ops._concat_offset(non_neg_concat_dim, sizes)
for (begin, size) in zip(offset, sizes):
out_grads.append(array_ops.slice(grad, begin, size))
# pylint: enable=protected-access
elif isinstance(grad, ops.IndexedSlices):
# Using mod here for convenience since concat_dim is already verified
# in concat implementation to be within the allowed [-rank, rank) range.
non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
concat_dim_static = tensor_util.constant_value(concat_dim)
if concat_dim_static is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"statically-known concat_dim")
if concat_dim_static < 0:
rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
if rank is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"negative concat_dim when first value rank is "
"statically-known.")
concat_dim_static %= rank
# Get the inputs' tensor shapes
sizes = [array_ops.shape(x) for x in input_values]
if concat_dim_static > 0:
# IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
# gradients with all the indices, but with grad.values sliced accordingly.
# This is like the Tensor case, except shape(grad.values)[0] is not equal
# to shape(sizes[i])[0], since only a subset of the dim-0 values are
# stored.
mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)
for size in sizes:
new_values = array_ops.slice(
grad.values, begin,
array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))
out_grads.append(ops.IndexedSlices(new_values, grad.indices, size))
# Lint complains begin = begin + ...
begin = math_ops.add(begin, size * mask)
else:
# IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
# only for the relevant indices.
start = constant_op.constant(0, dtype=grad.indices.dtype)
for size in sizes:
size_concat_dim = array_ops.gather(size, non_neg_concat_dim)
if size_concat_dim.dtype != grad.indices.dtype:
size_concat_dim = math_ops.cast(
size_concat_dim, dtype=grad.indices.dtype)
end = start + size_concat_dim
# Compute the 1-D Tensor of indices relevant for this input.
indices_to_select = array_ops.squeeze(
array_ops.where(
math_ops.logical_and(grad.indices >= start,
grad.indices < end)),
squeeze_dims=[1])
new_indices = array_ops.gather(grad.indices, indices_to_select) - start
new_values = array_ops.gather(grad.values, indices_to_select)
out_grads.append(ops.IndexedSlices(new_values, new_indices, size))
start = end
else:
raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))
return (out_grads + [None]
if end_value_index <= dim_index else [None] + out_grads)
@ops.RegisterGradient("Concat")
def _ConcatGrad(op, grad):
return _ConcatGradHelper(
op,
grad,
start_value_index=1,
end_value_index=len(op.inputs),
dim_index=0)
@ops.RegisterGradient("ConcatV2")
def _ConcatGradV2(op, grad):
return _ConcatGradHelper(
op, grad, start_value_index=0, end_value_index=-1, dim_index=-1)
ops.NotDifferentiable("ConcatOffset")
@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
"""Gradient for Slice op."""
# Create an Nx2 padding where the first column represents how many
# zeros are to be prepended for each dimension, and the second
# column indicates how many zeros are appended.
#
# The number of zeros to append is the shape of the input
# elementwise-subtracted by both the begin vector and sizes vector.
#
# Some more reshaping is needed to assemble this tensor with the
# right dimensions.
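  #
  # A concrete (illustrative) case: an input of shape [4, 5], begin = [1, 2]
  # and an output slice of shape [2, 2] give
  #   before_pad = [[1], [2]],  after_pad = [[1], [1]]
  #   paddings   = [[1, 1], [2, 1]]
  # so padding the [2, 2] gradient restores the original [4, 5] shape.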
input_vec = op.inputs[0]
begin_vec = op.inputs[1]
input_rank = array_ops.rank(input_vec)
slice_size = array_ops.shape(op.outputs[0])
shape = array_ops.stack([input_rank, 1])
before_pad = array_ops.reshape(begin_vec, shape)
after_pad = array_ops.reshape(
array_ops.shape(input_vec) - slice_size - begin_vec, shape)
paddings = array_ops.concat([before_pad, after_pad], 1)
return array_ops.pad(grad, paddings), None, None
@ops.RegisterGradient("StridedSlice")
def _StridedSliceGrad(op, grad):
"""Gradient for StridedSlice op."""
x = array_ops.shape(op.inputs[0])
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return array_ops.strided_slice_grad(
x,
begin,
end,
strides,
grad,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None
@ops.RegisterGradient("StridedSliceGrad")
def _StridedSliceGradGrad(op, grad):
"""Gradient for StridedSliceGrad op."""
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return None, None, None, None, array_ops.strided_slice(
grad,
begin,
end,
strides,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask"))
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
return None, array_ops.concat(list(grads), op.inputs[0])
@ops.RegisterGradient("SplitV")
def _SplitVGrad(op, *grads):
returnval = array_ops.concat(list(grads), op.inputs[2])
returnval = [returnval] + [
None,
] * (
len(op.inputs) - 1)
return returnval
ops.NotDifferentiable("Const")
@ops.RegisterGradient("Diag")
def _DiagGrad(_, grad):
return array_ops.diag_part(grad)
@ops.RegisterGradient("DiagPart")
def _DiagPartGrad(_, grad):
return array_ops.diag(grad)
@ops.RegisterGradient("MatrixDiag")
def _MatrixDiagGrad(_, grad):
return array_ops.matrix_diag_part(grad)
@ops.RegisterGradient("MatrixDiagPart")
def _MatrixDiagPartGrad(op, grad):
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]:
return array_ops.matrix_diag(grad)
else:
return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad)
@ops.RegisterGradient("MatrixSetDiag")
def _MatrixSetDiagGrad(op, grad):
"""Gradient for MatrixSetDiag."""
input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
diag_shape = op.inputs[1].get_shape()
batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
matrix_shape = input_shape[-2:]
if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
else:
with ops.colocate_with(grad):
grad_shape = array_ops.shape(grad)
grad_rank = array_ops.rank(grad)
batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
min_dim = math_ops.reduce_min(matrix_shape)
diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
grad_input = array_ops.matrix_set_diag(grad,
array_ops.zeros(
diag_shape, dtype=grad.dtype))
grad_diag = array_ops.matrix_diag_part(grad)
return (grad_input, grad_diag)
@ops.RegisterGradient("MatrixBandPart")
def _MatrixBandPartGrad(op, grad):
num_lower = op.inputs[1]
num_upper = op.inputs[2]
return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None)
# Edit Distance has no gradient (but can be used to eval seq2seq or CTC).
ops.NotDifferentiable("EditDistance")
@ops.RegisterGradient("Fill")
def _FillGrad(_, grad):
return None, math_ops.reduce_sum(grad)
ops.NotDifferentiable("ZerosLike")
ops.NotDifferentiable("OnesLike")
@ops.RegisterGradient("PreventGradient")
def _PreventGradientGrad(op, _):
raise LookupError(
"Gradient explicitly disabled. Reason: %s" % op.get_attr("message"))
@ops.RegisterGradient("Gather")
def _GatherGrad(op, grad):
"""Gradient for Gather op."""
# params can be large, so colocate the shape calculation with it.
#
# params can be very large for sparse model, array_ops.shape raises
# exception on the Windows platform when any dimension is larger than
# int32. params_shape is not used in optimizer apply_sparse gradients,
# so it's fine to convert it back to int32 regardless of truncation.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
size = array_ops.expand_dims(array_ops.size(indices), 0)
values_shape = array_ops.concat([size, params_shape[1:]], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, size)
return [ops.IndexedSlices(values, indices, params_shape), None]
@ops.RegisterGradient("GatherV2")
def _GatherV2Grad(op, grad):
"""Gradient for GatherV2 op."""
# params can be large, so colocate the shape calculation with it.
#
# params can be very large for sparse model, array_ops.shape raises
# exception on the Windows platform when any dimension is larger than
# int32. params_shape is not used in optimizer apply_sparse gradients,
# so it's fine to convert it back to int32 regardless of truncation.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
indices = op.inputs[1]
indices_size = array_ops.expand_dims(array_ops.size(indices), 0)
axis = op.inputs[2]
axis_static = tensor_util.constant_value(axis)
# For axis 0 gathers, build an appropriately shaped IndexedSlices.
if axis_static == 0:
if context.in_eager_mode():
params_tail_shape = params_shape.cpu()[1:]
else:
params_tail_shape = params_shape[1:]
values_shape = array_ops.concat([indices_size, params_tail_shape], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, indices_size)
return [ops.IndexedSlices(values, indices, params_shape), None, None]
outer_shape = params_shape[:axis]
outer_dims = array_ops.size(outer_shape)
inner_shape = params_shape[axis:][1:]
inner_dims = array_ops.size(inner_shape)
outer_axes_indices = math_ops.range(outer_dims)
inner_axes_indices = math_ops.range(outer_dims + 1,
outer_dims + 1 + inner_dims)
values_shape = array_ops.concat([outer_shape, indices_size, inner_shape], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, indices_size)
# We need to sum up every slice `values[..., i, ....]` corresponding to
# `params[..., indices[i], ...]`. Since `unsorted_segment_sum` does not
# support an axis parameter, we transpose the gather dimension to the front,
# then use `unsorted_segment_sum` to build a
# [gather_axis, outer_axes, inner_axes] tensor with all the gradients
# affecting each index in `gather_axis` summed up.
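  #
  # Illustrative shapes (worked out from the code above): for params of shape
  # [2, 3, 4], axis == 1 and indices of shape [5]:
  #   values_shape   = [2, 5, 4]
  #   transpose_dims = [1, 0, 2]  -> values_transpose has shape [5, 2, 4]
  #   unsorted_segment_sum over 3 segments -> params_grad of shape [3, 2, 4]
  #   inverse transpose [1, 0, 2] -> final gradient of shape [2, 3, 4]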
transpose_dims = array_ops.concat(
[[outer_dims], outer_axes_indices, inner_axes_indices], 0)
values_transpose = array_ops.transpose(values, transpose_dims)
num_segments = params_shape[axis]
params_grad = math_ops.unsorted_segment_sum(values_transpose, indices,
num_segments)
# Inverts the above transpose by moving dimension 0 back to its original
# position.
invert_transpose_dims = array_ops.concat(
[outer_axes_indices + 1, [0], inner_axes_indices], 0)
params_grad = array_ops.transpose(params_grad, invert_transpose_dims)
return [params_grad, None, None]
@ops.RegisterGradient("GatherNd")
def _GatherNdGrad(op, grad):
ref = op.inputs[0]
indices = op.inputs[1]
ref_shape = array_ops.shape(ref, out_type=indices.dtype)
if indices.shape.ndims == 2 and indices.shape[-1].value == 1:
ref_grad = ops.IndexedSlices(grad, array_ops.squeeze(indices, axis=-1),
ref_shape)
else:
ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
return [ref_grad, None]
@ops.RegisterGradient("CheckNumerics")
def _CheckNumericsGrad(_, grad):
"""Gradient for check_numerics op."""
return array_ops.check_numerics(
grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
@ops.RegisterGradient("PlaceholderWithDefault")
@ops.RegisterGradient("Identity")
def _IdGrad(_, grad):
return grad
@ops.RegisterGradient("RefIdentity")
def _RefIdGrad(_, grad):
return grad
@ops.RegisterGradient("IdentityN")
def _IdNGrad(_, *grad):
return grad
ops.NotDifferentiable("StopGradient")
@ops.RegisterGradient("Reshape")
def _ReshapeGrad(op, grad):
return [array_ops.reshape(grad, array_ops.shape(op.inputs[0])), None]
ops.NotDifferentiable("InvertPermutation")
def _ReshapeToInput(op, grad):
"""Reshapes the gradient to the shape of the original input."""
return array_ops.reshape(grad, array_ops.shape(op.inputs[0]))
@ops.RegisterGradient("ExpandDims")
def _ExpandDimsGrad(op, grad):
return [_ReshapeToInput(op, grad), None]
@ops.RegisterGradient("Squeeze")
def _SqueezeGrad(op, grad):
return _ReshapeToInput(op, grad)
@ops.RegisterGradient("Transpose")
def _TransposeGrad(op, grad):
"""Returns unshuffle(grad)."""
p = op.inputs[1]
return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None]
@ops.RegisterGradient("ConjugateTranspose")
def _ConjugateTransposeGrad(op, grad):
"""Returns conj(unshuffle(grad))."""
p = op.inputs[1]
return [
array_ops.transpose(
grad, array_ops.invert_permutation(p), conjugate=True), None
]
ops.NotDifferentiable("Shape")
ops.NotDifferentiable("ShapeN")
ops.NotDifferentiable("Rank")
ops.NotDifferentiable("Size")
@ops.RegisterGradient("Tile")
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
assert isinstance(grad, ops.Tensor)
input_shape = array_ops.shape(op.inputs[0])
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(
array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
if context.in_graph_mode():
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
ops.NotDifferentiable("BroadcastGradientArgs")
def _PadGrad(op, grad):
"""Gradient for Pad."""
# Pad introduces values around the original tensor, so the gradient function
  # slices the original shape out of the gradient.
x = op.inputs[0]
a = op.inputs[1] # [Rank(x), 2]
# Takes a slice of a. The 1st column. [Rank(x), 1].
pad_before = array_ops.slice(a, [0, 0],
array_ops.stack([array_ops.rank(x), 1]))
# Make it a 1-D tensor.
begin = array_ops.reshape(pad_before, [-1])
sizes = array_ops.shape(x)
x_grad = array_ops.slice(grad, begin, sizes)
if len(op.inputs) == 3:
return x_grad, None, None
else:
return x_grad, None
ops.RegisterGradient("Pad")(_PadGrad)
ops.RegisterGradient("PadV2")(_PadGrad)
# ReverseSequence is just a permutation. The gradient permutes back.
@ops.RegisterGradient("ReverseSequence")
def _ReverseSequenceGrad(op, grad):
seq_lengths = op.inputs[1]
return [
array_ops.reverse_sequence(
grad,
batch_axis=op.get_attr("batch_dim"),
seq_axis=op.get_attr("seq_dim"),
seq_lengths=seq_lengths), None
]
@ops.RegisterGradient("Reverse")
def _ReverseGrad(op, grad):
reverse_dims = op.inputs[1]
# pylint: disable=protected-access
return gen_array_ops._reverse(grad, reverse_dims), None
# pylint: enable=protected-access
@ops.RegisterGradient("ReverseV2")
def _ReverseV2Grad(op, grad):
axis = op.inputs[1]
return array_ops.reverse_v2(grad, axis), None
@ops.RegisterGradient("SpaceToBatch")
def _SpaceToBatchGrad(op, grad):
# Its gradient is the opposite op: BatchToSpace.
block_size = op.get_attr("block_size")
return [
array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size), None
]
@ops.RegisterGradient("SpaceToBatchND")
def _SpaceToBatchNDGrad(op, grad):
# Its gradient is the opposite op: BatchToSpaceND.
return [
array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]), None, None
]
@ops.RegisterGradient("BatchToSpace")
def _BatchToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatch.
block_size = op.get_attr("block_size")
return [
array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size), None
]
@ops.RegisterGradient("BatchToSpaceND")
def _BatchToSpaceNDGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatchND.
return [
array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]), None, None
]
@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
data_format = op.get_attr("data_format")
if data_format == "NCHW_VECT_C":
raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. "
"NCHW_VECT_C requires qint8 data type.")
return array_ops.depth_to_space(grad, block_size, data_format=data_format)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
data_format = op.get_attr("data_format")
if data_format == "NCHW_VECT_C":
raise ValueError("Cannot compute DepthToSpace gradient with NCHW_VECT_C. "
"NCHW_VECT_C requires qint8 data type.")
return array_ops.space_to_depth(grad, block_size, data_format=data_format)
ops.NotDifferentiable("OneHot")
@ops.RegisterGradient("MirrorPad")
def _MirrorPadGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("MirrorPadGrad")
def _MirrorPadGradGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("QuantizeAndDequantize")
def _QuantizeAndDequantizeGrad(_, grad):
return grad
@ops.RegisterGradient("QuantizeAndDequantizeV2")
def _QuantizeAndDequantizeV2Grad(_, grad):
return [grad, None, None]
@ops.RegisterGradient("QuantizeAndDequantizeV3")
def _QuantizeAndDequantizeV3Grad(_, grad):
# Only propagate the gradient for the unquantized input.
return [grad, None, None, None]
@ops.RegisterGradient("ExtractImagePatches")
def _ExtractImagePatchesGrad(op, grad):
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].get_shape()
]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
_, rows_out, cols_out, _ = [dim.value for dim in op.outputs[0].get_shape()]
_, ksize_r, ksize_c, _ = op.get_attr("ksizes")
_, stride_r, stride_h, _ = op.get_attr("strides")
_, rate_r, rate_c, _ = op.get_attr("rates")
padding = op.get_attr("padding")
ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)
ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)
if padding == b"SAME":
rows_out = int(ceil(rows_in / stride_r))
cols_out = int(ceil(cols_in / stride_h))
pad_rows = ((rows_out - 1) * stride_r + ksize_r_eff - rows_in) // 2
pad_cols = ((cols_out - 1) * stride_h + ksize_c_eff - cols_in) // 2
elif padding == b"VALID":
rows_out = int(ceil((rows_in - ksize_r_eff + 1) / stride_r))
cols_out = int(ceil((cols_in - ksize_c_eff + 1) / stride_h))
pad_rows = (rows_out - 1) * stride_r + ksize_r_eff - rows_in
pad_cols = (cols_out - 1) * stride_h + ksize_c_eff - cols_in
pad_rows, pad_cols = max(0, pad_rows), max(0, pad_cols)
grad_expanded = array_ops.transpose(
array_ops.reshape(
grad, (batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
row_steps = range(0, rows_out * stride_r, stride_r)
col_steps = range(0, cols_out * stride_h, stride_h)
idx = []
for i in range(rows_out):
for j in range(cols_out):
r_low, c_low = row_steps[i] - pad_rows, col_steps[j] - pad_cols
r_high, c_high = r_low + ksize_r_eff, c_low + ksize_c_eff
idx.extend([(r * (cols_in) + c, i * (cols_out * ksize_r * ksize_c) + j *
(ksize_r * ksize_c) + ri * (ksize_c) + ci)
for (ri, r) in enumerate(range(r_low, r_high, rate_r))
for (ci, c) in enumerate(range(c_low, c_high, rate_c))
if 0 <= r and r < rows_in and 0 <= c and c < cols_in])
sp_shape = (rows_in * cols_in, rows_out * cols_out * ksize_r * ksize_c)
sp_mat = sparse_tensor.SparseTensor(
array_ops.constant(idx, dtype=ops.dtypes.int64),
array_ops.ones((len(idx),), dtype=ops.dtypes.float32), sp_shape)
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
@ops.RegisterGradient("ScatterNd")
def _ScatterNdGrad(op, grad):
indices = op.inputs[0]
updates_grad = array_ops.gather_nd(grad, indices)
return [None, updates_grad, None]
@ops.RegisterGradient("ScatterNdNonAliasingAdd")
def _ScatterNdNonAliasingAddGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
return [grad, None, updates_grad]
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del host command."""
from datetime import datetime
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from notificationtest import VerifyNotificationsMixin
from machinetest import MachineTestMixin
class TestDelHost(VerifyNotificationsMixin, MachineTestMixin,
TestBrokerCommand):
def test_100_del_unittest02(self):
self.dsdb_expect_delete(self.net["unknown0"].usable[11])
command = "del host --hostname unittest02.one-nyp.ms.com"
self.statustest(command.split(" "))
self.dsdb_verify()
self.verify_buildfiles("unittest", "unittest02.one-nyp.ms.com",
want_exist=False, command="del_host")
def test_105_verify_del_unittest02(self):
command = "show host --hostname unittest02.one-nyp.ms.com"
self.notfoundtest(command.split(" "))
def test_105_verify_service_plenary(self):
command = ["cat", "--service", "utsvc", "--instance", "utsi2",
"--server"]
out = self.commandtest(command)
self.matchclean(out, "unittest02", command)
def test_110_del_unittest00(self):
self.dsdb_expect_delete(self.net["unknown0"].usable[2])
command = "del host --hostname unittest00.one-nyp.ms.com"
self.statustest(command.split(" "))
self.dsdb_verify()
def test_115_verify_del_unittest00(self):
command = "show host --hostname unittest00.one-nyp.ms.com"
self.notfoundtest(command.split(" "))
def test_115_verify_del_unittest00_dns(self):
command = "show address --fqdn unittest00.one-nyp.ms.com"
self.notfoundtest(command.split(" "))
def test_115_verify_ut3c1n3(self):
command = "show machine --machine ut3c1n3"
out = self.commandtest(command.split(" "))
# The primary name must be gone
self.matchclean(out, "Primary Name:", command)
self.matchclean(out, "unittest00.one-nyp.ms.com", command)
# No interface should have the IP address
self.matchclean(out, "Auxiliary:", command)
self.matchclean(out, "Provides:", command)
self.matchclean(out, str(self.net["unknown0"].usable[2]), command)
# unittest01.one-nyp.ms.com gets deleted in test_del_windows_host.
def test_120_del_aurora_with_node(self):
command = "del host --hostname %s.ms.com" % self.aurora_with_node
err = self.statustest(command.split(" "))
self.matchoutput(err,
"WARNING: removing host %s.ms.com from AQDB "
"and *not* changing DSDB." % self.aurora_with_node,
command)
def test_121_verify_del_aurora_with_node(self):
command = "show host --hostname %s.ms.com" % self.aurora_with_node
self.notfoundtest(command.split(" "))
def test_125_del_aurora_without_node(self):
command = "del host --hostname %s.ms.com" % self.aurora_without_node
err = self.statustest(command.split(" "))
self.matchoutput(err,
"WARNING: removing host %s.ms.com from AQDB "
"and *not* changing DSDB." % self.aurora_without_node,
command)
def test_126_verify_del_aurora_without_node(self):
command = "show host --hostname %s.ms.com" % self.aurora_without_node
self.notfoundtest(command.split(" "))
def test_130_del_aurora_without_rack(self):
command = "del host --hostname %s.ms.com" % self.aurora_without_rack
err = self.statustest(command.split(" "))
self.matchoutput(err,
"WARNING: removing host %s.ms.com from AQDB "
"and *not* changing DSDB." % self.aurora_without_rack,
command)
def test_131_verify_del_aurora_without_rack(self):
command = "show host --hostname %s.ms.com" % self.aurora_without_rack
self.notfoundtest(command.split(" "))
def test_140_del_nyaqd1(self):
command = "del host --hostname nyaqd1.ms.com"
self.statustest(command.split(" "))
def test_140_verify_del_nyaqd1(self):
command = "show host --hostname nyaqd1.ms.com"
self.notfoundtest(command.split(" "))
def test_150_del_aurora_default_os(self):
command = "del host --hostname test-aurora-default-os.ms.com --quiet"
self.noouttest(command.split(" "))
self.dsdb_verify(empty=True)
def test_151_verify_del_aurora_default_os(self):
command = "show host --hostname test-aurora-default-os.ms.com"
self.notfoundtest(command.split(" "))
def test_155_del_windows_default_os(self):
ip = self.net["tor_net_0"].usable[5]
self.dsdb_expect_delete(ip)
command = "del host --hostname test-windows-default-os.msad.ms.com --quiet"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_156_verify_del_windows_default_os(self):
command = "show host --hostname test-windows-default-os.msad.ms.com"
self.notfoundtest(command.split(" "))
def test_160_del_jack(self):
self.dsdb_expect_delete(self.net["unknown0"].usable[17])
command = "del host --hostname jack.cards.example.com"
self.statustest(command.split(" "))
self.dsdb_verify()
def test_165_verify_del_jack(self):
command = "show host --hostname jack.cards.example.ms.com"
self.notfoundtest(command.split(" "))
def test_170_unbind_notify(self):
hostname = self.config.get("unittest", "hostname")
command = ["unbind", "server", "--service", "utnotify",
"--instance", "localhost", "--hostname", hostname]
err = self.statustest(command)
self.matchoutput(err,
"Warning: Host %s is missing the following required "
"services" % hostname,
command)
def test_171_del_notify(self):
hostname = self.config.get("unittest", "hostname")
self.dsdb_expect_delete("127.0.0.1")
basetime = datetime.now()
command = ["del", "host", "--hostname", hostname]
self.statustest(command)
self.wait_notification(basetime, 0)
self.dsdb_verify()
def test_300_del_afsbynet(self):
self.delete_host("afs-by-net.aqd-unittest.ms.com",
self.net["netsvcmap"].usable[0], "ut3c5n11")
def test_300_del_netmappers(self):
self.delete_host("netmap-pers.aqd-unittest.ms.com",
self.net["netperssvcmap"].usable[0], "ut3c5n12")
def test_300_del_unittest12(self):
self.delete_host("unittest12.aqd-unittest.ms.com",
self.net["unknown0"].usable[7], "ut3s01p1")
def test_300_del_unittest15(self):
self.delete_host("unittest15.aqd-unittest.ms.com",
self.net["tor_net_0"].usable[1], "ut8s02p1")
def test_300_del_unittest16(self):
self.delete_host("unittest16.aqd-unittest.ms.com",
self.net["tor_net_0"].usable[2], "ut8s02p2")
def test_300_del_unittest17(self):
self.delete_host("unittest17.aqd-unittest.ms.com",
self.net["tor_net_0"].usable[3], "ut8s02p3",
manager_ip=self.net["ut8_oob"].usable[3])
def test_300_del_unittest18(self):
self.delete_host("unittest18.aqd-unittest.ms.com",
self.net["unknown0"].usable[18], "ut3c1n8")
def test_300_del_unittest20(self):
# The transits are deleted in test_del_interface_address
self.delete_host("unittest20.aqd-unittest.ms.com",
self.net["zebra_vip"].usable[2], "ut3c5n2")
def test_300_del_unittest21(self):
self.delete_host("unittest21.aqd-unittest.ms.com",
self.net["zebra_eth0"].usable[1], "ut3c5n3")
def test_300_del_unittest22(self):
self.delete_host("unittest22.aqd-unittest.ms.com",
self.net["zebra_eth0"].usable[2], "ut3c5n4")
def test_300_del_unittest23(self):
self.delete_host("unittest23.aqd-unittest.ms.com",
self.net["vpls"].usable[1], "ut3c5n5")
def test_300_del_unittest24(self):
self.check_plenary_exists("machine", "americas", "np", "np3", "np3c5n5")
self.delete_host("unittest24.aqd-unittest.ms.com",
self.net["vpls"].usable[2], "np3c5n5")
self.check_plenary_gone("machine", "americas", "np", "np3", "np3c5n5")
def test_300_del_unittest25(self):
self.delete_host("unittest25.aqd-unittest.ms.com",
self.net["unknown0"].usable[20], "ut3c5n7")
def test_300_del_unittest26(self):
self.delete_host("unittest26.aqd-unittest.ms.com",
self.net["unknown0"].usable[23], "ut3c5n8")
def test_300_del_filer(self):
self.delete_host("filer1.ms.com", self.net["vm_storage_net"].usable[25],
"filer1")
def test_300_del_f5test(self):
self.delete_host("f5test.aqd-unittest.ms.com", self.net["f5test"].ip,
"f5test")
def test_300_del_utinfra(self):
eth0_ip = self.net["unknown0"].usable[33]
eth1_ip = self.net["unknown1"].usable[34]
ip = self.net["zebra_vip"].usable[3]
self.delete_host("infra1.aqd-unittest.ms.com", ip, "ut3c5n13",
eth0_ip=eth0_ip, eth1_ip=eth1_ip)
def test_300_del_npinfra(self):
eth0_ip = self.net["unknown0"].usable[35]
eth1_ip = self.net["unknown1"].usable[36]
ip = self.net["zebra_vip"].usable[4]
self.delete_host("infra1.one-nyp.ms.com", ip, "np3c5n13",
eth0_ip=eth0_ip, eth1_ip=eth1_ip)
def test_300_del_hp_rack_hosts(self):
servers = 0
net = self.net["hp_eth0"]
mgmt_net = self.net["hp_mgmt"]
for i in range(51, 100):
port = i - 50
if servers < 10:
servers += 1
hostname = "server%d.aqd-unittest.ms.com" % servers
else:
hostname = "aquilon%d.aqd-unittest.ms.com" % i
machine = "ut9s03p%d" % port
self.delete_host(hostname, net.usable[port], machine,
manager_ip=mgmt_net.usable[port])
def test_300_del_ut10_hosts(self):
net = self.net["ut10_eth0"]
mgmt_net = self.net["ut10_oob"]
for i in range(101, 111):
port = i - 100
hostname = "evh%d.aqd-unittest.ms.com" % port
machine = "ut10s04p%d" % port
self.delete_host(hostname, net.usable[port], machine,
manager_ip=mgmt_net.usable[port])
def test_300_del_10gig_rack_hosts(self):
net = self.net["vmotion_net"]
for i in range(1, 25):
hostname = "evh%d.aqd-unittest.ms.com" % (i + 50)
if i < 13:
port = i
machine = "ut11s01p%d" % i
mgmt_net = self.net["ut11_oob"]
else:
port = i - 12
machine = "ut12s02p%d" % (i - 12)
mgmt_net = self.net["ut12_oob"]
self.delete_host(hostname, net.usable[i + 1], machine,
                             manager_ip=mgmt_net.usable[port])
def test_300_del_utmc8_hosts(self):
self.delete_host("evh80.aqd-unittest.ms.com",
self.net["ut14_net"].usable[0], "ut14s1p0",
eth1_ip=self.net["vm_storage_net"].usable[26],
manager_ip=self.net["ut14_oob"].usable[0])
self.delete_host("evh81.aqd-unittest.ms.com",
self.net["ut14_net"].usable[1], "ut14s1p1",
eth1_ip=self.net["vm_storage_net"].usable[27],
manager_ip=self.net["ut14_oob"].usable[1])
def test_300_del_utmc9_hosts(self):
self.delete_host("evh82.aqd-unittest.ms.com",
self.net["ut14_net"].usable[2], "ut14s1p2",
manager_ip=self.net["ut14_oob"].usable[2])
self.delete_host("evh83.aqd-unittest.ms.com",
self.net["ut14_net"].usable[3], "ut14s1p3",
manager_ip=self.net["ut14_oob"].usable[3])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelHost)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
import test_support
class PossibleForwardDeclarationRuleTest(test_support.TestBase):
def setUp(self):
self.set_default_rules_selection(['PossibleForwardDeclarationRule'])
self.set_default_error_id('possible forward declaration')
self.set_default_error_severity('information')
def assert_result_with_header_files(self,
main_file_lines_without_includes,
project_header_lines = [],
indirect_project_header_lines = [],
system_header_lines = [],
expected_errors = []):
main_file = 'src.cpp'
project_header_file = 'project/header.h'
indirect_project_header_file = 'project/indirect_header.h'
system_header_file = 'system/header.h'
project_header_lines_with_includes = [
'#include <{0}>'.format(indirect_project_header_file)
] + project_header_lines
main_file_lines_with_includes = [
'#include <{0}>'.format(system_header_file),
'#include "{0}"'.format(project_header_file)
] + main_file_lines_without_includes
source_files_data = {
main_file: main_file_lines_with_includes,
project_header_file: project_header_lines_with_includes,
indirect_project_header_file: indirect_project_header_lines,
system_header_file: system_header_lines
}
self.assert_colobot_lint_result_with_custom_files(
source_files_data = source_files_data,
compilation_database_files = [main_file],
target_files = [main_file],
additional_compile_flags = ['-I$TEMP_DIR'],
additional_options = ['-project-local-include-path', '$TEMP_DIR/project'],
expected_errors = expected_errors)
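    # The helper above lays out the (temporary, assumed) files roughly as:
    #
    #   src.cpp                  -> #include <system/header.h>
    #                               #include "project/header.h"
    #   project/header.h         -> #include <project/indirect_header.h>
    #
    # Only src.cpp appears in the compilation database and in target_files, so
    # expected_errors refer to diagnostics reported for that file alone.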
def test_forward_declaration_possible_with_reference_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo&);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [
{
'msg': "Class 'Foo' can be forward declared instead of #included",
'line': '3'
}
])
def test_forward_declaration_possible_with_pointer_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [
{
'msg': "Class 'Foo' can be forward declared instead of #included",
'line': '3'
}
])
def test_forward_declaration_impossible_with_whole_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc1(const Foo&);',
'void FooFunc2(Foo);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_return_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc1(const Foo&);',
'Foo FooFunc2();'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_type_used_as_field(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'class Bar',
'{',
' Foo foo;',
'};',
'void FooFunc(Foo&);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_template_class(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(const FooClassTemplate<int>&);'
],
project_header_lines = [
'template<typename T> class FooClassTemplate {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_template_argument_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'template<typename T> class FooClassTemplate { void Foo(T); };',
'void FooFunc(FooClassTemplate<Foo>&);',
'void FooFunc(Foo&);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_return_template_argument_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'template<typename T> class FooClassTemplate { void Foo(T); };',
'FooClassTemplate<Foo> FooFunc();',
'void FooFunc(Foo&);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_elaborated_template_argument_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'namespace NS',
'{',
' template<typename T> class FooClassTemplate { void Foo(T); };',
'}',
'void FooFunc(NS::FooClassTemplate<Foo>&);',
'void FooFunc(Foo&);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_array_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'class Bar',
'{',
' Foo foo[4];',
'};',
'void FooFunc(Foo&);'
],
project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_old_style_enum(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(const Foo&);'
],
project_header_lines = [
'enum Foo {};'
],
expected_errors = [])
def test_forward_declaration_impossible_with_typedef(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(FooConstRefAlias);'
],
project_header_lines = [
'class Foo {};',
'typedef const Foo& FooConstRefAlias;'
],
expected_errors = [])
def test_forward_declaration_possible_with_enum_class(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(const Foo&);'
],
project_header_lines = [
'enum class Foo : int {};'
],
expected_errors = [
{
'msg': "Enum class 'Foo' can be forward declared instead of #included",
'line': '3'
}
])
def test_ignore_already_forward_declared_type_in_project_header(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);'
],
project_header_lines = [
'class Foo;'
],
expected_errors = [])
def test_ignore_types_from_system_header_reference_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo&);'
],
system_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_ignore_types_from_system_header_pointer_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);'
],
system_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_ignore_types_from_indirect_project_header_reference_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo&);'
],
indirect_project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_ignore_types_from_indirect_project_header_pointer_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);'
],
indirect_project_header_lines = [
'class Foo {};'
],
expected_errors = [])
def test_blacklist_project_header_with_types_which_cannot_be_forward_declared_whole_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);',
'void BarFunc(Bar);',
],
project_header_lines = [
'class Foo {};',
'class Bar {};'
],
expected_errors = [])
def test_blacklist_project_header_with_types_which_cannot_be_forward_declared_base_class(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);',
'class Derived : public Base {};'
],
project_header_lines = [
'class Foo {};',
'class Base {};'
],
expected_errors = [])
def test_blacklist_project_header_with_types_which_cannot_be_forward_declared_typedef(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);',
'void BarFunc(BarAlias);',
],
project_header_lines = [
'class Foo {};',
'class Bar {};',
'typedef Bar BarAlias;'
],
expected_errors = [])
def test_blacklist_project_header_with_types_which_cannot_be_forward_declared_using_template_alias(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);',
'void BarFunc(BarAlias&);'
],
project_header_lines = [
'class Foo {};',
'class Bar {};',
'using BarAlias = ClassTemplate<Bar>;'
],
system_header_lines = [
'template<typename T>',
'class ClassTemplate {};'
],
expected_errors = [])
def test_blacklist_project_header_with_types_which_cannot_be_forward_declared_use_of_const(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);',
'const int ANOTHER_GLOBAL = GLOBAL + 1;',
],
project_header_lines = [
'class Foo {};',
'const int GLOBAL = 1;'
],
expected_errors = [])
def test_blacklist_project_header_with_types_which_cannot_be_forward_declared_old_style_enum(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);',
'void BarFunc(Bar&);'
],
project_header_lines = [
'class Foo {};',
'enum Bar {};'
],
expected_errors = [])
def test_blacklist_project_header_with_types_which_cannot_be_forward_declared_template_class(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);',
'void BarFunc(Bar<int>&);'
],
project_header_lines = [
'class Foo {};',
'template<typename T>',
'class Bar {};'
],
expected_errors = [])
def test_dont_blacklist_project_header_if_it_defines_not_used_non_forward_declarable_type(self):
self.assert_result_with_header_files(
main_file_lines_without_includes = [
'void FooFunc(Foo*);'
],
project_header_lines = [
'class Foo {};',
'class Bar {};',
'typedef Bar BarAlias;'
],
expected_errors = [
{
'msg': "Class 'Foo' can be forward declared instead of #included",
'line': '3'
}
])
|
|
__author__ = 'Ostico <[email protected]>'
import unittest
import os
os.environ['DEBUG'] = "0"
os.environ['DEBUG_VERBOSE'] = "0"
# if os.path.realpath('../') not in sys.path:
# sys.path.insert(0, os.path.realpath('../'))
#
# if os.path.realpath('.') not in sys.path:
# sys.path.insert(0, os.path.realpath('.'))
import pyorient
from pyorient import OrientRecord
class CommandTestCase(unittest.TestCase):
""" Command Test Case """
def test_new_client_interface(self):
client = pyorient.OrientDB("localhost", 2424)
session_id = client.connect("root", "root")
db_name = "GratefulDeadConcerts"
cluster_info = client.db_open(db_name, "admin", "admin")
assert cluster_info != []
result = client.query("select from followed_by", 10, '*:0')
assert True
assert result != []
assert isinstance(result[0], OrientRecord)
assert len(result) == 10
assert result[0]._in != 0
assert result[0]._out != 0
assert result[0].weight == 1
def _callback(item):
assert True
assert item != []
assert isinstance(item, OrientRecord)
result = client.query_async("select from followed_by", 10, '*:0', _callback)
assert True
assert result is None
res = client.record_load("#25:0", "*:-1", _callback) # Updated for newer version of GratefulDead DB
assert res._rid == "#25:0"
assert res._class == 'followed_by'
assert res._in != 0
assert res._out != 0
session_id = client.connect("root", "root")
# TEST COMMANDS
db_name = 'test_commands'
exists = client.db_exists(db_name, pyorient.STORAGE_TYPE_MEMORY)
print("Before %r" % exists)
try:
client.db_drop(db_name)
assert True
except pyorient.PyOrientStorageException as e:
print(str(e))
finally:
client.db_create(db_name, pyorient.DB_TYPE_GRAPH,
pyorient.STORAGE_TYPE_MEMORY)
cluster_info = client.db_open(
db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
)
cluster_id = client.command("create class my_class extends V")[0]
assert cluster_id != 0
rec = {'@my_class': {'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare'}}
rec_position = client.record_create(cluster_id, rec)
print("New Rec Position: %s" % rec_position._rid)
assert rec_position._rid is not None
assert rec_position._rid != 0
assert rec_position._rid != -1
res = client.record_load(rec_position._rid, "*:0")
assert res._rid == rec_position._rid
# assert res._class == 'my_class'
assert res.alloggio == 'casa'
assert res.lavoro == 'ufficio'
assert res.vacanza == 'mare'
deletion = client.record_delete(cluster_id, rec_position._rid)
assert deletion is True
result = client.query("select from my_class", 10, '*:0')
assert True
assert result == []
def test_cluster_add_drop_recount(self):
client = pyorient.OrientDB("localhost", 2424) # TEST COMMANDS
client.connect("root", "root")
db_name = 'test_commands'
try:
client.db_drop(db_name)
assert True
except pyorient.PyOrientStorageException as e:
print(str(e))
finally:
client.db_create(db_name, pyorient.DB_TYPE_GRAPH,
pyorient.STORAGE_TYPE_MEMORY)
cluster_info = client.db_open(
db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
)
# CLUSTERS
new_cluster_id = client.data_cluster_add(
'my_cluster_1234567', pyorient.CLUSTER_TYPE_PHYSICAL
)
assert new_cluster_id > 0
new_cluster_list = client.db_reload()
new_cluster_list.sort(key=lambda cluster: cluster.id)
_list = []
for cluster in new_cluster_list:
print("Cluster Name: %s, ID: %u " % (cluster.name, cluster.id))
value = client.data_cluster_data_range(cluster.id)
print("Value: %s " % value)
_list.append(cluster.id)
assert value != []
assert value is not None
# check for new cluster in database
try:
_list.index(new_cluster_id)
print("New cluster %r found in reload." % new_cluster_id)
assert True
except ValueError:
assert False
# delete the new cluster TODO: broken test
print("Drop Cluster ID: %r" % new_cluster_id)
drop_cluster = client.data_cluster_drop(new_cluster_id)
assert drop_cluster is True
def test_transaction_new_iface(self):
client = pyorient.OrientDB('localhost', 2424)
client.connect("root", "root")
db_name = 'test_transactions'
try:
client.db_drop(db_name)
except pyorient.PyOrientStorageException as e:
print(str(e))
finally:
client.db_create(db_name, pyorient.DB_TYPE_GRAPH, pyorient.STORAGE_TYPE_MEMORY)
cluster_info = client.db_open(
db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
)
#######################################
cluster_id = client.command("create class Test extends V")[0]
# execute real create
rec = {'alloggio': 'baita', 'lavoro': 'no', 'vacanza': 'lago'}
rec_position = client.record_create(cluster_id, rec)
# START TRANSACTION
print("debug breakpoint line")
tx = client.tx_commit()
tx.begin()
# prepare for an update
rec3 = {'alloggio': 'albergo', 'lavoro': 'ufficio', 'vacanza': 'montagna'}
tx_update1 = client.record_update(cluster_id, rec_position._rid, rec3, rec_position._version)
# prepare transaction
rec1 = {"@Test": {'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare'}}
tx_create_1 = client.record_create(cluster_id, rec1)
rec2 = {"@Test": {'alloggio': 'baita', 'lavoro': 'no', 'vacanza': 'lago'}}
tx_create_2 = client.record_create(-1, rec2)
tx_delete_1 = client.record_delete(cluster_id, rec_position._rid)
tx.attach(tx_create_1)
tx.attach(tx_create_1)
tx.attach(tx_create_2)
tx.attach(tx_update1)
tx.attach(tx_delete_1)
res = tx.commit()
for k, v in res.items():
print(k + " -> " + v.vacanza)
# in OrientDB version 2.2.9 transactions are executed in reverse order (list pop)
# in previous versions, instead, transactions are executed in ascending order (list shift)
assert len(res) == 3
if client.version.major >= 2 and client.version.minor >= 2:
assert res["#3:1"].vacanza == 'mare'
assert res["#3:2"].vacanza == 'mare'
assert res["#3:3"].vacanza == 'lago'
else:
assert res["#3:1"].vacanza == 'lago'
assert res["#3:2"].vacanza == 'mare'
assert res["#3:3"].vacanza == 'mare'
client.connect("root", "root")
client.db_drop(db_name, pyorient.STORAGE_TYPE_MEMORY)
def test_reserved_words_and_batch_scripts(self):
client = pyorient.OrientDB("localhost", 2424)
client.connect("root", "root")
if client._connection.protocol <= 21:
return unittest.skip("Protocol {!r} does not works well".format(
client._connection.protocol)) # skip test
db_name = "test_tr"
try:
client.db_drop(db_name)
except pyorient.PyOrientStorageException as e:
print(e)
finally:
db = client.db_create(db_name, pyorient.DB_TYPE_GRAPH,
pyorient.STORAGE_TYPE_MEMORY)
cluster_info = client.db_open(
db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
)
class_id1 = client.command("create class my_v_class extends V")[0]
class_id2 = client.command("create class my_e_class extends E")[0]
rec1 = {'@my_v_class': {'accommodation': 'house', 'work': 'office', 'holiday': 'sea'}}
rec2 = {'@my_v_class': {'accommodation': 'house', 'work2': 'office', 'holiday': 'sea3'}}
rec_position1 = client.record_create(class_id1, rec1)
rec_position2 = client.record_create(class_id1, rec2)
sql_edge = "create edge from " + rec_position1._rid + " to " + rec_position2._rid
res = client.command(sql_edge)
def test_use_of_dir(self):
client = pyorient.OrientDB("localhost", 2424)
client.connect("root", "root")
dir(client)
def test_alter_statement(self):
client = pyorient.OrientDB("localhost", 2424)
client.connect("root", "root")
db_name = "test_1234_db"
try:
client.db_drop(db_name)
except pyorient.PyOrientStorageException as e:
print(e)
finally:
db = client.db_create(db_name, pyorient.DB_TYPE_GRAPH,
pyorient.STORAGE_TYPE_MEMORY)
cluster_info = client.db_open(
db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
)
client.command("create class obj")
client.command("create property obj._KEY string")
client.command("alter property obj._KEY mandatory true")
with self.assertRaises(pyorient.PyOrientSQLParsingException) as context:
client.command("create index KEY on obj _KEY unique")
self.assertTrue('Error '
in str(context.exception))
self.assertTrue('parsing '
in str(context.exception))
client.command("create index KEY on obj ( _KEY ) unique")
assert True is True
def test_limit(self):
client = pyorient.OrientDB("localhost", 2424)
client.connect("root", "root")
db_name = "GratefulDeadConcerts"
cluster_info = client.db_open(
db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
)
assert len(client.query("select from V Limit 1")) == 1
assert len(client.query("select from V Limit 51")) == 51
assert len(client.query("select from V lIMit 51")) == 51
assert len(client.query("select from V LIMIT 51")) == 51
assert len(client.query("select from V limit 51")) == 51
assert len(client.query("select from V limit 1")) == 1
assert len(client.query("select from V", 25)) == 25
assert len(client.query("select from V limit 21", 10)) == 21
assert len(client.query("select from V LIMIT 21", 10)) == 21
assert len(client.query("select from V")) == 20
# x = CommandTestCase('test_command').run()
# x = CommandTestCase('test_new_client_interface').run()
|
|
#!/usr/bin/env python3
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO:
# - Implement approximate value functions
import argparse
import collections
import curses
import random
import sys
import tensorflow as tf
import time
from srl import context
from srl import movement
from srl import policy_gradient
from srl import simulation
from srl import world
# There is also an interactive version of the game. These are keycodes
# for interacting with it.
KEY_Q = ord('q')
KEY_ESC = 27
KEY_SPACE = ord(' ')
KEY_UP = 259
KEY_DOWN = 258
KEY_LEFT = 260
KEY_RIGHT = 261
KEY_ACTION_MAP = {
KEY_UP: movement.ACTION_UP,
KEY_DOWN: movement.ACTION_DOWN,
KEY_LEFT: movement.ACTION_LEFT,
KEY_RIGHT: movement.ACTION_RIGHT
}
QUIT_KEYS = set([KEY_Q, KEY_ESC])
class Game(object):
'''A simulation that uses curses.'''
def __init__(self, ctx, generator, driver):
'''Creates a new game in which driver will interact with the simulated world.'''
self._context = ctx
self._sim = simulation.Simulation(generator)
self._driver = driver
self._wins = 0
self._losses = 0
self._was_in_terminal_state = False
# The game loop.
def step(self):
# Paint
self._draw(self._context.window)
# Get input, etc.
self._driver.interact(self._context, self._sim)
if self._sim.in_terminal_state and not self._was_in_terminal_state:
if self._sim.score < 0:
self._losses += 1
else:
self._wins += 1
self._was_in_terminal_state = self._sim.in_terminal_state
# Paints the window.
def _draw(self, window):
window.erase()
# Draw the environment
for y, line in enumerate(self._sim.world._lines):
window.addstr(y, 0, line)
# Draw the player
window.addstr(self._sim.y, self._sim.x, '@')
# Draw status
window.addstr(self._sim.world.h, 0,
'W/L: %d/%d Score: %d' %
(self._wins, self._losses, self._sim.score))
window.move(self._sim.y, self._sim.x)
# TODO: Add a display so multiple things can contribute to the output.
window.refresh()
class Player(object):
'''A Player provides input to the game as a simulation evolves.'''
def interact(self, ctx, sim):
# All players have the same interface
# pylint: disable=unused-argument
pass
class HumanPlayer(Player):
'''A game driver that reads input from the keyboard.'''
def __init__(self):
super(HumanPlayer, self).__init__()
self._ch = 0
def interact(self, ctx, sim):
self._ch = ctx.window.getch()
if self._ch in KEY_ACTION_MAP and not sim.in_terminal_state:
sim.act(KEY_ACTION_MAP[self._ch])
elif self._ch == KEY_SPACE and sim.in_terminal_state:
sim.reset()
elif self._ch in QUIT_KEYS:
ctx.run_loop.post_quit()
class MachinePlayer(Player):
'''A game driver which applies a policy, observed by a learner.
The learner can adjust the policy.
'''
def __init__(self, policy, learner):
super(MachinePlayer, self).__init__()
self._policy = policy
self._learner = learner
def interact(self, ctx, sim):
super(MachinePlayer, self).interact(ctx, sim)
if sim.in_terminal_state:
sim.reset()
else:
old_state = sim.state
action = self._policy.pick_action(sim.state)
reward = sim.act(action)
self._learner.observe(old_state, action, reward, sim.state)
class StubLearner(object):
'''Plugs in as a learner but doesn't update anything.'''
def observe(self, old_state, action, reward, new_state):
pass
class RandomPolicy(object):
'''A policy which picks actions at random.'''
def pick_action(self, _):
return random.choice(movement.ALL_ACTIONS)
class EpsilonPolicy(object):
'''Pursues policy A, but uses policy B with probability epsilon.
Be careful when using a learned function for one of these policies;
the epsilon policy needs an off-policy learner.
'''
def __init__(self, policy_a, policy_b, epsilon):
self._policy_a = policy_a
self._policy_b = policy_b
self._epsilon = epsilon
def pick_action(self, state):
if random.random() < self._epsilon:
return self._policy_b.pick_action(state)
else:
return self._policy_a.pick_action(state)
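# A minimal composition sketch (mirroring the construction in main() below):
# follow the greedy Q-based policy but explore uniformly at random 1% of the
# time, e.g. policy = EpsilonPolicy(GreedyQ(q), RandomPolicy(), 0.01).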
class QTable(object):
'''An approximation of the Q function based on a look-up table.
As such it is only appropriate for discrete state-action spaces.'''
def __init__(self, init_reward = 0):
self._table = collections.defaultdict(lambda: init_reward)
def get(self, state, action):
return self._table[(state, action)]
def set(self, state, action, value):
self._table[(state, action)] = value
def best(self, state):
'''Gets the best predicted action and its value for |state|.'''
best_value = -1e20
best_action = None
for action in movement.ALL_ACTIONS:
value = self.get(state, action)
if value > best_value:
best_action, best_value = action, value
return best_action, best_value
class GreedyQ(object):
'''A policy which chooses the action with the highest reward estimate.'''
def __init__(self, q):
self._q = q
def pick_action(self, state):
return self._q.best(state)[0]
class QLearner(object):
'''An off-policy learner which updates a QTable.'''
def __init__(self, q, learning_rate, discount_rate):
self._q = q
self._alpha = learning_rate
self._gamma = discount_rate
def observe(self, old_state, action, reward, new_state):
prev = self._q.get(old_state, action)
self._q.set(old_state, action, prev + self._alpha * (
reward + self._gamma * self._q.best(new_state)[1] - prev))
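# A worked example of the update above, with hypothetical numbers: for
# learning_rate alpha=0.5, discount_rate gamma=0.9, a current estimate
# Q(s,a)=0, an observed reward of 1 and a best next-state value of 2, the new
# estimate is 0 + 0.5 * (1 + 0.9 * 2 - 0) = 1.4. This is the standard
# Q-learning rule Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)).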
def main():
parser = argparse.ArgumentParser(description='Simple Reinforcement Learning.')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--interactive', action='store_true',
help='use the keyboard arrow keys to play')
group.add_argument('--q', action='store_true',
help='play automatically with Q-learning')
group.add_argument('--pg', action='store_true',
help='play automatically with policy gradients')
parser.add_argument('--random', action='store_true',
help='generate a random map')
args = parser.parse_args()
ctx = context.Context()
if args.random:
generator = world.Generator(25, 15)
else:
generator = world.Static(world.World.parse('''\
########
#..#...#
#.@#.$.#
#.##^^.#
#......#
########
'''))
if args.interactive:
player = HumanPlayer()
elif args.q:
q = QTable()
learner = QLearner(q, 0.05, 0.1)
policy = EpsilonPolicy(GreedyQ(q), RandomPolicy(), 0.01)
player = MachinePlayer(policy, learner)
elif args.pg:
g = tf.Graph()
s = tf.Session(graph=g)
player = policy_gradient.PolicyGradientPlayer(g, s, generator.size)
with g.as_default():
init = tf.global_variables_initializer()
s.run(init)
else:
sys.exit(1)
is_automatic = args.q or args.pg
if is_automatic:
# Slow the game down to make it fun to watch.
ctx.run_loop.post_task(lambda: time.sleep(0.1), repeat=True)
game = Game(ctx, generator, player)
ctx.run_loop.post_task(game.step, repeat=True)
ctx.start()
if __name__ == '__main__':
main()
|
|
"""
homeassistant.components.light.hue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Hue lights.
"""
import logging
import socket
from datetime import timedelta
from urllib.parse import urlparse
from homeassistant.loader import get_component
import homeassistant.util as util
from homeassistant.const import CONF_HOST, DEVICE_DEFAULT_NAME
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_XY_COLOR, ATTR_TRANSITION,
ATTR_FLASH, FLASH_LONG, FLASH_SHORT, ATTR_EFFECT,
EFFECT_COLORLOOP)
REQUIREMENTS = ['phue==0.8']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
PHUE_CONFIG_FILE = "phue.conf"
# Map ip to request id for configuring
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Gets the Hue lights. """
try:
# pylint: disable=unused-variable
import phue # noqa
except ImportError:
_LOGGER.exception("Error while importing dependency phue.")
return
if discovery_info is not None:
host = urlparse(discovery_info[1]).hostname
else:
host = config.get(CONF_HOST, None)
# Only act if we are not already configuring this host
if host in _CONFIGURING:
return
setup_bridge(host, hass, add_devices_callback)
def setup_bridge(host, hass, add_devices_callback):
""" Setup a phue bridge based on host parameter. """
import phue
try:
bridge = phue.Bridge(
host,
config_file_path=hass.config.path(PHUE_CONFIG_FILE))
except ConnectionRefusedError: # Wrong host was given
_LOGGER.exception("Error connecting to the Hue bridge at %s", host)
return
except phue.PhueRegistrationException:
_LOGGER.warning("Connected to Hue at %s but not registered.", host)
request_configuration(host, hass, add_devices_callback)
return
# If we got here while configuring this host, mark it as done
if host in _CONFIGURING:
request_id = _CONFIGURING.pop(host)
configurator = get_component('configurator')
configurator.request_done(request_id)
lights = {}
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_lights():
""" Updates the Hue light objects with latest info from the bridge. """
try:
api = bridge.get_api()
except socket.error:
# socket.error when we cannot reach Hue
_LOGGER.exception("Cannot reach the bridge")
return
api_states = api.get('lights')
if not isinstance(api_states, dict):
_LOGGER.error("Got unexpected result from Hue API")
return
new_lights = []
for light_id, info in api_states.items():
if light_id not in lights:
lights[light_id] = HueLight(int(light_id), info,
bridge, update_lights)
new_lights.append(lights[light_id])
else:
lights[light_id].info = info
if new_lights:
add_devices_callback(new_lights)
update_lights()
def request_configuration(host, hass, add_devices_callback):
""" Request configuration steps from the user. """
configurator = get_component('configurator')
# It is an error if this method is called while we are still configuring this host
if host in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING[host], "Failed to register, please try again.")
return
def hue_configuration_callback(data):
""" Actions to do when our configuration callback is called. """
setup_bridge(host, hass, add_devices_callback)
_CONFIGURING[host] = configurator.request_config(
hass, "Philips Hue", hue_configuration_callback,
description=("Press the button on the bridge to register Philips Hue "
"with Home Assistant."),
description_image="/static/images/config_philips_hue.jpg",
submit_caption="I have pressed the button"
)
class HueLight(Light):
""" Represents a Hue light """
def __init__(self, light_id, info, bridge, update_lights):
self.light_id = light_id
self.info = info
self.bridge = bridge
self.update_lights = update_lights
@property
def unique_id(self):
""" Returns the id of this Hue light """
return "{}.{}".format(
self.__class__, self.info.get('uniqueid', self.name))
@property
def name(self):
""" Get the mame of the Hue light. """
return self.info.get('name', DEVICE_DEFAULT_NAME)
@property
def brightness(self):
""" Brightness of this light between 0..255. """
return self.info['state']['bri']
@property
def color_xy(self):
""" XY color value. """
return self.info['state'].get('xy')
@property
def is_on(self):
""" True if device is on. """
self.update_lights()
return self.info['state']['reachable'] and self.info['state']['on']
def turn_on(self, **kwargs):
""" Turn the specified or all lights on. """
command = {'on': True}
if ATTR_TRANSITION in kwargs:
# Transition time is in 1/10th seconds and cannot exceed
# 900 seconds.
command['transitiontime'] = min(9000, kwargs[ATTR_TRANSITION] * 10)
if ATTR_BRIGHTNESS in kwargs:
command['bri'] = kwargs[ATTR_BRIGHTNESS]
if ATTR_XY_COLOR in kwargs:
command['xy'] = kwargs[ATTR_XY_COLOR]
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command['alert'] = 'lselect'
elif flash == FLASH_SHORT:
command['alert'] = 'select'
else:
command['alert'] = 'none'
effect = kwargs.get(ATTR_EFFECT)
if effect == EFFECT_COLORLOOP:
command['effect'] = 'colorloop'
else:
command['effect'] = 'none'
self.bridge.set_light(self.light_id, command)
def turn_off(self, **kwargs):
""" Turn the specified or all lights off. """
command = {'on': False}
if ATTR_TRANSITION in kwargs:
# Transition time is in 1/10th seconds and cannot exceed
# 900 seconds.
command['transitiontime'] = min(9000, kwargs[ATTR_TRANSITION] * 10)
self.bridge.set_light(self.light_id, command)
def update(self):
""" Synchronize state with bridge. """
self.update_lights(no_throttle=True)
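# A minimal configuration.yaml sketch (host value is hypothetical) that would
# reach setup_platform above when no discovery info is provided:
#
#   light:
#     platform: hue
#     host: 192.168.1.2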
|
|
# Copyright (c) 2017, Dirk Hartmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import group
from . import hook, window, utils
from .log_utils import logger
class WindowVisibilityToggler:
"""
WindowVisibilityToggler is a wrapper for a window, used in ScratchPad group
to toggle visibility of a window by toggling the group it belongs to.
The window is either sent to the named ScratchPad, which is by default
invisible, or the current group on the current screen.
With this functionality the window can be shown and hidden by a single
keystroke (bound to command of ScratchPad group).
By default, the window is also hidden if it loses focus.
"""
def __init__(self, scratchpad_name, window, on_focus_lost_hide, warp_pointer):
"""
Initialize the WindowVisibilityToggler.
Parameters:
===========
scratchpad_name : string
The name (not label) of the ScratchPad group used to hide the window
window : window
The window to toggle
on_focus_lost_hide : bool
if True the associated window is hidden if it loses focus
warp_pointer : bool
if True the mouse pointer is warped to center of associated window
if shown. Only used if on_focus_lost_hide is True
"""
self.scratchpad_name = scratchpad_name
self.window = window
self.on_focus_lost_hide = on_focus_lost_hide
self.warp_pointer = warp_pointer
# determine current status based on visibility
self.shown = False
self.show()
def info(self):
return dict(window=self.window.info(),
scratchpad_name=self.scratchpad_name,
visible=self.visible,
on_focus_lost_hide=self.on_focus_lost_hide,
warp_pointer=self.warp_pointer)
@property
def visible(self):
"""
Determine if associated window is currently visible.
That is, the window is on a group different from the scratchpad
and that group is the currently visible group.
"""
if self.window.group is None:
return False
return (self.window.group.name != self.scratchpad_name and
self.window.group is self.window.qtile.current_group)
def toggle(self):
"""
Toggle the visibility of associated window. Either show() or hide().
"""
if (not self.visible or not self.shown):
self.show()
else:
self.hide()
def show(self):
"""
Show the associated window on top of current screen.
The window is moved to the current group as floating window.
If 'warp_pointer' is True the mouse pointer is warped to center of the
window if 'on_focus_lost_hide' is True.
Otherwise, if the pointer is moved to the window manually by the user,
the window might be hidden again before the pointer actually reaches it.
"""
if (not self.visible) or (not self.shown):
win = self.window
# always set the floating state before changing group
# to avoid disturbance of tiling layout
win._float_state = window.TOP
# add to group and bring it to front.
win.togroup()
win.cmd_bring_to_front()
# toggle internal flag of visibility
self.shown = True
# add hooks to determine if focus gets lost
if self.on_focus_lost_hide:
if self.warp_pointer:
win.window.warp_pointer(win.width // 2, win.height // 2)
hook.subscribe.client_focus(self.on_focus_change)
hook.subscribe.setgroup(self.on_focus_change)
def hide(self):
"""
Hide the associated window. That is, send it to the scratchpad group.
"""
if self.visible or self.shown:
# unsubscribe the hook methods, since the window is not shown
if self.on_focus_lost_hide:
hook.unsubscribe.client_focus(self.on_focus_change)
hook.unsubscribe.setgroup(self.on_focus_change)
self.window.togroup(self.scratchpad_name)
self.shown = False
def unsubscribe(self):
"""unsubscribe all hooks"""
try:
hook.unsubscribe.client_focus(self.on_focus_change)
except utils.QtileError as err:
logger.exception("Scratchpad failed to unsubscribe on_focus_change"
": %s" % err)
try:
hook.unsubscribe.setgroup(self.on_focus_change)
except utils.QtileError as err:
logger.exception("Scratchpad failed to unsubscribe on_focus_change"
": %s" % err)
def on_focus_change(self, *args, **kwargs):
"""
hook method which is called on window focus change and group change.
Depending on 'on_focus_lost_xxx' arguments, the associated window may
get hidden (by call to hide) or even killed.
"""
if self.shown:
current_group = self.window.qtile.current_group
if (self.window.group is not current_group or
self.window is not current_group.current_window):
if self.on_focus_lost_hide:
self.hide()
class DropDownToggler(WindowVisibilityToggler):
"""
Specialized WindowVisibilityToggler which places the associated window
at the desired location each time it is shown.
For example this can be used to create a quake-like terminal.
"""
def __init__(self, window, scratchpad_name, ddconfig):
self.name = ddconfig.name
self.x = ddconfig.x
self.y = ddconfig.y
self.width = ddconfig.width
self.height = ddconfig.height
window.set_opacity(ddconfig.opacity)
WindowVisibilityToggler.__init__(
self, scratchpad_name, window, ddconfig.on_focus_lost_hide, ddconfig.warp_pointer
)
def info(self):
info = WindowVisibilityToggler.info(self)
info.update(dict(name=self.name,
x=self.x,
y=self.y,
width=self.width,
height=self.height))
return info
def show(self):
"""
Like WindowVisibilityToggler.show, but before showing the window,
its floating x, y, width and height is set.
"""
if (not self.visible) or (not self.shown):
win = self.window
screen = win.qtile.current_screen
# calculate windows floating position and width/height
# these may differ between screens, and thus are always recalculated.
win.x = int(screen.dx + self.x * screen.dwidth)
win.y = int(screen.dy + self.y * screen.dheight)
win.float_x = win.x
win.float_y = win.y
win.width = int(screen.dwidth * self.width)
win.height = int(screen.dheight * self.height)
# SHOW
WindowVisibilityToggler.show(self)
class ScratchPad(group._Group):
"""
Specialized group which is by default invisible and can be configured to
spawn windows and toggle their visibility (in the current group) by command.
The ScratchPad group acts as a container for windows which are currently
not visible but associated to a DropDownToggler and can toggle their
group by command (of ScratchPad group).
The ScratchPad, by default, has no label and thus is not shown in
GroupBox widget.
"""
def __init__(self, name='scratchpad', dropdowns=[], label=''):
group._Group.__init__(self, name, label=label)
self._dropdownconfig = {dd.name: dd for dd in dropdowns}
self.dropdowns = {}
self._spawned = {}
def _check_unsubscribe(self):
if not self.dropdowns:
hook.unsubscribe.client_killed(self.on_client_killed)
hook.unsubscribe.float_change(self.on_float_change)
def _spawn(self, ddconfig):
"""
Spawn a process by defined command.
Method is only called if no window is associated. This is either on the
first call to show or if the window was killed.
The process id of spawned process is saved and compared to new windows.
In case of a match the window gets associated to this DropDown object.
"""
name = ddconfig.name
if name not in self._spawned.values():
if not self._spawned:
hook.subscribe.client_new(self.on_client_new)
cmd = self._dropdownconfig[name].command
pid = self.qtile.cmd_spawn(cmd)
self._spawned[pid] = name
def on_client_new(self, client, *args, **kwargs):
"""
hook method which is called on new windows.
This method is subscribed when the configured command is spawned
and unsubscribed as soon as the associated window is detected.
"""
client_pid = client.window.get_net_wm_pid()
if client_pid in self._spawned:
name = self._spawned.pop(client_pid)
if not self._spawned:
hook.unsubscribe.client_new(self.on_client_new)
self.dropdowns[name] = DropDownToggler(client, self.name,
self._dropdownconfig[name])
if len(self.dropdowns) == 1:
hook.subscribe.client_killed(self.on_client_killed)
hook.subscribe.float_change(self.on_float_change)
def on_client_killed(self, client, *args, **kwargs):
"""
hook method which is called if a client is killed.
If the associated window is killed, reset internal state.
"""
name = None
for name, dd in self.dropdowns.items():
if dd.window is client:
dd.unsubscribe()
del self.dropdowns[name]
break
self._check_unsubscribe()
def on_float_change(self, *args, **kwargs):
"""
hook method which is called if window float state is changed.
If the currently associated window is no longer floating, the window
and process are detached from the DropDown, so the next call to show
will spawn a new process.
"""
name = None
for name, dd in self.dropdowns.items():
if not dd.window.floating:
if dd.window.group is not self:
dd.unsubscribe()
del self.dropdowns[name]
break
self._check_unsubscribe()
def cmd_dropdown_toggle(self, name):
"""
Toggle visibility of named DropDown.
"""
if name in self.dropdowns:
self.dropdowns[name].toggle()
else:
if name in self._dropdownconfig:
self._spawn(self._dropdownconfig[name])
def cmd_dropdown_reconfigure(self, name, **kwargs):
"""
reconfigure the named DropDown configuration.
Note that changed attributes only take effect the next time the window is spawned.
"""
if name not in self._dropdownconfig:
return
dd = self._dropdownconfig[name]
for attr, value in kwargs.items():
if hasattr(dd, attr):
setattr(dd, attr, value)
def cmd_dropdown_info(self, name=None):
"""
Get information on configured or currently active DropDowns.
If name is None, a list of all dropdown names is returned.
"""
if name is None:
return {'dropdowns': [ddname for ddname in self._dropdownconfig]}
elif name in self.dropdowns:
return self.dropdowns[name].info()
elif name in self._dropdownconfig:
return self._dropdownconfig[name].info()
else:
raise ValueError('No DropDown named "%s".' % name)
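# A typical configuration sketch, assuming the usual libqtile.config names
# (ScratchPad, DropDown, Key) and a 'mod' variable defined elsewhere:
#
#   groups.append(ScratchPad('scratchpad', [
#       DropDown('term', 'urxvt', opacity=0.8),
#   ]))
#   keys.append(Key([mod], 'F11',
#                   lazy.group['scratchpad'].dropdown_toggle('term')))
#
# The binding invokes cmd_dropdown_toggle above, which spawns the terminal on
# first use and afterwards toggles its visibility on the current screen.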
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc.
import contextlib
import mock
from webob import exc
from neutron import context
from neutron.extensions import firewall
from neutron.plugins.common import constants as const
from neutron.services.firewall import fwaas_plugin
from neutron.tests import base
from neutron.tests.unit.db.firewall import test_db_firewall
FW_PLUGIN_KLASS = (
"neutron.services.firewall.fwaas_plugin.FirewallPlugin"
)
class TestFirewallCallbacks(test_db_firewall.FirewallPluginDbTestCase):
def setUp(self):
super(TestFirewallCallbacks,
self).setUp(fw_plugin=FW_PLUGIN_KLASS)
self.callbacks = self.plugin.callbacks
def test_set_firewall_status(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP) as fw:
fw_id = fw['firewall']['id']
res = self.callbacks.set_firewall_status(ctx, fw_id,
const.ACTIVE,
host='dummy')
fw_db = self.plugin.get_firewall(ctx, fw_id)
self.assertEqual(fw_db['status'], const.ACTIVE)
self.assertTrue(res)
res = self.callbacks.set_firewall_status(ctx, fw_id,
const.ERROR)
fw_db = self.plugin.get_firewall(ctx, fw_id)
self.assertEqual(fw_db['status'], const.ERROR)
self.assertFalse(res)
def test_set_firewall_status_pending_delete(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP) as fw:
fw_id = fw['firewall']['id']
fw_db = self.plugin._get_firewall(ctx, fw_id)
fw_db['status'] = const.PENDING_DELETE
ctx.session.flush()
res = self.callbacks.set_firewall_status(ctx, fw_id,
const.ACTIVE,
host='dummy')
fw_db = self.plugin.get_firewall(ctx, fw_id)
self.assertEqual(fw_db['status'], const.PENDING_DELETE)
self.assertFalse(res)
def test_firewall_deleted(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=test_db_firewall.ADMIN_STATE_UP,
no_delete=True) as fw:
fw_id = fw['firewall']['id']
with ctx.session.begin(subtransactions=True):
fw_db = self.plugin._get_firewall(ctx, fw_id)
fw_db['status'] = const.PENDING_DELETE
ctx.session.flush()
res = self.callbacks.firewall_deleted(ctx, fw_id,
host='dummy')
self.assertTrue(res)
self.assertRaises(firewall.FirewallNotFound,
self.plugin.get_firewall,
ctx, fw_id)
def test_firewall_deleted_error(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(
firewall_policy_id=fwp_id,
admin_state_up=test_db_firewall.ADMIN_STATE_UP,
) as fw:
fw_id = fw['firewall']['id']
res = self.callbacks.firewall_deleted(ctx, fw_id,
host='dummy')
self.assertFalse(res)
fw_db = self.plugin._get_firewall(ctx, fw_id)
self.assertEqual(fw_db['status'], const.ERROR)
def test_get_firewall_for_tenant(self):
tenant_id = 'test-tenant'
ctx = context.Context('', tenant_id)
with contextlib.nested(self.firewall_rule(name='fwr1',
tenant_id=tenant_id),
self.firewall_rule(name='fwr2',
tenant_id=tenant_id),
self.firewall_rule(name='fwr3',
tenant_id=tenant_id)
) as fr:
with self.firewall_policy(tenant_id=tenant_id) as fwp:
fwp_id = fwp['firewall_policy']['id']
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
data = {'firewall_policy':
{'firewall_rules': fw_rule_ids}}
req = self.new_update_request('firewall_policies', data,
fwp_id)
res = req.get_response(self.ext_api)
attrs = self._get_test_firewall_attrs()
attrs['firewall_policy_id'] = fwp_id
with self.firewall(firewall_policy_id=fwp_id,
tenant_id=tenant_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP) as fw:
fw_id = fw['firewall']['id']
res = self.callbacks.get_firewalls_for_tenant(ctx,
host='dummy')
fw_rules = (
self.plugin._make_firewall_dict_with_rules(ctx,
fw_id)
)
self.assertEqual(res[0], fw_rules)
self._compare_firewall_rule_lists(
fwp_id, fr, res[0]['firewall_rule_list'])
def test_get_firewall_for_tenant_without_rules(self):
tenant_id = 'test-tenant'
ctx = context.Context('', tenant_id)
with self.firewall_policy(tenant_id=tenant_id) as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs = self._get_test_firewall_attrs()
attrs['firewall_policy_id'] = fwp_id
with self.firewall(firewall_policy_id=fwp_id, tenant_id=tenant_id,
admin_state_up=test_db_firewall.ADMIN_STATE_UP
) as fw:
fw_list = [fw['firewall']]
f = self.callbacks.get_firewalls_for_tenant_without_rules
res = f(ctx, host='dummy')
for fw in res:
del fw['shared']
self.assertEqual(res, fw_list)
class TestFirewallAgentApi(base.BaseTestCase):
def setUp(self):
super(TestFirewallAgentApi, self).setUp()
self.api = fwaas_plugin.FirewallAgentApi('topic', 'host')
self.mock_fanoutcast = mock.patch.object(self.api,
'fanout_cast').start()
self.mock_msg = mock.patch.object(self.api, 'make_msg').start()
def test_init(self):
self.assertEqual(self.api.topic, 'topic')
self.assertEqual(self.api.host, 'host')
def _call_test_helper(self, method_name):
rv = getattr(self.api, method_name)(mock.sentinel.context, 'test')
self.assertEqual(rv, self.mock_fanoutcast.return_value)
self.mock_fanoutcast.assert_called_once_with(
mock.sentinel.context,
self.mock_msg.return_value,
topic='topic'
)
self.mock_msg.assert_called_once_with(
method_name,
firewall='test',
host='host'
)
def test_create_firewall(self):
self._call_test_helper('create_firewall')
def test_update_firewall(self):
self._call_test_helper('update_firewall')
def test_delete_firewall(self):
self._call_test_helper('delete_firewall')
class TestFirewallPluginBase(test_db_firewall.TestFirewallDBPlugin):
def setUp(self):
super(TestFirewallPluginBase, self).setUp(fw_plugin=FW_PLUGIN_KLASS)
self.callbacks = self.plugin.callbacks
def test_create_second_firewall_not_permitted(self):
with self.firewall():
res = self._create_firewall(
None, 'firewall2', description='test',
firewall_policy_id=None, admin_state_up=True)
self.assertEqual(res.status_int, exc.HTTPConflict.code)
def test_create_firewall_admin_not_affected_by_other_tenant(self):
# Create fw with admin after creating fw with other tenant
with self.firewall(tenant_id='other-tenant') as fw1:
with self.firewall() as fw2:
self.assertEqual('other-tenant', fw1['firewall']['tenant_id'])
self.assertEqual(self._tenant_id, fw2['firewall']['tenant_id'])
def test_update_firewall(self):
ctx = context.get_admin_context()
name = "new_firewall1"
attrs = self._get_test_firewall_attrs(name)
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP) as firewall:
fw_id = firewall['firewall']['id']
res = self.callbacks.set_firewall_status(ctx, fw_id,
const.ACTIVE)
data = {'firewall': {'name': name}}
req = self.new_update_request('firewalls', data, fw_id)
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
attrs = self._replace_firewall_status(attrs,
const.PENDING_CREATE,
const.PENDING_UPDATE)
for k, v in attrs.iteritems():
self.assertEqual(res['firewall'][k], v)
def test_update_firewall_fails_when_firewall_pending(self):
name = "new_firewall1"
attrs = self._get_test_firewall_attrs(name)
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP) as firewall:
fw_id = firewall['firewall']['id']
data = {'firewall': {'name': name}}
req = self.new_update_request('firewalls', data, fw_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPConflict.code)
def test_update_firewall_shared_fails_for_non_admin(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP,
tenant_id='noadmin') as firewall:
fw_id = firewall['firewall']['id']
self.callbacks.set_firewall_status(ctx, fw_id,
const.ACTIVE)
data = {'firewall': {'shared': True}}
req = self.new_update_request(
'firewalls', data, fw_id,
context=context.Context('', 'noadmin'))
res = req.get_response(self.ext_api)
# returns 404 due to security reasons
self.assertEqual(res.status_int, exc.HTTPNotFound.code)
def test_update_firewall_policy_fails_when_firewall_pending(self):
name = "new_firewall1"
attrs = self._get_test_firewall_attrs(name)
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP):
data = {'firewall_policy': {'name': name}}
req = self.new_update_request('firewall_policies',
data, fwp_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPConflict.code)
def test_update_firewall_rule_fails_when_firewall_pending(self):
with self.firewall_rule(name='fwr1') as fr:
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
fr_id = fr['firewall_rule']['id']
fw_rule_ids = [fr_id]
data = {'firewall_policy':
{'firewall_rules': fw_rule_ids}}
req = self.new_update_request('firewall_policies', data,
fwp_id)
req.get_response(self.ext_api)
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP):
data = {'firewall_rule': {'protocol': 'udp'}}
req = self.new_update_request('firewall_rules',
data, fr_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPConflict.code)
def test_delete_firewall(self):
ctx = context.get_admin_context()
attrs = self._get_test_firewall_attrs()
# stop the AgentRPC patch for this one to test pending states
self.agentapi_delf_p.stop()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
attrs['firewall_policy_id'] = fwp_id
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP) as firewall:
fw_id = firewall['firewall']['id']
attrs = self._replace_firewall_status(attrs,
const.PENDING_CREATE,
const.PENDING_DELETE)
req = self.new_delete_request('firewalls', fw_id)
req.get_response(self.ext_api)
fw_db = self.plugin._get_firewall(ctx, fw_id)
for k, v in attrs.iteritems():
self.assertEqual(fw_db[k], v)
# cleanup the pending firewall
self.plugin.callbacks.firewall_deleted(ctx, fw_id)
def test_delete_firewall_after_agent_delete(self):
ctx = context.get_admin_context()
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(firewall_policy_id=fwp_id,
no_delete=True) as fw:
fw_id = fw['firewall']['id']
req = self.new_delete_request('firewalls', fw_id)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
self.assertRaises(firewall.FirewallNotFound,
self.plugin.get_firewall,
ctx, fw_id)
def test_make_firewall_dict_with_in_place_rules(self):
ctx = context.get_admin_context()
with contextlib.nested(self.firewall_rule(name='fwr1'),
self.firewall_rule(name='fwr2'),
self.firewall_rule(name='fwr3')) as fr:
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
fw_rule_ids = [r['firewall_rule']['id'] for r in fr]
data = {'firewall_policy':
{'firewall_rules': fw_rule_ids}}
req = self.new_update_request('firewall_policies', data,
fwp_id)
req.get_response(self.ext_api)
attrs = self._get_test_firewall_attrs()
attrs['firewall_policy_id'] = fwp_id
with self.firewall(firewall_policy_id=fwp_id,
admin_state_up=
test_db_firewall.ADMIN_STATE_UP) as fw:
fw_id = fw['firewall']['id']
fw_rules = (
self.plugin._make_firewall_dict_with_rules(ctx,
fw_id)
)
self.assertEqual(fw_rules['id'], fw_id)
self._compare_firewall_rule_lists(
fwp_id, fr, fw_rules['firewall_rule_list'])
def test_make_firewall_dict_with_in_place_rules_no_policy(self):
ctx = context.get_admin_context()
with self.firewall() as fw:
fw_id = fw['firewall']['id']
fw_rules = self.plugin._make_firewall_dict_with_rules(ctx, fw_id)
self.assertEqual(fw_rules['firewall_rule_list'], [])
def test_list_firewalls(self):
with self.firewall_policy() as fwp:
fwp_id = fwp['firewall_policy']['id']
with self.firewall(name='fw1', firewall_policy_id=fwp_id,
description='fw') as fwalls:
self._test_list_resources('firewall', [fwalls],
query_params='description=fw')
|
|
"""Command line utilities"""
from __future__ import absolute_import
from .utils import _wrap_run_cmd, read_file, write_file
from .status import str_status
from . import JobScript, AsyncResult, __version__
import importlib
import sys
import os
import logging
import click
DEFAULT_TEST_BODY = r'''
echo "####################################################"
echo "Job id : $CLUSTERJOB_ID"
echo "Job name : $CLUSTERJOB_NAME"
echo "Workdir : $CLUSTERJOB_WORKDIR"
echo "Submission Host: $CLUSTERJOB_SUBMIT_HOST"
echo "Compute Node : $CLUSTERJOB_NODELIST"
echo "Job started on" `hostname` `date`
echo "Current directory:" `pwd`
echo "####################################################"
sleep 60
echo "Job Finished: " `date`
exit 0
'''
def _print_default_test_body(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(DEFAULT_TEST_BODY)
ctx.exit()
def _abort_pause(msg):
try:
msg += " Press ENTER to continue, CTRL+C to abort."
click.confirm(msg, abort=True, show_default=False,
default=True, prompt_suffix='')
except click.Abort:
click.echo("")
sys.exit(0)
def _run_testing_workflow(job, prompt=True):
"""For the given job, interactively go through the workflow of submitting,
canceling, resubmitting, and polling the job (while it runs and after it
ends). Giving ``prompt=False`` indicates "replay" mode.
"""
click.echo("\n*** Submitting Job ***\n")
ar = job.submit()
if prompt:
_abort_pause("\nPlease verify that job has been submitted.")
click.echo("\n*** Cancelling Job ***\n")
ar.cancel()
if prompt:
_abort_pause("\nPlease verify that job has been cancelled.")
click.echo("\n*** Resubmitting Job ***\n")
ar = job.submit(retry=True)
if not prompt:
ar.max_sleep_interval = 0
# this relies on test_workflow monkeypatching the _min_sleep_interval
# to 0
if prompt:
_abort_pause("\nPlease verify that job has been resubmitted.")
click.echo("\nStatus of running job: "+str_status[ar.status])
if prompt:
_abort_pause("\nPlease wait for job to finish.")
click.echo("\nStatus of finished job: "+str_status[ar.get()])
@click.command()
@click.help_option('-h', '--help')
@click.version_option(version=__version__)
@click.option('--body', help="File containing the body of the script to be "
"used. If not given, a default script will be used , see "
"--show-default-body.", type=click.Path(exists=True))
@click.option('--jobname', metavar='JOBNAME', show_default=True,
default='test_clj', help="Name of the job")
@click.option('--backend', metavar='CLS', help="Class from which to load "
"custom backend.")
@click.option('--show-default-body', is_flag=True, help="Print the default "
"script body and exit.", callback=_print_default_test_body,
expose_value=False, is_eager=True)
@click.argument('inifile', type=click.Path(exists=True))
def test_backend(inifile, body, backend, jobname):
"""Perform a workflow test for a backend/job configuration specified in
INIFILE. Create a clusterjob.JobScript instance of a simple default script
(or any other script specified via --body). Read settings from the INIFILE
(see JobScript.read_settings method). The INIFILE may refer to a custom
backend loaded via the '--backend' option: E.g. `--backend mymod.MyBackend`
is equivalent to the Python code
\b
import mymod; JobScript.register_backend(mymod.MyBackend())
See the clusterjob documentation for details.
\b
Interactively guide through the following workflow:
* Submit the job script.
* Cancel the submitted job.
* Re-submit the job script.
* Poll the job status while it is running.
* Wait for job to end, poll job status after job has ended.
The script used for the test should have a run time of at least 1 minute,
in order to allow for interactively checking the successful operations of
the script. Assuming the INIFILE is named `<basename>.ini`, the following
files will be generated:
\b
* <basename>.body.sh: the original script body (If --body is given, this
will be a copy of the file specified there)
* <basename>.rendered.sh: the rendered script, i.e. with backend-specific
resource headers, and with inserted placeholders
* <basename>.out: the output generated by the scheduler, via the 'stdout'
resource specification. Note that a 'stdout' specified in INIFILE will
be overwritten to the value 'clusterjob_test.out'
* <basename>.json: a record of the communication with the scheduler
during the workflow
The above set of files may be added to the clusterjob test suite, by moving
them to `clusterjob/tests/test_workflow/` and by adding the INIFILE to the
INI_FILES list in `clusterjob/tests/test_workflow.py`. By using the
information recorded in the json file, the test suite will then be able to
re-run the workflow without actually connecting to the scheduler.
"""
click.clear()
click.echo("\nSTART WORKFLOW TEST -- RECORDING MODE\n")
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if backend is not None:
click.echo("Loading backend from %s" % backend)
if "." not in backend:
click.echo("ERROR: --backend CLS must contain the module from "
"which to import the class")
sys.exit(1)
backend_parts = backend.split(".")
backend_module = ".".join(backend_parts[:-1])
if os.path.isfile(backend_module.replace(".", os.path.sep)+".py"):
sys.path.append('.')
backend_class = backend_parts[-1]
mod = importlib.import_module(backend_module)
try:
JobScript.register_backend(mod.__dict__[backend_class]())
except (TypeError, ImportError, KeyError, AttributeError) as exc_info:
click.echo("ERROR: %s" % str(exc_info))
sys.exit(1)
click.echo("")
basename = os.path.splitext(inifile)[0]
jsonfile = basename + ".json"
outfile = basename + ".out"
bodyfile = basename + ".body.sh"
renderedfile = basename + ".rendered.sh"
files = [inifile, jsonfile, outfile, bodyfile, renderedfile]
if body is not None:
click.echo("Reading body from %s" % body)
body_str = read_file(body)
else:
click.echo("Using default body:\n")
click.echo("------------------------&<-------------------------------")
click.echo(DEFAULT_TEST_BODY)
click.echo("------------------------&<-------------------------------")
body_str = DEFAULT_TEST_BODY
if body != bodyfile:
_abort_pause("\nBody will be written to %s." % bodyfile)
write_file(bodyfile, body_str)
click.clear()
JobScript.debug_cmds = True
JobScript._run_cmd = staticmethod(_wrap_run_cmd(jsonfile, 'record'))
AsyncResult._run_cmd = staticmethod(JobScript._run_cmd)
# configure job script
job = JobScript(body_str, jobname=jobname)
click.echo("\nConfiguring job from %s\n" % inifile)
try:
job.read_settings(inifile)
except ValueError as exc_info:
click.echo("\nERROR while loading %s: %s"
% (inifile, str(exc_info)))
sys.exit(1)
stdout = 'clusterjob_test.out'
job.resources['stdout'] = stdout
# set up epilogue to get us the script output
epilogue = ''
has_epilogue = False
if len(job.epilogue) > 0:
has_epilogue = True
epilogue = job.epilogue + "\n"
if job.remote is None:
local_out = '{rootdir}/{workdir}/' + stdout
epilogue += 'cp %s %s' % (local_out, outfile)
else:
remote_out = '{remote}:{rootdir}/{workdir}/' + stdout
epilogue += 'rsync -av %s %s' % (remote_out, outfile)
epilogue += "\n" + 'echo ""; echo "STDOUT:"; cat ' + outfile
job.epilogue = epilogue
has_prologue = False
if len(job.prologue) > 0:
has_prologue = True
if len(body_str.splitlines()) < 20:
click.echo("\nRendered job script:\n")
click.echo("------------------------&<-------------------------------")
click.echo(str(job))
click.echo("------------------------&<-------------------------------")
else:
click.pause("\nPress ENTER to view rendered job script.")
click.echo_via_pager(str(job))
_abort_pause("\nRendered job script will be written to %s." % renderedfile)
write_file(renderedfile, str(job))
click.clear()
_abort_pause("\nReady to run live workflow.")
_run_testing_workflow(job, prompt=True)
click.pause(("\nThe job output has been recorded in %s. Press ENTER to "
"view file") % outfile)
click.echo_via_pager(read_file(outfile))
click.clear()
click.echo("\nThe interaction has been recorded in %s" % jsonfile)
if job.remote is None:
click.echo(
("\nIf you intend to add %s to the clusterjob test suite, "
"you may have to take into acount that the tuest suite "
"will set $HOME to '/home/clusterjob_test', instead "
"of '%s'") % (jsonfile, os.environ['HOME']))
if click.confirm("\nDo you want to edit %s now?" % jsonfile):
click.edit(filename=jsonfile)
click.echo(
"\nYou can now add this test to the clusterjob test suite. "
"Please move the files %s to clusterjob/tests/test_workflow/, "
"and add %s to the INI_FILES list in "
"clusterjob/tests/test_workflow.py" % (str(files), inifile))
if has_epilogue or has_prologue:
click.echo("\nNOTE THAT IN THE TEST SUITE ANY EPILOGUE OR "
"PROLOGUE WILL BE DISABLED.")
click.pause("\nPress Enter to finish")
click.echo("\n\nFINISHED WORKFLOW TEST -- RECORDING MODE\n")
|
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from scipy.io import loadmat
from sys import exit
from os.path import isfile
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import argparse
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
def sigmoid(x):
return (1.0/(1.0 + np.exp((-1.0)*x)))
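# Descriptive note: this is the standard logistic sigmoid 1 / (1 + exp(-x)).
# np.exp(-x) can overflow for large negative x; scipy.special.expit would be a
# numerically safer drop-in, but that is a suggestion, not part of the original design.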
def plotme(regression,ytypes,y,h=0.2):
if regression[0].canPlot():
new_x = regression[0].get_x()
x_min, x_max = new_x[:, 1].min() - .5, new_x[:, 1].max() + .5
y_min, y_max = new_x[:, 2].min() - .5, new_x[:, 2].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
fig1 = plt.figure("multi class")
Z = [ regression[i].predict(np.c_[ xx.ravel(), yy.ravel()],scale=False) for i in range(ytypes.size)]
colors = cm.plasma(np.linspace(0,1,ytypes.size))
for i in range(ytypes.size):
Z[i] = Z[i].reshape(xx.shape)
plt.contour(xx, yy, Z[i],levels=[0.5],colors=( tuple(colors[i]) , 0 ))
plt.scatter(new_x[:,1],new_x[:,2],c=y,cmap=cm.plasma)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
fig1.show()
class scale:
def __init__(self, data):
self.mean = np.mean(data,axis=0)
self.std = np.std(data,axis=0)
def scaling(self,data):
data = (data - self.mean)/self.std
return data
class Regression:
def __init__(self):
self.x = np.array([])
self.y = np.array([])
def fit(self, Features, Result, RegressionType, mapping = True):
self.x = Features
self.type = RegressionType
self.y = Result
self.original_x = self.x
self.Scale = scale(self.x)
#self.x = self.Scale.scaling(self.x)
#self.x = np.c_[np.ones(self.x.shape[0]),self.x]
if self.type == 'logistic' and mapping:
self.featureMaps = 6
poly = PolynomialFeatures(self.featureMaps)
self.x = poly.fit_transform(self.x)
self.lam = 0.1
else:
self.lam = 0
self.mapping = mapping
self.Parameters = np.zeros(self.x.shape[1])
self.Hypothesis = np.dot(self.x,self.Parameters)
self.cost = np.array([])
self.precisionValue = 10**-10
self.displayIteration = False
if(self.type != 'logistic' and self.type != 'linear'):
print("\n\nError: Can't find Regression Type")
exit()
if(self.type == 'logistic'):
self.Hypothesis = sigmoid(self.Hypothesis)
def canPlot (self):
if self.original_x.shape[1] == 2:
return True
else:
return False
def calculateCost (self):
m, n = self.x.shape
if (self.type == 'linear'):
error = self.Hypothesis - self.y
cost = (np.sum((error)**2)/(2*error.size))
if (self.type == 'logistic'):
cost1 = (-1) * self.y * np.log(self.Hypothesis)
cost2 = (1.0 - self.y) * np.log(1.0 - self.Hypothesis)
cost = ((np.sum(cost1 - cost2) )/ m) + (0.5 * self.lam * sum(self.Parameters[1:] ** 2)) / m
return cost
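# derivativeOf below computes the batch gradient (1/m) * X^T (h - y); when the polynomial
# feature mapping is active for logistic regression it also adds an L2 term of
# (0.5 * lam / m) * theta_j for j >= 1, leaving the bias term unregularized.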
def derivativeOf(self):
error = self.Hypothesis - self.y
derivative = np.dot(self.x.transpose(), error)
if self.type == 'logistic' and self.mapping:
derivative[1:] += 0.5 * self.lam * self.Parameters[1:]
derivative /= self.y.shape[0]
return derivative
def setPrecisionValue(self,new_value):
self.precisionValue = new_value
def get_x(self):
return self.x
def minimize(self):
LearningRate = 1
if(self.type == 'logistic'):
LearningRate = 1
iterations = 0
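# Plain batch gradient descent: take a step, recompute the hypothesis and cost, and stop
# when the cost drops below precisionValue, becomes NaN, or 10**4 iterations elapse.
# Whenever the cost increases, the learning rate is divided by 10 (simple backtracking).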
while True:
self.Parameters = self.Parameters - LearningRate*(self.derivativeOf())
self.Hypothesis = np.dot(self.x,self.Parameters)
if(self.type == 'logistic'):
self.Hypothesis = sigmoid(self.Hypothesis)
self.cost = np.append(self.cost, self.calculateCost())
if (self.displayIteration):
print(self.cost[iterations])
if (iterations >= 1 ):
if (self.cost[iterations] <= self.precisionValue):
break
#if (abs(self.cost[iterations] - self.cost[iterations-1]) <= (self.precisionValue*self.cost[iterations])):
# break
if (np.isnan(self.cost[iterations])):
iterations -= 1
break
if ( self.cost[iterations] > self.cost[iterations - 1]):
LearningRate /= 10
if (self.displayIteration):
print ("\nLearning rate change: ", LearningRate)
if (iterations > 10**4):
break
iterations += 1
#print("\nLearning rate: ", LearningRate)
def verbose(self):
self.displayIteration = True
def hypothesis(self):
return self.Parameters
def niceOutput(self):
print("\nThe Hypothesis Function is \n",self.Hypothesis)
print("\nFinal Cost is ",self.cost[-1])
def predict(self,data,scale=True):
if scale:
data = self.Scale.scaling(data)
#data = np.c_[np.ones(data.shape[0]),data]
# apply the polynomial feature map only when it was used during fitting
if self.type == 'logistic' and self.mapping:
poly = PolynomialFeatures(self.featureMaps)
data = poly.fit_transform(data)
prediction = np.dot ( data, self.Parameters)
#if ( self.type == 'logistic'):
# prediction = sigmoid(prediction)
return prediction
def plotCost(self):
plt.plot(self.cost)
def classfinder(y):
ytypes = np.array([])
for i in y:
found = False
for j in ytypes:
if i == j:
found = True
break
if not found:
ytypes = np.append(ytypes,i)
return ytypes
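# y_splitter builds one binary label vector per class (one-vs-all): entries matching the
# class become 1 and everything else becomes 0. Class 0.0 takes the inverted path because
# "set non-matches to 0" would clobber the genuine zeros before they can be flipped to 1.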
def y_splitter(y,ytypes):
new_y = [ y.copy() for i in ytypes]
for i in range(0,ytypes.size):
if (ytypes[i] == 0.0):
new_y[i][new_y[i] != ytypes[i]] = 1.0
new_y[i] = 1 - new_y[i]
else:
new_y[i][new_y[i] != ytypes[i]] = 0
new_y[i][new_y[i] == ytypes[i]] = 1
return new_y
def skcompare(X,y,args):
if (args.RegressionType == 'logistic'):
mapping = True
if args.nomapping:
mapping = False
canPlot = False
if X.shape[1] == 2:
canPlot = True
if mapping:
poly = PolynomialFeatures(6)
X = poly.fit_transform(X)
ytypes = classfinder(y)
if ytypes.size >= 2:
logreg = linear_model.LogisticRegression(C=0.01,max_iter=10000)
logreg.fit(X,y)
if canPlot:
h = 0.2
x_min, x_max = X[:, 1].min() - .5, X[:, 1].max() + .5
y_min, y_max = X[:, 2].min() - .5, X[:, 2].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(poly.fit_transform(np.c_[xx.ravel(), yy.ravel()]))
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=cm.plasma)
# Plot also the training points
plt.scatter(X[:, 1], X[:, 2], c=y, edgecolors='k', cmap=cm.plasma)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
def multi_class(x,y,args):
if(args.RegressionType == 'logistic'):
ytypes = classfinder(y)
if ytypes.size == 2:
print ("\n\nBinary Classification for ",ytypes)
ytypes = ytypes[:-1]
else:
print ("\n\nAll vs One Classification for", ytypes)
split_y = y_splitter(y,ytypes)
regression = [Regression() for i in ytypes]
h = 0.2
mapping = True
for i in range(ytypes.size):
if args.nomapping:
mapping = False
regression[i].fit(x,split_y[i],args.RegressionType,mapping)
if args.verbose:
regression[i].verbose()
print ("\n\nMinimizing for class ", ytypes[i])
regression[i].minimize()
regression[i].niceOutput()
plotme(regression,ytypes,y)
if(args.RegressionType == 'linear'):
regression = Regression()
regression.fit(x,y,args.RegressionType)
if args.verbose:
regression.verbose()
regression.minimize()
print("\n\nThe Hypothesis Function is \n",regression.hypothesis())
predicted = 0
if args.test_file is not None :
if isfile(args.test_file):
test_data = pd.read_csv(args.test_file,header = 0)
test_data = test_data.values
prediction = regression.predict(test_data)
test_data = np.c_[test_data,prediction]
predicted = 1
print ("\n\nPredictions complete.")
if args.verbose:
print ("\n\n", test_data)
else:
print ("\n\nERROR: Invalid --predict argument. File doesn't exist.")
if args.output_file is not None :
if predicted == 1:
np.savetxt(args.output_file,test_data,delimiter=",")
else :
np.savetxt(args.output_file,regression.hypothesis(),delimiter=",")
print ("\n\nThe ouput file <",args.output_file,"> has been created.")
print ("\n\nShowing Cost Plot ...")
fig2 = plt.figure("Cost Plot")
regression.plotCost()
fig2.show()
#skcompare(x,y,args.RegressionType)
def main():
parser = argparse.ArgumentParser(description = 'This is a regression algorithm')
parser.add_argument('-v','--verbose', help = 'displays more details.', action="store_true")
parser.add_argument('-p','--predict', help='to add a csv file that holds input data whose output values need to be predicted', dest='test_file', metavar='data_file')
parser.add_argument('-nm','--nomapping', help='Switch off feature mapping',action = "store_true")
parser.add_argument('RegressionType', help='what type of regression algorithm to use')
parser.add_argument('train_file', help='to input the csv file with the data to be trained on')
parser.add_argument('-o','--output_file', help='to input the file name used to store the prediction data or the hypothesis function (depending on whether --predict is used or not)', metavar='file_name')
args = parser.parse_args()
if args.train_file == 'iris':
iris = load_iris()
# Reduce iris data from 4 dimensions to 2 for ease of plotting
pca = PCA(n_components=2).fit(iris.data)
pca_2d = pca.transform(iris.data)
y = iris.target
x = pca_2d
elif args.train_file == 'mnist':
mnist = loadmat('mnist.mat')
x = np.array(mnist['X'])
y = np.array(mnist['y'])
y = np.reshape(y,y.shape[0])
else:
train_data = pd.read_csv(args.train_file,header = 0)
y = train_data.iloc[:,-1]
y = y.values
train_data = train_data.iloc[:,:-1]
x = train_data.values
multi_class(x,y,args)
#skcompare(x,y,args)
input()
if __name__ == "__main__":
main()
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Boot Interface for iLO drivers and its supporting methods.
"""
import os
import tempfile
from ironic_lib import utils as ironic_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six.moves.urllib.parse as urlparse
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import image_service
from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
REQUIRED_PROPERTIES = {
'ilo_deploy_iso': _("UUID (from Glance) of the deployment ISO. "
"Required.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
def parse_driver_info(node):
"""Gets the driver specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the driver_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
"""
info = node.driver_info
d_info = {}
d_info['ilo_deploy_iso'] = info.get('ilo_deploy_iso')
error_msg = _("Error validating iLO virtual media deploy. Some parameters"
" were missing in node's driver_info")
deploy_utils.check_for_missing_params(d_info, error_msg)
return d_info
def _get_boot_iso_object_name(node):
"""Returns the boot iso object name for a given node.
:param node: the node for which object name is to be provided.
"""
return "boot-%s" % node.uuid
def _get_boot_iso(task, root_uuid):
"""This method returns a boot ISO to boot the node.
It chooses one of the three options in the order as below:
1. Does nothing if 'ilo_boot_iso' is present in node's instance_info and
'boot_iso_created_in_web_server' is not set in 'driver_internal_info'.
2. Image deployed has a meta-property 'boot_iso' in Glance. This should
refer to the UUID of the boot_iso which exists in Glance.
3. Generates a boot ISO on the fly using kernel and ramdisk mentioned in
the image deployed. It uploads the generated boot ISO to Swift.
:param task: a TaskManager instance containing the node to act on.
:param root_uuid: the uuid of the root partition.
:returns: boot ISO URL. Should be either of below:
* A Swift object - It should be of format 'swift:<object-name>'. It is
assumed that the image object is present in
CONF.ilo.swift_ilo_container;
* A Glance image - It should be of format 'glance://<glance-image-uuid>'
or just <glance-image-uuid>;
* An HTTP URL.
On error finding the boot iso, it returns None.
:raises: MissingParameterValue, if any of the required parameters are
missing in the node's driver_info or instance_info.
:raises: InvalidParameterValue, if any of the parameters have invalid
value in the node's driver_info or instance_info.
:raises: SwiftOperationError, if operation with Swift fails.
:raises: ImageCreationFailed, if creation of boot ISO failed.
:raises: exception.ImageRefValidationFailed if ilo_boot_iso is not
HTTP(S) URL.
"""
LOG.debug("Trying to get a boot ISO to boot the baremetal node")
# Option 1 - Check if user has provided ilo_boot_iso in node's
# instance_info
driver_internal_info = task.node.driver_internal_info
boot_iso_created_in_web_server = (
driver_internal_info.get('boot_iso_created_in_web_server'))
if (task.node.instance_info.get('ilo_boot_iso')
and not boot_iso_created_in_web_server):
LOG.debug("Using ilo_boot_iso provided in node's instance_info")
boot_iso = task.node.instance_info['ilo_boot_iso']
if not service_utils.is_glance_image(boot_iso):
try:
image_service.HttpImageService().validate_href(boot_iso)
except exception.ImageRefValidationFailed:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Virtual media deploy accepts only Glance "
"images or HTTP(S) URLs as "
"instance_info['ilo_boot_iso']. Either %s "
"is not a valid HTTP(S) URL or is "
"not reachable."), boot_iso)
return task.node.instance_info['ilo_boot_iso']
# Option 2 - Check if user has provided a boot_iso in Glance. If boot_iso
# is a supported non-glance href execution will proceed to option 3.
deploy_info = _parse_deploy_info(task.node)
image_href = deploy_info['image_source']
image_properties = (
images.get_image_properties(
task.context, image_href, ['boot_iso', 'kernel_id', 'ramdisk_id']))
boot_iso_uuid = image_properties.get('boot_iso')
kernel_href = (task.node.instance_info.get('kernel') or
image_properties.get('kernel_id'))
ramdisk_href = (task.node.instance_info.get('ramdisk') or
image_properties.get('ramdisk_id'))
if boot_iso_uuid:
LOG.debug("Found boot_iso %s in Glance", boot_iso_uuid)
return boot_iso_uuid
if not kernel_href or not ramdisk_href:
LOG.error(_LE("Unable to find kernel or ramdisk for "
"image %(image)s to generate boot ISO for %(node)s"),
{'image': image_href, 'node': task.node.uuid})
return
# NOTE(rameshg87): Functionality to share the boot ISOs created for
# similar instances (instances with same deployed image) is
# not implemented as of now. Creation/Deletion of such a shared boot ISO
# will require synchronisation across conductor nodes for the shared boot
# ISO. Such a synchronisation mechanism doesn't exist in ironic as of now.
# Option 3 - Create boot_iso from kernel/ramdisk, upload to Swift
# or web server and provide its name.
deploy_iso_uuid = deploy_info['ilo_deploy_iso']
boot_mode = deploy_utils.get_boot_mode_for_deploy(task.node)
boot_iso_object_name = _get_boot_iso_object_name(task.node)
kernel_params = CONF.pxe.pxe_append_params
with tempfile.NamedTemporaryFile(dir=CONF.tempdir) as fileobj:
boot_iso_tmp_file = fileobj.name
images.create_boot_iso(task.context, boot_iso_tmp_file,
kernel_href, ramdisk_href,
deploy_iso_uuid, root_uuid,
kernel_params, boot_mode)
if CONF.ilo.use_web_server_for_images:
boot_iso_url = (
ilo_common.copy_image_to_web_server(boot_iso_tmp_file,
boot_iso_object_name))
driver_internal_info = task.node.driver_internal_info
driver_internal_info['boot_iso_created_in_web_server'] = True
task.node.driver_internal_info = driver_internal_info
task.node.save()
LOG.debug("Created boot_iso %(boot_iso)s for node %(node)s",
{'boot_iso': boot_iso_url, 'node': task.node.uuid})
return boot_iso_url
else:
container = CONF.ilo.swift_ilo_container
swift_api = swift.SwiftAPI()
swift_api.create_object(container, boot_iso_object_name,
boot_iso_tmp_file)
LOG.debug("Created boot_iso %s in Swift", boot_iso_object_name)
return 'swift:%s' % boot_iso_object_name
def _clean_up_boot_iso_for_instance(node):
"""Deletes the boot ISO if it was created for the instance.
:param node: an ironic node object.
"""
ilo_boot_iso = node.instance_info.get('ilo_boot_iso')
if not ilo_boot_iso:
return
if ilo_boot_iso.startswith('swift'):
swift_api = swift.SwiftAPI()
container = CONF.ilo.swift_ilo_container
boot_iso_object_name = _get_boot_iso_object_name(node)
try:
swift_api.delete_object(container, boot_iso_object_name)
except exception.SwiftOperationError as e:
LOG.exception(_LE("Failed to clean up boot ISO for node "
"%(node)s. Error: %(error)s."),
{'node': node.uuid, 'error': e})
elif CONF.ilo.use_web_server_for_images:
result = urlparse.urlparse(ilo_boot_iso)
ilo_boot_iso_name = os.path.basename(result.path)
boot_iso_path = os.path.join(
CONF.deploy.http_root, ilo_boot_iso_name)
ironic_utils.unlink_without_raise(boot_iso_path)
def _parse_deploy_info(node):
"""Gets the instance and driver specific Node deployment info.
This method validates whether the 'instance_info' and 'driver_info'
property of the supplied node contains the required information for
this driver to deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info and driver_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
info = {}
info.update(deploy_utils.get_image_instance_info(node))
info.update(parse_driver_info(node))
return info
class IloVirtualMediaBoot(base.BootInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Validate the deployment information for the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue, if some information is invalid.
:raises: MissingParameterValue if 'kernel_id' and 'ramdisk_id' are
missing in the Glance image or 'kernel' and 'ramdisk' not provided
in instance_info for non-Glance image.
"""
node = task.node
d_info = _parse_deploy_info(node)
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
else:
props = ['kernel', 'ramdisk']
deploy_utils.validate_image_properties(task.context, d_info, props)
def prepare_ramdisk(self, task, ramdisk_params):
"""Prepares the boot of deploy ramdisk using virtual media.
This method prepares the boot of the deploy ramdisk after
reading relevant information from the node's driver_info and
instance_info.
:param task: a task from TaskManager.
:param ramdisk_params: the parameters to be passed to the ramdisk.
:returns: None
:raises: MissingParameterValue, if some information is missing in
node's driver_info or instance_info.
:raises: InvalidParameterValue, if some information provided is
invalid.
:raises: IronicException, if some power or set boot device
operation failed on the node.
:raises: IloOperationError, if some operation on iLO failed.
"""
node = task.node
# NOTE(TheJulia): If this method is being called by something
# aside from a deployment, such as conductor takeover, we should
# treat this as a no-op and move on otherwise we would modify
# the state of the node due to virtual media operations.
if node.provision_state != states.DEPLOYING:
return
# Clear ilo_boot_iso if it's a glance image to force recreate
# another one again (or use existing one in glance).
# This is mainly for rebuild scenario.
if service_utils.is_glance_image(
node.instance_info.get('image_source')):
instance_info = node.instance_info
instance_info.pop('ilo_boot_iso', None)
node.instance_info = instance_info
node.save()
# Eject all virtual media devices, as we are going to use them
# during deploy.
ilo_common.eject_vmedia_devices(task)
deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)
ramdisk_params['BOOTIF'] = deploy_nic_mac
deploy_iso = node.driver_info['ilo_deploy_iso']
ilo_common.setup_vmedia(task, deploy_iso, ramdisk_params)
def prepare_instance(self, task):
"""Prepares the boot of instance.
This method prepares the boot of the instance after reading
relevant information from the node's instance_info.
It does the following depending on boot_option for deploy:
- If the boot_option requested for this deploy is 'local' or image
is a whole disk image, then it sets the node to boot from disk.
- Otherwise it finds/creates the boot ISO to boot the instance
image, attaches the boot ISO to the bare metal and then sets
the node to boot from CDROM.
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
ilo_common.cleanup_vmedia_boot(task)
# For iscsi_ilo driver, we boot from disk every time if the image
# deployed is a whole disk image.
node = task.node
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if deploy_utils.get_boot_option(node) == "local" or iwdi:
manager_utils.node_set_boot_device(task, boot_devices.DISK,
persistent=True)
else:
drv_int_info = node.driver_internal_info
root_uuid_or_disk_id = drv_int_info.get('root_uuid_or_disk_id')
if root_uuid_or_disk_id:
self._configure_vmedia_boot(task, root_uuid_or_disk_id)
else:
LOG.warning(_LW("The UUID for the root partition could not "
"be found for node %s"), node.uuid)
def clean_up_instance(self, task):
"""Cleans up the boot of instance.
This method cleans up the environment that was set up for booting
the instance. It ejects virtual media devices.
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
_clean_up_boot_iso_for_instance(task.node)
driver_internal_info = task.node.driver_internal_info
driver_internal_info.pop('boot_iso_created_in_web_server', None)
driver_internal_info.pop('root_uuid_or_disk_id', None)
task.node.driver_internal_info = driver_internal_info
task.node.save()
ilo_common.cleanup_vmedia_boot(task)
def clean_up_ramdisk(self, task):
"""Cleans up the boot of ironic ramdisk.
This method cleans up virtual media devices setup for the deploy
ramdisk.
:param task: a task from TaskManager.
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
ilo_common.cleanup_vmedia_boot(task)
def _configure_vmedia_boot(self, task, root_uuid):
"""Configure vmedia boot for the node.
:param task: a task from TaskManager.
:param root_uuid: uuid of the root partition
:returns: None
:raises: IloOperationError, if some operation on iLO failed.
"""
node = task.node
boot_iso = _get_boot_iso(task, root_uuid)
if not boot_iso:
LOG.error(_LE("Cannot get boot ISO for node %s"), node.uuid)
return
# Upon deploy completion, some distro cloud images reboot the system as
# part of their configuration. Hence the boot device should be persistent and
# not one-time.
ilo_common.setup_vmedia_for_boot(task, boot_iso)
manager_utils.node_set_boot_device(task,
boot_devices.CDROM,
persistent=True)
i_info = node.instance_info
i_info['ilo_boot_iso'] = boot_iso
node.instance_info = i_info
node.save()
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_switch_profile
short_description: Configure FortiSwitch switch profile in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and switch_profile category.
Examples include all parameters and values; these need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
switch_controller_switch_profile:
description:
- Configure FortiSwitch switch profile.
default: null
type: dict
suboptions:
login_passwd:
description:
- Login password of managed FortiSwitch.
type: str
login_passwd_override:
description:
- Enable/disable overriding the admin administrator password for a managed FortiSwitch with the FortiGate admin administrator account
password.
type: str
choices:
- enable
- disable
name:
description:
- FortiSwitch Profile name.
required: true
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FortiSwitch switch profile.
fortios_switch_controller_switch_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
switch_controller_switch_profile:
login_passwd: "<your_own_value>"
login_passwd_override: "enable"
name: "default_name_5"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_switch_controller_switch_profile_data(json):
option_list = ['login_passwd', 'login_passwd_override', 'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
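# FortiOS API field names use hyphens while Ansible option names use underscores;
# underscore_to_hyphen recursively rewrites dict keys (and list elements) accordingly.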
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def switch_controller_switch_profile(data, fos):
vdom = data['vdom']
state = data['state']
switch_controller_switch_profile_data = data['switch_controller_switch_profile']
filtered_data = underscore_to_hyphen(filter_switch_controller_switch_profile_data(switch_controller_switch_profile_data))
if state == "present":
return fos.set('switch-controller',
'switch-profile',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('switch-controller',
'switch-profile',
mkey=filtered_data['name'],
vdom=vdom)
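# A DELETE that comes back with HTTP 404 is also treated as success, so removing an
# already-absent profile stays idempotent.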
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
if data['switch_controller_switch_profile']:
resp = switch_controller_switch_profile(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"switch_controller_switch_profile": {
"required": False, "type": "dict", "default": None,
"options": {
"login_passwd": {"required": False, "type": "str"},
"login_passwd_override": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": True, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
from ..workspace import Block, Disconnected, Cancelled
from twisted.internet import defer
from twisted.python import log
from octopus import data
from octopus.constants import State
from octopus.image.data import Image, DerivedImageProperty
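# variableName parses names of the form "namespace::name" or "namespace::name::attr.path".
# For example (illustrative values only): "machine::pump::status.target" yields
# ("machine::pump", ["status", "target"]), while "global::counter" yields ("global::counter", None).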
def variableName (name):
split = name.split('::')
if len(split) == 2:
return (split[0] + "::" + split[1], None)
elif len(split) == 3:
return (split[0] + "::" + split[1], split[2].split('.'))
else:
raise InvalidVariableNameError(name)
class InvalidVariableNameError (Exception):
""" Raised by variableName """
class global_declaration (Block):
def _varName (self, name = None):
return "global.global::" + (name or self.getFieldValue('NAME', ''))
def created (self):
self._variables = []
# Deal with name changes
@self.on('value-changed')
def onVarNameChanged (data):
if not (data["block"] is self and data["field"] == 'NAME'):
self._onConnectivityChanged()
self._onChange()
return
self.workspace.variables.rename(data["oldValue"], data["newValue"])
self.on('connectivity-changed', self._onConnectivityChanged)
self._onConnectivityChanged()
# Set up event listeners whenever connections change
def _onConnectivityChanged (self, data = None):
for v in self._variables:
v.off('change', self._onChange)
try:
self._variables = set(self.getInput('VALUE').getReferencedVariables())
except (KeyError, AttributeError):
self._variables = []
for v in self._variables:
v.on('change', self._onChange)
# Handle any changes in variables
@defer.inlineCallbacks
def _onChange (self, data = None):
if self.workspace.state not in (State.RUNNING, State.PAUSED):
return
try:
result = yield self.getInput('VALUE').eval()
except (KeyError, AttributeError, Disconnected, Cancelled):
return
variable = self.workspace.variables[self._varName()]
try:
yield variable.set(result)
except AttributeError:
pass
except:
log.err()
@defer.inlineCallbacks
def _run (self):
result = yield self.getInputValue('VALUE', None)
if result is None:
try:
resultType = self.getInput('VALUE').outputType
except (KeyError, AttributeError):
raise Exception("Global declared value cannot be None")
if resultType is None:
raise Exception("Global declared value cannot be None")
else:
resultType = type(result)
# Special handling if the variable is an image.
if resultType is Image:
variable = DerivedImageProperty()
else:
variable = data.Variable(resultType)
variable.alias = self.getFieldValue('NAME')
self.workspace.variables[self._varName()] = variable
if result is not None:
yield variable.set(result)
self._onConnectivityChanged()
def disposed (self):
for v in self._variables:
v.off('change', self._onChange)
self.workspace.variables.remove(self._varName())
def getGlobalDeclarationNames (self):
name = self._varName()
return Block.getGlobalDeclarationNames(self,
[name] if not self.disabled else []
)
class lexical_variable (Block):
def _getVariable (self):
try:
name, attr = variableName(self.getFieldValue('VAR', ''))
variable = self.workspace.variables[name]
except (InvalidVariableNameError, KeyError):
return None
try:
if attr is not None:
for key in attr:
variable = getattr(variable, key)
except AttributeError:
return None
return variable
def getReferencedVariables (self):
variable = self._getVariable()
return Block.getReferencedVariables(self,
[variable] if not self.disabled and variable is not None else []
)
def getReferencedVariableNames (self):
name, attr = variableName(self.getFieldValue('VAR', ''))
return Block.getReferencedVariableNames(self,
[name] if not self.disabled else []
)
getUnmatchedVariableNames = getReferencedVariableNames
class lexical_variable_set (lexical_variable):
@defer.inlineCallbacks
def _run (self):
result = yield self.getInputValue("VALUE")
variable = self._getVariable()
yield self._setVariable(variable, result)
@defer.inlineCallbacks
def _setVariable (self, variable, value):
if variable is None:
self.emitLogMessage(
"Cannot set unknown variable: " + str(self.getFieldValue('VAR', '')),
"error"
)
return
try:
yield variable.set(value)
except Exception as error:
self.emitLogMessage(str(error), "error")
class lexical_variable_set_to (lexical_variable_set):
@defer.inlineCallbacks
def _run (self):
result = self.getFieldValue('VALUE')
unit = self.getFieldValue('UNIT', None)
if isinstance(unit, (int, float)):
result *= unit
variable = self._getVariable()
yield self._setVariable(variable, result)
class lexical_variable_get (lexical_variable):
def eval (self):
variable = self._getVariable()
if variable is None:
self.emitLogMessage(
"Unknown variable: " + str(self.getFieldValue('VAR')),
"error"
)
return defer.succeed(None)
unit = self.getFieldValue('UNIT', None)
result = variable.value
self.outputType = variable.type
if isinstance(unit, (int, float)):
result /= unit
return defer.succeed(result)
class math_change (lexical_variable_set):
def _run (self):
add = 1 if self.getFieldValue("MODE") == 'INCREMENT' else -1
variable = self._getVariable()
unit = self.getFieldValue('UNIT', None)
if isinstance(unit, (int, float)):
add *= unit
return self._setVariable(variable, variable.value + add)
class connection_gsioc (lexical_variable):
def eval (self):
machine = self._getVariable()
gsioc = machine.gsioc
return defer.maybeDeferred(
gsioc,
int(self.getFieldValue('ID', 0)),
)
|
|
from datetime import datetime
from unittest import TestCase
from unittest.mock import MagicMock
from test.base import ClientBaseCase
from linode_api4 import LongviewSubscription, LinodeClient, ApiError
class LinodeClientGeneralTest(ClientBaseCase):
"""
Tests methods of the LinodeClient class that do not live inside of a group.
"""
def test_get_no_empty_body(self):
"""
Tests that a valid JSON body is passed for a GET call
"""
with self.mock_get('linode/instances') as m:
self.client.regions()
self.assertEqual(m.call_data_raw, None)
def test_get_account(self):
a = self.client.account()
self.assertEqual(a._populated, True)
self.assertEqual(a.first_name, 'Test')
self.assertEqual(a.last_name, 'Guy')
self.assertEqual(a.email, '[email protected]')
self.assertEqual(a.phone, '123-456-7890')
self.assertEqual(a.company, 'Linode')
self.assertEqual(a.address_1, '3rd & Arch St')
self.assertEqual(a.address_2, '')
self.assertEqual(a.city, 'Philadelphia')
self.assertEqual(a.state, 'PA')
self.assertEqual(a.country, 'US')
self.assertEqual(a.zip, '19106')
self.assertEqual(a.tax_id, '')
self.assertEqual(a.balance, 0)
self.assertEqual(a.capabilities, ["Linodes","NodeBalancers","Block Storage","Object Storage"])
def test_get_regions(self):
r = self.client.regions()
self.assertEqual(len(r), 11)
for region in r:
self.assertTrue(region._populated)
self.assertIsNotNone(region.id)
self.assertIsNotNone(region.country)
if region.id in ('us-east', 'eu-central', 'ap-south'):
self.assertEqual(region.capabilities, ["Linodes","NodeBalancers","Block Storage","Object Storage"])
else:
self.assertEqual(region.capabilities, ["Linodes","NodeBalancers","Block Storage"])
self.assertEqual(region.status, "ok")
self.assertIsNotNone(region.resolvers)
self.assertIsNotNone(region.resolvers.ipv4)
self.assertIsNotNone(region.resolvers.ipv6)
def test_get_images(self):
r = self.client.images()
self.assertEqual(len(r), 4)
for image in r:
self.assertTrue(image._populated)
self.assertIsNotNone(image.id)
def test_get_domains(self):
"""
Tests that domains can be retrieved and are marshalled properly
"""
r = self.client.domains()
self.assertEqual(len(r), 1)
domain = r.first()
self.assertEqual(domain.domain, 'example.org')
self.assertEqual(domain.type, 'master')
self.assertEqual(domain.id, 12345)
self.assertEqual(domain.axfr_ips, [])
self.assertEqual(domain.retry_sec, 0)
self.assertEqual(domain.ttl_sec, 300)
self.assertEqual(domain.status, 'active')
self.assertEqual(domain.master_ips, [],)
self.assertEqual(domain.description, "",)
self.assertEqual(domain.group, "",)
self.assertEqual(domain.expire_sec, 0,)
self.assertEqual(domain.soa_email, "[email protected]",)
self.assertEqual(domain.refresh_sec, 0)
def test_image_create(self):
"""
Tests that an Image can be created successfully
"""
with self.mock_post('images/private/123') as m:
i = self.client.image_create(654, 'Test-Image', 'This is a test')
self.assertIsNotNone(i)
self.assertEqual(i.id, 'private/123')
self.assertEqual(m.call_url, '/images')
self.assertEqual(m.call_data, {
"disk_id": 654,
"label": "Test-Image",
"description": "This is a test",
})
def test_get_volumes(self):
v = self.client.volumes()
self.assertEqual(len(v), 3)
self.assertEqual(v[0].label, 'block1')
self.assertEqual(v[0].region.id, 'us-east-1a')
self.assertEqual(v[1].label, 'block2')
self.assertEqual(v[1].size, 100)
self.assertEqual(v[2].size, 200)
self.assertEqual(v[2].label, 'block3')
assert v[0].tags == ["something"]
assert v[1].tags == []
assert v[2].tags == ["attached"]
def test_get_tags(self):
"""
Tests that a list of Tags can be retrieved as expected
"""
t = self.client.tags()
self.assertEqual(len(t), 2)
self.assertEqual(t[0].label, 'nothing')
self.assertEqual(t[1].label, 'something')
def test_tag_create(self):
"""
Tests that creating a tag works as expected
"""
# tags don't work like a normal RESTful collection, so we have to do this
with self.mock_post({'label':'nothing'}) as m:
t = self.client.tag_create('nothing')
self.assertIsNotNone(t)
self.assertEqual(t.label, 'nothing')
self.assertEqual(m.call_url, '/tags')
self.assertEqual(m.call_data, {
'label': 'nothing',
})
def test_tag_create_with_ids(self):
"""
Tests that creating a tag with IDs sends the correct request
"""
instance1, instance2 = self.client.linode.instances()[:2]
domain1 = self.client.domains().first()
nodebalancer1, nodebalancer2 = self.client.nodebalancers()[:2]
volume1, volume2 = self.client.volumes()[:2]
# tags don't work like a normal RESTful collection, so we have to do this
with self.mock_post({'label':'pytest'}) as m:
t = self.client.tag_create('pytest',
instances=[instance1.id, instance2],
nodebalancers=[nodebalancer1.id, nodebalancer2],
domains=[domain1.id],
volumes=[volume1.id, volume2])
self.assertIsNotNone(t)
self.assertEqual(t.label, 'pytest')
self.assertEqual(m.call_url, '/tags')
self.assertEqual(m.call_data, {
'label': 'pytest',
'linodes': [instance1.id, instance2.id],
'domains': [domain1.id],
'nodebalancers': [nodebalancer1.id, nodebalancer2.id],
'volumes': [volume1.id, volume2.id],
})
def test_tag_create_with_entities(self):
"""
Tests that creating a tag with entities sends the correct request
"""
instance1, instance2 = self.client.linode.instances()[:2]
domain = self.client.domains().first()
nodebalancer = self.client.nodebalancers().first()
volume = self.client.volumes().first()
# tags don't work like a normal RESTful collection, so we have to do this
with self.mock_post({'label':'pytest'}) as m:
t = self.client.tag_create('pytest',
entities=[instance1, domain, nodebalancer, volume, instance2])
self.assertIsNotNone(t)
self.assertEqual(t.label, 'pytest')
self.assertEqual(m.call_url, '/tags')
self.assertEqual(m.call_data, {
'label': 'pytest',
'linodes': [instance1.id, instance2.id],
'domains': [domain.id],
'nodebalancers': [nodebalancer.id],
'volumes': [volume.id],
})
class AccountGroupTest(ClientBaseCase):
"""
Tests methods of the AccountGroup
"""
def test_get_settings(self):
"""
Tests that account settings can be retrieved.
"""
s = self.client.account.settings()
self.assertEqual(s._populated, True)
self.assertEqual(s.network_helper, False)
self.assertEqual(s.managed, False)
self.assertEqual(type(s.longview_subscription), LongviewSubscription)
self.assertEqual(s.longview_subscription.id, 'longview-100')
self.assertEqual(s.object_storage, "active")
def test_get_invoices(self):
"""
Tests that invoices can be retrieved
"""
i = self.client.account.invoices()
self.assertEqual(len(i), 1)
invoice = i[0]
self.assertEqual(invoice.id, 123456)
self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2))
self.assertEqual(invoice.label, 'Invoice #123456')
self.assertEqual(invoice.total, 9.51)
class LinodeGroupTest(ClientBaseCase):
"""
Tests methods of the LinodeGroup
"""
def test_instance_create(self):
"""
Tests that a Linode Instance can be created successfully
"""
with self.mock_post('linode/instances/123') as m:
l = self.client.linode.instance_create('g5-standard-1', 'us-east-1a')
self.assertIsNotNone(l)
self.assertEqual(l.id, 123)
self.assertEqual(m.call_url, '/linode/instances')
self.assertEqual(m.call_data, {
"region": "us-east-1a",
"type": "g5-standard-1"
})
def test_instance_create_with_image(self):
"""
Tests that a Linode Instance can be created with an image, and a password generated
"""
with self.mock_post('linode/instances/123') as m:
l, pw = self.client.linode.instance_create(
'g5-standard-1', 'us-east-1a', image='linode/debian9')
self.assertIsNotNone(l)
self.assertEqual(l.id, 123)
self.assertEqual(m.call_url, '/linode/instances')
self.assertEqual(m.call_data, {
"region": "us-east-1a",
"type": "g5-standard-1",
"image": "linode/debian9",
"root_pass": pw,
})
class LongviewGroupTest(ClientBaseCase):
"""
Tests methods of the LongviewGroup
"""
def test_get_clients(self):
"""
Tests that a list of LongviewClients can be retrieved
"""
r = self.client.longview.clients()
self.assertEqual(len(r), 2)
self.assertEqual(r[0].label, "test_client_1")
self.assertEqual(r[0].id, 1234)
self.assertEqual(r[1].label, "longview5678")
self.assertEqual(r[1].id, 5678)
def test_client_create(self):
"""
Tests that creating a client calls the api correctly
"""
with self.mock_post('longview/clients/5678') as m:
client = self.client.longview.client_create()
self.assertIsNotNone(client)
self.assertEqual(client.id, 5678)
self.assertEqual(client.label, 'longview5678')
self.assertEqual(m.call_url, '/longview/clients')
self.assertEqual(m.call_data, {})
def test_client_create_with_label(self):
"""
Tests that creating a client with a label calls the api correctly
"""
with self.mock_post('longview/clients/1234') as m:
client = self.client.longview.client_create(label='test_client_1')
self.assertIsNotNone(client)
self.assertEqual(client.id, 1234)
self.assertEqual(client.label, 'test_client_1')
self.assertEqual(m.call_url, '/longview/clients')
self.assertEqual(m.call_data, {"label": "test_client_1"})
def test_get_subscriptions(self):
"""
Tests that Longview subscriptions can be retrieved
"""
r = self.client.longview.subscriptions()
self.assertEqual(len(r), 4)
expected_results = (
("longview-10", "Longview Pro 10 pack"),
("longview-100", "Longview Pro 100 pack"),
("longview-3", "Longview Pro 3 pack"),
("longview-40", "Longview Pro 40 pack"),
)
for result, (expected_id, expected_label) in zip(r, expected_results):
self.assertEqual(result.id, expected_id)
self.assertEqual(result.label, expected_label)
class LKEGroupTest(ClientBaseCase):
"""
Tests methods of the LKEGroup
"""
def test_kube_version(self):
"""
Tests that KubeVersions can be retrieved
"""
versions = self.client.lke.versions()
self.assertEqual(len(versions), 3)
self.assertEqual(versions[0].id, "1.19")
self.assertEqual(versions[1].id, "1.18")
self.assertEqual(versions[2].id, "1.17")
def test_cluster_create_with_api_objects(self):
"""
Tests clusters can be created using api objects
"""
region = self.client.regions().first()
node_type = self.client.linode.types()[0]
version = self.client.lke.versions()[0]
node_pools = self.client.lke.node_pool(node_type, 3)
with self.mock_post("lke/clusters") as m:
cluster = self.client.lke.cluster_create(
region, "example-cluster", node_pools, version
)
self.assertEqual(m.call_data["region"], "ap-west")
self.assertEqual(m.call_data["node_pools"],
[{"type": "g5-nanode-1", "count": 3}])
self.assertEqual(m.call_data["k8s_version"], "1.19")
self.assertEqual(cluster.id, 18881)
self.assertEqual(cluster.region.id, "ap-west")
self.assertEqual(cluster.k8s_version.id, "1.19")
def test_cluster_create_with_string_repr(self):
"""
Tests clusters can be created using string representations
"""
with self.mock_post("lke/clusters") as m:
cluster = self.client.lke.cluster_create(
"ap-west", "example-cluster",
{"type": "g6-standard-1", "count": 3}, "1.19"
)
self.assertEqual(m.call_data["region"], "ap-west")
self.assertEqual(m.call_data["node_pools"],
[{"type": "g6-standard-1", "count": 3}])
self.assertEqual(m.call_data["k8s_version"], "1.19")
self.assertEqual(cluster.id, 18881)
self.assertEqual(cluster.region.id, "ap-west")
self.assertEqual(cluster.k8s_version.id, "1.19")
class ProfileGroupTest(ClientBaseCase):
"""
Tests methods of the ProfileGroup
"""
def test_get_sshkeys(self):
"""
Tests that a list of SSH Keys can be retrieved
"""
r = self.client.profile.ssh_keys()
self.assertEqual(len(r), 2)
key1, key2 = r
self.assertEqual(key1.label, 'Home Ubuntu PC')
self.assertEqual(key1.created, datetime(year=2018, month=9, day=14, hour=13,
minute=0, second=0))
self.assertEqual(key1.id, 22)
self.assertEqual(
key1.ssh_key, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98"
"ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD"
"eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO"
"TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i"
"muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo"
"F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC"
"MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy"
"nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz"
"/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz"
"tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v"
"TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH"
"d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b"
"cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI"
"CmhW7erNJNVxYjtzseGpBLmRRUTsT038w== dorthu@dorthu-command")
def test_client_create(self):
"""
Tests that creating a client calls the api correctly
"""
with self.mock_post('longview/clients/5678') as m:
client = self.client.longview.client_create()
self.assertIsNotNone(client)
self.assertEqual(client.id, 5678)
self.assertEqual(client.label, 'longview5678')
self.assertEqual(m.call_url, '/longview/clients')
self.assertEqual(m.call_data, {})
def test_ssh_key_create(self):
"""
Tests that creating an ssh key works as expected
"""
with self.mock_post('profile/sshkeys/72') as m:
key = self.client.profile.ssh_key_upload(
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98"
"ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD"
"eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO"
"TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i"
"muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo"
"F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC"
"MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy"
"nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz"
"/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz"
"tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v"
"TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH"
"d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b"
"cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI"
"CmhW7erNJNVxYjtzseGpBLmRRUTsT038w==dorthu@dorthu-command",
'Work Laptop')
self.assertIsNotNone(key)
self.assertEqual(key.id, 72)
self.assertEqual(key.label, 'Work Laptop')
self.assertEqual(m.call_url, '/profile/sshkeys')
self.assertEqual(m.call_data, {
"ssh_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98"
"ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD"
"eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO"
"TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i"
"muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo"
"F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC"
"MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy"
"nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz"
"/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz"
"tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v"
"TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH"
"d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b"
"cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI"
"CmhW7erNJNVxYjtzseGpBLmRRUTsT038w==dorthu@dorthu-command",
"label": "Work Laptop"
})
class ObjectStorageGroupTest(ClientBaseCase):
"""
Tests for the ObjectStorageGroup
"""
def test_get_clusters(self):
"""
Tests that Object Storage Clusters can be retrieved
"""
clusters = self.client.object_storage.clusters()
self.assertEqual(len(clusters), 1)
cluster = clusters[0]
self.assertEqual(cluster.id, 'us-east-1')
self.assertEqual(cluster.region.id, 'us-east')
self.assertEqual(cluster.domain, 'us-east-1.linodeobjects.com')
self.assertEqual(cluster.static_site_domain, 'website-us-east-1.linodeobjects.com')
def test_get_keys(self):
"""
Tests that you can retrieve Object Storage Keys
"""
keys = self.client.object_storage.keys()
self.assertEqual(len(keys), 2)
key1 = keys[0]
key2 = keys[1]
self.assertEqual(key1.id, 1)
self.assertEqual(key1.label, 'object-storage-key-1')
self.assertEqual(key1.access_key, 'testAccessKeyHere123')
self.assertEqual(key1.secret_key, '[REDACTED]')
self.assertEqual(key2.id, 2)
self.assertEqual(key2.label, 'object-storage-key-2')
self.assertEqual(key2.access_key, 'testAccessKeyHere456')
self.assertEqual(key2.secret_key, '[REDACTED]')
def test_keys_create(self):
"""
Tests that you can create Object Storage Keys
"""
with self.mock_post('object-storage/keys/1') as m:
keys = self.client.object_storage.keys_create('object-storage-key-1')
self.assertIsNotNone(keys)
self.assertEqual(keys.id, 1)
self.assertEqual(keys.label, 'object-storage-key-1')
self.assertEqual(m.call_url, '/object-storage/keys')
self.assertEqual(m.call_data, {"label":"object-storage-key-1"})
class NetworkingGroupTest(ClientBaseCase):
"""
Tests for the NetworkingGroup
"""
def test_get_vlans(self):
"""
        Tests that VLANs can be retrieved
"""
vlans = self.client.networking.vlans()
self.assertEqual(len(vlans), 1)
self.assertEqual(vlans[0].label, 'vlan-test')
self.assertEqual(vlans[0].region.id, 'us-southeast')
self.assertEqual(len(vlans[0].linodes), 2)
self.assertEqual(vlans[0].linodes[0], 111)
self.assertEqual(vlans[0].linodes[1], 222)
def test_firewall_create(self):
with self.mock_post('networking/firewalls/123') as m:
rules = {
'outbound': [],
'outbound_policy': 'DROP',
'inbound': [],
'inbound_policy': 'DROP'
}
f = self.client.networking.firewall_create('test-firewall-1', rules,
status='enabled')
self.assertIsNotNone(f)
self.assertEqual(m.call_url, '/networking/firewalls')
self.assertEqual(m.method, 'post')
self.assertEqual(f.id, 123)
self.assertEqual(m.call_data, {
'label': 'test-firewall-1',
'status': 'enabled',
'rules': rules
})
def test_get_firewalls(self):
"""
Tests that firewalls can be retrieved
"""
f = self.client.networking.firewalls()
self.assertEqual(len(f), 1)
firewall = f[0]
self.assertEqual(firewall.id, 123)
class LinodeClientRateLimitRetryTest(TestCase):
"""
Tests for rate limiting errors.
.. warning::
This test class _does not_ follow normal testing conventions for this project,
as requests are not automatically mocked. Only add tests to this class if they
pertain to the 429 retry logic, and make sure you mock the requests calls yourself
(or else they will make real requests and those won't work).
"""
def setUp(self):
self.client = LinodeClient("testing", base_url="/", retry_rate_limit_interval=1)
# sidestep the validation to do immediate retries so tests aren't slow
self.client.retry_rate_limit_interval = 0.1
def _get_mock_response(self, response_code):
"""
Helper function to return a mock response
"""
ret = MagicMock()
ret.status_code = response_code
ret.json.return_value = {}
return ret
def test_retry_429s(self):
"""
Tests that 429 responses are automatically retried
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
if called < 2:
return self._get_mock_response(429)
return self._get_mock_response(200)
response = self.client._api_call('/test', method=test_method)
# it retried once, got the empty object
assert called == 2
assert response == {}, response
def test_retry_max_attempts(self):
"""
Tests that a request will fail after 5 429 responses in a row
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
return self._get_mock_response(429)
try:
response = self.client._api_call('/test', method=test_method)
assert False, "Unexpectedly did not raise ApiError!"
except ApiError as e:
assert e.status == 429
# it tried 5 times
assert called == 5
def test_api_error_with_retry(self):
"""
Tests that a 300+ response still raises an ApiError even if retries are
enabled
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
return self._get_mock_response(400)
try:
response = self.client._api_call('/test', method=test_method)
assert False, "Unexpectedly did not raise ApiError!"
except ApiError as e:
assert e.status == 400
            # it only tried once; non-429 errors are not retried
assert called == 1
def test_api_error_on_retry(self):
"""
Tests that we'll stop retrying and raise immediately if we get a 300+
response after a 429
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
if called < 2:
return self._get_mock_response(429)
return self._get_mock_response(400)
try:
response = self.client._api_call('/test', method=test_method)
assert False, "Unexpectedly did not raise ApiError!"
except ApiError as e:
assert e.status == 400
            # it tried twice: one retry after the 429, then the 400 was raised
assert called == 2
def test_works_first_time(self):
"""
Tests that the response is handled correctly if we got a 200 on the first
try
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
return self._get_mock_response(200)
response = self.client._api_call('/test', method=test_method)
        # it succeeded on the first try, so only one call was made
assert called == 1
assert response == {}
|
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
import hashlib
import lockfile
import logging
import os
import subprocess
import sys
import tempfile
import re
try:
import simplejson as json
except ImportError:
import json
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
logging.getLogger().addHandler(handler)
log = logging.getLogger('schema')
log.setLevel(logging.INFO)
_BOOLEANS = ["autoIncrement", "primaryKey", "nullable", "alwaysRun", "runOnChange", "failOnError", "runInTransaction",
"defaultValueBoolean", "unique", "deleteCascade", "initiallyDeferred", "deferrable"]
_NUMBERS = ["defaultValueNumeric", "startWith", "incrementBy"]
def type_fixer(d):
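    # CloudFormation delivers every property value as a string; coerce the
    # known boolean and numeric Liquibase changelog attributes back to
    # native types before the changelog is re-serialized.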
return_value = dict(d)
for k, v in d.iteritems():
if not isinstance(v, basestring):
continue
if k in _BOOLEANS:
            return_value[k] = (v.lower() == "true")
elif k in _NUMBERS:
try:
return_value[k] = int(v)
except ValueError:
return_value[k] = float(v)
return return_value
class FatalError(SystemExit):
def __init__(self, reason, code):
super(FatalError, self).__init__(code)
log.error('Failing resource: %s', reason)
print json.dumps({'Reason': reason})
class Liquifier(object):
def _get_property_or_fail(self, key, properties=None):
if not properties:
properties = self._resource_properties
try:
return properties[key]
except KeyError:
raise FatalError('Properties did not contain required field %s' % key, -1)
def __init__(self, properties, old_properties, stackId, logicalId,
driver='com.mysql.jdbc.Driver', liquibase_home='/home/ec2-user/liquibase'):
self._stack_id = stackId
self._logical_id = logicalId
self._resource_properties = properties
self._url = self._get_property_or_fail('DatabaseURL')
self._user = self._get_property_or_fail('DatabaseUsername')
self._passwd = self._get_property_or_fail('DatabasePassword')
self._old_properties = old_properties
# TODO: fail if old url/username/password do not match (not allowed to update)
self._driver = driver
self._libjars = ':'.join(
[os.path.join('%s/lib/' % liquibase_home, f) for f in os.listdir(liquibase_home + '/lib') if f.endswith('.jar')]
)
self._libjars += ':%s/liquibase.jar' % liquibase_home
self._liquibase_home = liquibase_home
def run_event(self, event_type):
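        # event_type is the CloudFormation custom resource RequestType
        # ('Create', 'Update' or 'Delete') taken from the incoming event.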
change_log = self._get_property_or_fail('databaseChangeLog')
change_tag = hashlib.sha256(json.dumps(change_log)).hexdigest()
old_change_log = self._get_property_or_fail('databaseChangeLog', self._old_properties)
if event_type == 'Create':
self._update_to_tag(change_log, change_tag)
elif event_type == 'Update':
# to roll back to a previous changelog, liquibase needs the "latest" changelog, but the previous tag.
if not self._roll_back_to_tag(old_change_log, change_tag):
self._update_to_tag(change_log, change_tag)
elif event_type == 'Delete':
if self._resource_properties.get('DropAllOnDelete', 'false').lower() == 'true':
self._drop_all()
def _get_command_base(self):
return ['java',
'-cp', self._libjars,
'liquibase.integration.commandline.Main',
'--logLevel=debug',
'--classpath=%s' % self._libjars,
'--driver=%s' % self._driver,
'--url=%s' % self._url,
'--username=%s' % self._user,
'--password=%s' % self._passwd]
def _run_cmd(self, cmdline, liquibase_cmd):
log.info("Running command: %s", cmdline)
proc = subprocess.Popen(cmdline, cwd=self._liquibase_home, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = proc.communicate()[0]
log.info('Liquibase %s output: %s', liquibase_cmd, out)
return proc.returncode, out
def _call_with_changelog(self, func, change_log):
changelog_parent = os.path.join(tempfile.gettempdir(),
re.sub('[^a-zA-Z0-9_-]', '_', self._stack_id),
self._logical_id)
if not os.path.isdir(changelog_parent):
try:
os.makedirs(changelog_parent)
except OSError, e:
raise FatalError(str(e), -2)
lock = lockfile.FileLock(os.path.join(changelog_parent, 'changelog.json.lock'))
with lock:
changelog_path = os.path.join(changelog_parent, 'changelog.json')
with file(changelog_path, 'w') as f:
json.dump({'databaseChangeLog': change_log}, f, indent=4)
f.flush()
retval = func(changelog_path)
os.remove(changelog_path)
return retval
def _update(self, changelog_file):
cmd = self._get_command_base()
cmd.append('--changeLogFile=%s' % changelog_file)
cmd.append('update')
retcode, output = self._run_cmd(cmd, 'update')
if retcode:
raise FatalError('Liquibase update failed with error %s' % retcode, retcode)
def _update_to_tag(self, change_log, change_tag):
self._call_with_changelog(lambda path: self._update(path), change_log)
self._tag(change_tag)
def _rollback(self, changelog_file, change_tag):
cmd = self._get_command_base()
cmd.append('--changeLogFile=%s' % changelog_file)
cmd.append('rollback')
cmd.append(change_tag)
retcode, output = self._run_cmd(cmd, 'update_rollback')
return False if retcode else True
def _roll_back_to_tag(self, change_log, change_tag):
return self._call_with_changelog(lambda path: self._rollback(path, change_tag), change_log)
def _drop_all(self):
cmd = self._get_command_base()
cmd.append('dropAll')
retcode, output = self._run_cmd(cmd, 'dropAll')
if retcode:
raise FatalError('Liquibase drop failed with error %s' % retcode, retcode)
def _tag(self, tag):
cmd = self._get_command_base()
cmd.append('tag')
cmd.append(tag)
retcode, output = self._run_cmd(cmd, 'tag')
if retcode:
raise FatalError('Liquibase tag failed with error %s' % retcode, retcode)
try:
event_obj = json.loads(os.environ.get('EventProperties'), object_hook=type_fixer)
except ValueError:
raise FatalError('Could not parse properties as JSON', -1)
event_type = event_obj['RequestType']
log.info('%s received event: %s', event_type, json.dumps(event_obj, indent=4))
resource_properties = event_obj.get('ResourceProperties')
if not resource_properties:
raise FatalError('Resource Properties not found.', -1)
stack_id = event_obj['StackId']
logical_id = event_obj['LogicalResourceId']
Liquifier(resource_properties, event_obj.get('OldResourceProperties', {}), stack_id, logical_id).run_event(event_type)
print json.dumps({})
|
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
import binascii
import json
import StringIO
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
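#deserialize a 256-bit integer from 8 little-endian 32-bit words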
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls with a request body
def http_get_call(host, port, path, requestdata = '', response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
print "Mining blocks..."
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 40)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
        #######################################
        # GETUTXOS: query an unspent outpoint #
        #######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
        #################################################
        # GETUTXOS: now query an already spent outpoint #
        #################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
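        # binary getutxos request (assumed layout): 0x01 check-mempool flag,
        # 0x02 outpoint count, then each outpoint as 32-byte txid + 4-byte index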
binaryRequest = b'\x01\x02'
binaryRequest += binascii.unhexlify(txid)
binaryRequest += pack("i", n);
binaryRequest += binascii.unhexlify(vintx);
binaryRequest += pack("i", 0);
bin_response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = StringIO.StringIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(65).rstrip("L")
        assert_equal(bb_hash, hashFromBinResponse) #check that the chain tip reported by getutxos matches our best block hash
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #there should be no utxo yet because the tx is unconfirmed and we did not ask to check the mempool
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be one utxo because the tx was just added to the mempool and we checked it
#do some invalid requests
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid json request
json_request = '{"checkmempool'
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid bin request
response = http_get_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 500) #must be a 500 because we are exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/");
response = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 200) #must be a 200 because we stay within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# Block header length of 237 is:
# - 76 bytes: regular fields
# - 32 bytes: nonce
# - 129 bytes: Equihash solution vector:
# - 1 byte vector length
# - 2^k 4-byte indices
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 237)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", "", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 237)
response_header_str = response_header.read()
assert_equal(response_str[0:237], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 474)
response_hex_str = response_hex.read()
assert_equal(response_str.encode("hex")[0:474], response_hex_str[0:474])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 474)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:474], response_header_hex_str[0:474])
assert_equal(response_header_str.encode("hex")[0:474], response_header_hex_str[0:474])
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
tx_hash = json_obj['tx'][0]['txid'];
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", "", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from decimal import Decimal
import random
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.script import (
CScript,
OP_1,
OP_2,
OP_DROP,
OP_TRUE,
)
from test_framework.script_util import (
script_to_p2sh_script,
)
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
satoshi_round,
)
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
P2SH_1 = script_to_p2sh_script(REDEEM_SCRIPT_1)
P2SH_2 = script_to_p2sh_script(REDEEM_SCRIPT_2)
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)."""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
unconflist.append({"txid": txid, "vout": 1, "amount": amount})
return (tx.serialize().hex(), fee)
def split_inputs(from_node, txins, txouts, initial_split=False):
"""Generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
Previously this was designed to be small inputs so they wouldn't have
a high coin age when the notion of priority still existed."""
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"] / 2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split):
completetx = from_node.signrawtransactionwithwallet(tx.serialize().hex())["hex"]
else:
tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
completetx = tx.serialize().hex()
txid = from_node.sendrawtransaction(hexstring=completetx, maxfeerate=0)
txouts.append({"txid": txid, "vout": 0, "amount": half_change})
txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
def check_raw_estimates(node, fees_seen):
"""Call estimaterawfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
for i in range(1, 26):
for _, e in node.estimaterawfee(i).items():
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
% (feerate, min(fees_seen), max(fees_seen)))
def check_smart_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
for i, e in enumerate(all_smart_estimates): # estimate is for i+1
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
% (feerate, min(fees_seen), max(fees_seen)))
if feerate - delta > last_feerate:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
% (feerate, last_feerate))
last_feerate = feerate
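        # a 1-block target cannot be estimated, so the node is expected to
        # answer the first request with a 2-block estimate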
if i == 0:
assert_equal(e["blocks"], 2)
else:
assert_greater_than_or_equal(i + 1, e["blocks"])
def check_estimates(node, fees_seen):
check_raw_estimates(node, fees_seen)
check_smart_estimates(node, fees_seen)
class EstimateFeeTest(FujicoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
# mine non-standard txs (e.g. txs with "dust" outputs)
# Force fSendTrickle to true (via whitelist.noban)
self.extra_args = [
["-acceptnonstdtxn", "[email protected]"],
["-acceptnonstdtxn", "[email protected]", "-blockmaxweight=68000"],
["-acceptnonstdtxn", "[email protected]", "-blockmaxweight=32000"],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
"""
We'll setup the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=self.extra_args)
# Use node0 to mine blocks for input splitting
        # Node1 mines small blocks, but with capacity just above the expected transaction rate.
# NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight,
# (68k weight is room enough for 120 or so transactions)
        # Node2 is a stingy miner that
# produces too small blocks (room for only 55 or so transactions)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
self.stop_nodes()
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for _ in range(numblocks):
random.shuffle(self.confutxo)
for _ in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee) / tx_kbytes)
self.sync_mempools(wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0], True)["tx"]
self.sync_blocks(wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while reps < 5:
# Double txouts to txouts2
while len(self.txouts) > 0:
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].generate(1)
# Double txouts2 to txouts
while len(self.txouts2) > 0:
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
self.start_node(1)
self.start_node(2)
self.connect_nodes(1, 0)
self.connect_nodes(0, 2)
self.connect_nodes(2, 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for _ in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
self.sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info("Testing that fee estimation is disabled in blocksonly.")
self.restart_node(0, ["-blocksonly"])
assert_raises_rpc_error(-32603, "Fee estimation disabled",
self.nodes[0].estimatesmartfee, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
|
|
import random
import numpy
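# Two-lane cellular-automaton traffic simulation. The constants below are the
# model parameters; the interpretation given here is an assumption based on
# how they are used in the update rules further down:
#   h       time-headway horizon for reacting to a braking leader
#   p_b     braking probability when the leader's brake flag is set
#   p_0     slow-to-start probability for standing cars
#   p_d     default randomization probability
#   v_max   maximum speeds for the two vehicle classes (slow, fast)
#   gap     anticipation gap used when computing the effective distance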
lane_num = 2
lane = [[], []]
max_car_num = 10000
road_len = 1000
h = 6
p_b = 0.94
p_0 = 0.5
p_d = 0.1
v_max = [6, 10]
gap = 7
p_car = 1
p_crash = 0
time_period = 200
class Car:
car_cnt = 0
def __init__(self, v = 1, lane = 1):
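        # roughly 10% of vehicles are slow (size 0, v_max 6); the rest are
        # fast (size 1, v_max 10)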
self.size = 1
if random.random() < 0.1:
self.size = 0
self.c_id = Car.car_cnt
Car.car_cnt += 1
#print(Car.car_cnt, max_car_num)
#self.v = 0
self.v = random.randint(0, v_max[self.size])
self.lane = random.randint(0, 1)
#self.pos = random.randint(0, road_len)
self.b = 0
def __lt__(self, other):
return self.pos < other.pos
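# forward gap from position b to position a on the circular road of length road_len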
def mmod(a, b):
return (a - b + road_len) % road_len
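# randomization probability: p_b if the leader brakes within the time horizon,
# p_0 for a standing car (slow-to-start), p_d otherwise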
def rand(v, b, t_h, t_s):
if b == 1 and t_h < t_s:
return p_b
if v == 0 and not(b == 1 and t_h < t_s):
return p_0
return p_d
def main():
Car.car_cnt = 0
lane_cnt = [0] * lane_num
car_num = 0
flow = [0.0] * lane_num
lane_change_cnt = 0
car = [0] * max_car_num
'''
for i in range(max_car_num):
car[i] = Car()
'''
for i in range(2):
for j in range(road_len):
lane[i].append('.')
v_now = 0
#adding new cars
cur = 0
vis = [[0]*road_len, [0]*road_len]
for i in range(max_car_num):
car[car_num] = Car()
car[car_num].pos = int(cur / lane_num)
cur += float(road_len) * lane_num / max_car_num
car[car_num].lane = random.randint(0,1)
if vis[car[car_num].lane][car[car_num].pos] == 0:
vis[car[car_num].lane][car[car_num].pos] = 1
else:
car[car_num].lane = 1 - car[car_num].lane
vis[car[car_num].lane][car[car_num].pos] = 1
#print (car[car_num].pos, car[car_num].lane)
#print car[car_num].pos, car[car_num].lane
# random lane
#car[car_num].lane = random.randint(0,1)
lane_cnt[car[car_num].lane] += 1
car_num += 1
#print car_num,lane_cnt
sum_v = 0
sum_vs = [0] * lane_num
    speed_1 = [[] for _ in range(max_car_num)]
for i in range(time_period):
v_succ = [2 ** 31] * lane_num
if time_period-1 == i:
for j in range(car_num):
sum_v += car[j].v
sum_vs[car[j].lane] += car[j].v
for j in list(reversed(range(car_num))):
#for j in (range(car_num)):
if (time_period-1)*0.618 >= i:
#print (max_car_num, car[j].c_id)
speed_1[car[j].c_id].append(car[j].v)
v_succ_cur = v_succ
k = j + 1
while k < car_num and car[k].lane != car[j].lane:
k += 1
if k >= car_num:
k = 0
while k < car_num and car[k].lane != car[j].lane:
k += 1
kk = k + 1
while kk < car_num and car[kk].lane != car[j].lane:
kk += 1
if kk >= car_num:
kk = 0
while kk < car_num and car[kk].lane != car[j].lane:
kk += 1
#0 Determine p
t_s = min(car[j].v, h)
d = mmod(car[k].pos, car[j].pos)
if k >= car_num - 1:
d = 2 ** 31
if int(car[j].v) > 0 and d != 2 ** 31:
t_h = d / car[j].v
else:
t_h = t_s
p = rand(car[j].v, car[k].b, t_h, t_s)
b_now = car[j].b
car[j].b = 0
if j > 0:
v_succ[car[j - 1].lane] = v_now
else:
v_succ[0] = [2**31] * lane_num
v_now = car[j].v
#1 accelerate
if k >= car_num or (b_now == 0 and car[j].b == 0) or t_h >= t_s:
car[j].v = min(car[j].v + 1, v_max[car[j].size])
#2 braking
if kk < car_num:
v_anti = min(mmod(car[kk].pos, car[k].pos), car[k].v)
else:
v_anti = car[k].v
d_eff = d - 1 + max(v_anti - gap, 0)
car[j].v = max(min(d_eff, car[j].v), 0)
#car[j].v = max(min(d_eff, v_now), 0)
if car[j].v < v_now:
car[j].b = 1
#3 random brake
if random.random() < p:
car[j].v = max(car[j].v - 1, 0)
if p == p_b:
car[j].b = 1
#traffic accident
#if random.random() < p_crash:
#car[j].v /= 3
#lane changing
k = j + 1
l = j - 1
while (k < car_num and car[k].lane == car[j].lane):
k += 1
if k >= car_num:
k = 0
while (k < car_num and car[k].lane == car[j].lane):
k += 1
kk = k + 1
while (kk < car_num and car[kk].lane == car[j].lane):
kk += 1
if kk >= car_num:
kk = 0
while (kk < car_num and car[kk].lane == car[j].lane):
kk += 1
if k < car_num:
v_anti = car[k].v
if kk < car_num:
v_anti = min(mmod(car[kk].pos, car[k].pos), car[k].v)
d_p = mmod(car[k].pos, car[j].pos)
d_p_eff = mmod(car[k].pos, car[j].pos) + max(car[k].v - gap, 0)
else:
d_p = 2 ** 31
d_p_eff = 2 ** 31
while (l > 0 and car[l].lane == car[j].lane):
l -= 1
if l < 0:
l = car_num - 1
while (l > 0 and car[l].lane == car[j].lane):
l -= 1
dst = 1 - car[j].lane
#if ((dst == 1 and v_now <= 7) or v_now > d) and b_now == 0: # velocity based rule
#if (car[j].b == 0 and (\
#v_now > d or\
#(dst == 1 and v_max[car[j].size] <= 7))): # slow car right rule
#if (car[j].b == 0 and (\
#v_now > d or\
#dst == 1\
#)):
'''
if v_now > 0:
t_p_h = float(d_p) / v_now
else:
t_p_h = 4
if (dst == 0 and b_now == 0 and v_now > d) or\
(dst == 1 and b_now == 0 and t_p_h > 3.0 and (t_h > 6.0 or v_now > d)): # right priority rule in paper
if v_now > 0:
t_p_h = float(d_p) / v_now
else:
t_p_h = 4
if (dst == 0 and b_now == 0 and v_now > d) or\
(dst == 1 and v_now <= 7 and b_now == 0 and t_p_h > 3.0 and (t_h > 6.0 or v_now > d)): # velocity-based with paper
'''
if b_now == 0 and v_now > d: # symmetric rule
if (d_p_eff >= v_now and
(j == 0 or l < 0 or (mmod(car[j].pos, car[l].pos)) >=
v_succ_cur[dst])):
lane_cnt[dst] += 1
lane_cnt[1-dst] -= 1
car[j].lane = dst
car[j].v = v_now
lane_change_cnt += 1
#4 car motion
car[j].pos = (car[j].pos + car[j].v) % road_len
if i > int(time_period * 0.618):
# calculate flow
for m in range(road_len/10):
pinp = m * 10
for k in range(car_num):
if car[k].pos <= pinp and car[k].pos + car[k].v >= pinp and car[k].v > 0:
flow[car[k].lane] += 1
flow = [float(i)*10/road_len for i in flow]
car[:car_num] = sorted(car[:car_num])
for i in reversed(range(car_num)):
if car[i].pos > road_len:
del car[i]
car_num -= 1
'''
line = '.' * road_len
for j in range(car_num):
if car[j].lane == 0:
#line = line[:car[j].pos] + str((car[j].v + 1) / 2) + line[(car[j].pos + 1):]
line = line[:car[j].pos] + str((car[j].v)) + line[(car[j].pos + 1):]
print "car %d: id=%d pos=%d v=%d" % (j, car[j].c_id, car[j].pos, car[j].v)
print line
line = '.' * road_len
for j in range(car_num):
if car[j].lane == 1:
#line = line[:car[j].pos] + str((car[j].v + 1) / 2) + line[(car[j].pos + 1):]
line = line[:car[j].pos] + str((car[j].v)) + line[(car[j].pos + 1):]
print line
print
#'''
avg_vs = [float(sum_vs[i])/lane_cnt[i] if lane_cnt[i] != 0 else 0 for i in range(lane_num)]
return lane_cnt, car_num, float(sum_v)/car_num, flow,\
lane_change_cnt,avg_vs, speed_1
if __name__ == "__main__":
#print main()
#'''
#print('rolen car_l car_r car_n avg_v flow_l flow_r flow lanchge v_l v_r')
print("car_num mean std" )
for i in range(0,20):
#p_car = float(i + 1) / 10
max_car_num = (i+1)*100
lane_cnt = [0] * lane_num
flow_sum = [0] * lane_num
rep = 1
avg_vs = []
lane_change_sum = 0
avg_v_lrs = [[],[]]
        speed_1 = [[] for _ in range(max_car_num)]
for j in range(rep):
ret, car_num, avg_v_instance, flow, lane_change_cnt, avg_v_lr,\
speed_1 = main()
lane_cnt[0] += ret[0]
lane_cnt[1] += ret[1]
flow_sum[0] += flow[0]
flow_sum[1] += flow[1]
lane_change_sum += lane_change_cnt
avg_vs.append(avg_v_instance)
            for k in range(lane_num):
                avg_v_lrs[k].append(avg_v_lr[k])
lane_cnt = [i / rep for i in lane_cnt]
avg_v = sum(avg_vs)/float(len(avg_vs))
avg_v_lr = [sum(i)/float(len(i)) for i in avg_v_lrs]
flow_l = flow_sum[0]/rep
flow_r = flow_sum[1]/rep + 0.00001
'''
print('%4d %5d %5d %5d %6.2f %6.2f %6.2f %5.2f %8d %7.2f %7.2f'
% (road_len,lane_cnt[0], lane_cnt[1],
car_num, avg_v, flow_l,
flow_r,
flow_l+flow_r,
lane_change_sum / rep,\
avg_v_lr[0],\
avg_v_lr[1]
))
'''
speed_stat = numpy.array(speed_1)
speed_mean = (numpy.mean(speed_stat, axis = 1)).tolist()
speed_std = (numpy.std(speed_stat, axis = 1).tolist())
for i in range(len(speed_mean)):
print('%7d %6.2f %6.2f' % (max_car_num, speed_mean[i],
speed_std[i]))
#for lis in speed_1:
#print(' '.join(map(str,lis)))
#print("="*10, " "*5, "%.2f" % (avg_v*car_num/road_len))
#print(", ".join(map(lambda x:"%.2f" % x,avg_vs[:5])))
#print("="*10)
#'''
|
|
#!/usr/bin/env python
"""
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import struct
import xfcp
import uart_ep
import i2c
import eth_ep
import arp_ep
import udp_ep
import gmii_ep
module = 'fpga_core'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../lib/xfcp/rtl/xfcp_interface_uart.v")
srcs.append("../lib/xfcp/rtl/xfcp_interface_udp.v")
srcs.append("../lib/xfcp/rtl/xfcp_mod_i2c_master.v")
srcs.append("../lib/xfcp/rtl/xfcp_mod_wb.v")
srcs.append("../lib/xfcp/rtl/xfcp_arb.v")
srcs.append("../lib/xfcp/rtl/xfcp_switch.v")
srcs.append("../lib/i2c/rtl/i2c_master.v")
srcs.append("../lib/eth/rtl/oddr.v")
srcs.append("../lib/eth/rtl/ssio_sdr_in.v")
srcs.append("../lib/eth/rtl/ssio_sdr_out.v")
srcs.append("../lib/eth/rtl/gmii_phy_if.v")
srcs.append("../lib/eth/rtl/eth_mac_1g_gmii_fifo.v")
srcs.append("../lib/eth/rtl/eth_mac_1g_gmii.v")
srcs.append("../lib/eth/rtl/eth_mac_1g.v")
srcs.append("../lib/eth/rtl/axis_gmii_rx.v")
srcs.append("../lib/eth/rtl/axis_gmii_tx.v")
srcs.append("../lib/eth/rtl/lfsr.v")
srcs.append("../lib/eth/rtl/eth_axis_rx.v")
srcs.append("../lib/eth/rtl/eth_axis_tx.v")
srcs.append("../lib/eth/rtl/udp_complete.v")
srcs.append("../lib/eth/rtl/udp_checksum_gen.v")
srcs.append("../lib/eth/rtl/udp.v")
srcs.append("../lib/eth/rtl/udp_ip_rx.v")
srcs.append("../lib/eth/rtl/udp_ip_tx.v")
srcs.append("../lib/eth/rtl/ip_complete.v")
srcs.append("../lib/eth/rtl/ip.v")
srcs.append("../lib/eth/rtl/ip_eth_rx.v")
srcs.append("../lib/eth/rtl/ip_eth_tx.v")
srcs.append("../lib/eth/rtl/ip_arb_mux.v")
srcs.append("../lib/eth/rtl/arp.v")
srcs.append("../lib/eth/rtl/arp_cache.v")
srcs.append("../lib/eth/rtl/arp_eth_rx.v")
srcs.append("../lib/eth/rtl/arp_eth_tx.v")
srcs.append("../lib/eth/rtl/eth_arb_mux.v")
srcs.append("../lib/uart/rtl/uart.v")
srcs.append("../lib/uart/rtl/uart_rx.v")
srcs.append("../lib/uart/rtl/uart_tx.v")
srcs.append("../lib/wb/rtl/wb_ram.v")
srcs.append("../lib/axis/rtl/arbiter.v")
srcs.append("../lib/axis/rtl/priority_encoder.v")
srcs.append("../lib/axis/rtl/axis_cobs_encode.v")
srcs.append("../lib/axis/rtl/axis_cobs_decode.v")
srcs.append("../lib/axis/rtl/axis_fifo.v")
srcs.append("../lib/axis/rtl/axis_async_fifo.v")
srcs.append("../lib/axis/rtl/axis_async_fifo_adapter.v")
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
TARGET = "SIM"
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
btnu = Signal(bool(0))
btnl = Signal(bool(0))
btnd = Signal(bool(0))
btnr = Signal(bool(0))
btnc = Signal(bool(0))
sw = Signal(intbv(0)[8:])
i2c_scl_i = Signal(bool(1))
i2c_sda_i = Signal(bool(1))
phy_rx_clk = Signal(bool(0))
phy_rxd = Signal(intbv(0)[8:])
phy_rx_dv = Signal(bool(0))
phy_rx_er = Signal(bool(0))
phy_tx_clk = Signal(bool(0))
phy_int_n = Signal(bool(1))
uart_rxd = Signal(bool(1))
uart_cts = Signal(bool(1))
s1_scl_i = Signal(bool(1))
s1_sda_i = Signal(bool(1))
s2_scl_i = Signal(bool(1))
s2_sda_i = Signal(bool(1))
# Outputs
led = Signal(intbv(0)[8:])
i2c_scl_o = Signal(bool(1))
i2c_scl_t = Signal(bool(1))
i2c_sda_o = Signal(bool(1))
i2c_sda_t = Signal(bool(1))
phy_gtx_clk = Signal(bool(0))
phy_txd = Signal(intbv(0)[8:])
phy_tx_en = Signal(bool(0))
phy_tx_er = Signal(bool(0))
phy_reset_n = Signal(bool(0))
uart_txd = Signal(bool(1))
uart_rts = Signal(bool(1))
s1_scl_o = Signal(bool(1))
s1_scl_t = Signal(bool(1))
s1_sda_o = Signal(bool(1))
s1_sda_t = Signal(bool(1))
s2_scl_o = Signal(bool(1))
s2_scl_t = Signal(bool(1))
s2_sda_o = Signal(bool(1))
s2_sda_t = Signal(bool(1))
# sources and sinks
mii_select = Signal(bool(0))
gmii_source = gmii_ep.GMIISource()
gmii_source_logic = gmii_source.create_logic(
phy_rx_clk,
rst,
txd=phy_rxd,
tx_en=phy_rx_dv,
tx_er=phy_rx_er,
mii_select=mii_select,
name='gmii_source'
)
gmii_sink = gmii_ep.GMIISink()
gmii_sink_logic = gmii_sink.create_logic(
phy_tx_clk,
rst,
rxd=phy_txd,
rx_dv=phy_tx_en,
rx_er=phy_tx_er,
mii_select=mii_select,
name='gmii_sink'
)
uart_source = uart_ep.UARTSource()
uart_source_logic = uart_source.create_logic(
clk,
rst,
txd=uart_rxd,
prescale=int(125000000/(115200*8)),
name='uart_source'
)
uart_sink = uart_ep.UARTSink()
uart_sink_logic = uart_sink.create_logic(
clk,
rst,
rxd=uart_txd,
prescale=int(125000000/(115200*8)),
name='uart_sink'
)
# I2C memory model 1
i2c_mem1 = i2c.I2CMem(1024)
i2c_mem_logic1 = i2c_mem1.create_logic(
scl_i=s1_scl_i,
scl_o=s1_scl_o,
scl_t=s1_scl_t,
sda_i=s1_sda_i,
sda_o=s1_sda_o,
sda_t=s1_sda_t,
abw=2,
address=0x50,
latency=0,
name='slave1'
)
# I2C memory model 2
i2c_mem2 = i2c.I2CMem(1024)
i2c_mem_logic2 = i2c_mem2.create_logic(
scl_i=s2_scl_i,
scl_o=s2_scl_o,
scl_t=s2_scl_t,
sda_i=s2_sda_i,
sda_o=s2_sda_o,
sda_t=s2_sda_t,
abw=2,
address=0x51,
latency=1000,
name='slave2'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
btnu=btnu,
btnl=btnl,
btnd=btnd,
btnr=btnr,
btnc=btnc,
sw=sw,
led=led,
i2c_scl_i=i2c_scl_i,
i2c_scl_o=i2c_scl_o,
i2c_scl_t=i2c_scl_t,
i2c_sda_i=i2c_sda_i,
i2c_sda_o=i2c_sda_o,
i2c_sda_t=i2c_sda_t,
phy_rx_clk=phy_rx_clk,
phy_rxd=phy_rxd,
phy_rx_dv=phy_rx_dv,
phy_rx_er=phy_rx_er,
phy_gtx_clk=phy_gtx_clk,
phy_tx_clk=phy_tx_clk,
phy_txd=phy_txd,
phy_tx_en=phy_tx_en,
phy_tx_er=phy_tx_er,
phy_reset_n=phy_reset_n,
phy_int_n=phy_int_n,
uart_rxd=uart_rxd,
uart_txd=uart_txd,
uart_rts=uart_rts,
uart_cts=uart_cts
)
@always_comb
def bus():
# emulate I2C wired AND
i2c_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
i2c_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
s1_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
s1_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
s2_scl_i.next = i2c_scl_o & s1_scl_o & s2_scl_o
s2_sda_i.next = i2c_sda_o & s1_sda_o & s2_sda_o
@always(delay(4))
def clkgen():
clk.next = not clk
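    # half-period of the GMII RX/TX clocks in simulation time units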
rx_clk_hp = Signal(int(4))
@instance
def rx_clk_gen():
while True:
yield delay(int(rx_clk_hp))
phy_rx_clk.next = not phy_rx_clk
phy_tx_clk.next = not phy_tx_clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: enumerate via UDP")
current_test.next = 1
pkt = xfcp.XFCPFrame()
pkt.path = []
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0x020000000000
test_frame.eth_src_mac = 0xDAD1D2D3D4D5
test_frame.eth_type = 0x0800
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80181
test_frame.ip_dest_ip = 0xc0a80180
test_frame.udp_source_port = 1234
test_frame.udp_dest_port = 14000
test_frame.payload = pkt.build_axis()
test_frame.build()
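        # prepend the Ethernet preamble (7 x 0x55) and start-of-frame delimiter (0xD5)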
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame.build_eth().build_axis_fcs().data)
# wait for ARP request packet
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0x020000000000
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0x020000000000
assert check_frame.arp_spa == 0xc0a80180
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80181
# generate response
arp_frame = arp_ep.ARPFrame()
arp_frame.eth_dest_mac = 0x020000000000
arp_frame.eth_src_mac = 0xDAD1D2D3D4D5
arp_frame.eth_type = 0x0806
arp_frame.arp_htype = 0x0001
arp_frame.arp_ptype = 0x0800
arp_frame.arp_hlen = 6
arp_frame.arp_plen = 4
arp_frame.arp_oper = 2
arp_frame.arp_sha = 0xDAD1D2D3D4D5
arp_frame.arp_spa = 0xc0a80181
arp_frame.arp_tha = 0x020000000000
arp_frame.arp_tpa = 0xc0a80180
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+arp_frame.build_eth().build_axis_fcs().data)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_src_mac == 0x020000000000
assert check_frame.eth_type == 0x0800
assert check_frame.ip_version == 4
assert check_frame.ip_ihl == 5
assert check_frame.ip_dscp == 0
assert check_frame.ip_ecn == 0
assert check_frame.ip_identification == 0
assert check_frame.ip_flags == 2
assert check_frame.ip_fragment_offset == 0
assert check_frame.ip_ttl == 64
assert check_frame.ip_protocol == 0x11
assert check_frame.ip_source_ip == 0xc0a80180
assert check_frame.ip_dest_ip == 0xc0a80181
assert check_frame.udp_source_port == 14000
assert check_frame.udp_dest_port == 1234
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == []
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 64
pkt = xfcp.XFCPFrame()
pkt.path = [0]
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0x020000000000
test_frame.eth_src_mac = 0xDAD1D2D3D4D5
test_frame.eth_type = 0x0800
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80181
test_frame.ip_dest_ip = 0xc0a80180
test_frame.udp_source_port = 1234
test_frame.udp_dest_port = 14000
test_frame.payload = pkt.build_axis()
test_frame.build()
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame.build_eth().build_axis_fcs().data)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
assert check_frame.eth_dest_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_src_mac == 0x020000000000
assert check_frame.eth_type == 0x0800
assert check_frame.ip_version == 4
assert check_frame.ip_ihl == 5
assert check_frame.ip_dscp == 0
assert check_frame.ip_ecn == 0
assert check_frame.ip_identification == 0
assert check_frame.ip_flags == 2
assert check_frame.ip_fragment_offset == 0
assert check_frame.ip_ttl == 64
assert check_frame.ip_protocol == 0x11
assert check_frame.ip_source_ip == 0xc0a80180
assert check_frame.ip_dest_ip == 0xc0a80181
assert check_frame.udp_source_port == 14000
assert check_frame.udp_dest_port == 1234
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == [0]
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 32
assert gmii_source.empty()
assert gmii_sink.empty()
yield delay(100)
yield clk.posedge
print("test 1: test write and read RAM 0")
current_test.next = 1
pkt1 = xfcp.XFCPFrame()
pkt1.path = [0]
pkt1.ptype = 0x12
pkt1.payload = bytearray(struct.pack('<BH', 0, 4)+b'\x11\x22\x33\x44')
pkt2 = xfcp.XFCPFrame()
pkt2.path = [0]
pkt2.ptype = 0x10
pkt2.payload = bytearray(struct.pack('<BH', 0, 4))
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0x020000000000
test_frame1.eth_src_mac = 0xDAD1D2D3D4D5
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80181
test_frame1.ip_dest_ip = 0xc0a80180
test_frame1.udp_source_port = 1234
test_frame1.udp_dest_port = 14000
test_frame1.payload = pkt1.build_axis()
test_frame1.build()
test_frame2 = udp_ep.UDPFrame(test_frame1)
test_frame2.payload = pkt2.build_axis()
test_frame2.build()
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame1.build_eth().build_axis_fcs().data)
gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame2.build_eth().build_axis_fcs().data)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0x13
assert rx_pkt.payload.data == struct.pack('<BH', 0, 4)
rx_frame = None
while rx_frame is None:
yield clk.posedge
rx_frame = gmii_sink.recv()
check_eth_frame = eth_ep.EthFrame()
check_eth_frame.parse_axis_fcs(rx_frame.data[8:])
check_frame = udp_ep.UDPFrame()
check_frame.parse_eth(check_eth_frame)
print(check_frame)
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis(check_frame.payload.data)
print(rx_pkt)
assert rx_pkt.ptype == 0x11
assert rx_pkt.payload.data == struct.pack('<BH', 0, 4)+b'\x11\x22\x33\x44'
assert gmii_source.empty()
assert gmii_sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: enumerate via UART")
current_test.next = 3
pkt = xfcp.XFCPFrame()
pkt.path = []
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
uart_source.write(pkt.build_axis_cobs().data+b'\x00')
yield clk.posedge
rx_data = b''
while True:
if not uart_sink.empty():
b = bytearray(uart_sink.read(1))
rx_data += b
if b[0] == 0:
break
yield clk.posedge
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis_cobs(rx_data[:-1])
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == []
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 64
pkt = xfcp.XFCPFrame()
pkt.path = [0]
pkt.rpath = []
pkt.ptype = 0xfe
pkt.payload = b''
uart_source.write(pkt.build_axis_cobs().data+b'\x00')
yield clk.posedge
rx_data = b''
while True:
if not uart_sink.empty():
b = bytearray(uart_sink.read(1))
rx_data += b
if b[0] == 0:
break
yield clk.posedge
rx_pkt = xfcp.XFCPFrame()
rx_pkt.parse_axis_cobs(rx_data[:-1])
print(rx_pkt)
assert rx_pkt.ptype == 0xff
assert rx_pkt.path == [0]
assert rx_pkt.rpath == []
assert len(rx_pkt.payload.data) == 32
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
#!/usr/bin/env python
from pytest import mark
from translate.convert import html2po
from translate.convert import po2html
from translate.convert import test_convert
from translate.misc import wStringIO
class TestHTML2PO:
def html2po(self, markup, includeuntagged=False, duplicatestyle="msgctxt", keepcomments=False):
"""Helper to convert html to po without a file."""
inputfile = wStringIO.StringIO(markup)
convertor = html2po.html2po()
outputpo = convertor.convertfile(inputfile, "test", includeuntagged, duplicatestyle, keepcomments)
return outputpo
def po2html(self, posource, htmltemplate):
"""Helper to convert po to html without a file."""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
templatefile = wStringIO.StringIO(htmltemplate)
assert po2html.converthtml(inputfile, outputfile, templatefile)
return outputfile.getvalue()
def countunits(self, pofile, expected):
"""helper to check that we got the expected number of messages"""
actual = len(pofile.units)
if actual > 0:
if pofile.units[0].isheader():
actual = actual - 1
        print(pofile)
assert actual == expected
def compareunit(self, pofile, unitnumber, expected):
"""helper to validate a PO message"""
if not pofile.units[0].isheader():
unitnumber = unitnumber - 1
        print('unit source: ' + pofile.units[unitnumber].source.encode('utf-8') + '|')
        print('expected: ' + expected.encode('utf-8') + '|')
assert unicode(pofile.units[unitnumber].source) == unicode(expected)
def check_single(self, markup, itemtext):
"""checks that converting this markup produces a single element with value itemtext"""
pofile = self.html2po(markup)
self.countunits(pofile, 1)
self.compareunit(pofile, 1, itemtext)
def check_null(self, markup):
"""checks that converting this markup produces no elements"""
pofile = self.html2po(markup)
self.countunits(pofile, 0)
def check_phpsnippet(self, php):
"""Given a snippet of php, put it into an HTML shell and see
if the results are as expected"""
self.check_single('<html><head></head><body><p><a href="' + php + '/site.html">Body text</a></p></body></html>', "Body text")
self.check_single('<html><head></head><body><p>More things in <a href="' + php + '/site.html">Body text</a></p></body></html>', 'More things in <a href="' + php + '/site.html">Body text</a>')
self.check_null('<html><head></head><body><p>' + php + '</p></body></html>')
def test_htmllang(self):
"""test to ensure that we no longer use the lang attribure"""
markup = '''<html lang="en"><head><title>My title</title></head><body></body></html>'''
pofile = self.html2po(markup)
self.countunits(pofile, 1)
# Check that the first item is the <title> not <head>
self.compareunit(pofile, 1, "My title")
def test_title(self):
"""test that we can extract the <title> tag"""
self.check_single("<html><head><title>My title</title></head><body></body></html>", "My title")
def test_title_with_linebreak(self):
"""Test a linebreak in the <title> tag"""
htmltext = '''<html>
<head>
<title>My
title</title>
</head>
<body>
</body>
</html>
'''
self.check_single(htmltext, "My title")
def test_meta(self):
"""Test that we can extract certain <meta> info from <head>."""
self.check_single('''<html><head><meta name="keywords" content="these are keywords"></head><body></body></html>''', "these are keywords")
def test_tag_p(self):
"""test that we can extract the <p> tag"""
self.check_single("<html><head></head><body><p>A paragraph.</p></body></html>", "A paragraph.")
markup = "<p>First line.<br>Second line.</p>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_p_with_linebreak(self):
"""Test newlines within the <p> tag."""
htmltext = '''<html>
<head>
</head>
<body>
<p>
A paragraph is a section in a piece of writing, usually highlighting a
particular point or topic. It always begins on a new line and usually
with indentation, and it consists of at least one sentence.
</p>
</body>
</html>
'''
self.check_single(htmltext, "A paragraph is a section in a piece of writing, usually highlighting a particular point or topic. It always begins on a new line and usually with indentation, and it consists of at least one sentence.")
markup = "<p>First\nline.<br>Second\nline.</p>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_div(self):
"""test that we can extract the <div> tag"""
self.check_single("<html><head></head><body><div>A paragraph.</div></body></html>", "A paragraph.")
markup = "<div>First line.<br>Second line.</div>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_div_with_linebreaks(self):
"""Test linebreaks within a <div> tag."""
htmltext = '''<html>
<head>
</head>
<body>
<div>
A paragraph is a section in a piece of writing, usually highlighting a
particular point or topic. It always begins on a new line and usually
with indentation, and it consists of at least one sentence.
</div>
</body>
</html>
'''
self.check_single(htmltext, "A paragraph is a section in a piece of writing, usually highlighting a particular point or topic. It always begins on a new line and usually with indentation, and it consists of at least one sentence.")
markup = "<div>First\nline.<br>Second\nline.</div>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_a(self):
"""test that we can extract the <a> tag"""
self.check_single('<html><head></head><body><p>A paragraph with <a href="http://translate.org.za/">hyperlink</a>.</p></body></html>', 'A paragraph with <a href="http://translate.org.za/">hyperlink</a>.')
def test_tag_a_with_linebreak(self):
"""Test that we can extract the <a> tag with newlines in it."""
htmltext = '''<html>
<head>
</head>
<body>
<p>A
paragraph
with <a
href="http://translate.org.za/">hyperlink</a>
and
newlines.</p></body></html>
'''
self.check_single(htmltext, 'A paragraph with <a href="http://translate.org.za/">hyperlink</a> and newlines.')
def test_tag_img(self):
"""Test that we can extract the alt attribute from the <img> tag."""
self.check_single('''<html><head></head><body><img src="picture.png" alt="A picture"></body></html>''', "A picture")
def test_img_empty(self):
"""Test that we can extract the alt attribute from the <img> tag."""
htmlsource = '''<html><head></head><body><img src="images/topbar.jpg" width="750" height="80"></body></html>'''
self.check_null(htmlsource)
def test_tag_table_summary(self):
"""Test that we can extract the summary attribute."""
self.check_single('''<html><head></head><body><table summary="Table summary"></table></body></html>''', "Table summary")
def test_table_simple(self):
"""Test that we can fully extract a simple table."""
markup = '''<html><head></head><body><table><tr><th>Heading One</th><th>Heading Two</th></tr><tr><td>One</td><td>Two</td></tr></table></body></html>'''
pofile = self.html2po(markup)
self.countunits(pofile, 4)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "One")
self.compareunit(pofile, 4, "Two")
def test_table_complex(self):
markup = '''<table summary="This is the summary"><caption>A caption</caption><thead><tr><th abbr="Head 1">Heading One</th><th>Heading Two</th></tr></thead><tfoot><tr><td>Foot One</td><td>Foot Two</td></tr></tfoot><tbody><tr><td>One</td><td>Two</td></tr></tbody></table>'''
pofile = self.html2po(markup)
self.countunits(pofile, 9)
self.compareunit(pofile, 1, "This is the summary")
self.compareunit(pofile, 2, "A caption")
self.compareunit(pofile, 3, "Head 1")
self.compareunit(pofile, 4, "Heading One")
self.compareunit(pofile, 5, "Heading Two")
self.compareunit(pofile, 6, "Foot One")
self.compareunit(pofile, 7, "Foot Two")
self.compareunit(pofile, 8, "One")
self.compareunit(pofile, 9, "Two")
def test_table_empty(self):
"""Test that we ignore tables that are empty.
A table is deemed empty if it has no translatable content.
"""
self.check_null('''<html><head></head><body><table><tr><td><img src="bob.png"></td></tr></table></body></html>''')
self.check_null('''<html><head></head><body><table><tr><td> </td></tr></table></body></html>''')
self.check_null('''<html><head></head><body><table><tr><td><strong></strong></td></tr></table></body></html>''')
def test_address(self):
"""Test to see if the address element is extracted"""
self.check_single("<body><address>My address</address></body>", "My address")
def test_headings(self):
"""Test to see if the h* elements are extracted"""
markup = "<html><head></head><body><h1>Heading One</h1><h2>Heading Two</h2><h3>Heading Three</h3><h4>Heading Four</h4><h5>Heading Five</h5><h6>Heading Six</h6></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 6)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "Heading Three")
self.compareunit(pofile, 4, "Heading Four")
self.compareunit(pofile, 5, "Heading Five")
self.compareunit(pofile, 6, "Heading Six")
def test_headings_with_linebreaks(self):
"""Test to see if h* elements with newlines can be extracted"""
markup = "<html><head></head><body><h1>Heading\nOne</h1><h2>Heading\nTwo</h2><h3>Heading\nThree</h3><h4>Heading\nFour</h4><h5>Heading\nFive</h5><h6>Heading\nSix</h6></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 6)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "Heading Three")
self.compareunit(pofile, 4, "Heading Four")
self.compareunit(pofile, 5, "Heading Five")
self.compareunit(pofile, 6, "Heading Six")
def test_dt(self):
"""Test to see if the definition list title (dt) element is extracted"""
self.check_single("<html><head></head><body><dl><dt>Definition List Item Title</dt></dl></body></html>", "Definition List Item Title")
def test_dd(self):
"""Test to see if the definition list description (dd) element is extracted"""
self.check_single("<html><head></head><body><dl><dd>Definition List Item Description</dd></dl></body></html>", "Definition List Item Description")
def test_span(self):
"""test to check that we don't double extract a span item"""
self.check_single("<html><head></head><body><p>You are a <span>Spanish</span> sentence.</p></body></html>", "You are a <span>Spanish</span> sentence.")
def test_ul(self):
"""Test to see if the list item <li> is exracted"""
markup = "<html><head></head><body><ul><li>Unordered One</li><li>Unordered Two</li></ul><ol><li>Ordered One</li><li>Ordered Two</li></ol></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 4)
self.compareunit(pofile, 1, "Unordered One")
self.compareunit(pofile, 2, "Unordered Two")
self.compareunit(pofile, 3, "Ordered One")
self.compareunit(pofile, 4, "Ordered Two")
def test_duplicates(self):
"""check that we use the default style of msgctxt to disambiguate duplicate messages"""
markup = "<html><head></head><body><p>Duplicate</p><p>Duplicate</p></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 2)
# FIXME change this so that we check that the msgctxt is correctly added
self.compareunit(pofile, 1, "Duplicate")
self.compareunit(pofile, 2, "Duplicate")
def test_multiline_reflow(self):
"""check that we reflow multiline content to make it more readable for translators"""
self.check_single('''<td valign="middle" width="96%"><font class="headingwhite">South
Africa</font></td>''', '''South Africa''')
@mark.xfail(reason="Not Implemented")
def test_nested_tags(self):
"""check that we can extract items within nested tags"""
markup = "<div><p>Extract this</p>And this</div>"
pofile = self.html2po(markup)
self.countunits(pofile, 2)
self.compareunit(pofile, 1, "Extract this")
self.compareunit(pofile, 2, "And this")
def test_carriage_return(self):
"""Remove carriage returns from files in dos format."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\r
<html><!-- InstanceBegin template="/Templates/masterpage.dwt" codeOutsideHTMLIsLocked="false" -->\r
<head>\r
<!-- InstanceBeginEditable name="doctitle" -->\r
<link href="fmfi.css" rel="stylesheet" type="text/css">\r
</head>\r
\r
<body>\r
<p>The rapid expansion of telecommunications infrastructure in recent\r
years has helped to bridge the digital divide to a limited extent.</p> \r
</body>\r
<!-- InstanceEnd --></html>\r
'''
self.check_single(htmlsource, 'The rapid expansion of telecommunications infrastructure in recent years has helped to bridge the digital divide to a limited extent.')
def test_encoding_latin1(self):
"""Convert HTML input in iso-8859-1 correctly to unicode."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><!-- InstanceBegin template="/Templates/masterpage.dwt" codeOutsideHTMLIsLocked="false" -->
<head>
<!-- InstanceBeginEditable name="doctitle" -->
<title>FMFI - South Africa - CSIR Openphone - Overview</title>
<!-- InstanceEndEditable -->
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<meta name="keywords" content="fmfi, first mile, first inch, wireless, rural development, access devices, mobile devices, wifi, connectivity, rural connectivty, ict, low cost, cheap, digital divide, csir, idrc, community">
<!-- InstanceBeginEditable name="head" -->
<!-- InstanceEndEditable -->
<link href="../../../fmfi.css" rel="stylesheet" type="text/css">
</head>
<body>
<p>We aim to please \x96 will you aim too, please?</p>
<p>South Africa\x92s language diversity can be challenging.</p>
</body>
</html>
'''
pofile = self.html2po(htmlsource)
self.countunits(pofile, 4)
self.compareunit(pofile, 3, u'We aim to please \x96 will you aim too, please?')
self.compareunit(pofile, 4, u'South Africa\x92s language diversity can be challenging.')
def test_strip_html(self):
"""Ensure that unnecessary html is stripped from the resulting unit."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>FMFI - Contact</title>
</head>
<body>
<table width="100%" border="0" cellpadding="0" cellspacing="0">
<tr align="left" valign="top">
<td width="150" height="556">
<table width="157" height="100%" border="0" cellspacing="0" id="leftmenubg-color">
<tr>
<td align="left" valign="top" height="555">
<table width="100%" border="0" cellspacing="0" cellpadding="2">
<tr align="left" valign="top" bgcolor="#660000">
<td width="4%"><strong></strong></td>
<td width="96%"><strong><font class="headingwhite">Projects</font></strong></td>
</tr>
<tr align="left" valign="top">
<td valign="middle" width="4%"><img src="images/arrow.gif" width="8" height="8"></td>
<td width="96%"><a href="index.html">Home Page</a></td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
'''
pofile = self.html2po(htmlsource)
self.countunits(pofile, 3)
self.compareunit(pofile, 2, u'Projects')
self.compareunit(pofile, 3, u'Home Page')
# Translate and convert back:
pofile.units[2].target = 'Projekte'
pofile.units[3].target = 'Tuisblad'
htmlresult = self.po2html(str(pofile), htmlsource).replace('\n', ' ').replace('= "', '="').replace('> <', '><')
snippet = '<td width="96%"><strong><font class="headingwhite">Projekte</font></strong></td>'
assert snippet in htmlresult
snippet = '<td width="96%"><a href="index.html">Tuisblad</a></td>'
assert snippet in htmlresult
@mark.xfail(reason="Performing major HTML surgery")
def test_php(self):
"""Test that PHP snippets don't interfere"""
# A simple string
self.check_phpsnippet('''<?=$phpvariable?>''')
        # Contains HTML tag characters (< and >)
self.check_phpsnippet('''<?=($a < $b ? $foo : ($b > c ? $bar : $cat))?>''')
# Make sure basically any symbol can be handled
self.check_phpsnippet(''' <? asdfghjkl qwertyuiop 1234567890!@#$%^&*()-=_+[]\{}|;':",./<>? ?> ''')
def test_multiple_php(self):
"""Test multiple PHP snippets in a string to make sure they get restored properly"""
php1 = '''<?=$phpvariable?>'''
php2 = '''<?=($a < $b ? $foo : ($b > c ? $bar : $cat))?>'''
php3 = '''<? asdfghjklqwertyuiop1234567890!@#$%^&*()-=_+[]\{}|;':",./<>? ?>'''
# Put 3 different strings into an html string
innertext = '<a href="' + php1 + '/site.html">Body text</a> and some ' + php2 + ' more text ' + php2 + php3
htmlsource = '<html><head></head><body><p>' + innertext + '</p></body></html>'
self.check_single(htmlsource, innertext)
def test_php_multiline(self):
# A multi-line php string to test
php1 = '''<? abc
def
ghi ?>'''
# Scatter the php strings throughout the file, and show what the translation should be
innertext = '<a href="' + php1 + '/site.html">Body text</a> and some ' + php1 + ' more text ' + php1 + php1
innertrans = '<a href="' + php1 + '/site.html">Texte de corps</a> et encore de ' + php1 + ' plus de texte ' + php1 + php1
htmlsource = '<html><head></head><body><p>' + innertext + '</p></body></html>' # Current html file
transsource = '<html><head></head><body><p>' + innertrans + '</p></body></html>' # Expected translation
pofile = self.html2po(htmlsource)
pofile.units[1].target = innertrans # Register the translation in the PO file
        htmlresult = self.po2html(str(pofile), htmlsource)
assert htmlresult == transsource
def test_comments(self):
"""Test that HTML comments are converted to translator notes in output"""
pofile = self.html2po('<!-- comment outside block --><p><!-- a comment -->A paragraph<!-- with another comment -->.</p>', keepcomments=True)
self.compareunit(pofile, 1, 'A paragraph.')
notes = pofile.getunits()[-1].getnotes()
assert unicode(notes) == ' a comment \n with another comment '
class TestHTML2POCommand(test_convert.TestConvertCommand, TestHTML2PO):
"""Tests running actual html2po commands on files"""
convertmodule = html2po
defaultoptions = {"progress": "none"}
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-P, --pot")
options = self.help_check(options, "--duplicates=DUPLICATESTYLE")
options = self.help_check(options, "--keepcomments")
options = self.help_check(options, "-u, --untagged", last=True)
|
|
"""The checks module contains two classes that are used to verify
conda packages and recipes: CondaPackageCheck and CondaRecipeCheck.
Each class contains specific checks that relate to validating packages
and recipes. These checks start with the letter 'C', which is an
abbreviation for 'conda'.
Checks C1101 through C1147 are housed in CondaPackageCheck.
Checks C2101 through C2126 are housed in CondaRecipeCheck.
"""
import hashlib
import json
import os
import re
import sys
import tarfile
from conda_verify.errors import Error, PackageError
from conda_verify.constants import FIELDS, LICENSE_FAMILIES, CONDA_FORGE_COMMENTS
from conda_verify.utilities import (all_ascii, get_bad_seq, get_object_type,
ensure_list, fullmatch)
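# Version-spec pattern used by the dependency checks below. It is applied with
# fullmatch() to the version part of a dependency string; illustrative matches
# (not an exhaustive list) include ">=1.2.3", "2.7*" and "1.0|1.2".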
ver_spec_pat = r'^(?:[><=]{0,2}(?:(?:[\d\*]+[!\._]?){1,})[+\w\*]*[|,]?){1,}'
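# Illustrative usage of the check classes below (a sketch only; the real driver
# lives elsewhere in conda_verify and is not shown in this file). Every check_*
# method returns an Error on failure and None otherwise, so a runner can simply
# collect the non-None results, e.g.:
#   pkg_check = CondaPackageCheck('mypkg-1.0-0.tar.bz2')   # hypothetical path
#   problems = [e for e in (pkg_check.check_package_name(),
#                           pkg_check.check_package_version()) if e is not None]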
class CondaPackageCheck(object):
"""Create checks in order to validate conda package tarballs."""
def __init__(self, path):
"""Initialize conda package information for use with package checks."""
super(CondaPackageCheck, self).__init__()
self.path = path
self.archive = tarfile.open(self.path)
self.dist = self.retrieve_package_name(self.path)
self.name, self.version, self.build = self.dist.rsplit('-', 2)
self.paths = set(member.path for member in self.archive.getmembers())
self.index = self.archive.extractfile('info/index.json').read()
self.info = json.loads(self.index.decode('utf-8'))
self.files_file = self.archive.extractfile('info/files').read()
self.paths_file = self.archive.extractfile('info/paths.json').read()
self.win_pkg = bool(self.info['platform'] == 'win')
self.name_pat = re.compile(r'[a-z0-9_][a-z0-9_\-\.]*$')
self.hash_pat = re.compile(r'[gh][0-9a-f]{5,}', re.I)
self.version_pat = re.compile(r'[\w\.]+$')
@staticmethod
def retrieve_package_name(path):
"""Retrieve the package name from the conda package path."""
path = os.path.basename(path)
seq = get_bad_seq(path)
if seq:
raise PackageError(u'Found invalid sequence "{}" in package in info/index.json' .format(seq))
if path.endswith('.tar.bz2'):
return path[:-8]
elif path.endswith('.tar'):
return path[:-4]
else:
raise PackageError('Found package with invalid extension "{}"' .format(os.path.splitext(path)[1]))
def check_package_name(self):
"""Check the package name located in info/index.json."""
package_name = self.info.get('name')
if package_name is None:
return Error(self.path, 'C1101', 'Missing package name in info/index.json')
if not self.name_pat.match(package_name) or package_name.endswith(('.', '-', '_')):
return Error(self.path, 'C1103', 'Found invalid package name in info/index.json')
if package_name != self.name:
return Error(self.path, 'C1102', u'Found package name in info/index.json "{}" does not match filename "{}"' .format(package_name, self.name))
def check_package_version(self):
"""Check the package version located in info/index.json."""
package_version = str(self.info.get('version'))
if package_version == 'None':
return Error(self.path, 'C1104', 'Missing package version in info/index.json')
if package_version.startswith(('_', '.')) or package_version.endswith(('_', '.')):
return Error(self.path, 'C1107', "Package version in info/index.json cannot start or end with '_' or '.'")
if not self.version_pat.match(package_version) or get_bad_seq(package_version):
return Error(self.path, 'C1105', 'Found invalid version number in info/index.json')
if package_version != self.version:
return Error(self.path, 'C1106', u'Found package version in info/index.json "{}" does not match filename version "{}"' .format(package_version, self.version))
def check_build_number(self):
"""Check the build number located in info/index.json."""
build_number = self.info.get('build_number')
if build_number is not None:
try:
build_number = int(build_number)
if build_number < 0:
return Error(self.path, 'C1109', 'Build number in info/index.json cannot be a negative integer')
except ValueError:
return Error(self.path, 'C1108', 'Build number in info/index.json must be an integer')
def check_build_string(self):
"""Check the build string in info/index.json."""
build_string = self.info.get('build')
if not self.version_pat.match(build_string):
return Error(self.path, 'C1110', 'Found invalid build string "{}" in info/index.json' .format(build_string))
if build_string != self.build:
return Error(self.path, 'C1111', 'Found build number in info/index.json "{}" does not match build number "{}" in filename' .format(build_string, self.build))
def check_index_dependencies(self):
"""Check that the dependencies field is present in info/index.json."""
depends = self.info.get('depends')
if depends is None:
return Error(self.path, 'C1112', 'Missing "depends" field in info/index.json')
def check_index_dependencies_specs(self):
"""Check that the dependencies in info/index.json are properly formatted."""
dependencies = ensure_list(self.info.get('depends'))
if dependencies != [None]:
for dependency in dependencies:
dependency_parts = dependency.split()
if len(dependency_parts) == 0:
return Error(self.path, 'C1113', 'Found empty dependencies in info/index.json')
elif len(dependency_parts) == 2 and not fullmatch(ver_spec_pat,
dependency_parts[1]) or len(dependency_parts) > 3:
return Error(self.path, 'C1114', 'Found invalid dependency "{}" in info/index.json' .format(dependency))
def check_license_family(self):
"""Check that the license family in info/index.json is valid."""
license = self.info.get('license_family', self.info.get('license'))
if license not in LICENSE_FAMILIES:
return Error(self.path, 'C1115', 'Found invalid license "{}" in info/index.json' .format(license))
def check_index_encoding(self):
"""Check that contents of info/index.json are all ascii characters."""
if not all_ascii(self.index, self.win_pkg):
return Error(self.path, 'C1116', 'Found non-ascii characters inside info/index.json')
def check_duplicate_members(self):
"""Check for duplicate members inside the package tarball."""
if len(self.archive.getmembers()) != len(self.paths):
return Error(self.path, 'C1117', 'Found duplicate members inside tar archive')
def check_members(self):
"""Check the tar archive members for non ascii characters."""
for member in self.archive.getmembers():
if sys.version_info.major == 2:
unicode_path = member.path.decode('utf-8')
else:
unicode_path = member.path.encode('utf-8')
if not all_ascii(unicode_path):
return Error(self.path, 'C1118', 'Found archive member names containing non-ascii characters')
def check_files_file_encoding(self):
"""Check the info/files file for non ascii characters."""
if not all_ascii(self.files_file, self.win_pkg):
return Error(self.path, 'C1119', 'Found filenames in info/files containing non-ascii characters')
def check_files_file_for_info(self):
"""Check that the info/files file does not contain any files found within the info directory."""
filenames = [path.strip() for path in self.files_file.decode('utf-8').splitlines()]
for filename in filenames:
if filename.startswith('info'):
return Error(self.path, 'C1120', 'Found filenames in info/files that start with "info"')
def check_files_file_for_duplicates(self):
"""Check the info/files file for duplicates."""
filenames = [path.strip() for path in self.files_file.decode('utf-8').splitlines()]
if len(filenames) != len(set(filenames)):
return Error(self.path, 'C1121', 'Found duplicate filenames in info/files')
def check_files_file_for_validity(self):
"""Check that the files listed in info/files exist in the tar archive and vice versa."""
members = [member.path for member in self.archive.getmembers()
if not member.isdir() and not member.path.startswith('info')]
filenames = [path.strip() for path in self.files_file.decode('utf-8').splitlines()
if not path.strip().startswith('info')]
for filename in sorted(set(members).union(set(filenames))):
if filename not in members:
return Error(self.path, 'C1122', (u'Found filename in info/files missing from tar '
'archive: {}').format(filename))
elif filename not in filenames:
return Error(self.path, 'C1123', u'Found filename in tar archive missing from info/files: {}' .format(filename))
def check_for_hardlinks(self):
"""Check the tar archive for hardlinks."""
for member in self.archive.getmembers():
if member.islnk():
return Error(self.path, 'C1124', u'Found hardlink {} in tar archive' .format(member.path))
def check_for_unallowed_files(self):
"""Check the tar archive for unallowed directories."""
unallowed_directories = {'conda-meta', 'conda-bld', 'pkgs', 'pkgs32', 'envs'}
for filepath in self.paths:
if filepath in unallowed_directories or filepath.endswith(('.DS_Store', '~')):
return Error(self.path, 'C1125', u'Found unallowed file in tar archive: {}' .format(filepath))
def check_for_noarch_info(self):
"""Check that noarch Python packages contain the proper files."""
for filepath in self.paths:
if 'info/package_metadata.json' in filepath or 'info/link.json' in filepath:
if self.info['subdir'] != 'noarch' and 'preferred_env' not in self.info:
return Error(self.path, 'C1126', u'Found {} however package is not a noarch package' .format(filepath))
def check_for_bat_and_exe(self):
"""Check that both .bat and .exe files don't exist in the same package."""
bat_files = [filepath[:-4] for filepath in self.paths if filepath.endswith('.bat')]
exe_files = [filepath[:-4] for filepath in self.paths if filepath.endswith('.exe')]
isect = set(bat_files).intersection(exe_files)
if len(isect) > 0:
return Error(self.path, 'C1127',
'Found both .bat and .exe files with same basename in same folder: {}'
.format(isect))
@property
def prefix_file(self):
"""Extract the has_prefix file from the archive and return it.
If the file does not exist in the archive, None is returned.
"""
prefix_file = None
for member in self.archive.getmembers():
if member.path == 'info/has_prefix':
prefix_file = self.archive.extractfile(member.path).read()
return prefix_file
def check_prefix_file(self):
"""Check the info/has_prefix file for proper formatting."""
if self.prefix_file is not None:
if not all_ascii(self.prefix_file, self.win_pkg):
return Error(self.path, 'C1128', 'Found non-ascii characters in info/has_prefix')
@property
def prefix_file_contents(self):
"""Extract the contents of the has_prefix file and return them.
If the has_prefix file does not exist, None is returned.
"""
if self.prefix_file is not None:
for line in self.prefix_file.decode('utf-8').splitlines():
line = line.strip()
try:
placeholder, mode, filename = line.split()
placeholder = placeholder.strip("'\"")
filename = filename.strip("'\"")
except ValueError:
placeholder, mode, filename = '/<dummy>/<placeholder>', 'text', line
return (placeholder, mode, filename)
return None
def check_prefix_file_filename(self):
"""Check that the filenames in has_prefix exist in the archive."""
if self.prefix_file_contents is not None:
_, _, filename = self.prefix_file_contents
if filename not in self.paths:
return Error(self.path, 'C1129', u'Found filename "{}" in info/has_prefix not included in archive' .format(filename))
def check_prefix_file_mode(self):
"""Check that the has_prefix mode is either binary or text."""
if self.prefix_file_contents is not None:
_, mode, _ = self.prefix_file_contents
if mode not in ['binary', 'text']:
return Error(self.path, 'C1130', u'Found invalid mode "{}" in info/has_prefix' .format(mode))
def check_prefix_file_binary_mode(self):
"""Check that the has_prefix file binary mode is correct."""
if self.prefix_file_contents is not None:
placeholder, mode, _ = self.prefix_file_contents
if mode == 'binary':
if self.name == 'python':
return Error(self.path, 'C1131', 'Binary placeholder found in info/has_prefix not allowed when building Python')
elif self.win_pkg:
return Error(self.path, 'C1132', 'Binary placeholder found in info/has_prefix not allowed in Windows package')
elif len(placeholder) != 255:
return Error(self.path, 'C1133', u'Binary placeholder "{}" found in info/has_prefix does not have a length of 255 bytes' .format(placeholder))
def check_for_post_links(self):
"""Check the tar archive for pre and post link files."""
for filepath in self.paths:
if filepath.endswith(('-post-link.sh', '-pre-link.sh', '-pre-unlink.sh',
'-post-link.bat', '-pre-link.bat', '-pre-unlink.bat')):
return Error(self.path, 'C1134', u'Found pre/post link file "{}" in archive' .format(filepath))
def check_for_egg(self):
"""Check the tar archive for egg files."""
for filepath in self.paths:
if filepath.endswith('.egg'):
return Error(self.path, 'C1135', u'Found egg file "{}" in archive' .format(filepath))
def check_for_easy_install_script(self):
"""Check the tar archive for easy_install scripts."""
for filepath in self.paths:
if filepath.startswith(('bin/easy_install', 'Scripts/easy_install')):
return Error(self.path, 'C1136', u'Found easy_install script "{}" in archive' .format(filepath))
def check_for_pth_file(self):
"""Check the tar archive for .pth files."""
for filepath in self.paths:
if filepath.endswith('.pth'):
return Error(self.path, 'C1137', u'Found namespace file "{}" in archive' .format(filepath))
def check_for_pyo_file(self):
"""Check the tar archive for .pyo files"""
for filepath in self.paths:
if filepath.endswith('.pyo') and self.name != 'python':
return Error(self.path, 'C1138', u'Found pyo file "{}" in archive' .format(filepath))
def check_for_pyc_in_site_packages(self):
"""Check that .pyc files are only found within the site-packages or disutils directories."""
for filepath in self.paths:
if filepath.endswith('.pyc') and 'site-packages' not in filepath and 'distutils' not in filepath:
return Error(self.path, 'C1139', u'Found pyc file "{}" in invalid directory' .format(filepath))
def check_for_2to3_pickle(self):
"""Check the tar archive for .pickle files."""
for filepath in self.paths:
if 'lib2to3' in filepath and filepath.endswith('.pickle'):
return Error(self.path, 'C1140', u'Found lib2to3 .pickle file "{}"' .format(filepath))
def check_pyc_files(self):
"""Check that a .pyc file exists for every .py file in a Python 2 package."""
if 'py3' not in self.build:
for filepath in self.paths:
if '/site-packages/' in filepath:
if filepath.endswith('.py') and (filepath + 'c') not in self.paths:
return Error(self.path, 'C1141', u'Found python file "{}" without a corresponding pyc file' .format(filepath))
def check_menu_json_name(self):
"""Check that the Menu/package.json filename is identical to the package name."""
menu_json_files = [filepath for filepath in self.paths
if filepath.startswith('Menu/') and filepath.endswith('.json')]
if len(menu_json_files) == 1:
filename = menu_json_files[0]
if filename != '{}.json' .format(self.name):
return Error(self.path, 'C1142', u'Found invalid Menu json file "{}"' .format(filename))
elif len(menu_json_files) > 1:
return Error(self.path, 'C1143', 'Found more than one Menu json file')
def check_windows_arch(self):
"""Check that Windows package .exes and .dlls contain the correct headers."""
if self.win_pkg:
arch = self.info['arch']
if arch not in ('x86', 'x86_64'):
return Error(self.path, 'C1144', u'Found unrecognized Windows architecture "{}"' .format(arch))
for member in self.archive.getmembers():
if member.path.endswith(('.exe', '.dll')):
file_header = self.archive.extractfile(member.path).read(4096)
file_object_type = get_object_type(file_header)
if ((arch == 'x86' and file_object_type != 'DLL I386') or
(arch == 'x86_64' and file_object_type != 'DLL AMD64')):
return Error(self.path, 'C1145', u'Found file "{}" with object type "{}" but with arch "{}"' .format(member.name, file_object_type, arch))
def check_package_hashes_and_size(self):
"""Check the sha256 checksum and filesize of each file in the package."""
paths_json = json.loads(self.paths_file.decode('utf-8'))
for member in self.archive.getmembers():
if member.isfile():
file_object = self.archive.extractfile(member.name).read()
sha256_digest = hashlib.sha256(file_object).hexdigest()
for path in paths_json['paths']:
if member.name == path['_path']:
if sha256_digest != path['sha256']:
return Error(self.path, 'C1146', 'Found file "{}" with sha256 hash different than listed in paths.json' .format(member.name))
elif member.size != path['size_in_bytes']:
return Error(self.path, 'C1147', 'Found file "{}" with filesize different than listed in paths.json' .format(member.name))
class CondaRecipeCheck(object):
"""Create checks in order to validate conda recipes."""
def __init__(self, meta, recipe_dir):
"""Initialize conda recipe information for use with recipe checks."""
super(CondaRecipeCheck, self).__init__()
self.meta = meta
self.recipe_dir = recipe_dir
self.name_pat = re.compile(r'[a-z0-9_][a-z0-9_\-\.]*$')
self.version_pat = re.compile(r'[\w\.]+$')
self.url_pat = re.compile(r'(ftp|http(s)?)://')
self.hash_pat = {'md5': re.compile(r'[a-f0-9]{32}$'),
'sha1': re.compile(r'[a-f0-9]{40}$'),
'sha256': re.compile(r'[a-f0-9]{64}$')}
def check_package_name(self):
"""Check the package name in meta.yaml for proper formatting."""
package_name = self.meta.get('package', {}).get('name', '')
if package_name == '':
return Error(self.recipe_dir, 'C2101', 'Missing package name in meta.yaml')
if not self.name_pat.match(package_name) or package_name.endswith(('.', '-', '_')):
return Error(self.recipe_dir, 'C2102', u'Found invalid package name "{}" in meta.yaml' .format(package_name))
seq = get_bad_seq(package_name)
if seq:
return Error(self.recipe_dir, 'C2103', u'Found invalid sequence "{}" in package name' .format(seq))
def check_package_version(self):
"""Check the package version in meta.yaml for proper formatting."""
package_version = self.meta.get('package', {}).get('version', '')
if package_version == '':
return Error(self.recipe_dir, 'C2104', 'Missing package version in meta.yaml')
if isinstance(package_version, str):
if not self.version_pat.match(package_version) or package_version.startswith(('_', '.')) or package_version.endswith(('_', '.')):
return Error(self.recipe_dir, 'C2105', u'Found invalid package version "{}" in meta.yaml' .format(package_version))
seq = get_bad_seq(package_version)
if seq:
return Error(self.recipe_dir, 'C2106', u'Found invalid sequence "{}" in package version' .format(seq))
def check_build_number(self):
"""Check the build number in meta.yaml for proper formatting."""
build_number = self.meta.get('build', {}).get('number')
if build_number is not None:
try:
build_number = int(build_number)
if build_number < 0:
                    return Error(self.recipe_dir, 'C2108', 'Build number in meta.yaml cannot be a negative integer')
            except ValueError:
                return Error(self.recipe_dir, 'C2107', 'Build number in meta.yaml must be an integer')
def check_fields(self):
"""Check that the fields listed in meta.yaml are valid."""
for section in self.meta:
if section not in FIELDS and section != 'extra':
return Error(self.recipe_dir, 'C2109', u'Found invalid section "{}"' .format(section))
if section != 'extra':
subfield = self.meta.get(section)
if hasattr(subfield, 'keys'):
for key in subfield:
if key not in FIELDS[section]:
return Error(self.recipe_dir, 'C2110', u'Found invalid field "{}" in section "{}"' .format(key, section))
else:
# list of dicts. Used in source and outputs.
for entry in subfield:
for key in entry:
if key not in FIELDS[section]:
return Error(self.recipe_dir, 'C2110', u'Found invalid field "{}" in section "{}"' .format(key, section))
def check_requirements(self):
"""Check that the requirements listed in meta.yaml are valid."""
build_requirements = self.meta.get('requirements', {}).get('build', [])
run_requirements = self.meta.get('requirements', {}).get('run', [])
for requirement in build_requirements + run_requirements:
requirement_parts = requirement.split()
requirement_name = requirement_parts[0]
if not self.name_pat.match(requirement_name):
if requirement in build_requirements:
return Error(self.recipe_dir, 'C2111', u'Found invalid build requirement "{}"' .format(requirement))
elif requirement in run_requirements:
return Error(self.recipe_dir, 'C2112', u'Found invalid run requirement "{}"' .format(requirement))
if len(requirement_parts) == 0:
return Error(self.recipe_dir, 'C2113', 'Found empty dependencies in meta.yaml')
elif len(requirement_parts) >= 2 and not fullmatch(ver_spec_pat, requirement_parts[1]):
return Error(self.recipe_dir, 'C2114', u'Found invalid dependency "{}" in meta.yaml' .format(requirement))
if len(build_requirements) != len(set(build_requirements)):
return Error(self.recipe_dir, 'C2115', u'Found duplicate build requirements: {}' .format(build_requirements))
if len(run_requirements) != len(set(run_requirements)):
return Error(self.recipe_dir, 'C2116', u'Found duplicate run requirements: {}' .format(run_requirements))
def check_about(self):
"""Check the about field in meta.yaml for proper formatting."""
summary = self.meta.get('about', {}).get('summary')
if summary is not None and len(summary) > 80:
return Error(self.recipe_dir, 'C2117', 'Found summary with length greater than 80 characters')
home = self.meta.get('about', {}).get('home')
dev_url = self.meta.get('about', {}).get('dev_url')
doc_url = self.meta.get('about', {}).get('doc_url')
license_url = self.meta.get('about', {}).get('license_url')
for url in [home, dev_url, doc_url, license_url]:
if url is not None and not self.url_pat.match(url):
return Error(self.recipe_dir, 'C2118', u'Found invalid URL "{}" in meta.yaml' .format(url))
def check_source(self):
"""Check the source field in meta.yaml for proper formatting."""
sources = ensure_list(self.meta.get('source', {}))
for source in sources:
url = source.get('url')
if url is not None:
if self.url_pat.match(url):
for hash_algorithm in ['md5', 'sha1', 'sha256']:
hexdigest = source.get(hash_algorithm)
if hexdigest is not None and not self.hash_pat[hash_algorithm].match(hexdigest):
return Error(self.recipe_dir, 'C2119', u'Found invalid hash "{}" in meta.yaml' .format(hexdigest))
else:
return Error(self.recipe_dir, 'C2120', u'Found invalid URL "{}" in meta.yaml' .format(url))
git_url = source.get('git_url')
if git_url and (source.get('git_tag') and source.get('git_branch')):
return Error(self.recipe_dir, 'C2121', 'Found both git_branch and git_tag in meta.yaml source field')
def check_license_family(self):
"""Check that the license family listed in meta.yaml is valid."""
license_family = (self.meta.get('about', {}).get('license_family',
self.meta.get('about', {}).get('license')))
if license_family is not None and license_family not in LICENSE_FAMILIES:
return Error(self.recipe_dir, 'C2122', u'Found invalid license family "{}"' .format(license_family))
def check_for_valid_files(self):
"""Check that the files listed in meta.yaml exist."""
test_files = self.meta.get('test', {}).get('files', [])
test_source_files = self.meta.get('test', {}).get('source_files', [])
sources = ensure_list(self.meta.get('source', {}))
source_patches = []
for source in sources:
source_patches.extend(source.get('patches', []))
for filename in test_files + test_source_files + source_patches:
filepath = os.path.join(self.recipe_dir, filename)
if filename.startswith('..'):
return Error(self.recipe_dir, 'C2123', u'Found file "{}" listed outside recipe directory' .format(filename))
if not os.path.exists(filepath):
return Error(self.recipe_dir, 'C2124', u'Found file "{}" in meta.yaml that doesn\'t exist' .format(filename))
def check_dir_content(self):
"""Check for disallowed files inside the recipe directory."""
disallowed_extensions = ('.tar', '.tar.gz', '.tar.bz2', '.tar.xz',
'.so', '.dylib', '.la', '.a', '.dll', '.pyd')
for dirpath, _, filenames in os.walk(self.recipe_dir):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith(disallowed_extensions):
return Error(self.recipe_dir, 'C2125', u'Found disallowed file with extension "{}"' .format(filepath))
def check_recipes_comments(self):
"""Check for default comments in conda-forge example recipe."""
meta = os.path.join(self.recipe_dir, 'meta.yaml')
with open(meta) as meta_file:
recipe = meta_file.read().splitlines()
for line in recipe:
if line.startswith('#') and line in CONDA_FORGE_COMMENTS.splitlines():
return Error(self.recipe_dir, 'C2126', 'Found conda-forge comment in meta.yaml file')
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from devil import devil_env
from devil.android import device_errors
from devil.android import md5sum
with devil_env.SysPath(devil_env.config.LocalPath('pymock')):
import mock # pylint: disable=import-error
TEST_OUT_DIR = os.path.join('test', 'out', 'directory')
HOST_MD5_EXECUTABLE = os.path.join(TEST_OUT_DIR, 'md5sum_bin_host')
MD5_DIST = os.path.join(TEST_OUT_DIR, 'md5sum_dist')
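# These paths are placeholders only; setUp() below patches
# devil_env._Environment.FetchPath so that the 'md5sum_host' and 'md5sum_device'
# dependencies resolve to them without touching the real filesystem.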
class Md5SumTest(unittest.TestCase):
def setUp(self):
mocked_attrs = {
'md5sum_host': HOST_MD5_EXECUTABLE,
'md5sum_device': MD5_DIST,
}
self._patchers = [
mock.patch('devil.devil_env._Environment.FetchPath',
mock.Mock(side_effect=lambda a, device=None: mocked_attrs[a])),
mock.patch('os.path.exists',
new=mock.Mock(return_value=True)),
]
for p in self._patchers:
p.start()
def tearDown(self):
for p in self._patchers:
p.stop()
def testCalculateHostMd5Sums_singlePath(self):
test_path = '/test/host/file.dat'
mock_get_cmd_output = mock.Mock(
return_value='0123456789abcdeffedcba9876543210 /test/host/file.dat')
with mock.patch('devil.utils.cmd_helper.GetCmdOutput',
new=mock_get_cmd_output):
out = md5sum.CalculateHostMd5Sums(test_path)
self.assertEquals(1, len(out))
self.assertTrue('/test/host/file.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/test/host/file.dat'])
mock_get_cmd_output.assert_called_once_with(
[HOST_MD5_EXECUTABLE, '/test/host/file.dat'])
def testCalculateHostMd5Sums_list(self):
test_paths = ['/test/host/file0.dat', '/test/host/file1.dat']
mock_get_cmd_output = mock.Mock(
return_value='0123456789abcdeffedcba9876543210 /test/host/file0.dat\n'
'123456789abcdef00fedcba987654321 /test/host/file1.dat\n')
with mock.patch('devil.utils.cmd_helper.GetCmdOutput',
new=mock_get_cmd_output):
out = md5sum.CalculateHostMd5Sums(test_paths)
self.assertEquals(2, len(out))
self.assertTrue('/test/host/file0.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/test/host/file0.dat'])
self.assertTrue('/test/host/file1.dat' in out)
self.assertEquals('123456789abcdef00fedcba987654321',
out['/test/host/file1.dat'])
mock_get_cmd_output.assert_called_once_with(
[HOST_MD5_EXECUTABLE, '/test/host/file0.dat',
'/test/host/file1.dat'])
def testCalculateHostMd5Sums_generator(self):
test_paths = ('/test/host/' + p for p in ['file0.dat', 'file1.dat'])
mock_get_cmd_output = mock.Mock(
return_value='0123456789abcdeffedcba9876543210 /test/host/file0.dat\n'
'123456789abcdef00fedcba987654321 /test/host/file1.dat\n')
with mock.patch('devil.utils.cmd_helper.GetCmdOutput',
new=mock_get_cmd_output):
out = md5sum.CalculateHostMd5Sums(test_paths)
self.assertEquals(2, len(out))
self.assertTrue('/test/host/file0.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/test/host/file0.dat'])
self.assertTrue('/test/host/file1.dat' in out)
self.assertEquals('123456789abcdef00fedcba987654321',
out['/test/host/file1.dat'])
mock_get_cmd_output.assert_called_once_with(
[HOST_MD5_EXECUTABLE, '/test/host/file0.dat', '/test/host/file1.dat'])
def testCalculateDeviceMd5Sums_noPaths(self):
device = mock.NonCallableMock()
device.RunShellCommand = mock.Mock(side_effect=Exception())
out = md5sum.CalculateDeviceMd5Sums([], device)
self.assertEquals(0, len(out))
def testCalculateDeviceMd5Sums_singlePath(self):
test_path = '/storage/emulated/legacy/test/file.dat'
device = mock.NonCallableMock()
device_md5sum_output = [
'0123456789abcdeffedcba9876543210 '
'/storage/emulated/legacy/test/file.dat',
]
device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
with mock.patch('os.path.getsize', return_value=1337):
out = md5sum.CalculateDeviceMd5Sums(test_path, device)
self.assertEquals(1, len(out))
self.assertTrue('/storage/emulated/legacy/test/file.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/storage/emulated/legacy/test/file.dat'])
self.assertEquals(1, len(device.RunShellCommand.call_args_list))
def testCalculateDeviceMd5Sums_list(self):
test_path = ['/storage/emulated/legacy/test/file0.dat',
'/storage/emulated/legacy/test/file1.dat']
device = mock.NonCallableMock()
device_md5sum_output = [
'0123456789abcdeffedcba9876543210 '
'/storage/emulated/legacy/test/file0.dat',
'123456789abcdef00fedcba987654321 '
'/storage/emulated/legacy/test/file1.dat',
]
device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
with mock.patch('os.path.getsize', return_value=1337):
out = md5sum.CalculateDeviceMd5Sums(test_path, device)
self.assertEquals(2, len(out))
self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/storage/emulated/legacy/test/file0.dat'])
self.assertTrue('/storage/emulated/legacy/test/file1.dat' in out)
self.assertEquals('123456789abcdef00fedcba987654321',
out['/storage/emulated/legacy/test/file1.dat'])
self.assertEquals(1, len(device.RunShellCommand.call_args_list))
def testCalculateDeviceMd5Sums_generator(self):
test_path = ('/storage/emulated/legacy/test/file%d.dat' % n
for n in xrange(0, 2))
device = mock.NonCallableMock()
device_md5sum_output = [
'0123456789abcdeffedcba9876543210 '
'/storage/emulated/legacy/test/file0.dat',
'123456789abcdef00fedcba987654321 '
'/storage/emulated/legacy/test/file1.dat',
]
device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
with mock.patch('os.path.getsize', return_value=1337):
out = md5sum.CalculateDeviceMd5Sums(test_path, device)
self.assertEquals(2, len(out))
self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/storage/emulated/legacy/test/file0.dat'])
self.assertTrue('/storage/emulated/legacy/test/file1.dat' in out)
self.assertEquals('123456789abcdef00fedcba987654321',
out['/storage/emulated/legacy/test/file1.dat'])
self.assertEquals(1, len(device.RunShellCommand.call_args_list))
def testCalculateDeviceMd5Sums_singlePath_linkerWarning(self):
# See crbug/479966
test_path = '/storage/emulated/legacy/test/file.dat'
device = mock.NonCallableMock()
device_md5sum_output = [
'WARNING: linker: /data/local/tmp/md5sum/md5sum_bin: '
'unused DT entry: type 0x1d arg 0x15db',
'THIS_IS_NOT_A_VALID_CHECKSUM_ZZZ some random text',
'0123456789abcdeffedcba9876543210 '
'/storage/emulated/legacy/test/file.dat',
]
device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
with mock.patch('os.path.getsize', return_value=1337):
out = md5sum.CalculateDeviceMd5Sums(test_path, device)
self.assertEquals(1, len(out))
self.assertTrue('/storage/emulated/legacy/test/file.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/storage/emulated/legacy/test/file.dat'])
self.assertEquals(1, len(device.RunShellCommand.call_args_list))
def testCalculateDeviceMd5Sums_list_fileMissing(self):
test_path = ['/storage/emulated/legacy/test/file0.dat',
'/storage/emulated/legacy/test/file1.dat']
device = mock.NonCallableMock()
device_md5sum_output = [
'0123456789abcdeffedcba9876543210 '
'/storage/emulated/legacy/test/file0.dat',
'[0819/203513:ERROR:md5sum.cc(25)] Could not open file asdf',
'',
]
device.RunShellCommand = mock.Mock(return_value=device_md5sum_output)
with mock.patch('os.path.getsize', return_value=1337):
out = md5sum.CalculateDeviceMd5Sums(test_path, device)
self.assertEquals(1, len(out))
self.assertTrue('/storage/emulated/legacy/test/file0.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/storage/emulated/legacy/test/file0.dat'])
self.assertEquals(1, len(device.RunShellCommand.call_args_list))
def testCalculateDeviceMd5Sums_requiresBinary(self):
test_path = '/storage/emulated/legacy/test/file.dat'
device = mock.NonCallableMock()
device.adb = mock.NonCallableMock()
device.adb.Push = mock.Mock()
device_md5sum_output = [
'WARNING: linker: /data/local/tmp/md5sum/md5sum_bin: '
'unused DT entry: type 0x1d arg 0x15db',
'THIS_IS_NOT_A_VALID_CHECKSUM_ZZZ some random text',
'0123456789abcdeffedcba9876543210 '
'/storage/emulated/legacy/test/file.dat',
]
error = device_errors.AdbShellCommandFailedError('cmd', 'out', 2)
device.RunShellCommand = mock.Mock(
side_effect=(error, '', device_md5sum_output))
with mock.patch('os.path.isdir', return_value=True), (
mock.patch('os.path.getsize', return_value=1337)):
out = md5sum.CalculateDeviceMd5Sums(test_path, device)
self.assertEquals(1, len(out))
self.assertTrue('/storage/emulated/legacy/test/file.dat' in out)
self.assertEquals('0123456789abcdeffedcba9876543210',
out['/storage/emulated/legacy/test/file.dat'])
self.assertEquals(3, len(device.RunShellCommand.call_args_list))
device.adb.Push.assert_called_once_with(
'test/out/directory/md5sum_dist', '/data/local/tmp/md5sum')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import shutil
import numpy as np
import os
from bigdl.orca.data.image.parquet_dataset import ParquetDataset
from bigdl.orca.data.image.parquet_dataset import _write_ndarrays, write_from_directory, write_parquet
from bigdl.orca.data.image.utils import DType, FeatureType, SchemaField
from bigdl.orca.learn.tf.estimator import Estimator
from bigdl.orca.data.image import write_mnist, write_voc
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
def test_write_parquet_simple(orca_context_fixture):
sc = orca_context_fixture
temp_dir = tempfile.mkdtemp()
def generator(num):
for i in range(num):
yield {"id": i, "feature": np.zeros((10,)), "label": np.ones((4,))}
schema = {
"id": SchemaField(feature_type=FeatureType.SCALAR, dtype=DType.INT32, shape=()),
"feature": SchemaField(feature_type=FeatureType.NDARRAY, dtype=DType.FLOAT32, shape=(10,)),
"label": SchemaField(feature_type=FeatureType.NDARRAY, dtype=DType.FLOAT32, shape=(4,))
}
try:
ParquetDataset.write("file://" + temp_dir, generator(100), schema)
data, schema = ParquetDataset._read_as_dict_rdd("file://" + temp_dir)
data = data.collect()[0]
assert data['id'] == 0
assert np.all(data['feature'] == np.zeros((10,), dtype=np.float32))
assert np.all(data['label'] == np.ones((4,), dtype=np.float32))
finally:
shutil.rmtree(temp_dir)
def test_write_parquet_images(orca_context_fixture):
sc = orca_context_fixture
temp_dir = tempfile.mkdtemp()
def generator():
dataset_path = os.path.join(resource_path, "cat_dog")
for root, dirs, files in os.walk(os.path.join(dataset_path, "cats")):
for name in files:
image_path = os.path.join(root, name)
yield {"image": image_path, "label": 1, "id": image_path}
for root, dirs, files in os.walk(os.path.join(dataset_path, "dogs")):
for name in files:
image_path = os.path.join(root, name)
yield {"image": image_path, "label": 0, "id": image_path}
schema = {
"image": SchemaField(feature_type=FeatureType.IMAGE, dtype=DType.FLOAT32, shape=(10,)),
"label": SchemaField(feature_type=FeatureType.NDARRAY, dtype=DType.FLOAT32, shape=(4,)),
"id": SchemaField(feature_type=FeatureType.SCALAR, dtype=DType.STRING, shape=())
}
try:
ParquetDataset.write("file://" + temp_dir, generator(), schema)
data, schema = ParquetDataset._read_as_dict_rdd("file://" + temp_dir)
data = data.collect()[0]
image_path = data['id']
with open(image_path, "rb") as f:
image_bytes = f.read()
assert image_bytes == data['image']
finally:
shutil.rmtree(temp_dir)
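# The two helpers below write minimal files in the MNIST/IDX layout: a 4-byte
# big-endian magic number (2051 for image files, 2049 for label files), the
# array shape as int32 values byte-swapped to big-endian (assuming a
# little-endian host), and then the raw uint8 payload.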
def _images_to_mnist_file(images, filepath):
assert len(images.shape) == 3
assert images.dtype == np.uint8
with open(filepath, "wb") as f:
f.write(int(2051).to_bytes(4, "big"))
f.write(np.array(images.shape).astype(np.int32).byteswap().tobytes())
f.write(images.tobytes())
def _labels_to_mnist_file(labels, filepath):
assert len(labels.shape) == 1
assert labels.dtype == np.uint8
with open(filepath, "wb") as f:
f.write(int(2049).to_bytes(4, "big"))
f.write(np.array(labels.shape).astype(np.int32).byteswap().tobytes())
f.write(labels.tobytes())
def test_write_mnist(orca_context_fixture, use_api=False):
sc = orca_context_fixture
temp_dir = tempfile.mkdtemp()
try:
train_image_file = os.path.join(temp_dir, "train-images")
train_label_file = os.path.join(temp_dir, "train-labels")
output_path = os.path.join(temp_dir, "output_dataset")
images = np.array([[i] * 16 for i in range(20)]).reshape((20, 4, 4)).astype(np.uint8)
labels = np.array(list(range(20))).reshape((20,)).astype(np.uint8)
_images_to_mnist_file(images, train_image_file)
_labels_to_mnist_file(labels, train_label_file)
if use_api:
write_parquet("mnist", "file://" + output_path,
image_file=train_image_file,
label_file=train_label_file)
else:
write_mnist(image_file=train_image_file,
label_file=train_label_file,
output_path="file://" + output_path)
data, schema = ParquetDataset._read_as_dict_rdd("file://" + output_path)
data = data.sortBy(lambda x: x['label']).collect()
images_load = np.reshape(np.stack([d['image'] for d in data]), (-1, 4, 4))
labels_load = np.stack([d['label'] for d in data])
assert np.all(images_load == images)
        assert np.all(labels_load == labels)
finally:
shutil.rmtree(temp_dir)
def test_write_voc(orca_context_fixture, use_api=False):
sc = orca_context_fixture
temp_dir = tempfile.mkdtemp()
try:
from bigdl.orca.data import SparkXShards
dataset_path = os.path.join(resource_path, "VOCdevkit")
output_path = os.path.join(temp_dir, "output_dataset")
if use_api:
write_parquet("voc", "file://" + output_path, voc_root_path=dataset_path,
splits_names=[(2007, "trainval")])
else:
write_voc(dataset_path, splits_names=[(2007, "trainval")],
output_path="file://" + output_path)
data, schema = ParquetDataset._read_as_dict_rdd("file://" + output_path)
data = data.collect()[0]
image_path = data["image_id"]
with open(image_path, "rb") as f:
image_bytes = f.read()
assert image_bytes == data['image']
finally:
shutil.rmtree(temp_dir)
def test_train_simple(orca_context_fixture):
sc = orca_context_fixture
temp_dir = tempfile.mkdtemp()
try:
_write_ndarrays(images=np.random.randn(500, 28, 28, 1).astype(np.float32),
labels=np.random.randint(0, 10, (500,)).astype(np.int32),
output_path="file://" + temp_dir)
dataset = ParquetDataset.read_as_tf("file://" + temp_dir)
def preprocess(data):
return data['image'], data["label"]
dataset = dataset.map(preprocess)
import tensorflow as tf
model = tf.keras.Sequential(
[tf.keras.layers.Conv2D(20, kernel_size=(5, 5), strides=(1, 1), activation='tanh',
input_shape=(28, 28, 1), padding='valid'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Conv2D(50, kernel_size=(5, 5), strides=(1, 1), activation='tanh',
padding='valid'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(500, activation='tanh'),
tf.keras.layers.Dense(10, activation='softmax'),
]
)
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
est = Estimator.from_keras(keras_model=model)
est.fit(data=dataset,
batch_size=100,
epochs=1)
finally:
shutil.rmtree(temp_dir)
def test_write_from_directory(orca_context_fixture, use_api=False):
sc = orca_context_fixture
temp_dir = tempfile.mkdtemp()
try:
label_map = {"cats": 0, "dogs": 1}
if use_api:
write_parquet("image_folder", "file://" + temp_dir,
directory=os.path.join(resource_path, "cat_dog"),
label_map=label_map)
else:
write_from_directory(os.path.join(resource_path, "cat_dog"),
label_map, "file://" + temp_dir)
train_xshard = ParquetDataset._read_as_xshards("file://" + temp_dir)
data = train_xshard.collect()[0]
image_path = data["image_id"][0]
with open(image_path, "rb") as f:
image_bytes = f.read()
assert image_bytes == data['image'][0]
finally:
shutil.rmtree(temp_dir)
def test_write_parquet_api(orca_context_fixture):
test_write_mnist(orca_context_fixture, True)
test_write_voc(orca_context_fixture, True)
test_write_from_directory(orca_context_fixture, True)
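# Hedged usage note (not part of the original tests): these functions expect a
# pytest fixture named orca_context_fixture that yields an initialized Spark
# context. A conftest.py along the lines of the sketch below (fixture scope
# and core count are assumptions) would make the module runnable with pytest:
#
#     import pytest
#     from bigdl.orca import init_orca_context, stop_orca_context
#
#     @pytest.fixture(scope="module")
#     def orca_context_fixture():
#         sc = init_orca_context(cores=4)
#         yield sc
#         stop_orca_context()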
|
|
"""
Provides functionality to interact with hvacs.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/hvac/
"""
import logging
import os
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.config import load_yaml_config_file
import homeassistant.util as util
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.temperature import convert
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components import zwave
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, STATE_ON, STATE_OFF, STATE_UNKNOWN,
TEMP_CELCIUS)
DOMAIN = "hvac"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = 60
SERVICE_SET_AWAY_MODE = "set_away_mode"
SERVICE_SET_AUX_HEAT = "set_aux_heat"
SERVICE_SET_TEMPERATURE = "set_temperature"
SERVICE_SET_FAN_MODE = "set_fan_mode"
SERVICE_SET_OPERATION_MODE = "set_operation_mode"
SERVICE_SET_SWING_MODE = "set_swing_mode"
SERVICE_SET_HUMIDITY = "set_humidity"
STATE_HEAT = "heat"
STATE_COOL = "cool"
STATE_IDLE = "idle"
STATE_AUTO = "auto"
STATE_DRY = "dry"
STATE_FAN_ONLY = "fan_only"
ATTR_CURRENT_TEMPERATURE = "current_temperature"
ATTR_MAX_TEMP = "max_temp"
ATTR_MIN_TEMP = "min_temp"
ATTR_AWAY_MODE = "away_mode"
ATTR_AUX_HEAT = "aux_heat"
ATTR_FAN_MODE = "fan_mode"
ATTR_FAN_LIST = "fan_list"
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_HUMIDITY = "humidity"
ATTR_MAX_HUMIDITY = "max_humidity"
ATTR_MIN_HUMIDITY = "min_humidity"
ATTR_OPERATION_MODE = "operation_mode"
ATTR_OPERATION_LIST = "operation_list"
ATTR_SWING_MODE = "swing_mode"
ATTR_SWING_LIST = "swing_list"
_LOGGER = logging.getLogger(__name__)
DISCOVERY_PLATFORMS = {
zwave.DISCOVER_HVAC: 'zwave'
}
def set_away_mode(hass, away_mode, entity_id=None):
"""Turn all or specified hvac away mode on."""
data = {
ATTR_AWAY_MODE: away_mode
}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SET_AWAY_MODE, data)
def set_aux_heat(hass, aux_heat, entity_id=None):
"""Turn all or specified hvac auxillary heater on."""
data = {
ATTR_AUX_HEAT: aux_heat
}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SET_AUX_HEAT, data)
def set_temperature(hass, temperature, entity_id=None):
"""Set new target temperature."""
data = {ATTR_TEMPERATURE: temperature}
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SET_TEMPERATURE, data)
def set_humidity(hass, humidity, entity_id=None):
"""Set new target humidity."""
data = {ATTR_HUMIDITY: humidity}
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SET_HUMIDITY, data)
def set_fan_mode(hass, fan, entity_id=None):
"""Turn all or specified hvac fan mode on."""
data = {ATTR_FAN_MODE: fan}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SET_FAN_MODE, data)
def set_operation_mode(hass, operation_mode, entity_id=None):
"""Set new target operation mode."""
data = {ATTR_OPERATION_MODE: operation_mode}
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SET_OPERATION_MODE, data)
def set_swing_mode(hass, swing_mode, entity_id=None):
"""Set new target swing mode."""
data = {ATTR_SWING_MODE: swing_mode}
if entity_id is not None:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SET_SWING_MODE, data)
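# Hedged usage sketch: each helper above only packs its arguments into a
# service-data dict and calls the matching service in DOMAIN, so from code
# running inside Home Assistant the calls might look like the following
# (the entity id is an illustrative assumption):
#
#     set_temperature(hass, 22.5, entity_id="hvac.living_room")
#     set_operation_mode(hass, STATE_COOL, entity_id="hvac.living_room")
#     set_away_mode(hass, True)  # applies to all hvac entities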
# pylint: disable=too-many-branches
def setup(hass, config):
"""Setup hvacs."""
component = EntityComponent(_LOGGER, DOMAIN, hass,
SCAN_INTERVAL, DISCOVERY_PLATFORMS)
component.setup(config)
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
def away_mode_set_service(service):
"""Set away mode on target hvacs."""
target_hvacs = component.extract_from_service(service)
away_mode = service.data.get(ATTR_AWAY_MODE)
if away_mode is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_AWAY_MODE, ATTR_AWAY_MODE)
return
for hvac in target_hvacs:
if away_mode:
hvac.turn_away_mode_on()
else:
hvac.turn_away_mode_off()
if hvac.should_poll:
hvac.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_AWAY_MODE, away_mode_set_service,
descriptions.get(SERVICE_SET_AWAY_MODE))
def aux_heat_set_service(service):
"""Set auxillary heater on target hvacs."""
target_hvacs = component.extract_from_service(service)
aux_heat = service.data.get(ATTR_AUX_HEAT)
if aux_heat is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_AUX_HEAT, ATTR_AUX_HEAT)
return
for hvac in target_hvacs:
if aux_heat:
hvac.turn_aux_heat_on()
else:
hvac.turn_aux_heat_off()
if hvac.should_poll:
hvac.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_AUX_HEAT, aux_heat_set_service,
descriptions.get(SERVICE_SET_AUX_HEAT))
def temperature_set_service(service):
"""Set temperature on the target hvacs."""
target_hvacs = component.extract_from_service(service)
temperature = util.convert(
service.data.get(ATTR_TEMPERATURE), float)
if temperature is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_TEMPERATURE, ATTR_TEMPERATURE)
return
for hvac in target_hvacs:
hvac.set_temperature(convert(
temperature, hass.config.temperature_unit,
hvac.unit_of_measurement))
if hvac.should_poll:
hvac.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_TEMPERATURE, temperature_set_service,
descriptions.get(SERVICE_SET_TEMPERATURE))
def humidity_set_service(service):
"""Set humidity on the target hvacs."""
target_hvacs = component.extract_from_service(service)
humidity = service.data.get(ATTR_HUMIDITY)
if humidity is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_HUMIDITY, ATTR_HUMIDITY)
return
for hvac in target_hvacs:
hvac.set_humidity(humidity)
if hvac.should_poll:
hvac.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_HUMIDITY, humidity_set_service,
descriptions.get(SERVICE_SET_HUMIDITY))
def fan_mode_set_service(service):
"""Set fan mode on target hvacs."""
target_hvacs = component.extract_from_service(service)
fan = service.data.get(ATTR_FAN_MODE)
if fan is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_FAN_MODE, ATTR_FAN_MODE)
return
for hvac in target_hvacs:
hvac.set_fan_mode(fan)
if hvac.should_poll:
hvac.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_FAN_MODE, fan_mode_set_service,
descriptions.get(SERVICE_SET_FAN_MODE))
def operation_set_service(service):
"""Set operating mode on the target hvacs."""
target_hvacs = component.extract_from_service(service)
operation_mode = service.data.get(ATTR_OPERATION_MODE)
if operation_mode is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_OPERATION_MODE, ATTR_OPERATION_MODE)
return
for hvac in target_hvacs:
hvac.set_operation_mode(operation_mode)
if hvac.should_poll:
hvac.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_OPERATION_MODE, operation_set_service,
descriptions.get(SERVICE_SET_OPERATION_MODE))
def swing_set_service(service):
"""Set swing mode on the target hvacs."""
target_hvacs = component.extract_from_service(service)
swing_mode = service.data.get(ATTR_SWING_MODE)
if swing_mode is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_SWING_MODE, ATTR_SWING_MODE)
return
for hvac in target_hvacs:
hvac.set_swing_mode(swing_mode)
if hvac.should_poll:
hvac.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_SWING_MODE, swing_set_service,
descriptions.get(SERVICE_SET_SWING_MODE))
return True
class HvacDevice(Entity):
"""Representation of a hvac."""
# pylint: disable=too-many-public-methods,no-self-use
@property
def state(self):
"""Return the current state."""
return self.current_operation or STATE_UNKNOWN
@property
def state_attributes(self):
"""Return the optional state attributes."""
data = {
ATTR_CURRENT_TEMPERATURE:
self._convert_for_display(self.current_temperature),
ATTR_MIN_TEMP: self._convert_for_display(self.min_temp),
ATTR_MAX_TEMP: self._convert_for_display(self.max_temp),
ATTR_TEMPERATURE:
self._convert_for_display(self.target_temperature),
}
humidity = self.target_humidity
if humidity is not None:
data[ATTR_HUMIDITY] = humidity
data[ATTR_CURRENT_HUMIDITY] = self.current_humidity
data[ATTR_MIN_HUMIDITY] = self.min_humidity
data[ATTR_MAX_HUMIDITY] = self.max_humidity
fan_mode = self.current_fan_mode
if fan_mode is not None:
data[ATTR_FAN_MODE] = fan_mode
data[ATTR_FAN_LIST] = self.fan_list
operation_mode = self.current_operation
if operation_mode is not None:
data[ATTR_OPERATION_MODE] = operation_mode
data[ATTR_OPERATION_LIST] = self.operation_list
swing_mode = self.current_swing_mode
if swing_mode is not None:
data[ATTR_SWING_MODE] = swing_mode
data[ATTR_SWING_LIST] = self.swing_list
is_away = self.is_away_mode_on
if is_away is not None:
data[ATTR_AWAY_MODE] = STATE_ON if is_away else STATE_OFF
is_aux_heat = self.is_aux_heat_on
if is_aux_heat is not None:
data[ATTR_AUX_HEAT] = STATE_ON if is_aux_heat else STATE_OFF
return data
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
raise NotImplementedError
@property
def current_humidity(self):
"""Return the current humidity."""
return None
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
return None
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return None
@property
def operation_list(self):
"""List of available operation modes."""
return None
@property
def current_temperature(self):
"""Return the current temperature."""
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
raise NotImplementedError
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return None
@property
def is_aux_heat_on(self):
"""Return true if away mode is on."""
return None
@property
def current_fan_mode(self):
"""Return the fan setting."""
return None
@property
def fan_list(self):
"""List of available fan modes."""
return None
@property
def current_swing_mode(self):
"""Return the fan setting."""
return None
@property
def swing_list(self):
"""List of available swing modes."""
return None
def set_temperature(self, temperature):
"""Set new target temperature."""
pass
def set_humidity(self, humidity):
"""Set new target humidity."""
pass
def set_fan_mode(self, fan):
"""Set new target fan mode."""
pass
def set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
pass
def set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
pass
def turn_away_mode_on(self):
"""Turn away mode on."""
pass
def turn_away_mode_off(self):
"""Turn away mode off."""
pass
def turn_aux_heat_on(self):
"""Turn auxillary heater on."""
pass
def turn_aux_heat_off(self):
"""Turn auxillary heater off."""
pass
@property
def min_temp(self):
"""Return the minimum temperature."""
return convert(19, TEMP_CELCIUS, self.unit_of_measurement)
@property
def max_temp(self):
"""Return the maximum temperature."""
return convert(30, TEMP_CELCIUS, self.unit_of_measurement)
@property
def min_humidity(self):
"""Return the minimum humidity."""
return 30
@property
def max_humidity(self):
"""Return the maximum humidity."""
return 99
def _convert_for_display(self, temp):
"""Convert temperature into preferred units for display purposes."""
if temp is None:
return None
value = convert(temp, self.unit_of_measurement,
self.hass.config.temperature_unit)
if self.hass.config.temperature_unit is TEMP_CELCIUS:
decimal_count = 1
else:
# Users of fahrenheit generally expect integer units.
decimal_count = 0
return round(value, decimal_count)
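# Hedged sketch (illustrative only, not a real Home Assistant platform): a
# minimal platform class would subclass HvacDevice and override at least the
# unit of measurement, the temperature properties and set_temperature; all
# other features fall back to the None/NotImplementedError defaults above.
class _DemoHvac(HvacDevice):
    """Minimal in-memory hvac used only to illustrate the interface."""

    def __init__(self):
        self._target = 21.0
        self._current = 19.5

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELCIUS

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target

    def set_temperature(self, temperature):
        """Set new target temperature."""
        self._target = temperature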
|
|
"""Main module."""
import time as _time
from math import log10 as _log10, floor as _floor
import numpy as _np
import matplotlib.pyplot as _plt
import matplotlib.gridspec as _mpl_gs
from siriuspy.devices import SOFB, RFGen, Tune, BunchbyBunch
from pymodels import si as _si
import pyaccel as _pyacc
from ..utils import ThreadedMeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class MeasParams(_ParamsBaseClass):
"""."""
def __init__(self):
"""."""
super().__init__()
self.delta_freq = 200 # [Hz]
self.meas_nrsteps = 8
self.npoints = 5
self.wait_tune = 5 # [s]
self.timeout_wait_sofb = 3 # [s]
self.sofb_nrpoints = 10
def __str__(self):
"""."""
ftmp = '{0:24s} = {1:9.3f} {2:s}\n'.format
dtmp = '{0:24s} = {1:9d} {2:s}\n'.format
stg = ftmp('delta_freq [Hz]', self.delta_freq, '')
stg += dtmp('meas_nrsteps', self.meas_nrsteps, '')
stg += ftmp('wait_tune [s]', self.wait_tune, '')
stg += ftmp(
'timeout_wait_sofb [s]', self.timeout_wait_sofb, '(get orbit)')
stg += dtmp('sofb_nrpoints', self.sofb_nrpoints, '')
return stg
class MeasDispChrom(_BaseClass):
"""."""
MOM_COMPACT = 1.68e-4
def __init__(self, isonline=True):
"""."""
super().__init__(
params=MeasParams(), target=self._do_meas, isonline=isonline)
if self.isonline:
self.devices['sofb'] = SOFB(SOFB.DEVICES.SI)
self.devices['tune'] = Tune(Tune.DEVICES.SI)
self.devices['rf'] = RFGen()
self.devices['bbbh'] = BunchbyBunch(BunchbyBunch.DEVICES.H)
self.devices['bbbv'] = BunchbyBunch(BunchbyBunch.DEVICES.V)
def __str__(self):
"""."""
stn = 'Params\n'
stp = self.params.__str__()
stp = ' ' + stp.replace('\n', '\n ')
stn += stp + '\n'
stn += 'Connected? ' + str(self.connected) + '\n\n'
return stn
def _do_meas(self):
sofb = self.devices['sofb']
rfgen = self.devices['rf']
tune = self.devices['tune']
bbbh, bbbv = self.devices['bbbh'], self.devices['bbbv']
loop_on = False
if sofb.autocorrsts:
loop_on = True
            print('SOFB feedback is enabled, disabling it...')
sofb.cmd_turn_off_autocorr()
delta_freq = self.params.delta_freq
npoints = self.params.meas_nrsteps
sofb.nr_points = self.params.sofb_nrpoints
freq0 = rfgen.frequency
tunex0, tuney0 = tune.tunex, tune.tuney
tunex0_bbb = bbbh.sram.spec_marker1_tune
tuney0_bbb = bbbv.sram.spec_marker1_tune
orbx0, orby0 = sofb.orbx, sofb.orby
span = _np.linspace(freq0-delta_freq/2, freq0+delta_freq/2, npoints)
freq = []
tunex, tuney = [], []
tunex_bbb, tuney_bbb = [], []
orbx, orby = [], []
for frq in span:
if self._stopevt.is_set():
print(' exiting...')
break
rfgen.frequency = frq
sofb.cmd_reset()
_time.sleep(self.params.wait_tune)
sofb.wait_buffer(self.params.timeout_wait_sofb)
freq.append(rfgen.frequency)
orbx.append(sofb.orbx)
orby.append(sofb.orby)
tunex.append(tune.tunex)
tuney.append(tune.tuney)
tunex_bbb.append(bbbh.sram.spec_marker1_tune)
tuney_bbb.append(bbbv.sram.spec_marker1_tune)
print('delta frequency: {} Hz'.format((
rfgen.frequency-freq0)))
dtunex = tunex[-1] - tunex0
dtuney = tuney[-1] - tuney0
dtunex_bbb = tunex_bbb[-1] - tunex0_bbb
dtuney_bbb = tuney_bbb[-1] - tuney0_bbb
print(f'(Spec. Analy.) dtune x: {dtunex:} y: {dtuney:}')
print(f'(BunchbyBunch) dtune x: {dtunex_bbb:} y: {dtuney_bbb:}')
print('')
print('Restoring RF frequency...')
rfgen.frequency = freq0
self.data['freq0'] = freq0
self.data['tunex0'] = tunex0
self.data['tuney0'] = tuney0
self.data['tunex0_bbb'] = tunex0_bbb
self.data['tuney0_bbb'] = tuney0_bbb
self.data['orbx0'] = _np.array(orbx0)
self.data['orby0'] = _np.array(orby0)
self.data['freq'] = _np.array(freq)
self.data['tunex'] = _np.array(tunex)
self.data['tuney'] = _np.array(tuney)
self.data['tunex_bbb'] = _np.array(tunex_bbb)
self.data['tuney_bbb'] = _np.array(tuney_bbb)
self.data['orbx'] = _np.array(orbx)
self.data['orby'] = _np.array(orby)
if loop_on:
            print('SOFB feedback was enabled, restoring original state...')
sofb.cmd_turn_on_autocorr()
print('Finished!')
def process_data(self, fitorder=1, discardpoints=None, tunes_from='spec'):
"""."""
data = self.data
usepts = set(range(data['tunex'].shape[0]))
if discardpoints is not None:
usepts = set(usepts) - set(discardpoints)
usepts = sorted(usepts)
freq0 = data['freq0']
den = -(data['freq'] - freq0)/freq0/self.MOM_COMPACT
den = den[usepts]
suffix = ''
if tunes_from.lower() == 'bbb':
suffix = '_bbb'
tunex = data['tunex' + suffix][usepts]
tuney = data['tuney' + suffix][usepts]
orbx = data['orbx'][usepts, :]
orby = data['orby'][usepts, :]
if tunex.size > fitorder + 1:
chromx, chromxcov = _np.polyfit(den, tunex, deg=fitorder, cov=True)
chromy, chromycov = _np.polyfit(den, tuney, deg=fitorder, cov=True)
dispx, dispxcov = _np.polyfit(den, orbx, deg=fitorder, cov=True)
dispy, dispycov = _np.polyfit(den, orby, deg=fitorder, cov=True)
else:
chromx = _np.polyfit(den, tunex, deg=fitorder, cov=False)
chromy = _np.polyfit(den, tuney, deg=fitorder, cov=False)
dispx = _np.polyfit(den, orbx, deg=fitorder, cov=False)
dispy = _np.polyfit(den, orby, deg=fitorder, cov=False)
chromxcov = chromycov = _np.zeros(
(fitorder+1, fitorder+1), dtype=float)
dispxcov = dispycov = _np.zeros(
(fitorder+1, fitorder+1, orbx.shape[1]), dtype=float)
um2m = 1e-6
self.analysis['delta'] = den
self.analysis['orbx'] = orbx
self.analysis['orby'] = orby
self.analysis['dispx'] = dispx * um2m
self.analysis['dispy'] = dispy * um2m
self.analysis['dispx_err'] = _np.sqrt(_np.diagonal(dispxcov)) * um2m
self.analysis['dispy_err'] = _np.sqrt(_np.diagonal(dispycov)) * um2m
self.analysis['tunex'] = tunex
self.analysis['tuney'] = tuney
self.analysis['chromx'] = chromx
self.analysis['chromy'] = chromy
self.analysis['chromx_err'] = _np.sqrt(_np.diagonal(chromxcov))
self.analysis['chromy_err'] = _np.sqrt(_np.diagonal(chromycov))
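    # Hedged note on the fit above (added explanation, not original code): the
    # relative momentum deviation is reconstructed from the RF frequency shift
    # as delta = -(f - f0) / (f0 * MOM_COMPACT), and chromaticity/dispersion
    # are the coefficients of the polynomial fits of tune(delta) and
    # orbit(delta). With fitorder=1, numpy.polyfit returns [slope, intercept],
    # so chromx[0] is the linear chromaticity and chromx[-1] the tune at
    # delta = 0, which is why make_figure_chrom subtracts chromx[-1].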
def make_figure_chrom(self, analysis=None, title='', fname=''):
"""."""
fig = _plt.figure(figsize=(10, 5))
grid = _mpl_gs.GridSpec(1, 1)
grid.update(
left=0.12, right=0.95, bottom=0.15, top=0.9,
hspace=0.5, wspace=0.35)
if title:
fig.suptitle(title)
if analysis is None:
analysis = self.analysis
        den = analysis['delta']
        tunex = analysis['tunex']
        tuney = analysis['tuney']
        chromx = analysis['chromx']
        chromx_err = analysis['chromx_err']
        chromy = analysis['chromy']
        chromy_err = analysis['chromy_err']
dtunex = tunex - chromx[-1]
dtuney = tuney - chromy[-1]
dtunex_fit = _np.polyval(chromx, den) - chromx[-1]
dtuney_fit = _np.polyval(chromy, den) - chromy[-1]
axx = _plt.subplot(grid[0, 0])
axx.plot(den*100, dtunex*1000, '.b', label='horizontal')
axx.plot(den*100, dtunex_fit*1000, '-b')
axx.plot(den*100, dtuney*1000, '.r', label='vertical')
axx.plot(den*100, dtuney_fit*1000, '-r')
axx.set_xlabel(r'$\delta$ [%]')
axx.set_ylabel(r'$\Delta \nu \times 1000$')
chromx = _np.flip(chromx)
chromx_err = _np.flip(chromx_err)
chromy = _np.flip(chromy)
chromy_err = _np.flip(chromy_err)
stx = MeasDispChrom.polynomial_to_latex(chromx, chromx_err)
sty = MeasDispChrom.polynomial_to_latex(chromy, chromy_err)
stg = r'$\Delta\nu_x = $' + stx + '\n'
stg += r'$\Delta\nu_y = $' + sty
axx.text(
0.4, 0.05, stg, horizontalalignment='left',
verticalalignment='bottom', transform=axx.transAxes,
bbox=dict(edgecolor='k', facecolor='w', alpha=1.0))
axx.legend()
axx.grid(True)
if fname:
fig.savefig(fname+'.svg')
_plt.close()
else:
fig.show()
def make_figure_disp(self, analysis=None, disporder=1, title='', fname=''):
"""."""
fig = _plt.figure(figsize=(10, 5))
grid = _mpl_gs.GridSpec(1, 1)
grid.update(
left=0.12, right=0.95, bottom=0.15, top=0.9,
hspace=0.5, wspace=0.35)
if title:
fig.suptitle(title)
if analysis is None:
analysis = self.analysis
simod = _si.create_accelerator()
fam = _si.get_family_data(simod)
spos = _pyacc.lattice.find_spos(simod, indices='open')
bpmidx = _np.array(fam['BPM']['index']).ravel()
sposbpm = spos[bpmidx]
fitorder_anlys = analysis['dispx'].shape[0] - 1
if disporder > fitorder_anlys:
            raise Exception(
                'It does not make sense to plot a fit order higher than '
                'the order used in the analysis')
fitidx = fitorder_anlys - disporder
dispx = analysis['dispx'][fitidx, :]
dispy = analysis['dispy'][fitidx, :]
dispx_err = analysis['dispx_err'][:, fitidx]
dispy_err = analysis['dispy_err'][:, fitidx]
m2cm = 100
axx = _plt.subplot(grid[0, 0])
axx.errorbar(
sposbpm, dispx*m2cm, dispx_err*m2cm, None, '.-b',
label='horizontal')
axx.errorbar(
sposbpm, dispy*m2cm, dispy_err*m2cm, None, '.-r', label='vertical')
axx.set_xlabel('s [m]')
ylabel = r'$\eta_{:d}$ [cm]'.format(disporder)
axx.set_ylabel(ylabel)
axx.legend()
axx.grid(True)
if fname:
fig.savefig(fname+'.svg')
_plt.close()
else:
fig.show()
# Adapted from:
# https://perso.crans.org/besson/publis/notebooks/
# Demonstration%20of%20numpy.polynomial.
# Polynomial%20and%20nice%20display%20with%20LaTeX%20and%20MathJax%20
# (python3).html
@staticmethod
def polynomial_to_latex(poly, error):
""" Small function to print nicely the polynomial p as we write it in
maths, in LaTeX code."""
poly = _np.poly1d(poly)
        coefs = poly.coef  # List of coefficients, assumed in increasing-degree order
res = '' # The resulting string
for idx, coef_idx in enumerate(coefs):
err = error[idx]
sig_fig = int(_floor(_log10(abs(err))))
err = round(err, -sig_fig)
coef_idx = round(coef_idx, -sig_fig)
if int(coef_idx) == coef_idx: # Remove the trailing .0
coef_idx = int(coef_idx)
if idx == 0: # First coefficient, no need for X
continue
elif idx == 1: # Second coefficient, only X and not X**i
                if coef_idx == 1:  # coef_idx = 1 does not need to be displayed
                    res += r"\delta + "
                elif coef_idx > 0:
                    res += r"({a} \pm {b}) \;\delta + ".format(
                        a="{%g}" % coef_idx, b="{%g}" % err)
                elif coef_idx < 0:
                    res += r"({a} \pm {b}) \;\delta + ".format(
                        a="{%g}" % coef_idx, b="{%g}" % err)
            else:
                if coef_idx == 1:
                    # Special care is needed to put the exponent in {..}
                    # in LaTeX.
                    res += r"\delta^{i} + ".format(i="{%d}" % idx)
                elif coef_idx > 0:
                    res += r"({a} \pm {b}) \;\delta^{i} + ".format(
                        a="{%g}" % coef_idx, b="{%g}" % err, i="{%d}" % idx)
                elif coef_idx < 0:
                    res += r"({a} \pm {b}) \;\delta^{i} + ".format(
                        a="{%g}" % coef_idx, b="{%g}" % err, i="{%d}" % idx)
return "$" + res[:-3] + "$" if res else ""
|