import pytest
pytest.importorskip("kick")
from ansible.module_utils.network.ftd.device import FtdPlatformFactory, FtdModel, FtdAsa5500xPlatform, \
Ftd2100Platform, AbstractFtdPlatform
from units.modules.network.ftd.test_ftd_install import DEFAULT_MODULE_PARAMS
class TestFtdModel(object):
def test_has_value_should_return_true_for_existing_models(self):
assert FtdModel.FTD_2120 in FtdModel.supported_models()
assert FtdModel.FTD_ASA5516_X in FtdModel.supported_models()
def test_has_value_should_return_false_for_non_existing_models(self):
assert 'nonExistingModel' not in FtdModel.supported_models()
assert None not in FtdModel.supported_models()
class TestFtdPlatformFactory(object):
@pytest.fixture(autouse=True)
def mock_devices(self, mocker):
mocker.patch('ansible.module_utils.network.ftd.device.Kp')
mocker.patch('ansible.module_utils.network.ftd.device.Ftd5500x')
def test_factory_should_return_corresponding_platform(self):
ftd_platform = FtdPlatformFactory.create(FtdModel.FTD_ASA5508_X, dict(DEFAULT_MODULE_PARAMS))
assert type(ftd_platform) is FtdAsa5500xPlatform
ftd_platform = FtdPlatformFactory.create(FtdModel.FTD_2130, dict(DEFAULT_MODULE_PARAMS))
assert type(ftd_platform) is Ftd2100Platform
def test_factory_should_raise_error_with_not_supported_model(self):
with pytest.raises(ValueError) as ex:
FtdPlatformFactory.create('nonExistingModel', dict(DEFAULT_MODULE_PARAMS))
assert "FTD model 'nonExistingModel' is not supported by this module." == ex.value.args[0]
class TestAbstractFtdPlatform(object):
def test_install_ftd_image_raise_error_on_abstract_class(self):
with pytest.raises(NotImplementedError):
AbstractFtdPlatform().install_ftd_image(dict(DEFAULT_MODULE_PARAMS))
def test_supports_ftd_model_should_return_true_for_supported_models(self):
assert Ftd2100Platform.supports_ftd_model(FtdModel.FTD_2120)
assert FtdAsa5500xPlatform.supports_ftd_model(FtdModel.FTD_ASA5516_X)
def test_supports_ftd_model_should_return_false_for_non_supported_models(self):
assert not AbstractFtdPlatform.supports_ftd_model(FtdModel.FTD_2120)
assert not Ftd2100Platform.supports_ftd_model(FtdModel.FTD_ASA5508_X)
assert not FtdAsa5500xPlatform.supports_ftd_model(FtdModel.FTD_2120)
def test_parse_rommon_file_location(self):
server, path = AbstractFtdPlatform.parse_rommon_file_location('tftp://1.2.3.4/boot/rommon-boot.foo')
assert '1.2.3.4' == server
assert '/boot/rommon-boot.foo' == path
def test_parse_rommon_file_location_should_fail_for_non_tftp_protocol(self):
with pytest.raises(ValueError) as ex:
AbstractFtdPlatform.parse_rommon_file_location('http://1.2.3.4/boot/rommon-boot.foo')
assert 'The ROMMON image must be downloaded from TFTP server' in str(ex.value)
class TestFtd2100Platform(object):
@pytest.fixture
def kp_mock(self, mocker):
return mocker.patch('ansible.module_utils.network.ftd.device.Kp')
@pytest.fixture
def module_params(self):
return dict(DEFAULT_MODULE_PARAMS)
def test_install_ftd_image_should_call_kp_module(self, kp_mock, module_params):
ftd = FtdPlatformFactory.create(FtdModel.FTD_2110, module_params)
ftd.install_ftd_image(module_params)
assert kp_mock.called
assert kp_mock.return_value.ssh_console.called
ftd_line = kp_mock.return_value.ssh_console.return_value
assert ftd_line.baseline_fp2k_ftd.called
assert ftd_line.disconnect.called
def test_install_ftd_image_should_call_disconnect_when_install_fails(self, kp_mock, module_params):
ftd_line = kp_mock.return_value.ssh_console.return_value
ftd_line.baseline_fp2k_ftd.side_effect = Exception('Something went wrong')
ftd = FtdPlatformFactory.create(FtdModel.FTD_2120, module_params)
with pytest.raises(Exception):
ftd.install_ftd_image(module_params)
assert ftd_line.baseline_fp2k_ftd.called
assert ftd_line.disconnect.called
class TestFtdAsa5500xPlatform(object):
@pytest.fixture
def asa5500x_mock(self, mocker):
return mocker.patch('ansible.module_utils.network.ftd.device.Ftd5500x')
@pytest.fixture
def module_params(self):
return dict(DEFAULT_MODULE_PARAMS)
def test_install_ftd_image_should_call_kp_module(self, asa5500x_mock, module_params):
ftd = FtdPlatformFactory.create(FtdModel.FTD_ASA5508_X, module_params)
ftd.install_ftd_image(module_params)
assert asa5500x_mock.called
assert asa5500x_mock.return_value.ssh_console.called
ftd_line = asa5500x_mock.return_value.ssh_console.return_value
assert ftd_line.rommon_to_new_image.called
assert ftd_line.disconnect.called
def test_install_ftd_image_should_call_disconnect_when_install_fails(self, asa5500x_mock, module_params):
ftd_line = asa5500x_mock.return_value.ssh_console.return_value
ftd_line.rommon_to_new_image.side_effect = Exception('Something went wrong')
ftd = FtdPlatformFactory.create(FtdModel.FTD_ASA5516_X, module_params)
with pytest.raises(Exception):
ftd.install_ftd_image(module_params)
assert ftd_line.rommon_to_new_image.called
assert ftd_line.disconnect.called
| {
"content_hash": "28d75386aa717d05601833be9a99d43f",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 109,
"avg_line_length": 42.94488188976378,
"alnum_prop": 0.7207554088742207,
"repo_name": "thaim/ansible",
"id": "3cbe0d61da727aa16177a82c2d2190c8d653a381",
"size": "6161",
"binary": false,
"copies": "17",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/module_utils/network/ftd/test_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings_content_gfk")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "135c2b7dca1757f33d32525c82d93e69",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.7112068965517241,
"repo_name": "Eksmo/django-tastypie",
"id": "5daae21df878568bd9b98e780dca7b1911957306",
"size": "254",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tests/manage_content_gfk.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "988"
},
{
"name": "Python",
"bytes": "817454"
},
{
"name": "Shell",
"bytes": "1162"
}
],
"symlink_target": ""
} |
import os
import sys
from fabric.api import env, run, cd
DEPLOYMENTS = {
'prod': {
'home': '/var/www/',
'host_string': '[email protected]',
'virtual_env': 'bamboo',
'repo_name': 'current',
'project': 'bamboo',
'docs': 'docs',
'branch': 'master',
'key_filename': os.path.expanduser('~/.ssh/modilabs.pem'),
'init_script': 'bamboo_uwsgi.sh',
'celeryd': 'celeryd',
}
}
def _run_in_virtualenv(command):
run('source ~/.virtualenvs/%s/bin/activate && %s' % (env.virtual_env,
command))
def _check_key_filename(deployment_name):
if 'key_filename' in DEPLOYMENTS[deployment_name] and \
not os.path.exists(DEPLOYMENTS[deployment_name]['key_filename']):
        print('Cannot find required permissions file: %s' %
              DEPLOYMENTS[deployment_name]['key_filename'])
return False
return True
def _setup_env(deployment_name):
env.update(DEPLOYMENTS[deployment_name])
if not _check_key_filename(deployment_name):
sys.exit(1)
env.project_directory = os.path.join(env.home, env.project)
env.code_src = os.path.join(env.project_directory, env.repo_name)
env.doc_src = os.path.join(env.project_directory, env.repo_name, env.docs)
env.pip_requirements_file = os.path.join(
env.code_src, 'deploy/requirements/requirements.pip')
def deploy(deployment_name):
_setup_env(deployment_name)
# update code
with cd(env.code_src):
run('git fetch origin %(branch)s' % env)
run('git reset --hard origin/%(branch)s' % env)
run('git pull origin %(branch)s' % env)
run('find . -name "*.pyc" -delete')
# update docs
with cd(env.doc_src):
_run_in_virtualenv('make html')
# install dependencies
_run_in_virtualenv('pip install -r %s' % env.pip_requirements_file)
# restart celery
with cd(env.code_src):
_run_in_virtualenv('../shared/%s restart' % env.celeryd)
# restart the server
with cd(env.code_src):
_run_in_virtualenv('./scripts/%s restart' % env.init_script)
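
# Hedged usage sketch (added for illustration): the tasks above are normally
# driven by the Fabric 1.x CLI, e.g. ``fab deploy:prod``, where 'prod' is a key
# of the DEPLOYMENTS dict defined above. Running this file directly only lists
# the configured targets and does not touch any remote host.
if __name__ == '__main__':
    for name, config in sorted(DEPLOYMENTS.items()):
        print('%s -> %s (project: %s)' % (name, config['host_string'], config['project']))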
| {
"content_hash": "9f46112f4ff7127917e423eddf12b908",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 31.04225352112676,
"alnum_prop": 0.5893829401088929,
"repo_name": "pld/bamboo",
"id": "76c1f2457d59c6e147c8f221fcd0c7a23ec109a5",
"size": "2204",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1355"
},
{
"name": "Python",
"bytes": "382794"
},
{
"name": "Shell",
"bytes": "19303"
}
],
"symlink_target": ""
} |
import os.path
# Test import of same modules from different packages
testname = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
print "Testing " + testname + " - %module(package=...) + python 'import' in __init__.py"
import pkg2.foo
print " Finished importing pkg2.foo"
var2 = pkg2.foo.Pkg2_Foo()
if str(type(var2)).find("'pkg2.foo.Pkg2_Foo'") == -1:
raise RuntimeError("failed type checking: " + str(type(var2)))
print " Successfully created object pkg2.foo.Pkg2_Foo"
| {
"content_hash": "34dc99c2d768f16cb453d2c0a2dba891",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 37.76923076923077,
"alnum_prop": 0.6985743380855397,
"repo_name": "DEKHTIARJonathan/BilletterieUTC",
"id": "2107597b3bf72827dda36b86157e144edb2e6587",
"size": "491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "badgingServer/Install/swigwin-3.0.7/Examples/python/import_packages/same_modnames1/runme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "505"
},
{
"name": "C",
"bytes": "1489570"
},
{
"name": "C#",
"bytes": "323243"
},
{
"name": "C++",
"bytes": "2646678"
},
{
"name": "CSS",
"bytes": "1309792"
},
{
"name": "Common Lisp",
"bytes": "13780"
},
{
"name": "D",
"bytes": "260374"
},
{
"name": "DIGITAL Command Language",
"bytes": "16078"
},
{
"name": "Forth",
"bytes": "2411"
},
{
"name": "Go",
"bytes": "95670"
},
{
"name": "Groff",
"bytes": "17548"
},
{
"name": "HTML",
"bytes": "8474268"
},
{
"name": "Java",
"bytes": "517584"
},
{
"name": "JavaScript",
"bytes": "1574272"
},
{
"name": "Limbo",
"bytes": "2902"
},
{
"name": "Lua",
"bytes": "103853"
},
{
"name": "M",
"bytes": "58261"
},
{
"name": "Makefile",
"bytes": "193313"
},
{
"name": "Mathematica",
"bytes": "113"
},
{
"name": "Matlab",
"bytes": "49071"
},
{
"name": "Mercury",
"bytes": "4136"
},
{
"name": "OCaml",
"bytes": "25948"
},
{
"name": "Objective-C",
"bytes": "9721"
},
{
"name": "PHP",
"bytes": "336290"
},
{
"name": "Perl",
"bytes": "140021"
},
{
"name": "Perl6",
"bytes": "6403"
},
{
"name": "Pike",
"bytes": "6601"
},
{
"name": "Python",
"bytes": "271706"
},
{
"name": "R",
"bytes": "6053"
},
{
"name": "Ruby",
"bytes": "129514"
},
{
"name": "SQLPL",
"bytes": "10237"
},
{
"name": "Scheme",
"bytes": "81765"
},
{
"name": "Scilab",
"bytes": "84725"
},
{
"name": "Shell",
"bytes": "86284"
},
{
"name": "Standard ML",
"bytes": "2587"
},
{
"name": "Tcl",
"bytes": "38028"
},
{
"name": "Yacc",
"bytes": "211262"
}
],
"symlink_target": ""
} |
"""
Tests and Confidence Intervals for Binomial Proportions
Created on Fri Mar 01 00:23:07 2013
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import lzip
from typing import Callable, Tuple
import numpy as np
import pandas as pd
from scipy import optimize, stats
from statsmodels.stats.base import AllPairsResults, HolderTuple
from statsmodels.stats.weightstats import _zstat_generic2
from statsmodels.tools.sm_exceptions import HypothesisTestWarning
from statsmodels.tools.testing import Holder
from statsmodels.tools.validation import array_like
FLOAT_INFO = np.finfo(float)
def _bound_proportion_confint(
func: Callable[[float], float], qi: float, lower: bool = True
) -> float:
"""
Try hard to find a bound different from eps/1 - eps in proportion_confint
Parameters
----------
func : callable
Callable function to use as the objective of the search
qi : float
The empirical success rate
lower : bool
        Whether to find a lower bound for the left side of the CI
Returns
-------
float
The coarse bound
"""
default = FLOAT_INFO.eps if lower else 1.0 - FLOAT_INFO.eps
def step(v):
return v / 8 if lower else v + (1.0 - v) / 8
x = step(qi)
w = func(x)
cnt = 1
while w > 0 and cnt < 10:
x = step(x)
w = func(x)
cnt += 1
return x if cnt < 10 else default
def _bisection_search_conservative(
func: Callable[[float], float], lb: float, ub: float, steps: int = 27
) -> Tuple[float, float]:
"""
Private function used as a fallback by proportion_confint
Used when brentq returns a non-conservative bound for the CI
Parameters
----------
func : callable
Callable function to use as the objective of the search
lb : float
Lower bound
ub : float
Upper bound
steps : int
Number of steps to use in the bisection
Returns
-------
est : float
The estimated value. Will always produce a negative value of func
func_val : float
The value of the function at the estimate
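
    Examples
    --------
    Illustrative doctest with a toy objective (added for exposition; any
    function with a sign change on [lb, ub] works, output not shown):

    >>> pt, val = _bisection_search_conservative(lambda x: x - 0.3, 0.0, 1.0)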
"""
upper = func(ub)
lower = func(lb)
best = upper if upper < 0 else lower
best_pt = ub if upper < 0 else lb
if np.sign(lower) == np.sign(upper):
raise ValueError("problem with signs")
mp = (ub + lb) / 2
mid = func(mp)
if (mid < 0) and (mid > best):
best = mid
best_pt = mp
for _ in range(steps):
if np.sign(mid) == np.sign(upper):
ub = mp
upper = mid
else:
lb = mp
mp = (ub + lb) / 2
mid = func(mp)
if (mid < 0) and (mid > best):
best = mid
best_pt = mp
return best_pt, best
def proportion_confint(count, nobs, alpha: float = 0.05, method="normal"):
"""
Confidence interval for a binomial proportion
Parameters
----------
count : {int, array_like}
number of successes, can be pandas Series or DataFrame. Arrays
must contain integer values.
nobs : {int, array_like}
total number of trials. Arrays must contain integer values.
alpha : float
Significance level, default 0.05. Must be in (0, 1)
method : {"normal", "agresti_coull", "beta", "wilson", "binom_test"}
default: "normal"
method to use for confidence interval. Supported methods:
- `normal` : asymptotic normal approximation
- `agresti_coull` : Agresti-Coull interval
- `beta` : Clopper-Pearson interval based on Beta distribution
- `wilson` : Wilson Score interval
- `jeffreys` : Jeffreys Bayesian Interval
- `binom_test` : Numerical inversion of binom_test
Returns
-------
    ci_low, ci_upp : {float, ndarray, Series, DataFrame}
lower and upper confidence level with coverage (approximately) 1-alpha.
When a pandas object is returned, then the index is taken from `count`.
Notes
-----
    Beta, the Clopper-Pearson exact interval, has coverage at least 1-alpha,
but is in general conservative. Most of the other methods have average
coverage equal to 1-alpha, but will have smaller coverage in some cases.
The "beta" and "jeffreys" interval are central, they use alpha/2 in each
tail, and alpha is not adjusted at the boundaries. In the extreme case
when `count` is zero or equal to `nobs`, then the coverage will be only
1 - alpha/2 in the case of "beta".
The confidence intervals are clipped to be in the [0, 1] interval in the
case of "normal" and "agresti_coull".
Method "binom_test" directly inverts the binomial test in scipy.stats.
which has discrete steps.
TODO: binom_test intervals raise an exception in small samples if one
interval bound is close to zero or one.
References
----------
.. [*] https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
.. [*] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion", Statistical
Science 16 (2): 101–133. doi:10.1214/ss/1009213286.
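
    Examples
    --------
    Illustrative calls with made-up counts (added for exposition; outputs are
    not shown here):

    >>> ci_low, ci_upp = proportion_confint(5, 83, alpha=0.05, method="wilson")
    >>> ci_low, ci_upp = proportion_confint([5, 12], [83, 99], method="beta")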
"""
is_scalar = np.isscalar(count) and np.isscalar(nobs)
is_pandas = isinstance(count, (pd.Series, pd.DataFrame))
count_a = array_like(count, "count", optional=False, ndim=None)
nobs_a = array_like(nobs, "nobs", optional=False, ndim=None)
def _check(x: np.ndarray, name: str) -> np.ndarray:
if np.issubdtype(x.dtype, np.integer):
return x
y = x.astype(np.int64, casting="unsafe")
if np.any(y != x):
raise ValueError(
f"{name} must have an integral dtype. Found data with "
f"dtype {x.dtype}"
)
return y
count_a = _check(np.asarray(count_a), "count")
    nobs_a = _check(np.asarray(nobs_a), "nobs")
q_ = count_a / nobs_a
alpha_2 = 0.5 * alpha
if method == "normal":
std_ = np.sqrt(q_ * (1 - q_) / nobs_a)
dist = stats.norm.isf(alpha / 2.0) * std_
ci_low = q_ - dist
ci_upp = q_ + dist
elif method == "binom_test":
# inverting the binomial test
def func_factory(count: int, nobs: int) -> Callable[[float], float]:
if hasattr(stats, "binomtest"):
def func(qi):
return stats.binomtest(count, nobs, p=qi).pvalue - alpha
else:
# Remove after min SciPy >= 1.7
def func(qi):
return stats.binom_test(count, nobs, p=qi) - alpha
return func
bcast = np.broadcast(count_a, nobs_a)
ci_low = np.zeros(bcast.shape)
ci_upp = np.zeros(bcast.shape)
index = bcast.index
for c, n in bcast:
# Enforce symmetry
reverse = False
_q = q_.flat[index]
if c > n // 2:
c = n - c
reverse = True
_q = 1 - _q
func = func_factory(c, n)
if c == 0:
ci_low.flat[index] = 0.0
else:
lower_bnd = _bound_proportion_confint(func, _q, lower=True)
val, _z = optimize.brentq(
func, lower_bnd, _q, full_output=True
)
if func(val) > 0:
power = 10
new_lb = val - (val - lower_bnd) / 2**power
while func(new_lb) > 0 and power >= 0:
power -= 1
new_lb = val - (val - lower_bnd) / 2**power
val, _ = _bisection_search_conservative(func, new_lb, _q)
ci_low.flat[index] = val
if c == n:
ci_upp.flat[index] = 1.0
else:
upper_bnd = _bound_proportion_confint(func, _q, lower=False)
val, _z = optimize.brentq(
func, _q, upper_bnd, full_output=True
)
if func(val) > 0:
power = 10
new_ub = val + (upper_bnd - val) / 2**power
while func(new_ub) > 0 and power >= 0:
power -= 1
                        new_ub = val + (upper_bnd - val) / 2**power
val, _ = _bisection_search_conservative(func, _q, new_ub)
ci_upp.flat[index] = val
if reverse:
temp = ci_upp.flat[index]
ci_upp.flat[index] = 1 - ci_low.flat[index]
ci_low.flat[index] = 1 - temp
index = bcast.index
elif method == "beta":
ci_low = stats.beta.ppf(alpha_2, count_a, nobs_a - count_a + 1)
ci_upp = stats.beta.isf(alpha_2, count_a + 1, nobs_a - count_a)
if np.ndim(ci_low) > 0:
ci_low.flat[q_.flat == 0] = 0
ci_upp.flat[q_.flat == 1] = 1
else:
ci_low = 0 if q_ == 0 else ci_low
ci_upp = 1 if q_ == 1 else ci_upp
elif method == "agresti_coull":
crit = stats.norm.isf(alpha / 2.0)
nobs_c = nobs_a + crit**2
q_c = (count_a + crit**2 / 2.0) / nobs_c
std_c = np.sqrt(q_c * (1.0 - q_c) / nobs_c)
dist = crit * std_c
ci_low = q_c - dist
ci_upp = q_c + dist
elif method == "wilson":
crit = stats.norm.isf(alpha / 2.0)
crit2 = crit**2
denom = 1 + crit2 / nobs_a
center = (q_ + crit2 / (2 * nobs_a)) / denom
dist = crit * np.sqrt(
q_ * (1.0 - q_) / nobs_a + crit2 / (4.0 * nobs_a**2)
)
dist /= denom
ci_low = center - dist
ci_upp = center + dist
# method adjusted to be more forgiving of misspellings or incorrect option name
elif method[:4] == "jeff":
ci_low, ci_upp = stats.beta.interval(
1 - alpha, count_a + 0.5, nobs_a - count_a + 0.5
)
else:
raise NotImplementedError(f"method {method} is not available")
if method in ["normal", "agresti_coull"]:
ci_low = np.clip(ci_low, 0, 1)
ci_upp = np.clip(ci_upp, 0, 1)
if is_pandas:
container = pd.Series if isinstance(count, pd.Series) else pd.DataFrame
ci_low = container(ci_low, index=count.index)
ci_upp = container(ci_upp, index=count.index)
if is_scalar:
return float(ci_low), float(ci_upp)
return ci_low, ci_upp
def multinomial_proportions_confint(counts, alpha=0.05, method='goodman'):
"""
Confidence intervals for multinomial proportions.
Parameters
----------
counts : array_like of int, 1-D
Number of observations in each category.
alpha : float in (0, 1), optional
Significance level, defaults to 0.05.
method : {'goodman', 'sison-glaz'}, optional
Method to use to compute the confidence intervals; available methods
are:
- `goodman`: based on a chi-squared approximation, valid if all
values in `counts` are greater or equal to 5 [2]_
- `sison-glaz`: less conservative than `goodman`, but only valid if
`counts` has 7 or more categories (``len(counts) >= 7``) [3]_
Returns
-------
confint : ndarray, 2-D
Array of [lower, upper] confidence levels for each category, such that
overall coverage is (approximately) `1-alpha`.
Raises
------
ValueError
If `alpha` is not in `(0, 1)` (bounds excluded), or if the values in
`counts` are not all positive or null.
NotImplementedError
        If `method` is not known.
Exception
When ``method == 'sison-glaz'``, if for some reason `c` cannot be
computed; this signals a bug and should be reported.
Notes
-----
The `goodman` method [2]_ is based on approximating a statistic based on
the multinomial as a chi-squared random variable. The usual recommendation
is that this is valid if all the values in `counts` are greater than or
equal to 5. There is no condition on the number of categories for this
method.
The `sison-glaz` method [3]_ approximates the multinomial probabilities,
and evaluates that with a maximum-likelihood estimator. The first
approximation is an Edgeworth expansion that converges when the number of
categories goes to infinity, and the maximum-likelihood estimator converges
when the number of observations (``sum(counts)``) goes to infinity. In
their paper, Sison & Glaz demo their method with at least 7 categories, so
``len(counts) >= 7`` with all values in `counts` at or above 5 can be used
as a rule of thumb for the validity of this method. This method is less
conservative than the `goodman` method (i.e. it will yield confidence
intervals closer to the desired significance level), but produces
confidence intervals of uniform width over all categories (except when the
intervals reach 0 or 1, in which case they are truncated), which makes it
most useful when proportions are of similar magnitude.
Aside from the original sources ([1]_, [2]_, and [3]_), the implementation
uses the formulas (though not the code) presented in [4]_ and [5]_.
References
----------
.. [1] Levin, Bruce, "A representation for multinomial cumulative
distribution functions," The Annals of Statistics, Vol. 9, No. 5,
1981, pp. 1123-1126.
.. [2] Goodman, L.A., "On simultaneous confidence intervals for multinomial
proportions," Technometrics, Vol. 7, No. 2, 1965, pp. 247-254.
.. [3] Sison, Cristina P., and Joseph Glaz, "Simultaneous Confidence
Intervals and Sample Size Determination for Multinomial
Proportions," Journal of the American Statistical Association,
Vol. 90, No. 429, 1995, pp. 366-369.
.. [4] May, Warren L., and William D. Johnson, "A SAS® macro for
constructing simultaneous confidence intervals for multinomial
proportions," Computer methods and programs in Biomedicine, Vol. 53,
No. 3, 1997, pp. 153-162.
.. [5] May, Warren L., and William D. Johnson, "Constructing two-sided
simultaneous confidence intervals for multinomial proportions for
small counts in a large number of cells," Journal of Statistical
Software, Vol. 5, No. 6, 2000, pp. 1-24.
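
    Examples
    --------
    Illustrative call with made-up category counts (added for exposition;
    output not shown):

    >>> counts = [20, 34, 45, 19, 28, 33, 41]
    >>> ci = multinomial_proportions_confint(counts, alpha=0.05, method='goodman')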
"""
if alpha <= 0 or alpha >= 1:
raise ValueError('alpha must be in (0, 1), bounds excluded')
counts = np.array(counts, dtype=float)
if (counts < 0).any():
raise ValueError('counts must be >= 0')
n = counts.sum()
k = len(counts)
proportions = counts / n
if method == 'goodman':
chi2 = stats.chi2.ppf(1 - alpha / k, 1)
delta = chi2 ** 2 + (4 * n * proportions * chi2 * (1 - proportions))
region = ((2 * n * proportions + chi2 +
np.array([- np.sqrt(delta), np.sqrt(delta)])) /
(2 * (chi2 + n))).T
elif method[:5] == 'sison': # We accept any name starting with 'sison'
# Define a few functions we'll use a lot.
def poisson_interval(interval, p):
"""
Compute P(b <= Z <= a) where Z ~ Poisson(p) and
`interval = (b, a)`.
"""
b, a = interval
prob = stats.poisson.cdf(a, p) - stats.poisson.cdf(b - 1, p)
return prob
def truncated_poisson_factorial_moment(interval, r, p):
"""
Compute mu_r, the r-th factorial moment of a poisson random
variable of parameter `p` truncated to `interval = (b, a)`.
"""
b, a = interval
return p ** r * (1 - ((poisson_interval((a - r + 1, a), p) -
poisson_interval((b - r, b - 1), p)) /
poisson_interval((b, a), p)))
def edgeworth(intervals):
"""
Compute the Edgeworth expansion term of Sison & Glaz's formula
(1) (approximated probability for multinomial proportions in a
given box).
"""
# Compute means and central moments of the truncated poisson
# variables.
mu_r1, mu_r2, mu_r3, mu_r4 = [
np.array([truncated_poisson_factorial_moment(interval, r, p)
for (interval, p) in zip(intervals, counts)])
for r in range(1, 5)
]
mu = mu_r1
mu2 = mu_r2 + mu - mu ** 2
mu3 = mu_r3 + mu_r2 * (3 - 3 * mu) + mu - 3 * mu ** 2 + 2 * mu ** 3
mu4 = (mu_r4 + mu_r3 * (6 - 4 * mu) +
mu_r2 * (7 - 12 * mu + 6 * mu ** 2) +
mu - 4 * mu ** 2 + 6 * mu ** 3 - 3 * mu ** 4)
# Compute expansion factors, gamma_1 and gamma_2.
g1 = mu3.sum() / mu2.sum() ** 1.5
g2 = (mu4.sum() - 3 * (mu2 ** 2).sum()) / mu2.sum() ** 2
# Compute the expansion itself.
x = (n - mu.sum()) / np.sqrt(mu2.sum())
phi = np.exp(- x ** 2 / 2) / np.sqrt(2 * np.pi)
H3 = x ** 3 - 3 * x
H4 = x ** 4 - 6 * x ** 2 + 3
H6 = x ** 6 - 15 * x ** 4 + 45 * x ** 2 - 15
f = phi * (1 + g1 * H3 / 6 + g2 * H4 / 24 + g1 ** 2 * H6 / 72)
return f / np.sqrt(mu2.sum())
def approximated_multinomial_interval(intervals):
"""
Compute approximated probability for Multinomial(n, proportions)
to be in `intervals` (Sison & Glaz's formula (1)).
"""
return np.exp(
np.sum(np.log([poisson_interval(interval, p)
for (interval, p) in zip(intervals, counts)])) +
np.log(edgeworth(intervals)) -
np.log(stats.poisson._pmf(n, n))
)
def nu(c):
"""
Compute interval coverage for a given `c` (Sison & Glaz's
formula (7)).
"""
return approximated_multinomial_interval(
[(np.maximum(count - c, 0), np.minimum(count + c, n))
for count in counts])
# Find the value of `c` that will give us the confidence intervals
        # (solving nu(c) <= 1 - alpha < nu(c + 1)).
c = 1.0
nuc = nu(c)
nucp1 = nu(c + 1)
while not (nuc <= (1 - alpha) < nucp1):
if c > n:
raise Exception("Couldn't find a value for `c` that "
"solves nu(c) <= 1 - alpha < nu(c + 1)")
c += 1
nuc = nucp1
nucp1 = nu(c + 1)
# Compute gamma and the corresponding confidence intervals.
g = (1 - alpha - nuc) / (nucp1 - nuc)
ci_lower = np.maximum(proportions - c / n, 0)
ci_upper = np.minimum(proportions + (c + 2 * g) / n, 1)
region = np.array([ci_lower, ci_upper]).T
else:
raise NotImplementedError('method "%s" is not available' % method)
return region
def samplesize_confint_proportion(proportion, half_length, alpha=0.05,
method='normal'):
"""
Find sample size to get desired confidence interval length
Parameters
----------
proportion : float in (0, 1)
proportion or quantile
half_length : float in (0, 1)
desired half length of the confidence interval
alpha : float in (0, 1)
significance level, default 0.05,
coverage of the two-sided interval is (approximately) ``1 - alpha``
method : str in ['normal']
method to use for confidence interval,
currently only normal approximation
Returns
-------
n : float
sample size to get the desired half length of the confidence interval
Notes
-----
this is mainly to store the formula.
possible application: number of replications in bootstrap samples
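
    Examples
    --------
    Illustrative call with made-up numbers (added for exposition; output not
    shown): required sample size for an anticipated proportion of 0.3 and a
    desired half-length of 0.05 at the default alpha:

    >>> n = samplesize_confint_proportion(0.3, 0.05)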
"""
q_ = proportion
if method == 'normal':
n = q_ * (1 - q_) / (half_length / stats.norm.isf(alpha / 2.))**2
else:
raise NotImplementedError('only "normal" is available')
return n
def proportion_effectsize(prop1, prop2, method='normal'):
"""
Effect size for a test comparing two proportions
for use in power function
Parameters
----------
prop1, prop2 : float or array_like
The proportion value(s).
Returns
-------
es : float or ndarray
effect size for (transformed) prop1 - prop2
Notes
-----
only method='normal' is implemented to match pwr.p2.test
see http://www.statmethods.net/stats/power.html
Effect size for `normal` is defined as ::
2 * (arcsin(sqrt(prop1)) - arcsin(sqrt(prop2)))
I think other conversions to normality can be used, but I need to check.
Examples
--------
>>> import statsmodels.api as sm
>>> sm.stats.proportion_effectsize(0.5, 0.4)
0.20135792079033088
>>> sm.stats.proportion_effectsize([0.3, 0.4, 0.5], 0.4)
array([-0.21015893, 0. , 0.20135792])
"""
if method != 'normal':
raise ValueError('only "normal" is implemented')
es = 2 * (np.arcsin(np.sqrt(prop1)) - np.arcsin(np.sqrt(prop2)))
return es
def std_prop(prop, nobs):
"""
Standard error for the estimate of a proportion
This is just ``np.sqrt(p * (1. - p) / nobs)``
Parameters
----------
prop : array_like
proportion
nobs : int, array_like
number of observations
Returns
-------
std : array_like
standard error for a proportion of nobs independent observations
"""
return np.sqrt(prop * (1. - prop) / nobs)
def _std_diff_prop(p1, p2, ratio=1):
return np.sqrt(p1 * (1 - p1) + p2 * (1 - p2) / ratio)
def _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt,
alpha=0.05, discrete=True, dist='norm', nobs=None,
continuity=0, critval_continuity=0):
"""
Generic statistical power function for normal based equivalence test
This includes options to adjust the normal approximation and can use
the binomial to evaluate the probability of the rejection region
see power_ztost_prob for a description of the options
"""
# TODO: refactor structure, separate norm and binom better
if not isinstance(continuity, tuple):
continuity = (continuity, continuity)
crit = stats.norm.isf(alpha)
k_low = mean_low + np.sqrt(var_low) * crit
k_upp = mean_upp - np.sqrt(var_upp) * crit
if discrete or dist == 'binom':
k_low = np.ceil(k_low * nobs + 0.5 * critval_continuity)
k_upp = np.trunc(k_upp * nobs - 0.5 * critval_continuity)
if dist == 'norm':
#need proportion
k_low = (k_low) * 1. / nobs #-1 to match PASS
k_upp = k_upp * 1. / nobs
# else:
# if dist == 'binom':
# #need counts
# k_low *= nobs
# k_upp *= nobs
#print mean_low, np.sqrt(var_low), crit, var_low
#print mean_upp, np.sqrt(var_upp), crit, var_upp
if np.any(k_low > k_upp): #vectorize
import warnings
warnings.warn("no overlap, power is zero", HypothesisTestWarning)
std_alt = np.sqrt(var_alt)
z_low = (k_low - mean_alt - continuity[0] * 0.5 / nobs) / std_alt
z_upp = (k_upp - mean_alt + continuity[1] * 0.5 / nobs) / std_alt
if dist == 'norm':
power = stats.norm.cdf(z_upp) - stats.norm.cdf(z_low)
elif dist == 'binom':
power = (stats.binom.cdf(k_upp, nobs, mean_alt) -
stats.binom.cdf(k_low-1, nobs, mean_alt))
return power, (k_low, k_upp, z_low, z_upp)
def binom_tost(count, nobs, low, upp):
"""
Exact TOST test for one proportion using binomial distribution
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
low, upp : floats
lower and upper limit of equivalence region
Returns
-------
pvalue : float
p-value of equivalence test
pval_low, pval_upp : floats
p-values of lower and upper one-sided tests
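
    Examples
    --------
    Illustrative equivalence test with made-up data (added for exposition;
    output not shown):

    >>> pval, pv_low, pv_upp = binom_tost(17, 50, low=0.25, upp=0.45)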
"""
# binom_test_stat only returns pval
tt1 = binom_test(count, nobs, alternative='larger', prop=low)
tt2 = binom_test(count, nobs, alternative='smaller', prop=upp)
return np.maximum(tt1, tt2), tt1, tt2,
def binom_tost_reject_interval(low, upp, nobs, alpha=0.05):
"""
Rejection region for binomial TOST
The interval includes the end points,
`reject` if and only if `r_low <= x <= r_upp`.
The interval might be empty with `r_upp < r_low`.
Parameters
----------
low, upp : floats
lower and upper limit of equivalence region
nobs : int
the number of trials or observations.
Returns
-------
x_low, x_upp : float
lower and upper bound of rejection region
"""
x_low = stats.binom.isf(alpha, nobs, low) + 1
x_upp = stats.binom.ppf(alpha, nobs, upp) - 1
return x_low, x_upp
def binom_test_reject_interval(value, nobs, alpha=0.05, alternative='two-sided'):
"""
Rejection region for binomial test for one sample proportion
The interval includes the end points of the rejection region.
Parameters
----------
value : float
proportion under the Null hypothesis
nobs : int
the number of trials or observations.
Returns
-------
x_low, x_upp : int
lower and upper bound of rejection region
"""
if alternative in ['2s', 'two-sided']:
alternative = '2s' # normalize alternative name
alpha = alpha / 2
if alternative in ['2s', 'smaller']:
x_low = stats.binom.ppf(alpha, nobs, value) - 1
else:
x_low = 0
if alternative in ['2s', 'larger']:
x_upp = stats.binom.isf(alpha, nobs, value) + 1
    else:
x_upp = nobs
return int(x_low), int(x_upp)
def binom_test(count, nobs, prop=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
prop : float, optional
The probability of success under the null hypothesis,
`0 <= prop <= 1`. The default value is `prop = 0.5`
alternative : str in ['two-sided', 'smaller', 'larger']
alternative hypothesis, which can be two-sided or either one of the
one-sided tests.
Returns
-------
p-value : float
The p-value of the hypothesis test
Notes
-----
This uses scipy.stats.binom_test for the two-sided alternative.
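
    Examples
    --------
    Illustrative one-sided test with made-up counts (added for exposition;
    output not shown):

    >>> pval = binom_test(45, 100, prop=0.5, alternative='larger')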
"""
if np.any(prop > 1.0) or np.any(prop < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative in ['2s', 'two-sided']:
try:
pval = stats.binomtest(count, n=nobs, p=prop).pvalue
except AttributeError:
# Remove after min SciPy >= 1.7
pval = stats.binom_test(count, n=nobs, p=prop)
elif alternative in ['l', 'larger']:
pval = stats.binom.sf(count-1, nobs, prop)
elif alternative in ['s', 'smaller']:
pval = stats.binom.cdf(count, nobs, prop)
else:
raise ValueError('alternative not recognized\n'
'should be two-sided, larger or smaller')
return pval
def power_binom_tost(low, upp, nobs, p_alt=None, alpha=0.05):
if p_alt is None:
p_alt = 0.5 * (low + upp)
x_low, x_upp = binom_tost_reject_interval(low, upp, nobs, alpha=alpha)
power = (stats.binom.cdf(x_upp, nobs, p_alt) -
stats.binom.cdf(x_low-1, nobs, p_alt))
return power
def power_ztost_prop(low, upp, nobs, p_alt, alpha=0.05, dist='norm',
variance_prop=None, discrete=True, continuity=0,
critval_continuity=0):
"""
Power of proportions equivalence test based on normal distribution
Parameters
----------
low, upp : floats
lower and upper limit of equivalence region
nobs : int
number of observations
p_alt : float in (0,1)
proportion under the alternative
alpha : float in (0,1)
significance level of the test
dist : str in ['norm', 'binom']
This defines the distribution to evaluate the power of the test. The
critical values of the TOST test are always based on the normal
approximation, but the distribution for the power can be either the
normal (default) or the binomial (exact) distribution.
variance_prop : None or float in (0,1)
If this is None, then the variances for the two one sided tests are
based on the proportions equal to the equivalence limits.
If variance_prop is given, then it is used to calculate the variance
        for the TOST statistics. If this is based on a sample, then the
estimated proportion can be used.
discrete : bool
If true, then the critical values of the rejection region are converted
to integers. If dist is "binom", this is automatically assumed.
If discrete is false, then the TOST critical values are used as
floating point numbers, and the power is calculated based on the
rejection region that is not discretized.
continuity : bool or float
        adjust the rejection region for the normal power probability. This has
        an effect only if ``dist='norm'``
critval_continuity : bool or float
If this is non-zero, then the critical values of the tost rejection
region are adjusted before converting to integers. This affects both
distributions, ``dist='norm'`` and ``dist='binom'``.
Returns
-------
power : float
statistical power of the equivalence test.
(k_low, k_upp, z_low, z_upp) : tuple of floats
critical limits in intermediate steps
temporary return, will be changed
Notes
-----
    In small samples the power for the ``discrete`` version has a sawtooth
pattern as a function of the number of observations. As a consequence,
small changes in the number of observations or in the normal approximation
can have a large effect on the power.
``continuity`` and ``critval_continuity`` are added to match some results
of PASS, and are mainly to investigate the sensitivity of the ztost power
to small changes in the rejection region. From my interpretation of the
equations in the SAS manual, both are zero in SAS.
works vectorized
**verification:**
The ``dist='binom'`` results match PASS,
The ``dist='norm'`` results look reasonable, but no benchmark is available.
References
----------
SAS Manual: Chapter 68: The Power Procedure, Computational Resources
PASS Chapter 110: Equivalence Tests for One Proportion.
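
    Examples
    --------
    Illustrative call with made-up equivalence margins (added for exposition;
    output not shown):

    >>> power, crit = power_ztost_prop(0.4, 0.6, nobs=100, p_alt=0.5, dist='binom')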
"""
mean_low = low
var_low = std_prop(low, nobs)**2
mean_upp = upp
var_upp = std_prop(upp, nobs)**2
mean_alt = p_alt
var_alt = std_prop(p_alt, nobs)**2
if variance_prop is not None:
var_low = var_upp = std_prop(variance_prop, nobs)**2
power = _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt,
alpha=alpha, discrete=discrete, dist=dist, nobs=nobs,
continuity=continuity, critval_continuity=critval_continuity)
return np.maximum(power[0], 0), power[1:]
def _table_proportion(count, nobs):
"""
Create a k by 2 contingency table for proportion
helper function for proportions_chisquare
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
Returns
-------
    table : ndarray
        (k, 2) contingency table of success and failure counts
    expected : ndarray
        (k, 2) table of expected counts under independence with given margins
    n_rows : int
        number of rows, k, in the table
Notes
-----
recent scipy has more elaborate contingency table functions
"""
count = np.asarray(count)
dt = np.promote_types(count.dtype, np.float64)
count = np.asarray(count, dtype=dt)
table = np.column_stack((count, nobs - count))
expected = table.sum(0) * table.sum(1)[:, None] * 1. / table.sum()
n_rows = table.shape[0]
return table, expected, n_rows
def proportions_ztest(count, nobs, value=None, alternative='two-sided',
prop_var=False):
"""
Test for proportions based on normal (z) test
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials. If this is array_like, then
the assumption is that this represents the number of successes for
each independent sample
nobs : {int, array_like}
the number of trials or observations, with the same length as
count.
value : float, array_like or None, optional
This is the value of the null hypothesis equal to the proportion in the
case of a one sample test. In the case of a two-sample test, the
null hypothesis is that prop[0] - prop[1] = value, where prop is the
proportion in the two samples. If not provided value = 0 and the null
is prop[0] = prop[1]
alternative : str in ['two-sided', 'smaller', 'larger']
The alternative hypothesis can be either two-sided or one of the one-
sided tests, smaller means that the alternative hypothesis is
``prop < value`` and larger means ``prop > value``. In the two sample
test, smaller means that the alternative hypothesis is ``p1 < p2`` and
larger means ``p1 > p2`` where ``p1`` is the proportion of the first
sample and ``p2`` of the second one.
prop_var : False or float in (0, 1)
If prop_var is false, then the variance of the proportion estimate is
calculated based on the sample proportion. Alternatively, a proportion
can be specified to calculate this variance. Common use case is to
use the proportion under the Null hypothesis to specify the variance
of the proportion estimate.
Returns
-------
zstat : float
test statistic for the z-test
p-value : float
p-value for the z-test
Examples
--------
>>> count = 5
>>> nobs = 83
>>> value = .05
>>> stat, pval = proportions_ztest(count, nobs, value)
>>> print('{0:0.3f}'.format(pval))
0.695
>>> import numpy as np
>>> from statsmodels.stats.proportion import proportions_ztest
>>> count = np.array([5, 12])
>>> nobs = np.array([83, 99])
>>> stat, pval = proportions_ztest(count, nobs)
>>> print('{0:0.3f}'.format(pval))
0.159
Notes
-----
This uses a simple normal test for proportions. It should be the same as
running the mean z-test on the data encoded 1 for event and 0 for no event
so that the sum corresponds to the count.
In the one and two sample cases with two-sided alternative, this test
produces the same p-value as ``proportions_chisquare``, since the
chisquare is the distribution of the square of a standard normal
distribution.
"""
# TODO: verify that this really holds
# TODO: add continuity correction or other improvements for small samples
    # TODO: change options similar to proportions_ztost ?
count = np.asarray(count)
nobs = np.asarray(nobs)
if nobs.size == 1:
nobs = nobs * np.ones_like(count)
prop = count * 1. / nobs
k_sample = np.size(prop)
if value is None:
if k_sample == 1:
raise ValueError('value must be provided for a 1-sample test')
value = 0
if k_sample == 1:
diff = prop - value
elif k_sample == 2:
diff = prop[0] - prop[1] - value
else:
msg = 'more than two samples are not implemented yet'
raise NotImplementedError(msg)
p_pooled = np.sum(count) * 1. / np.sum(nobs)
nobs_fact = np.sum(1. / nobs)
if prop_var:
p_pooled = prop_var
var_ = p_pooled * (1 - p_pooled) * nobs_fact
std_diff = np.sqrt(var_)
from statsmodels.stats.weightstats import _zstat_generic2
return _zstat_generic2(diff, std_diff, alternative)
def proportions_ztost(count, nobs, low, upp, prop_var='sample'):
"""
Equivalence test based on normal distribution
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials. If this is array_like, then
the assumption is that this represents the number of successes for
each independent sample
nobs : int
the number of trials or observations, with the same length as
count.
low, upp : float
equivalence interval low < prop1 - prop2 < upp
prop_var : str or float in (0, 1)
prop_var determines which proportion is used for the calculation
of the standard deviation of the proportion estimate
The available options for string are 'sample' (default), 'null' and
'limits'. If prop_var is a float, then it is used directly.
Returns
-------
pvalue : float
pvalue of the non-equivalence test
t1, pv1 : tuple of floats
test statistic and pvalue for lower threshold test
t2, pv2 : tuple of floats
test statistic and pvalue for upper threshold test
Notes
-----
checked only for 1 sample case
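
    Examples
    --------
    Illustrative one-sample equivalence test with made-up counts (added for
    exposition; output not shown):

    >>> pval, t1, t2 = proportions_ztost(38, 100, low=0.3, upp=0.5)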
"""
if prop_var == 'limits':
prop_var_low = low
prop_var_upp = upp
elif prop_var == 'sample':
prop_var_low = prop_var_upp = False #ztest uses sample
elif prop_var == 'null':
prop_var_low = prop_var_upp = 0.5 * (low + upp)
elif np.isreal(prop_var):
prop_var_low = prop_var_upp = prop_var
tt1 = proportions_ztest(count, nobs, alternative='larger',
prop_var=prop_var_low, value=low)
tt2 = proportions_ztest(count, nobs, alternative='smaller',
prop_var=prop_var_upp, value=upp)
return np.maximum(tt1[1], tt2[1]), tt1, tt2,
def proportions_chisquare(count, nobs, value=None):
"""
Test for proportions based on chisquare test
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials. If this is array_like, then
the assumption is that this represents the number of successes for
each independent sample
nobs : int
the number of trials or observations, with the same length as
count.
value : None or float or array_like
Returns
-------
chi2stat : float
test statistic for the chisquare test
p-value : float
p-value for the chisquare test
(table, expected)
table is a (k, 2) contingency table, ``expected`` is the corresponding
table of counts that are expected under independence with given
margins
Notes
-----
Recent version of scipy.stats have a chisquare test for independence in
contingency tables.
This function provides a similar interface to chisquare tests as
``prop.test`` in R, however without the option for Yates continuity
correction.
count can be the count for the number of events for a single proportion,
or the counts for several independent proportions. If value is given, then
all proportions are jointly tested against this value. If value is not
given and count and nobs are not scalar, then the null hypothesis is
that all samples have the same proportion.
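
    Examples
    --------
    Illustrative two-sample test with made-up counts (added for exposition;
    output not shown):

    >>> chi2stat, pval, tables = proportions_chisquare([16, 18], [100, 110])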
"""
nobs = np.atleast_1d(nobs)
table, expected, n_rows = _table_proportion(count, nobs)
if value is not None:
expected = np.column_stack((nobs * value, nobs * (1 - value)))
ddof = n_rows - 1
else:
ddof = n_rows
#print table, expected
chi2stat, pval = stats.chisquare(table.ravel(), expected.ravel(),
ddof=ddof)
return chi2stat, pval, (table, expected)
def proportions_chisquare_allpairs(count, nobs, multitest_method='hs'):
"""
Chisquare test of proportions for all pairs of k samples
Performs a chisquare test for proportions for all pairwise comparisons.
The alternative is two-sided
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
multitest_method : str
This chooses the method for the multiple testing p-value correction,
that is used as default in the results.
It can be any method that is available in ``multipletesting``.
The default is Holm-Sidak 'hs'.
Returns
-------
result : AllPairsResults instance
The returned results instance has several statistics, such as p-values,
attached, and additional methods for using a non-default
``multitest_method``.
Notes
-----
Yates continuity correction is not available.
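
    Examples
    --------
    Illustrative all-pairs comparison with made-up arrays (added for
    exposition; output not shown):

    >>> import numpy as np
    >>> res = proportions_chisquare_allpairs(np.array([16, 18, 22]),
    ...                                      np.array([100, 110, 95]))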
"""
#all_pairs = lmap(list, lzip(*np.triu_indices(4, 1)))
all_pairs = lzip(*np.triu_indices(len(count), 1))
pvals = [proportions_chisquare(count[list(pair)], nobs[list(pair)])[1]
for pair in all_pairs]
return AllPairsResults(pvals, all_pairs, multitest_method=multitest_method)
def proportions_chisquare_pairscontrol(count, nobs, value=None,
multitest_method='hs', alternative='two-sided'):
"""
Chisquare test of proportions for pairs of k samples compared to control
Performs a chisquare test for proportions for pairwise comparisons with a
control (Dunnet's test). The control is assumed to be the first element
of ``count`` and ``nobs``. The alternative is two-sided, larger or
smaller.
Parameters
----------
count : {int, array_like}
the number of successes in nobs trials.
nobs : int
the number of trials or observations.
multitest_method : str
This chooses the method for the multiple testing p-value correction,
that is used as default in the results.
It can be any method that is available in ``multipletesting``.
The default is Holm-Sidak 'hs'.
alternative : str in ['two-sided', 'smaller', 'larger']
alternative hypothesis, which can be two-sided or either one of the
one-sided tests.
Returns
-------
result : AllPairsResults instance
The returned results instance has several statistics, such as p-values,
attached, and additional methods for using a non-default
``multitest_method``.
Notes
-----
Yates continuity correction is not available.
``value`` and ``alternative`` options are not yet implemented.
"""
if (value is not None) or (alternative not in ['two-sided', '2s']):
raise NotImplementedError
#all_pairs = lmap(list, lzip(*np.triu_indices(4, 1)))
all_pairs = [(0, k) for k in range(1, len(count))]
pvals = [proportions_chisquare(count[list(pair)], nobs[list(pair)],
#alternative=alternative)[1]
)[1]
for pair in all_pairs]
return AllPairsResults(pvals, all_pairs, multitest_method=multitest_method)
def confint_proportions_2indep(count1, nobs1, count2, nobs2, method=None,
compare='diff', alpha=0.05, correction=True):
"""
Confidence intervals for comparing two independent proportions.
This assumes that we have two independent binomial samples.
Parameters
----------
count1, nobs1 : float
Count and sample size for first sample.
count2, nobs2 : float
Count and sample size for the second sample.
method : str
Method for computing confidence interval. If method is None, then a
default method is used. The default might change as more methods are
added.
diff:
- 'wald',
- 'agresti-caffo'
- 'newcomb' (default)
- 'score'
ratio:
- 'log'
- 'log-adjusted' (default)
- 'score'
odds-ratio:
- 'logit'
- 'logit-adjusted' (default)
- 'score'
    compare : string in ['diff', 'ratio', 'odds-ratio']
If compare is diff, then the confidence interval is for diff = p1 - p2.
If compare is ratio, then the confidence interval is for the risk ratio
defined by ratio = p1 / p2.
If compare is odds-ratio, then the confidence interval is for the
        odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2)).
alpha : float
Significance level for the confidence interval, default is 0.05.
The nominal coverage probability is 1 - alpha.
Returns
-------
low, upp
Notes
-----
Status: experimental, API and defaults might still change.
more ``methods`` will be added.
References
----------
.. [1] Fagerland, Morten W., Stian Lydersen, and Petter Laake. 2015.
“Recommended Confidence Intervals for Two Independent Binomial
Proportions.” Statistical Methods in Medical Research 24 (2): 224–54.
https://doi.org/10.1177/0962280211415469.
.. [2] Koopman, P. A. R. 1984. “Confidence Intervals for the Ratio of Two
Binomial Proportions.” Biometrics 40 (2): 513–17.
https://doi.org/10.2307/2531405.
.. [3] Miettinen, Olli, and Markku Nurminen. "Comparative analysis of two
rates." Statistics in medicine 4, no. 2 (1985): 213-226.
.. [4] Newcombe, Robert G. 1998. “Interval Estimation for the Difference
between Independent Proportions: Comparison of Eleven Methods.”
Statistics in Medicine 17 (8): 873–90.
https://doi.org/10.1002/(SICI)1097-0258(19980430)17:8<873::AID-
SIM779>3.0.CO;2-I.
.. [5] Newcombe, Robert G., and Markku M. Nurminen. 2011. “In Defence of
Score Intervals for Proportions and Their Differences.” Communications
in Statistics - Theory and Methods 40 (7): 1271–82.
https://doi.org/10.1080/03610920903576580.
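
    Examples
    --------
    Illustrative interval for a difference in proportions with made-up counts
    (added for exposition; output not shown):

    >>> low, upp = confint_proportions_2indep(7, 34, 1, 34, compare='diff')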
"""
method_default = {'diff': 'newcomb',
'ratio': 'log-adjusted',
'odds-ratio': 'logit-adjusted'}
# normalize compare name
if compare.lower() == 'or':
compare = 'odds-ratio'
if method is None:
method = method_default[compare]
method = method.lower()
if method.startswith('agr'):
method = 'agresti-caffo'
p1 = count1 / nobs1
p2 = count2 / nobs2
diff = p1 - p2
addone = 1 if method == 'agresti-caffo' else 0
if compare == 'diff':
if method in ['wald', 'agresti-caffo']:
count1_, nobs1_ = count1 + addone, nobs1 + 2 * addone
count2_, nobs2_ = count2 + addone, nobs2 + 2 * addone
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
diff_ = p1_ - p2_
var = p1_ * (1 - p1_) / nobs1_ + p2_ * (1 - p2_) / nobs2_
z = stats.norm.isf(alpha / 2)
d_wald = z * np.sqrt(var)
low = diff_ - d_wald
upp = diff_ + d_wald
elif method.startswith('newcomb'):
low1, upp1 = proportion_confint(count1, nobs1,
method='wilson', alpha=alpha)
low2, upp2 = proportion_confint(count2, nobs2,
method='wilson', alpha=alpha)
d_low = np.sqrt((p1 - low1)**2 + (upp2 - p2)**2)
d_upp = np.sqrt((p2 - low2)**2 + (upp1 - p1)**2)
low = diff - d_low
upp = diff + d_upp
elif method == "score":
low, upp = _score_confint_inversion(count1, nobs1, count2, nobs2,
compare=compare, alpha=alpha,
correction=correction)
else:
raise ValueError('method not recognized')
elif compare == 'ratio':
# ratio = p1 / p2
if method in ['log', 'log-adjusted']:
addhalf = 0.5 if method == 'log-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
ratio_ = p1_ / p2_
var = (1 / count1_) - 1 / nobs1_ + 1 / count2_ - 1 / nobs2_
z = stats.norm.isf(alpha / 2)
d_log = z * np.sqrt(var)
low = np.exp(np.log(ratio_) - d_log)
upp = np.exp(np.log(ratio_) + d_log)
elif method == 'score':
res = _confint_riskratio_koopman(count1, nobs1, count2, nobs2,
alpha=alpha,
correction=correction)
low, upp = res.confint
else:
raise ValueError('method not recognized')
elif compare == 'odds-ratio':
# odds_ratio = p1 / (1 - p1) / p2 * (1 - p2)
if method in ['logit', 'logit-adjusted', 'logit-smoothed']:
if method in ['logit-smoothed']:
adjusted = _shrink_prob(count1, nobs1, count2, nobs2,
shrink_factor=2, return_corr=False)[0]
count1_, nobs1_, count2_, nobs2_ = adjusted
else:
addhalf = 0.5 if method == 'logit-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + 2 * addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + 2 * addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
odds_ratio_ = p1_ / (1 - p1_) / p2_ * (1 - p2_)
var = (1 / count1_ + 1 / (nobs1_ - count1_) +
1 / count2_ + 1 / (nobs2_ - count2_))
z = stats.norm.isf(alpha / 2)
d_log = z * np.sqrt(var)
low = np.exp(np.log(odds_ratio_) - d_log)
upp = np.exp(np.log(odds_ratio_) + d_log)
elif method == "score":
low, upp = _score_confint_inversion(count1, nobs1, count2, nobs2,
compare=compare, alpha=alpha,
correction=correction)
else:
raise ValueError('method not recognized')
else:
raise ValueError('compare not recognized')
return low, upp
def _shrink_prob(count1, nobs1, count2, nobs2, shrink_factor=2,
return_corr=True):
"""
Shrink observed counts towards independence
Helper function for 'logit-smoothed' inference for the odds-ratio of two
independent proportions.
Parameters
----------
count1, nobs1 : float or int
count and sample size for first sample
count2, nobs2 : float or int
count and sample size for the second sample
shrink_factor : float
This corresponds to the number of observations that are added in total
proportional to the probabilities under independence.
return_corr : bool
If true, then only the correction term is returned
If false, then the corrected counts, i.e. original counts plus
correction term, are returned.
Returns
-------
count1_corr, nobs1_corr, count2_corr, nobs2_corr : float
correction or corrected counts
prob_indep :
TODO/Warning : this will change most likely
probabilities under independence, only returned if return_corr is
false.
"""
vectorized = any(np.size(i) > 1 for i in [count1, nobs1, count2, nobs2])
if vectorized:
raise ValueError("function is not vectorized")
nobs_col = np.array([count1 + count2, nobs1 - count1 + nobs2 - count2])
nobs_row = np.array([nobs1, nobs2])
nobs = nobs1 + nobs2
prob_indep = (nobs_col * nobs_row[:, None]) / nobs**2
corr = shrink_factor * prob_indep
if return_corr:
return (corr[0, 0], corr[0].sum(), corr[1, 0], corr[1].sum())
else:
return (count1 + corr[0, 0], nobs1 + corr[0].sum(),
count2 + corr[1, 0], nobs2 + corr[1].sum()), prob_indep
def score_test_proportions_2indep(count1, nobs1, count2, nobs2, value=None,
compare='diff', alternative='two-sided',
correction=True, return_results=True):
"""
Score test for two independent proportions
This uses the constrained estimate of the proportions to compute
the variance under the Null hypothesis.
Parameters
----------
count1, nobs1 :
count and sample size for first sample
count2, nobs2 :
count and sample size for the second sample
value : float
diff, ratio or odds-ratio under the null hypothesis. If value is None,
then equality of proportions under the Null is assumed,
i.e. value=0 for 'diff' or value=1 for either rate or odds-ratio.
    compare : string in ['diff', 'ratio', 'odds-ratio']
If compare is diff, then the confidence interval is for diff = p1 - p2.
If compare is ratio, then the confidence interval is for the risk ratio
defined by ratio = p1 / p2.
If compare is odds-ratio, then the confidence interval is for the
        odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2))
return_results : bool
If true, then a results instance with extra information is returned,
otherwise a tuple with statistic and pvalue is returned.
Returns
-------
results : results instance or tuple
If return_results is True, then a results instance with the
information in attributes is returned.
If return_results is False, then only ``statistic`` and ``pvalue``
are returned.
statistic : float
test statistic asymptotically normal distributed N(0, 1)
pvalue : float
p-value based on normal distribution
other attributes :
additional information about the hypothesis test
Notes
-----
Status: experimental, the type or extra information in the return might
change.
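
    Examples
    --------
    Illustrative score test with made-up counts (added for exposition; output
    not shown):

    >>> res = score_test_proportions_2indep(7, 34, 1, 34, compare='diff')
    >>> stat, pval = res.statistic, res.pvalue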
"""
value_default = 0 if compare == 'diff' else 1
if value is None:
# TODO: odds ratio does not work if value=1
value = value_default
nobs = nobs1 + nobs2
count = count1 + count2
p1 = count1 / nobs1
p2 = count2 / nobs2
if value == value_default:
# use pooled estimator if equality test
# shortcut, but required for odds ratio
prop0 = prop1 = count / nobs
    # this uses index 0 from Miettinen Nurminen 1985
count0, nobs0 = count2, nobs2
p0 = p2
if compare == 'diff':
diff = value # hypothesis value
if diff != 0:
tmp3 = nobs
tmp2 = (nobs1 + 2 * nobs0) * diff - nobs - count
tmp1 = (count0 * diff - nobs - 2 * count0) * diff + count
tmp0 = count0 * diff * (1 - diff)
q = ((tmp2 / (3 * tmp3))**3 - tmp1 * tmp2 / (6 * tmp3**2) +
tmp0 / (2 * tmp3))
p = np.sign(q) * np.sqrt((tmp2 / (3 * tmp3))**2 -
tmp1 / (3 * tmp3))
a = (np.pi + np.arccos(q / p**3)) / 3
prop0 = 2 * p * np.cos(a) - tmp2 / (3 * tmp3)
prop1 = prop0 + diff
correction = True
var = prop1 * (1 - prop1) / nobs1 + prop0 * (1 - prop0) / nobs0
if correction:
var *= nobs / (nobs - 1)
diff_stat = (p1 - p0 - diff)
elif compare == 'ratio':
# risk ratio
ratio = value
if ratio != 1:
a = nobs * ratio
b = -(nobs1 * ratio + count1 + nobs2 + count0 * ratio)
c = count
prop0 = (-b - np.sqrt(b**2 - 4 * a * c)) / (2 * a)
prop1 = prop0 * ratio
var = (prop1 * (1 - prop1) / nobs1 +
ratio**2 * prop0 * (1 - prop0) / nobs0)
if correction:
var *= nobs / (nobs - 1)
# NCSS looks incorrect for var, but it is what should be reported
# diff_stat = (p1 / p0 - ratio) # NCSS/PASS
diff_stat = (p1 - ratio * p0) # Miettinen Nurminen
elif compare in ['or', 'odds-ratio']:
# odds ratio
oratio = value
if oratio != 1:
# Note the constraint estimator does not handle odds-ratio = 1
a = nobs0 * (oratio - 1)
b = nobs1 * oratio + nobs0 - count * (oratio - 1)
c = -count
prop0 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)
prop1 = prop0 * oratio / (1 + prop0 * (oratio - 1))
# try to avoid 0 and 1 proportions,
# those raise Zero Division Runtime Warnings
eps = 1e-10
prop0 = np.clip(prop0, eps, 1 - eps)
prop1 = np.clip(prop1, eps, 1 - eps)
var = (1 / (prop1 * (1 - prop1) * nobs1) +
1 / (prop0 * (1 - prop0) * nobs0))
if correction:
var *= nobs / (nobs - 1)
diff_stat = ((p1 - prop1) / (prop1 * (1 - prop1)) -
(p0 - prop0) / (prop0 * (1 - prop0)))
statistic, pvalue = _zstat_generic2(diff_stat, np.sqrt(var),
alternative=alternative)
if return_results:
res = HolderTuple(statistic=statistic,
pvalue=pvalue,
compare=compare,
method='score',
variance=var,
alternative=alternative,
prop1_null=prop1,
prop2_null=prop0,
)
return res
else:
return statistic, pvalue
def test_proportions_2indep(count1, nobs1, count2, nobs2, value=None,
method=None, compare='diff',
alternative='two-sided', correction=True,
return_results=True):
"""
Hypothesis test for comparing two independent proportions
This assumes that we have two independent binomial samples.
The Null and alternative hypothesis are
for compare = 'diff'
- H0: prop1 - prop2 - value = 0
- H1: prop1 - prop2 - value != 0 if alternative = 'two-sided'
- H1: prop1 - prop2 - value > 0 if alternative = 'larger'
- H1: prop1 - prop2 - value < 0 if alternative = 'smaller'
for compare = 'ratio'
- H0: prop1 / prop2 - value = 0
- H1: prop1 / prop2 - value != 0 if alternative = 'two-sided'
- H1: prop1 / prop2 - value > 0 if alternative = 'larger'
- H1: prop1 / prop2 - value < 0 if alternative = 'smaller'
for compare = 'odds-ratio'
- H0: or - value = 0
- H1: or - value != 0 if alternative = 'two-sided'
- H1: or - value > 0 if alternative = 'larger'
- H1: or - value < 0 if alternative = 'smaller'
where odds-ratio or = prop1 / (1 - prop1) / (prop2 / (1 - prop2))
Parameters
----------
count1 : int
Count for first sample.
nobs1 : int
Sample size for first sample.
count2 : int
Count for the second sample.
nobs2 : int
Sample size for the second sample.
value : float
Value of the difference, risk ratio or odds ratio of 2 independent
proportions under the null hypothesis.
Default is equal proportions, 0 for diff and 1 for risk-ratio and for
odds-ratio.
method : string
Method for computing confidence interval. If method is None, then a
default method is used. The default might change as more methods are
added.
diff:
- 'wald',
- 'agresti-caffo'
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
ratio:
- 'log': wald test using log transformation
- 'log-adjusted': wald test using log transformation,
adds 0.5 to counts
- 'score': if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
odds-ratio:
- 'logit': wald test using logit transformation
- 'logit-adjusted': wald test using logit transformation,
adds 0.5 to counts
- 'logit-smoothed': wald test using logit transformation, biases
cell counts towards independence by adding two observations in
total.
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
    compare : {'diff', 'ratio', 'odds-ratio'}
If compare is `diff`, then the confidence interval is for
diff = p1 - p2.
If compare is `ratio`, then the confidence interval is for the
risk ratio defined by ratio = p1 / p2.
If compare is `odds-ratio`, then the confidence interval is for the
        odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2))
alternative : {'two-sided', 'smaller', 'larger'}
alternative hypothesis, which can be two-sided or either one of the
one-sided tests.
correction : bool
If correction is True (default), then the Miettinen and Nurminen
small sample correction to the variance nobs / (nobs - 1) is used.
Applies only if method='score'.
return_results : bool
If true, then a results instance with extra information is returned,
otherwise a tuple with statistic and pvalue is returned.
Returns
-------
results : results instance or tuple
If return_results is True, then a results instance with the
information in attributes is returned.
If return_results is False, then only ``statistic`` and ``pvalue``
are returned.
statistic : float
        test statistic, asymptotically normally distributed as N(0, 1)
pvalue : float
p-value based on normal distribution
other attributes :
additional information about the hypothesis test
Notes
-----
Status: experimental, API and defaults might still change.
More ``methods`` will be added.
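    Examples
    --------
    A minimal usage sketch with made-up counts; with ``compare='diff'`` and
    ``method=None`` the default 'agresti-caffo' method is used:
    >>> from statsmodels.stats.proportion import test_proportions_2indep
    >>> res = test_proportions_2indep(24, 80, 12, 75, compare='diff')
    >>> res.statistic, res.pvalue  # doctest: +SKIP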
"""
method_default = {'diff': 'agresti-caffo',
'ratio': 'log-adjusted',
'odds-ratio': 'logit-adjusted'}
# normalize compare name
if compare.lower() == 'or':
compare = 'odds-ratio'
if method is None:
method = method_default[compare]
method = method.lower()
if method.startswith('agr'):
method = 'agresti-caffo'
if value is None:
# TODO: odds ratio does not work if value=1 for score test
value = 0 if compare == 'diff' else 1
count1, nobs1, count2, nobs2 = map(np.asarray,
[count1, nobs1, count2, nobs2])
p1 = count1 / nobs1
p2 = count2 / nobs2
diff = p1 - p2
ratio = p1 / p2
odds_ratio = p1 / (1 - p1) / p2 * (1 - p2)
res = None
if compare == 'diff':
if method in ['wald', 'agresti-caffo']:
addone = 1 if method == 'agresti-caffo' else 0
count1_, nobs1_ = count1 + addone, nobs1 + 2 * addone
count2_, nobs2_ = count2 + addone, nobs2 + 2 * addone
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
diff_stat = p1_ - p2_ - value
var = p1_ * (1 - p1_) / nobs1_ + p2_ * (1 - p2_) / nobs2_
statistic = diff_stat / np.sqrt(var)
distr = 'normal'
elif method.startswith('newcomb'):
msg = 'newcomb not available for hypothesis test'
raise NotImplementedError(msg)
elif method == 'score':
# Note score part is the same call for all compare
res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,
value=value, compare=compare,
alternative=alternative,
correction=correction,
return_results=return_results)
if return_results is False:
statistic, pvalue = res[:2]
distr = 'normal'
# TODO/Note score_test_proportion_2samp returns statistic and
# not diff_stat
diff_stat = None
else:
raise ValueError('method not recognized')
elif compare == 'ratio':
if method in ['log', 'log-adjusted']:
addhalf = 0.5 if method == 'log-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
ratio_ = p1_ / p2_
var = (1 / count1_) - 1 / nobs1_ + 1 / count2_ - 1 / nobs2_
diff_stat = np.log(ratio_) - np.log(value)
statistic = diff_stat / np.sqrt(var)
distr = 'normal'
elif method == 'score':
res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,
value=value, compare=compare,
alternative=alternative,
correction=correction,
return_results=return_results)
if return_results is False:
statistic, pvalue = res[:2]
distr = 'normal'
diff_stat = None
else:
raise ValueError('method not recognized')
elif compare == "odds-ratio":
if method in ['logit', 'logit-adjusted', 'logit-smoothed']:
if method in ['logit-smoothed']:
adjusted = _shrink_prob(count1, nobs1, count2, nobs2,
shrink_factor=2, return_corr=False)[0]
count1_, nobs1_, count2_, nobs2_ = adjusted
else:
addhalf = 0.5 if method == 'logit-adjusted' else 0
count1_, nobs1_ = count1 + addhalf, nobs1 + 2 * addhalf
count2_, nobs2_ = count2 + addhalf, nobs2 + 2 * addhalf
p1_ = count1_ / nobs1_
p2_ = count2_ / nobs2_
odds_ratio_ = p1_ / (1 - p1_) / p2_ * (1 - p2_)
var = (1 / count1_ + 1 / (nobs1_ - count1_) +
1 / count2_ + 1 / (nobs2_ - count2_))
diff_stat = np.log(odds_ratio_) - np.log(value)
statistic = diff_stat / np.sqrt(var)
distr = 'normal'
elif method == 'score':
res = score_test_proportions_2indep(count1, nobs1, count2, nobs2,
value=value, compare=compare,
alternative=alternative,
correction=correction,
return_results=return_results)
if return_results is False:
statistic, pvalue = res[:2]
distr = 'normal'
diff_stat = None
else:
raise ValueError('method "%s" not recognized' % method)
else:
raise ValueError('compare "%s" not recognized' % compare)
if distr == 'normal' and diff_stat is not None:
statistic, pvalue = _zstat_generic2(diff_stat, np.sqrt(var),
alternative=alternative)
if return_results:
if res is None:
res = HolderTuple(statistic=statistic,
pvalue=pvalue,
compare=compare,
method=method,
diff=diff,
ratio=ratio,
odds_ratio=odds_ratio,
variance=var,
alternative=alternative,
value=value,
)
else:
# we already have a return result from score test
# add missing attributes
res.diff = diff
res.ratio = ratio
res.odds_ratio = odds_ratio
res.value = value
return res
else:
return statistic, pvalue
def tost_proportions_2indep(count1, nobs1, count2, nobs2, low, upp,
method=None, compare='diff', correction=True):
"""
Equivalence test based on two one-sided `test_proportions_2indep`
This assumes that we have two independent binomial samples.
The Null and alternative hypothesis for equivalence testing are
for compare = 'diff'
- H0: prop1 - prop2 <= low or upp <= prop1 - prop2
- H1: low < prop1 - prop2 < upp
for compare = 'ratio'
- H0: prop1 / prop2 <= low or upp <= prop1 / prop2
- H1: low < prop1 / prop2 < upp
for compare = 'odds-ratio'
- H0: or <= low or upp <= or
- H1: low < or < upp
where odds-ratio or = prop1 / (1 - prop1) / (prop2 / (1 - prop2))
Parameters
----------
count1, nobs1 :
count and sample size for first sample
count2, nobs2 :
count and sample size for the second sample
low, upp :
equivalence margin for diff, risk ratio or odds ratio
method : string
method for computing confidence interval. If method is None, then a
default method is used. The default might change as more methods are
added.
diff:
- 'wald',
- 'agresti-caffo'
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985.
ratio:
- 'log': wald test using log transformation
- 'log-adjusted': wald test using log transformation,
adds 0.5 to counts
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985.
odds-ratio:
- 'logit': wald test using logit transformation
         - 'logit-adjusted': wald test using logit transformation,
adds 0.5 to counts
         - 'logit-smoothed': wald test using logit transformation, biases
cell counts towards independence by adding two observations in
total.
- 'score' if correction is True, then this uses the degrees of freedom
correction ``nobs / (nobs - 1)`` as in Miettinen Nurminen 1985
    compare : string in ['diff', 'ratio', 'odds-ratio']
If compare is `diff`, then the confidence interval is for
diff = p1 - p2.
If compare is `ratio`, then the confidence interval is for the
risk ratio defined by ratio = p1 / p2.
If compare is `odds-ratio`, then the confidence interval is for the
        odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2)).
correction : bool
If correction is True (default), then the Miettinen and Nurminen
small sample correction to the variance nobs / (nobs - 1) is used.
Applies only if method='score'.
Returns
-------
pvalue : float
p-value is the max of the pvalues of the two one-sided tests
t1 : test results
results instance for one-sided hypothesis at the lower margin
    t2 : test results
results instance for one-sided hypothesis at the upper margin
Notes
-----
Status: experimental, API and defaults might still change.
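    Examples
    --------
    A minimal usage sketch with made-up counts, testing equivalence of the
    two proportions within a margin of +/- 0.1 on the difference scale:
    >>> from statsmodels.stats.proportion import tost_proportions_2indep
    >>> res = tost_proportions_2indep(24, 80, 25, 75, -0.1, 0.1,
    ...                               compare='diff')
    >>> res.pvalue  # doctest: +SKIP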
"""
tt1 = test_proportions_2indep(count1, nobs1, count2, nobs2, value=low,
method=method, compare=compare,
alternative='larger',
correction=correction,
return_results=True)
tt2 = test_proportions_2indep(count1, nobs1, count2, nobs2, value=upp,
method=method, compare=compare,
alternative='smaller',
correction=correction,
return_results=True)
# idx_max = 1 if t1.pvalue < t2.pvalue else 0
idx_max = np.asarray(tt1.pvalue < tt2.pvalue, int)
statistic = np.choose(idx_max, [tt1.statistic, tt2.statistic])
pvalue = np.choose(idx_max, [tt1.pvalue, tt2.pvalue])
res = HolderTuple(statistic=statistic,
pvalue=pvalue,
compare=compare,
method=method,
results_larger=tt1,
results_smaller=tt2,
title="Equivalence test for 2 independent proportions"
)
return res
def _std_2prop_power(diff, p2, ratio=1, alpha=0.05, value=0):
"""
Compute standard error under null and alternative for 2 proportions
helper function for power and sample size computation
"""
if value != 0:
msg = 'non-zero diff under null, value, is not yet implemented'
raise NotImplementedError(msg)
nobs_ratio = ratio
p1 = p2 + diff
# The following contains currently redundant variables that will
# be useful for different options for the null variance
p_pooled = (p1 + p2 * ratio) / (1 + ratio)
# probabilities for the variance for the null statistic
p1_vnull, p2_vnull = p_pooled, p_pooled
p2_alt = p2
p1_alt = p2_alt + diff
std_null = _std_diff_prop(p1_vnull, p2_vnull, ratio=nobs_ratio)
std_alt = _std_diff_prop(p1_alt, p2_alt, ratio=nobs_ratio)
return p_pooled, std_null, std_alt
def power_proportions_2indep(diff, prop2, nobs1, ratio=1, alpha=0.05,
value=0, alternative='two-sided',
return_results=True):
"""
Power for ztest that two independent proportions are equal
This assumes that the variance is based on the pooled proportion
under the null and the non-pooled variance under the alternative
Parameters
----------
diff : float
difference between proportion 1 and 2 under the alternative
prop2 : float
proportion for the reference case, prop2, proportions for the
        first case will be computed using p2 and diff
p1 = p2 + diff
nobs1 : float or int
number of observations in sample 1
ratio : float
sample size ratio, nobs2 = ratio * nobs1
alpha : float in interval (0,1)
Significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
value : float
currently only `value=0`, i.e. equality testing, is supported
alternative : string, 'two-sided' (default), 'larger', 'smaller'
Alternative hypothesis whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
return_results : bool
If true, then a results instance with extra information is returned,
otherwise only the computed power is returned.
Returns
-------
results : results instance or float
If return_results is True, then a results instance with the
information in attributes is returned.
If return_results is False, then only the power is returned.
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Other attributes in results instance include :
p_pooled
pooled proportion, used for std_null
std_null
standard error of difference under the null hypothesis (without
sqrt(nobs1))
std_alt
standard error of difference under the alternative hypothesis
(without sqrt(nobs1))
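    Examples
    --------
    A minimal usage sketch; the numbers are made up. Power to detect a
    difference of 0.1 over a reference proportion of 0.3 with 200
    observations in the first sample:
    >>> from statsmodels.stats.proportion import power_proportions_2indep
    >>> res = power_proportions_2indep(diff=0.1, prop2=0.3, nobs1=200)
    >>> res.power  # doctest: +SKIP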
"""
# TODO: avoid possible circular import, check if needed
from statsmodels.stats.power import normal_power_het
p_pooled, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=ratio,
alpha=alpha, value=value)
pow_ = normal_power_het(diff, nobs1, alpha, std_null=std_null,
std_alternative=std_alt,
alternative=alternative)
if return_results:
res = Holder(power=pow_,
p_pooled=p_pooled,
std_null=std_null,
std_alt=std_alt,
nobs1=nobs1,
nobs2=ratio * nobs1,
nobs_ratio=ratio,
alpha=alpha,
)
return res
else:
return pow_
def samplesize_proportions_2indep_onetail(diff, prop2, power, ratio=1,
alpha=0.05, value=0,
alternative='two-sided'):
"""
Required sample size assuming normal distribution based on one tail
This uses an explicit computation for the sample size that is required
to achieve a given power corresponding to the appropriate tails of the
normal distribution. This ignores the far tail in a two-sided test
which is negligible in the common case when alternative and null are
far apart.
Parameters
----------
diff : float
Difference between proportion 1 and 2 under the alternative
prop2 : float
proportion for the reference case, prop2, proportions for the
        first case will be computed using p2 and diff
p1 = p2 + diff
power : float
Power for which sample size is computed.
ratio : float
Sample size ratio, nobs2 = ratio * nobs1
alpha : float in interval (0,1)
Significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
value : float
Currently only `value=0`, i.e. equality testing, is supported
alternative : string, 'two-sided' (default), 'larger', 'smaller'
Alternative hypothesis whether the power is calculated for a
two-sided (default) or one sided test. In the case of a one-sided
alternative, it is assumed that the test is in the appropriate tail.
Returns
-------
nobs1 : float
Number of observations in sample 1.
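    Examples
    --------
    A minimal usage sketch; the numbers are made up. Observations needed in
    sample 1 for 80% power to detect a difference of 0.1 over a reference
    proportion of 0.3:
    >>> from statsmodels.stats.proportion import (
    ...     samplesize_proportions_2indep_onetail)
    >>> samplesize_proportions_2indep_onetail(diff=0.1, prop2=0.3,
    ...                                       power=0.8)  # doctest: +SKIP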
"""
# TODO: avoid possible circular import, check if needed
from statsmodels.stats.power import normal_sample_size_one_tail
if alternative in ['two-sided', '2s']:
alpha = alpha / 2
_, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=ratio,
alpha=alpha, value=value)
nobs = normal_sample_size_one_tail(diff, power, alpha, std_null=std_null,
std_alternative=std_alt)
return nobs
def _score_confint_inversion(count1, nobs1, count2, nobs2, compare='diff',
alpha=0.05, correction=True):
"""
Compute score confidence interval by inverting score test
Parameters
----------
count1, nobs1 :
Count and sample size for first sample.
count2, nobs2 :
Count and sample size for the second sample.
    compare : string in ['diff', 'ratio', 'odds-ratio']
If compare is `diff`, then the confidence interval is for
diff = p1 - p2.
If compare is `ratio`, then the confidence interval is for the
risk ratio defined by ratio = p1 / p2.
If compare is `odds-ratio`, then the confidence interval is for the
        odds-ratio defined by or = p1 / (1 - p1) / (p2 / (1 - p2)).
alpha : float in interval (0,1)
Significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
correction : bool
If correction is True (default), then the Miettinen and Nurminen
small sample correction to the variance nobs / (nobs - 1) is used.
Applies only if method='score'.
Returns
-------
low : float
Lower confidence bound.
upp : float
Upper confidence bound.
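    Examples
    --------
    Illustrative call with made-up counts; the bounds are obtained by
    root-finding on the score-test p-value:
    >>> low, upp = _score_confint_inversion(24, 80, 12, 75, compare='diff',
    ...                                     alpha=0.05)  # doctest: +SKIP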
"""
def func(v):
r = test_proportions_2indep(count1, nobs1, count2, nobs2,
value=v, compare=compare, method='score',
correction=correction,
alternative="two-sided")
return r.pvalue - alpha
rt0 = test_proportions_2indep(count1, nobs1, count2, nobs2,
value=0, compare=compare, method='score',
correction=correction,
alternative="two-sided")
# use default method to get starting values
# this will not work if score confint becomes default
# maybe use "wald" as alias that works for all compare statistics
use_method = {"diff": "wald", "ratio": "log", "odds-ratio": "logit"}
rci0 = confint_proportions_2indep(count1, nobs1, count2, nobs2,
method=use_method[compare],
compare=compare, alpha=alpha)
# Note diff might be negative
ub = rci0[1] + np.abs(rci0[1]) * 0.5
lb = rci0[0] - np.abs(rci0[0]) * 0.25
if compare == 'diff':
param = rt0.diff
# 1 might not be the correct upper bound because
# rootfinding is for the `diff` and not for a probability.
ub = min(ub, 0.99999)
elif compare == 'ratio':
param = rt0.ratio
ub *= 2 # add more buffer
if compare == 'odds-ratio':
param = rt0.odds_ratio
# root finding for confint bounds
upp = optimize.brentq(func, param, ub)
low = optimize.brentq(func, lb, param)
return low, upp
def _confint_riskratio_koopman(count1, nobs1, count2, nobs2, alpha=0.05,
correction=True):
"""
Score confidence interval for ratio or proportions, Koopman/Nam
signature not consistent with other functions
When correction is True, then the small sample correction nobs / (nobs - 1)
by Miettinen/Nurminen is used.
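    Illustrative call with made-up counts; the result is a Holder instance
    with the confidence interval for the risk ratio in ``confint``:
    >>> res = _confint_riskratio_koopman(24, 80, 12, 75, alpha=0.05)
    >>> res.confint  # doctest: +SKIP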
"""
# The names below follow Nam
x0, x1, n0, n1 = count2, count1, nobs2, nobs1
x = x0 + x1
n = n0 + n1
z = stats.norm.isf(alpha / 2)**2
if correction:
        # Miettinen/Nurminen small sample correction
z *= n / (n - 1)
# z = stats.chi2.isf(alpha, 1)
# equ 6 in Nam 1995
a1 = n0 * (n0 * n * x1 + n1 * (n0 + x1) * z)
a2 = - n0 * (n0 * n1 * x + 2 * n * x0 * x1 + n1 * (n0 + x0 + 2 * x1) * z)
a3 = 2 * n0 * n1 * x0 * x + n * x0 * x0 * x1 + n0 * n1 * x * z
a4 = - n1 * x0 * x0 * x
p_roots_ = np.sort(np.roots([a1, a2, a3, a4]))
p_roots = p_roots_[:2][::-1]
# equ 5
ci = (1 - (n1 - x1) * (1 - p_roots) / (x0 + n1 - n * p_roots)) / p_roots
res = Holder()
res.confint = ci
res._p_roots = p_roots_ # for unit tests, can be dropped
return res
def _confint_riskratio_paired_nam(table, alpha=0.05):
"""
Confidence interval for marginal risk ratio for matched pairs
need full table
success fail marginal
success x11 x10 x1.
fail x01 x00 x0.
marginal x.1 x.0 n
The confidence interval is for the ratio p1 / p0 where
p1 = x1. / n and
    p0 = x.1 / n
Todo: rename p1 to pa and p2 to pb, so we have a, b for treatment and
0, 1 for success/failure
current namings follow Nam 2009
status
testing:
compared to example in Nam 2009
internal polynomial coefficients in calculation correspond at around
4 decimals
confidence interval agrees only at 2 decimals
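    Illustrative input with made-up counts; ``table`` holds the four
    matched-pair cell counts x11, x10, x01, x00 in the layout shown above:
    >>> table = np.array([[40, 10], [5, 45]])
    >>> res = _confint_riskratio_paired_nam(table, alpha=0.05)
    >>> res.confint  # doctest: +SKIP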
"""
x11, x10, x01, x00 = np.ravel(table)
n = np.sum(table) # nobs
p10, p01 = x10 / n, x01 / n
p1 = (x11 + x10) / n
p0 = (x11 + x01) / n
q00 = 1 - x00 / n
z2 = stats.norm.isf(alpha / 2)**2
# z = stats.chi2.isf(alpha, 1)
# before equ 3 in Nam 2009
g1 = (n * p0 + z2 / 2) * p0
g2 = - (2 * n * p1 * p0 + z2 * q00)
g3 = (n * p1 + z2 / 2) * p1
a0 = g1**2 - (z2 * p0 / 2)**2
a1 = 2 * g1 * g2
a2 = g2**2 + 2 * g1 * g3 + z2**2 * (p1 * p0 - 2 * p10 * p01) / 2
a3 = 2 * g2 * g3
a4 = g3**2 - (z2 * p1 / 2)**2
p_roots = np.sort(np.roots([a0, a1, a2, a3, a4]))
# p_roots = np.sort(np.roots([1, a1 / a0, a2 / a0, a3 / a0, a4 / a0]))
ci = [p_roots.min(), p_roots.max()]
res = Holder()
res.confint = ci
res.p = p1, p0
res._p_roots = p_roots # for unit tests, can be dropped
return res
| {
"content_hash": "2b2b6bf38180f9f104c856700e8284d1",
"timestamp": "",
"source": "github",
"line_count": 2333,
"max_line_length": 83,
"avg_line_length": 36.56150878696957,
"alnum_prop": 0.5724518746043283,
"repo_name": "josef-pkt/statsmodels",
"id": "25528204e8a20b75a801c86a1fbd141d42477932",
"size": "85349",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "statsmodels/stats/proportion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14428857"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25322"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
from test_plus.test import TestCase
class TestUser(TestCase):
def setUp(self):
self.user = self.make_user()
def test__str__(self):
self.assertEqual(
self.user.__str__(),
"testuser" # This is the default username for self.make_user()
)
def test_get_absolute_url(self):
self.assertEqual(
self.user.get_absolute_url(),
'/users/testuser/'
) | {
"content_hash": "67ce7fe3657e38bf857e8577fddc6092",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 23.263157894736842,
"alnum_prop": 0.5542986425339367,
"repo_name": "pkom/gestionies",
"id": "30065868ea2430e5b28198d20822ed345757e403",
"size": "442",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "gestionies/users/tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1767"
},
{
"name": "HTML",
"bytes": "20634"
},
{
"name": "JavaScript",
"bytes": "3148"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "56315"
},
{
"name": "Shell",
"bytes": "4533"
}
],
"symlink_target": ""
} |
import copy
def merge(dbag, data):
""" Simply overwrite the existsing bag as, the whole configuration is sent every time """
if "rules" not in data:
return dbag
dbag['config'] = data['rules']
return dbag
| {
"content_hash": "667380d51eaa9aade2317d66a11529aa",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 93,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.6478260869565218,
"repo_name": "wido/cloudstack",
"id": "777130974b04194305503e57bd53bd048911be44",
"size": "1016",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "systemvm/debian/opt/cloud/bin/cs_loadbalancer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10890"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "358651"
},
{
"name": "Dockerfile",
"bytes": "2374"
},
{
"name": "FreeMarker",
"bytes": "4887"
},
{
"name": "Groovy",
"bytes": "146420"
},
{
"name": "HTML",
"bytes": "149088"
},
{
"name": "Java",
"bytes": "36088724"
},
{
"name": "JavaScript",
"bytes": "7976318"
},
{
"name": "Python",
"bytes": "13363686"
},
{
"name": "Ruby",
"bytes": "37714"
},
{
"name": "Shell",
"bytes": "784058"
},
{
"name": "XSLT",
"bytes": "58008"
}
],
"symlink_target": ""
} |
import mock
from boto.exception import BotoServerError
from boto.route53.connection import Route53Connection
from boto.route53.exception import DNSServerError
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
class TestRoute53Connection(AWSMockServiceTestCase):
connection_class = Route53Connection
def setUp(self):
super(TestRoute53Connection, self).setUp()
self.calls = {
'count': 0,
}
def default_body(self):
return """<Route53Result>
<Message>It failed.</Message>
</Route53Result>
"""
def test_typical_400(self):
self.set_http_response(status_code=400, header=[
['Code', 'Throttling'],
])
with self.assertRaises(DNSServerError) as err:
self.service_connection.get_all_hosted_zones()
self.assertTrue('It failed.' in str(err.exception))
@mock.patch('time.sleep')
def test_retryable_400(self, sleep_mock):
self.set_http_response(status_code=400, header=[
['Code', 'PriorRequestNotComplete'],
])
def incr_retry_handler(func):
def _wrapper(*args, **kwargs):
self.calls['count'] += 1
return func(*args, **kwargs)
return _wrapper
# Patch.
orig_retry = self.service_connection._retry_handler
self.service_connection._retry_handler = incr_retry_handler(
orig_retry
)
self.assertEqual(self.calls['count'], 0)
# Retries get exhausted.
with self.assertRaises(BotoServerError):
self.service_connection.get_all_hosted_zones()
self.assertEqual(self.calls['count'], 7)
# Unpatch.
self.service_connection._retry_handler = orig_retry
| {
"content_hash": "0fff7e87606926e02598ec1bfe9bd566",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 68,
"avg_line_length": 28.822580645161292,
"alnum_prop": 0.6284275321768327,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "3141dd1744a98712a616a05ee3aabd64d75023c4",
"size": "2932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset/python/test_connection (19).py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
} |
"""The virtual interfaces extension."""
import webob
from jacket.api.compute.openstack import api_version_request
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.i18n import _
from jacket.compute import network
ALIAS = 'os-virtual-interfaces'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_vif_summary_view(req, vif):
"""Maps keys for VIF summary view."""
d = {}
d['id'] = vif.uuid
d['mac_address'] = vif.address
if api_version_request.is_supported(req, min_version='2.12'):
d['net_id'] = vif.net_uuid
# NOTE(gmann): This is for v2.1 compatible mode where response should be
# same as v2 one.
if req.is_legacy_v2():
d['OS-EXT-VIF-NET:net_id'] = vif.net_uuid
return d
class ServerVirtualInterfaceController(wsgi.Controller):
"""The instance VIF API controller for the OpenStack API.
"""
def __init__(self):
self.compute_api = cloud.API(skip_policy_check=True)
self.network_api = network.API(skip_policy_check=True)
super(ServerVirtualInterfaceController, self).__init__()
def _items(self, req, server_id, entity_maker):
"""Returns a list of VIFs, transformed through entity_maker."""
context = req.environ['compute.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
try:
vifs = self.network_api.get_vifs_by_instance(context, instance)
except NotImplementedError:
msg = _('Listing virtual interfaces is not supported by this '
'cloud.')
raise webob.exc.HTTPBadRequest(explanation=msg)
limited_list = common.limited(vifs, req)
res = [entity_maker(req, vif) for vif in limited_list]
return {'virtual_interfaces': res}
@extensions.expected_errors((400, 404))
def index(self, req, server_id):
"""Returns the list of VIFs for a given instance."""
return self._items(req, server_id,
entity_maker=_translate_vif_summary_view)
class VirtualInterfaces(extensions.V21APIExtensionBase):
"""Virtual interface support."""
name = "VirtualInterfaces"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
ALIAS,
controller=ServerVirtualInterfaceController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
def get_controller_extensions(self):
return []
| {
"content_hash": "be80359538c84bc2adc334a0ad9526cf",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 32.98795180722892,
"alnum_prop": 0.6566837107377648,
"repo_name": "HybridF5/jacket",
"id": "da51d92a00453540db40f70afd2e5f47e4d212a4",
"size": "3369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/api/compute/openstack/compute/virtual_interfaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
import typing
import numpy as np
from .unit import Unit
class FixedLength(Unit):
"""
FixedLengthUnit Class.
Process unit to get the fixed length text.
Examples:
>>> from matchzoo.preprocessors.units import FixedLength
>>> fixedlen = FixedLength(3)
>>> fixedlen.transform(list(range(1, 6))) == [3, 4, 5]
True
>>> fixedlen.transform(list(range(1, 3))) == [0, 1, 2]
True
"""
def __init__(
self,
text_length: int,
pad_value: typing.Union[int, str] = 0,
pad_mode: str = 'pre',
truncate_mode: str = 'pre'
):
"""
Class initialization.
:param text_length: fixed length of the text.
:param pad_value: if text length is smaller than :attr:`text_length`,
filling text with :attr:`pad_value`.
:param pad_mode: String, `pre` or `post`:
pad either before or after each sequence.
:param truncate_mode: String, `pre` or `post`:
remove values from sequences larger than :attr:`text_length`,
either at the beginning or at the end of the sequences.
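        Example (an illustrative sketch of `post` padding):
        >>> from matchzoo.preprocessors.units import FixedLength
        >>> FixedLength(3, pad_mode='post').transform([1, 2]) == [1, 2, 0]
        True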
"""
self._text_length = text_length
self._pad_value = pad_value
self._pad_mode = pad_mode
self._truncate_mode = truncate_mode
def transform(self, input_: list) -> list:
"""
Transform list of tokenized tokens into the fixed length text.
:param input_: list of tokenized tokens.
:return tokens: list of tokenized tokens in fixed length.
"""
# padding process can not handle empty list as input
if len(input_) == 0:
input_ = [self._pad_value]
np_tokens = np.array(input_)
fixed_tokens = np.full([self._text_length], self._pad_value,
dtype=np_tokens.dtype)
if self._truncate_mode == 'pre':
trunc_tokens = input_[-self._text_length:]
elif self._truncate_mode == 'post':
trunc_tokens = input_[:self._text_length]
else:
            raise ValueError('{} is not a valid '
'truncate mode.'.format(self._truncate_mode))
if self._pad_mode == 'post':
fixed_tokens[:len(trunc_tokens)] = trunc_tokens
elif self._pad_mode == 'pre':
fixed_tokens[-len(trunc_tokens):] = trunc_tokens
else:
            raise ValueError('{} is not a valid '
'pad mode.'.format(self._pad_mode))
return fixed_tokens.tolist()
| {
"content_hash": "0380adb17f122ffa3aaa54eb303ccf4a",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 32.45569620253165,
"alnum_prop": 0.5522620904836193,
"repo_name": "faneshion/MatchZoo",
"id": "d1425f031e48688fe5867f86de769dbf4779b033",
"size": "2564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matchzoo/preprocessors/units/fixed_length.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "201"
},
{
"name": "Python",
"bytes": "249119"
},
{
"name": "Shell",
"bytes": "2746"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import copy
from netman.core.objects.bond import Bond
from netman.core.objects.interface import Interface
from netman.core.objects.port_modes import ACCESS, TRUNK
from netman.core.objects.switch_base import SwitchBase
from netman.core.objects.vlan import Vlan
from netman.core.objects.vrrp_group import VrrpGroup
__all__ = ['CachedSwitch']
class Cache(object):
object_type = None
object_key = None
def __init__(self, *key_value_tuples):
self.refresh_items = set()
self.dict = OrderedDict(*key_value_tuples)
def create_fake_object(self, item):
params = {self.object_key: item}
return self.object_type(**params)
def invalidated(self):
self.refresh_items.add(None)
return self
def __getitem__(self, item):
try:
return self.dict[item]
except KeyError:
self.refresh_items.add(item)
return self.create_fake_object(item)
def __setitem__(self, key, value):
self.dict[key] = value
try:
self.refresh_items.remove(key)
except KeyError:
pass
def __contains__(self, item):
return item in self.dict
def __len__(self):
return len(self.dict)
def __delitem__(self, key):
try:
del self.dict[key]
self.refresh_items.remove(key)
except KeyError:
pass
def values(self):
return self.dict.values()
class VlanCache(Cache):
object_type = Vlan
object_key = 'number'
class InterfaceCache(Cache):
object_type = Interface
object_key = 'name'
class BondCache(Cache):
object_type = Bond
object_key = 'number'
class CachedSwitch(SwitchBase):
def __init__(self, real_switch):
super(CachedSwitch, self).__init__(real_switch.switch_descriptor)
self.real_switch = real_switch
self.vlans_cache = VlanCache().invalidated()
self.interfaces_cache = InterfaceCache().invalidated()
self.bonds_cache = BondCache().invalidated()
def connect(self):
return self.real_switch.connect()
def disconnect(self):
return self.real_switch.disconnect()
def start_transaction(self):
return self.real_switch.start_transaction()
def commit_transaction(self):
return self.real_switch.commit_transaction()
def rollback_transaction(self):
return self.real_switch.rollback_transaction()
def end_transaction(self):
return self.real_switch.end_transaction()
def get_vlan(self, number):
if (self.vlans_cache.refresh_items and number not in self.vlans_cache) \
or number in self.vlans_cache.refresh_items:
self.vlans_cache[number] = self.real_switch.get_vlan(number)
return copy.deepcopy(self.vlans_cache[number])
def get_vlans(self):
if self.vlans_cache.refresh_items:
self.vlans_cache = VlanCache(
(vlan.number, vlan) for vlan in self.real_switch.get_vlans())
return copy.deepcopy(self.vlans_cache.values())
def get_interfaces(self):
if self.interfaces_cache.refresh_items:
self.interfaces_cache = InterfaceCache(
(interface.name, interface)
for interface in self.real_switch.get_interfaces())
return copy.deepcopy(self.interfaces_cache.values())
def get_bond(self, number):
if (self.bonds_cache.refresh_items and number not in self.bonds_cache)\
or number in self.bonds_cache.refresh_items:
self.bonds_cache[number] = self.real_switch.get_bond(number)
return copy.deepcopy(self.bonds_cache[number])
def get_bonds(self):
if self.bonds_cache.refresh_items:
self.bonds_cache = BondCache(
(bond.number, bond) for bond in self.real_switch.get_bonds())
return copy.deepcopy(self.bonds_cache.values())
def add_vlan(self, number, name=None):
extras = {}
if name is not None:
extras["name"] = name
result = self.real_switch.add_vlan(number, **extras)
self.vlans_cache.refresh_items.add(number)
return result
def remove_vlan(self, number):
self.real_switch.remove_vlan(number)
del self.vlans_cache[number]
def set_vlan_access_group(self, vlan_number, direction, name):
self.real_switch.set_vlan_access_group(vlan_number, direction, name)
self.vlans_cache[vlan_number].access_groups[direction] = name
def remove_vlan_access_group(self, vlan_number, direction):
self.real_switch.remove_vlan_access_group(vlan_number, direction)
self.vlans_cache[vlan_number].access_groups[direction] = None
def add_ip_to_vlan(self, vlan_number, ip_network):
self.real_switch.add_ip_to_vlan(vlan_number, ip_network)
self.vlans_cache[vlan_number].ips.append(ip_network)
def remove_ip_from_vlan(self, vlan_number, ip_network):
self.real_switch.remove_ip_from_vlan(vlan_number, ip_network)
self.vlans_cache[vlan_number].ips = [
net for net in self.vlans_cache[vlan_number].ips
if str(net) != str(ip_network)]
def set_vlan_vrf(self, vlan_number, vrf_name):
self.real_switch.set_vlan_vrf(vlan_number, vrf_name)
self.vlans_cache[vlan_number].vrf_forwarding = vrf_name
def remove_vlan_vrf(self, vlan_number):
self.real_switch.remove_vlan_vrf(vlan_number)
self.vlans_cache[vlan_number].vrf_forwarding = None
def set_access_mode(self, interface_id):
self.real_switch.set_access_mode(interface_id)
self.interfaces_cache[interface_id].port_mode = ACCESS
self.interfaces_cache[interface_id].trunk_native_vlan = None
self.interfaces_cache[interface_id].trunk_vlans = []
def set_trunk_mode(self, interface_id):
self.real_switch.set_trunk_mode(interface_id)
self.interfaces_cache[interface_id].port_mode = TRUNK
def set_bond_access_mode(self, bond_number):
self.real_switch.set_bond_access_mode(bond_number)
self.bonds_cache[bond_number].port_mode = ACCESS
def set_bond_trunk_mode(self, bond_number):
self.real_switch.set_bond_trunk_mode(bond_number)
self.bonds_cache[bond_number].port_mode = TRUNK
def set_access_vlan(self, interface_id, vlan):
self.real_switch.set_access_vlan(interface_id, vlan)
self.interfaces_cache[interface_id].access_vlan = vlan
def remove_access_vlan(self, interface_id):
self.real_switch.remove_access_vlan(interface_id)
self.interfaces_cache[interface_id].access_vlan = None
def configure_native_vlan(self, interface_id, vlan):
self.real_switch.configure_native_vlan(interface_id, vlan)
self.interfaces_cache[interface_id].trunk_native_vlan = vlan
def remove_native_vlan(self, interface_id):
self.real_switch.remove_native_vlan(interface_id)
self.interfaces_cache[interface_id].trunk_native_vlan = None
def configure_bond_native_vlan(self, bond_number, vlan):
self.real_switch.configure_bond_native_vlan(bond_number, vlan)
self.bonds_cache[bond_number].trunk_native_vlan = vlan
def remove_bond_native_vlan(self, bond_number):
self.real_switch.remove_bond_native_vlan(bond_number)
self.bonds_cache[bond_number].trunk_native_vlan = None
def add_trunk_vlan(self, interface_id, vlan):
self.real_switch.add_trunk_vlan(interface_id, vlan)
self.interfaces_cache[interface_id].trunk_vlans.append(vlan)
def remove_trunk_vlan(self, interface_id, vlan):
self.real_switch.remove_trunk_vlan(interface_id, vlan)
try:
self.interfaces_cache[interface_id].trunk_vlans.remove(vlan)
except ValueError:
pass
def add_bond_trunk_vlan(self, bond_number, vlan):
self.real_switch.add_bond_trunk_vlan(bond_number, vlan)
self.bonds_cache[bond_number].trunk_vlans.append(vlan)
def remove_bond_trunk_vlan(self, bond_number, vlan):
self.real_switch.remove_bond_trunk_vlan(bond_number, vlan)
try:
self.bonds_cache[bond_number].trunk_vlans.remove(vlan)
except ValueError:
pass
def set_interface_description(self, interface_id, description):
# No cache to update
self.real_switch.set_interface_description(interface_id, description)
def remove_interface_description(self, interface_id):
# No cache to update
self.real_switch.remove_interface_description(interface_id)
def set_bond_description(self, bond_number, description):
# No cache to update
self.real_switch.set_bond_description(bond_number, description)
def remove_bond_description(self, bond_number):
# No cache to update
self.real_switch.remove_bond_description(bond_number)
def edit_interface_spanning_tree(self, interface_id, edge=None):
# No cache to update
self.real_switch.edit_interface_spanning_tree(interface_id, edge=edge)
def openup_interface(self, interface_id):
self.real_switch.openup_interface(interface_id)
self.interfaces_cache[interface_id].shutdown = False
def shutdown_interface(self, interface_id):
self.real_switch.shutdown_interface(interface_id)
self.interfaces_cache[interface_id].shutdown = True
def add_bond(self, number):
self.real_switch.add_bond(number)
self.bonds_cache[number] = Bond(number=number)
def remove_bond(self, number):
self.real_switch.remove_bond(number)
del self.bonds_cache[number]
def add_interface_to_bond(self, interface, bond_number):
self.real_switch.add_interface_to_bond(interface, bond_number)
self.bonds_cache[bond_number].members.append(interface)
self.interfaces_cache[interface].bond_master = bond_number
def remove_interface_from_bond(self, interface):
self.real_switch.remove_interface_from_bond(interface)
self.interfaces_cache[interface].bond_master = None
for bond in self.bonds_cache.values():
try:
bond.members.remove(interface)
except ValueError:
pass
def set_bond_link_speed(self, number, speed):
self.real_switch.set_bond_link_speed(number, speed)
self.bonds_cache[number].link_speed = speed
def edit_bond_spanning_tree(self, number, edge=None):
self.real_switch.edit_bond_spanning_tree(number, edge=edge)
def add_vrrp_group(self, vlan_number, group_id, ips=None, priority=None,
                       hello_interval=None, dead_interval=None, track_id=None,
track_decrement=None):
self.real_switch.add_vrrp_group(vlan_number, group_id, ips=ips,
priority=priority,
hello_interval=hello_interval,
dead_interval=dead_interval,
track_id=track_id,
track_decrement=track_decrement)
self.vlans_cache[vlan_number].vrrp_groups.append(VrrpGroup(
id=group_id, ips=ips, priority=priority,
hello_interval=hello_interval, dead_interval=dead_interval,
track_id=track_id, track_decrement=track_decrement
))
def remove_vrrp_group(self, vlan_number, group_id):
self.real_switch.remove_vrrp_group(vlan_number, group_id)
for group in self.vlans_cache[vlan_number].vrrp_groups:
if group.id == group_id:
self.vlans_cache[vlan_number].vrrp_groups.remove(group)
def add_dhcp_relay_server(self, vlan_number, ip_address):
self.real_switch.add_dhcp_relay_server(vlan_number, ip_address)
self.vlans_cache[vlan_number].dhcp_relay_servers.append(ip_address)
def remove_dhcp_relay_server(self, vlan_number, ip_address):
self.real_switch.remove_dhcp_relay_server(vlan_number, ip_address)
try:
self.vlans_cache[vlan_number].dhcp_relay_servers.remove(ip_address)
except ValueError:
pass
def enable_lldp(self, interface_id, enabled):
self.real_switch.enable_lldp(interface_id, enabled)
def set_vlan_icmp_redirects_state(self, vlan_number, state):
self.real_switch.set_vlan_icmp_redirects_state(vlan_number, state)
self.vlans_cache[vlan_number].icmp_redirects = state | {
"content_hash": "471402993bfe25164407de32fc59a773",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 80,
"avg_line_length": 38.06363636363636,
"alnum_prop": 0.6509035904784651,
"repo_name": "mat128/netman",
"id": "d074ac14f6940a83dbd54724b0359ef206620714",
"size": "13135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netman/adapters/switches/cached.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "1098335"
}
],
"symlink_target": ""
} |
import sys
import os
from harvester.post_processing.couchdb_runner import CouchDBJobEnqueue
from harvester.post_processing.couchdb_runner import CouchDBWorker
from harvester.image_harvest import harvest_image_for_doc
from harvester.couchdb_init import get_couchdb
import couchdb #couchdb-python
from dplaingestion.selector import delprop
EMAIL_RETURN_ADDRESS = os.environ.get('EMAIL_RETURN_ADDRESS',
'[email protected]')
# csv delim email addresses
EMAIL_SYS_ADMIN = os.environ.get('EMAIL_SYS_ADMINS', None)
def delete_field_and_queue_image_harvest(doc, field, cdb, enq):
print 'Delete {} for {}'.format(field, doc['_id'])
delprop(doc, field, keyErrorAsNone=True)
cdb.save(doc)
timeout = 10000
results = enq.queue_list_of_ids([doc['_id']],
timeout,
harvest_image_for_doc,
)
def main(cid):
worker = CouchDBWorker()
enq = CouchDBJobEnqueue()
timeout = 100000
cdb = get_couchdb()
worker.run_by_collection(cid,
delete_field_and_queue_image_harvest,
'object',
cdb,
enq
)
if __name__ == '__main__':
main('26094')
| {
"content_hash": "1d6261fe63227cb2f85025e4648fe58c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 32.41025641025641,
"alnum_prop": 0.6091772151898734,
"repo_name": "mredar/harvester",
"id": "5f6554bb50d24af394ff2f715161d12ca0b0908a",
"size": "1283",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/remove_object_and_harvest_image_26094.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "523005"
},
{
"name": "Shell",
"bytes": "12556"
}
],
"symlink_target": ""
} |
from markdown.util import AtomicString
from markdown.util import etree
from markdown.inlinepatterns import Pattern
from markdown import Extension
class MathJaxPattern(Pattern):
groups = 2, 3, 4
start_end = None
def __init__ (self, start_end=None, groups=None):
if start_end is not None:
self.start_end = start_end
if groups is not None:
self.groups = groups
pattern = r'(?<!\\)(%s)(.+?)(?<!\\)(%s)' % (self.start_end)
Pattern.__init__(self, pattern)
def handleMatch(self, m):
node = etree.Element(None)
text = ''
for group in self.groups:
text += m.group(group)
node.text = AtomicString(text)
        return node
class MathJaxInlinePattern(MathJaxPattern):
start_end = r'\\\(', r'\\\)'
class BraketPattern(MathJaxPattern):
start_end = r'\\\[', r'\\\]'
class DoubleDollarPattern(MathJaxPattern):
start_end = r'\$\$', r'\$\$'
class BeginEndPattern(MathJaxPattern):
start_end = r'\\begin\{(.+?)\}', r'\\end\{\3\}'
groups = 2, 4, 5
class MathJaxExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.inlinePatterns.add('mathjax_invironment', BeginEndPattern(), '<escape')
md.inlinePatterns.add('mathjax_bracket', BraketPattern(), '<escape')
md.inlinePatterns.add('mathjax_double_dollar', DoubleDollarPattern(), '<escape')
md.inlinePatterns.add('mathjax_inline', MathJaxInlinePattern(), '<escape')
def makeExtension(configs=None):
return MathJaxExtension(configs)
| {
"content_hash": "ed066a528e2fb199faa71f49456c5159",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 88,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.6202770780856424,
"repo_name": "epsilony/md_mathjax",
"id": "4047918a50a34a603b5f912497311b4359f209cc",
"size": "1588",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mdx_mathjax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1588"
}
],
"symlink_target": ""
} |
import sys
def add(x, y): return x + y
def sub(x, y): return x - y
def mult(x, y): return x * y
OPERATIONS = [add, sub, mult]
REPRESENTATIONS = {add: '+', sub: '-', mult: '*'}
def new_counter(counter, item):
if counter is None: counter = 0
if type(item) == type(add): return counter - 1
else: return counter + 1
def valid_perms(lst, counter=None):
if counter is not None and counter <= 0: return
if len(lst) == 1:
if new_counter(counter, lst[0]) != 1: return
yield lst
else:
for i in xrange(len(lst)):
for perm in valid_perms(lst[:i]+lst[i+1:], new_counter(counter, lst[i])):
yield [lst[i]] + perm
def all_combos(lst, n):
if n == 0: yield []
else:
for i in xrange(len(lst)):
for combo in all_combos(lst, n - 1):
yield [lst[i]] + combo
def uniq(generator):
seen_things=set([])
for thing in generator:
if tuple(thing) not in seen_things:
seen_things.add(tuple(thing))
yield thing
def valid_postfix_stacks(nums):
for op_combo in uniq(all_combos(OPERATIONS, len(nums) - 1)):
for perm in uniq(valid_perms(nums + op_combo)):
yield perm
def compute(stack):
s = []
for item in stack:
if type(item) != type(add):
s.append(item)
else:
s.append(item(*reversed([s.pop(), s.pop()])))
assert len(s) == 1
return s[0]
def infix(stack):
s = []
for item in stack:
if type(item) != type(add):
s.append(str(item))
else:
s.append('(' + ' '.join(reversed([s.pop(), REPRESENTATIONS[item], s.pop()])) + ')')
assert len(s) == 1
return s[0]
def solve(nums, answer):
for postfix_stack in uniq(valid_postfix_stacks(nums)):
try: val = compute(postfix_stack)
except ZeroDivisionError, e: continue
if val == answer:
print "Y" #, infix(postfix_stack), "=", val
return
print "N"
def main(argv):
for _ in xrange(int(sys.stdin.readline().strip())):
numbers = map(int, sys.stdin.readline().strip().split())
target = numbers.pop()
solve(numbers, target)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| {
"content_hash": "a7e9384cb2ffa0cbc9d6266941d214e3",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 89,
"avg_line_length": 26.227848101265824,
"alnum_prop": 0.6013513513513513,
"repo_name": "seanhess/contests",
"id": "936348ff26d96b5094fc24972fd6db51e718a00f",
"size": "2091",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "2011-mebipenny/practice/permute/permute.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "909"
},
{
"name": "HTML",
"bytes": "19065"
},
{
"name": "Haskell",
"bytes": "717"
},
{
"name": "Python",
"bytes": "47134"
},
{
"name": "Ruby",
"bytes": "64169"
}
],
"symlink_target": ""
} |
'''
*
* VTN-based RTM Network Service
*
* file: \rtm-api\api\v1_0\policies.py
*
* This software contributed by NEC under Apache 2.0 license for open source use. This software is supplied under the terms of the OpenSourceSDN Apache 2.0 license agreement
* Copyright (c) 2015 NEC Europe Ltd.
*
* Authors: Savvas Zannettou
* Fabian Schneider ([email protected])
*
* NEC Europe Ltd. DISCLAIMS ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE AND THE WARRANTY AGAINST LATENT
* DEFECTS, WITH RESPECT TO THE PROGRAM AND THE ACCOMPANYING
* DOCUMENTATION.
*
* No Liability For Consequential Damages IN NO EVENT SHALL NEC Europe
* Ltd., NEC Corporation OR ANY OF ITS SUBSIDIARIES BE LIABLE FOR ANY
* DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS
* OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF INFORMATION, OR
* OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL, INCIDENTAL,
* ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF OR INABILITY
* TO USE THIS PROGRAM, EVEN IF NEC Europe Ltd. HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
*
'''
from flask import request, jsonify
from ..models import db, Policy, ClassDscp, FlowfilterEntryMap
from ..decorators import json, paginate, etag
from . import api
#Get All policies#
@api.route('/policies/', methods=['GET'])
def get_policies():
return jsonify(json_list = [i.to_json() for i in Policy.query.all()])
#Get a policy using the Policy id#
@api.route('/policies/<int:id>', methods=['GET'])
@json
def get_policy_by_id(id):
return Policy.query.get_or_404(id)
#Create a new policy#
@api.route('/policies/', methods=['POST'])
@json
def add_policy():
#add the policy to the database#
policy = Policy().from_json(request.json)
db.session.add(policy)
db.session.commit()
#create the vtn configuration according to policy information#
mapping = ClassDscp.query.get_or_404(policy.application_class)
flowfilter=policy.create_vtn_config(mapping.dscp)
#save the mapping between flowlist and flowfilter entry for future reference#
flowlistname = 'flowlist'+ str(policy.policy_id)
entry = FlowfilterEntryMap().init(flowfilter,flowlistname)
db.session.add(entry)
db.session.commit()
return request.json, 201
#Delete a policy#
@api.route('/policies/<int:id>', methods=['DELETE'])
@json
def delete_policy(id):
policy = Policy.query.get_or_404(id)
#before deleting remove the vtn configuration for the policy element#
flowlistname = 'flowlist' + str(policy.policy_id)
entry = FlowfilterEntryMap.query.get_or_404(flowlistname)
policy.delete_vtn_config(entry.flowfilterentry)
#also delete the flowfilterentrymap from the database#
db.session.delete(entry)
db.session.delete(policy)
db.session.commit()
return {}
#Update a policy#
@api.route('/policies/<int:id>', methods=['PUT'])
@json
def update_policy(id):
policy = Policy.query.get_or_404(id)
policy.from_json(request.json)
#Before updating the database update the vtn configuration. If the application class is changed then we have to change the dscp
#value of the flowfilter entry
flowlistname = 'flowlist' + str(policy.policy_id)
entry = FlowfilterEntryMap.query.get_or_404(flowlistname)
#get the new dscp value by querying the mapping table using the application class#
mapping = ClassDscp.query.get_or_404(policy.application_class)
policy.update_vtn_config(flowlistname,entry.flowfilterentry, mapping.dscp)
db.session.add(policy)
db.session.commit()
return {}
@api.route('/policies/<int:id>', methods=['PATCH'])
@json
def patch_policy(id):
policy= Policy.query.get_or_404(id)
policy.from_json_patch(request.json)
#Before updating the database update the vtn configuration. If the application class is changed then we have to change the dscp
#value of the flowfilter entry
flowlistname = 'flowlist' + str(policy.policy_id)
entry = FlowfilterEntryMap.query.get_or_404(flowlistname)
#get the new dscp value by querying the mapping table using the application class#
mapping = ClassDscp.query.get_or_404(policy.application_class)
policy.update_vtn_config(flowlistname,entry.flowfilterentry, mapping.dscp)
db.session.add(policy)
db.session.commit()
return {} | {
"content_hash": "a29211af705d436a8c79b28aa819f6c7",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 173,
"avg_line_length": 35.72,
"alnum_prop": 0.7254199328107502,
"repo_name": "OpenNetworkingFoundation/ASPEN-Real-Time-Media-Interface",
"id": "8cbdda443c4ca213d96ee640cf325bb174dd5a09",
"size": "4465",
"binary": false,
"copies": "1",
"ref": "refs/heads/NEC-code-contribution",
"path": "rtm-api/api/v1_0/policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1126"
},
{
"name": "CSS",
"bytes": "86486"
},
{
"name": "HTML",
"bytes": "7114"
},
{
"name": "Java",
"bytes": "81375"
},
{
"name": "JavaScript",
"bytes": "634584"
},
{
"name": "Python",
"bytes": "190637"
},
{
"name": "Shell",
"bytes": "5296"
}
],
"symlink_target": ""
} |
from src.base import Template, Collector
__version__ = '0.1' | {
"content_hash": "2b53def220bf90fb74006e0861773219",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 40,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7049180327868853,
"repo_name": "smirnoval/simple-template-engine",
"id": "14ea18ad35bc3b01afddaf75bdcb7ca87ca7cbef",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2428"
},
{
"name": "Python",
"bytes": "20919"
}
],
"symlink_target": ""
} |
import os.path
import shutil
import click
from .movie import find_likely_movie, cleanup_filename
from .config import Settings, update_settings
def misnamer(in_file):
"""Main misnamer function. Process movie files.
"""
directory, filename = os.path.split(in_file)
_, extension = os.path.splitext(filename)
movie = find_likely_movie(cleanup_filename(filename))
# Build destination path
if Settings.get('move_file_enable', False):
directory = os.path.expanduser(Settings['move_file_destination'])
# Perform replacements on filename
new_filename = Settings['rename_format'] + extension
destination = os.path.join(directory, new_filename).format(**movie)
if os.path.exists(destination) and not Settings['overwrite_destination']:
raise RuntimeError('File %s already exists' % destination)
click.secho('Moving %s -> %s' % (in_file, destination), fg='green')
shutil.move(in_file, destination)
@click.command()
@click.option('--out_format', help='Output format')
@click.option('--configuration', help='Configuration file location')
@click.argument('in_file', type=click.Path(exists=True))
def main(out_format, configuration, in_file):
try:
update_settings(configuration)
if out_format is not None:
Settings['rename_format'] = out_format
misnamer(click.format_filename(in_file))
except RuntimeError as e:
click.secho(str(e), bg='red')
except ValueError as e:
click.secho(str(e), bg='red')
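# Usage sketch (comment added; not part of the original module). It assumes Settings
# behaves like a dict and that find_likely_movie() returns a mapping with keys such
# as 'title' and 'year' -- neither is confirmed by this file alone:
#
#   Settings['rename_format'] = '{title} ({year})'
#   Settings['move_file_enable'] = False
#   misnamer('/downloads/heat.1995.mkv')
#   # roughly: Moving /downloads/heat.1995.mkv -> /downloads/Heat (1995).mkv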
| {
"content_hash": "4feb288a8ab4ab6bd37d650059eda98c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 77,
"avg_line_length": 32.38297872340426,
"alnum_prop": 0.6872536136662286,
"repo_name": "ygreenc/misnamer",
"id": "72710697a9ed41990f4e90a80686e093ec073076",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misnamer/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "53"
},
{
"name": "Python",
"bytes": "6276"
}
],
"symlink_target": ""
} |
import asyncio
def f(lock):
yield from lock.acquire()
try:
yield from asyncio.sleep(0.1)
finally:
lock.release()
loop = asyncio.get_event_loop()
lock = asyncio.Lock()
print('pass 1')
loop.run_until_complete(f(lock))
def g(lock):
with (yield from lock):
yield from asyncio.sleep(0.1)
print('pass 2')
loop.run_until_complete(g(lock))
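# Note (comment added; not part of the original demo): f() and g() guard the same
# sleep with the same lock; g() just uses the `with (yield from lock)` context-manager
# idiom. With native coroutines (Python 3.5+) the same pattern would read roughly:
#
#   async def h(lock):
#       async with lock:
#           await asyncio.sleep(0.1)
#
#   print('pass 3')
#   loop.run_until_complete(h(lock))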
| {
"content_hash": "590317505f6f4e9f352e98c041a08f3c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 37,
"avg_line_length": 15.24,
"alnum_prop": 0.6325459317585301,
"repo_name": "asvetlov/europython2015",
"id": "10c8ed2f2a7f0950f1e3fb4e6cc3191ce47ddfd6",
"size": "381",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "locks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19358"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from functools import partial
from hashlib import md5
from operator import attrgetter
import pickle
import os
import uuid
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import bind_method, unicode
from .context import _globals
from .utils import Dispatch
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""
Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def compute(self, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
"""
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
    that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
dsk = {}
keys = []
for v in vals:
# Optimization to avoid merging dictionaries in Delayed values. Reduces
# memory usage for large graphs.
if hasattr(v, '_dasks'):
for d in v._dasks:
dsk.update(d)
else:
dsk.update(v.dask)
keys.append(v._keys())
return dsk, keys
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If the object is a dask collection, it's
computed and the result is returned. Otherwise it's passed through
unchanged.
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults for
the collections.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
get = kwargs.pop('get', None) or _globals['get']
optimizations = (kwargs.pop('optimizations', None) or
_globals.get('optimizations', []))
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
if kwargs.get('optimize_graph', True):
groups = groupby(attrgetter('_optimize'), variables)
groups = {opt: _extract_graph_and_keys(val)
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: [opt(dsk, keys), keys]
for k, (dsk, keys) in groups.items()}
dsk = merge([opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()])
keys = [var._keys() for var in variables]
else:
dsk, keys = _extract_graph_and_keys(variables)
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(next(results_iter))
for a in args)
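# Example (added for illustration, reusing the docstring's `a` above): arguments that
# are not dask collections are passed through to the result unchanged, so plain
# values can be mixed in freely:
#
#   >>> compute(a, 7)   # doctest: +SKIP
#   (45, 7)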
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([arg._optimize(arg.dask, arg._keys()) for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except:
return str(func)
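# Example (added for illustration): two separately constructed partials of the same
# picklable function normalize to the same value, and keyword order is irrelevant
# because keywords are sorted above; this is what makes tokenize() deterministic
# for them.
#
#   >>> from operator import add
#   >>> normalize_function(partial(add, 1)) == normalize_function(partial(add, 1))
#   True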
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice),
identity)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register((tuple, list, set))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@normalize_token.register(object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@normalize_token.register(Base)
def normalize_base(b):
return type(b).__name__, b.key
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
@normalize_token.register(pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
                offset = 0 # root memmaps have mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat).encode('utf-8')).hexdigest()
except TypeError:
data = md5(b'-'.join([str(item).encode() for item in x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
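# Note (added for illustration): keyword arguments are folded into the positional
# args as a trailing dict, so tokenize(1, x=2) hashes a different input than
# tokenize(1, 2). The register_lazy hooks above mean numpy/pandas are only imported
# the first time one of their objects is tokenized.
#
#   >>> tokenize(1, x=2) == tokenize(1, 2)   # doctest: +SKIP
#   False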
| {
"content_hash": "94c36d3a2eb3905c5c520699df6dd99f",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 90,
"avg_line_length": 33.163636363636364,
"alnum_prop": 0.59406328320802,
"repo_name": "gameduell/dask",
"id": "2f24db9006df15406ed6df42bcae5d6b858134b3",
"size": "12768",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dask/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "1712941"
}
],
"symlink_target": ""
} |
import re
import json
import jinja2
import regex
def parse(lines, hilight_words, filters, objref_dict):
"""
    Given filters, returns indices of wanted lines from the log
    Args:
        lines: array of log lines
        hilight_words: array of words that need to be bolded
        filters: dictionary of which filters to apply
        objref_dict: a dictionary where the keys are possible filters
        and the values are the words to be highlighted
    Returns:
        matched_lines: ordered array of indices of lines to display
hilight_words: updated hilight_words
"""
matched_lines = []
if not filters["pod"] and objref_dict:
hilight_words = []
# If the filter is on, look for it in the objref_dict
for k in filters:
if k != "pod" and filters[k] and k in objref_dict:
hilight_words.append(objref_dict[k])
words_re = regex.combine_wordsRE(hilight_words)
for n, line in enumerate(lines):
if words_re.search(line):
matched_lines.append(n)
return matched_lines, hilight_words
def make_dict(data, pod_re, objref_dict):
"""
Given the log file and the failed pod name, returns a dictionary
containing the namespace, UID, and other information associated with the pod
and a bool indicating if the pod name string is in the log file.
This dictionary is lifted from the line with the ObjectReference
"""
pod_in_file = False
lines = unicode(jinja2.escape(data)).split('\n')
for line in lines:
if pod_re.search(line):
pod_in_file = True
objref = regex.objref(line)
containerID = regex.containerID(line)
if containerID and not objref_dict.get("ContainerID"):
objref_dict["ContainerID"] = containerID.group(1)
if objref:
objref_dict_re = objref.group(1)
objref_dict_re = re.sub(r'(\w+):', r'"\1": ', objref_dict_re)
                objref_dict_re = objref_dict_re.replace('&#34;', '"')
objref_dict_re = json.loads(objref_dict_re)
objref_dict_re.update(objref_dict)
return objref_dict_re, pod_in_file
return objref_dict, pod_in_file
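# Usage sketch (comment added; not part of the original module), assuming
# regex.combine_wordsRE builds a pattern matching any of the given words:
#
#   matched, words = parse(lines,
#                          hilight_words=['mypod'],
#                          filters={'pod': '', 'Namespace': True, 'UID': False},
#                          objref_dict={'Namespace': 'kube-system'})
#
# Because the pod filter is empty and the Namespace filter is on, hilight_words is
# reset to ['kube-system'] and `matched` holds the indices of lines mentioning it.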
| {
"content_hash": "1dca16d567ee1aa1a67ad374cd9f1a96",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 33.298507462686565,
"alnum_prop": 0.6230389959659346,
"repo_name": "girishkalele/test-infra",
"id": "2a0ca934b62337e9e9153885f73d7452b8aa92a4",
"size": "2841",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "gubernator/kubelet_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3152"
},
{
"name": "Go",
"bytes": "145720"
},
{
"name": "HTML",
"bytes": "15047"
},
{
"name": "JavaScript",
"bytes": "2973"
},
{
"name": "Makefile",
"bytes": "7289"
},
{
"name": "Protocol Buffer",
"bytes": "3206"
},
{
"name": "Python",
"bytes": "239256"
},
{
"name": "Shell",
"bytes": "53415"
}
],
"symlink_target": ""
} |
import sys
from collections import OrderedDict
try:
from PyQt4 import QtGui, QtCore
qt_widgets = QtGui
except ImportError:
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets as qt_widgets
if sys.version_info >= (3,):
unicode = str
class MultipleFieldsDialog(qt_widgets.QDialog):
"""Dialog with multiple fields stored in a dict, with the label
being the key and the entry being the corresponding value"""
def __init__(self, labels=None, title="Demo", masks=None, parent=None):
super(MultipleFieldsDialog, self).__init__(None,
QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowTitleHint)
if parent is None:
raise Exception("Parent must be a valid object")
self.parent = parent
self.parent.o_dict = OrderedDict()
self.setWindowTitle(title)
# set up a special case for quick demo
if labels is None:
labels = ["Regular field", "Masked field"]
masks = [False, True]
self.setWindowTitle("MultipleFieldsDialog demo")
if masks is not None:
assert len(masks) == len(labels)
layout = qt_widgets.QGridLayout()
layout.setColumnStretch(1, 1)
layout.setColumnMinimumWidth(1, 250)
self._labels_ = []
self.fields = []
for index, choice in enumerate(labels):
self._labels_.append(qt_widgets.QLabel())
self._labels_[index].setText(choice)
self.fields.append(qt_widgets.QLineEdit())
self.fields[index].setText('')
self.parent.o_dict[choice] = ''
if masks is not None and masks[index]:
self.fields[index].setEchoMode(qt_widgets.QLineEdit.Password)
layout.addWidget(self._labels_[index], index, 0)
layout.addWidget(self.fields[index], index, 1)
button_box = qt_widgets.QDialogButtonBox()
confirm_button = button_box.addButton(qt_widgets.QDialogButtonBox.Ok)
layout.addWidget(button_box, index+1, 1)
confirm_button.clicked.connect(self.confirm)
self.setLayout(layout)
self.setWindowTitle(title)
self.show()
self.raise_()
def confirm(self):
"""Selection completed, set the value and close"""
o_dict = self.parent.o_dict
for index, item in enumerate(self._labels_):
o_dict[unicode(item.text())] = unicode(self.fields[index].text())
self.close()
if __name__ == '__main__':
app = qt_widgets.QApplication([])
class Parent:
pass
parent = Parent()
dialog = MultipleFieldsDialog(parent=parent)
dialog.exec_()
print(parent.o_dict) | {
"content_hash": "090c72bade3f722075564872adc64709",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 77,
"avg_line_length": 34.1625,
"alnum_prop": 0.6121478229052324,
"repo_name": "aroberge/easygui_qt",
"id": "b5b96bdb2d916143a221dea1371653464dd6c302",
"size": "2733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easygui_qt/multifields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74719"
}
],
"symlink_target": ""
} |
import os,os.path
import re,time,copy,string,math,random,fcntl
import traceback
from glideinwms.lib import xmlFormat,timeConversion
from glideinwms.lib import rrdSupport
from glideinwms.lib import logSupport
############################################################
#
# Configuration
#
############################################################
class MonitoringConfig:
def __init__(self):
# set default values
# user should modify if needed
self.rrd_step=300 #default to 5 minutes
self.rrd_heartbeat=1800 #default to 30 minutes, should be at least twice the loop time
self.rrd_archives=[('AVERAGE',0.8,1,740), # max precision, keep 2.5 days
('AVERAGE',0.92,12,740), # 1 h precision, keep for a month (30 days)
('AVERAGE',0.98,144,740) # 12 hour precision, keep for a year
]
# The name of the attribute that identifies the glidein
self.monitor_dir="monitor/"
self.rrd_obj=rrdSupport.rrdSupport()
self.my_name="Unknown"
def write_file(self,relative_fname,output_str):
fname=os.path.join(self.monitor_dir,relative_fname)
if not os.path.isdir(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
#print "Writing "+fname
fd=open(fname+".tmp","w")
try:
fd.write(output_str+"\n")
finally:
fd.close()
tmp2final(fname)
return
def establish_dir(self,relative_dname):
dname=os.path.join(self.monitor_dir,relative_dname)
if not os.path.isdir(dname):
os.mkdir(dname)
return
def write_rrd_multi(self,relative_fname,ds_type,time,val_dict,min_val=None,max_val=None):
"""
Create a RRD file, using rrdtool.
"""
if self.rrd_obj.isDummy():
return # nothing to do, no rrd bin no rrd creation
for tp in ((".rrd",self.rrd_archives),):
rrd_ext,rrd_archives=tp
fname=os.path.join(self.monitor_dir,relative_fname+rrd_ext)
#print "Writing RRD "+fname
if not os.path.isfile(fname):
#print "Create RRD "+fname
if min_val is None:
min_val='U'
if max_val is None:
max_val='U'
ds_names=val_dict.keys()
ds_names.sort()
ds_arr=[]
for ds_name in ds_names:
ds_arr.append((ds_name,ds_type,self.rrd_heartbeat,min_val,max_val))
self.rrd_obj.create_rrd_multi(fname,
self.rrd_step,rrd_archives,
ds_arr)
#print "Updating RRD "+fname
try:
self.rrd_obj.update_rrd_multi(fname,time,val_dict)
except Exception,e:
logSupport.log.error("Failed to update %s" % fname)
#logSupport.log.exception(traceback.format_exc())
return
#########################################################################################################################################
#
# condorQStats
#
# This class handles the data obtained from condor_q
#
#########################################################################################################################################
class groupStats:
def __init__(self):
self.data={'factories':{},'states':{},'totals':{}}
self.updated=time.time()
self.files_updated=None
self.attributes = {
'Jobs':("Idle","OldIdle","Running","Total"),
'Glideins':("Idle","Running","Total"),
'MatchedJobs':("Idle","EffIdle","OldIdle","Running","RunningHere"),
#'MatchedGlideins':("Total","Idle","Running","Failed","TotalCores","IdleCores","RunningCores"),
'MatchedGlideins':("Total","Idle","Running","Failed"),
'MatchedCores':("Total","Idle","Running"),
'Requested':("Idle","MaxRun")
}
# only these will be states, all other names are assumed to be factories
self.states_names=('Unmatched','MatchedUp','MatchedDown')
def logJobs(self,jobs_data):
el={}
self.data['totals']['Jobs']=el
for k in self.attributes['Jobs']:
if jobs_data.has_key(k):
el[k]=int(jobs_data[k])
self.updated=time.time()
def logGlideins(self,slots_data):
el={}
self.data['totals']['Glideins']=el
for k in self.attributes['Glideins']:
if slots_data.has_key(k):
el[k]=int(slots_data[k])
self.updated=time.time()
def logMatchedJobs(self, factory, idle, effIdle, oldIdle, running, realRunning):
factory_or_state_d = self.get_factory_dict(factory)
factory_or_state_d['MatchedJobs'] = {self.attributes['MatchedJobs'][0]: int(idle),
self.attributes['MatchedJobs'][1]: int(effIdle),
self.attributes['MatchedJobs'][2]: int(oldIdle),
self.attributes['MatchedJobs'][3]: int(running),
self.attributes['MatchedJobs'][4]: int(realRunning)
}
self.update=time.time()
def logFactDown(self, factory, isDown):
factory_or_state_d = self.get_factory_dict(factory)
if isDown:
factory_or_state_d['Down'] = 'Down'
else:
factory_or_state_d['Down'] = 'Up'
self.updated = time.time()
def logMatchedGlideins(self, factory, total, idle, running, failed, totalcores, idlecores, runningcores):
factory_or_state_d = self.get_factory_dict(factory)
factory_or_state_d['MatchedGlideins'] = {
self.attributes['MatchedGlideins'][0]: int(total),
self.attributes['MatchedGlideins'][1]: int(idle),
self.attributes['MatchedGlideins'][2]: int(running),
self.attributes['MatchedGlideins'][3]: int(failed),
}
factory_or_state_d['MatchedCores'] = {
self.attributes['MatchedCores'][0]: int(totalcores),
self.attributes['MatchedCores'][1]: int(idlecores),
self.attributes['MatchedCores'][2]: int(runningcores),
}
self.update=time.time()
def logFactAttrs(self, factory, attrs, blacklist):
factory_or_state_d = self.get_factory_dict(factory)
factory_or_state_d['Attributes'] = {}
for attr in attrs:
if not attr in blacklist:
factory_or_state_d['Attributes'][attr] = attrs[attr]
self.update=time.time()
def logFactReq(self, factory, reqIdle, reqMaxRun, params):
factory_or_state_d = self.get_factory_dict(factory)
factory_or_state_d['Requested'] = {self.attributes['Requested'][0]: int(reqIdle),
self.attributes['Requested'][1]: int(reqMaxRun),
'Parameters': copy.deepcopy(params)
}
self.updated = time.time()
def get_factories_data(self):
return copy.deepcopy(self.data['factories'])
def get_xml_factories_data(self,indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=""):
data=self.get_factories_data()
return xmlFormat.dict2string(data,
dict_name='factories', el_name='factory',
subtypes_params={"class":{'subclass_params':{'Requested':{'dicts_params':{'Parameters':{'el_name':'Parameter'}}}}}},
indent_tab=indent_tab,leading_tab=leading_tab)
def get_states_data(self):
return copy.deepcopy(self.data['states'])
def get_xml_states_data(self,indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=""):
data=self.get_states_data()
return xmlFormat.dict2string(data,
dict_name='states', el_name='state',
subtypes_params={"class":{'subclass_params':{'Requested':{'dicts_params':{'Parameters':{'el_name':'Parameter'}}}}}},
indent_tab=indent_tab,leading_tab=leading_tab)
def get_xml_updated(self,indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=""):
return xmlFormat.time2xml(self.updated, "updated", indent_tab=xmlFormat.DEFAULT_TAB, leading_tab="")
def get_total(self):
total = {
'MatchedJobs':None,
'Requested':None,
'MatchedGlideins':None,
'MatchedCores':None,
}
numtypes=(type(1),type(1L),type(1.0))
for f in self.data['factories'].keys():
fa=self.data['factories'][f]
for w in fa.keys():
if total.has_key(w): # ignore eventual not supported classes
el=fa[w]
tel=total[w]
if tel is None:
# first one, just copy over
total[w]={}
tel=total[w]
for a in el.keys():
if type(el[a]) in numtypes: # copy only numbers
tel[a]=el[a]
else:
# successive, sum
for a in el.keys():
if type(el[a]) in numtypes: # consider only numbers
if tel.has_key(a):
tel[a]+=el[a]
# if other frontends did't have this attribute, ignore
# if any attribute from prev. frontends are not in the current one, remove from total
for a in tel.keys():
if not el.has_key(a):
del tel[a]
elif not (type(el[a]) in numtypes):
del tel[a]
for w in total.keys():
if total[w] is None:
del total[w] # remove entry if not defined
total.update(copy.deepcopy(self.data['totals']))
return total
def get_xml_total(self,indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=""):
total=self.get_total()
return xmlFormat.class2string(total,
inst_name="total",
indent_tab=indent_tab,leading_tab=leading_tab)
def write_file(self):
global monitoringConfig
if (self.files_updated is not None) and ((self.updated-self.files_updated)<5):
# files updated recently, no need to redo it
return
        # write snapshot file
xml_str=('<?xml version="1.0" encoding="ISO-8859-1"?>\n\n'+
'<VOFrontendGroupStats>\n'+
self.get_xml_updated(indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=xmlFormat.DEFAULT_TAB)+"\n"+
self.get_xml_factories_data(indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=xmlFormat.DEFAULT_TAB)+"\n"+
self.get_xml_states_data(indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=xmlFormat.DEFAULT_TAB)+"\n"+
self.get_xml_total(indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=xmlFormat.DEFAULT_TAB)+"\n"+
"</VOFrontendGroupStats>\n")
monitoringConfig.write_file("frontend_status.xml",xml_str)
# update RRDs
total_el = self.get_total()
self.write_one_rrd("total",total_el)
data = self.get_factories_data()
for fact in data.keys():
self.write_one_rrd("factory_%s"%sanitize(fact),data[fact],1)
data = self.get_states_data()
for fact in data.keys():
self.write_one_rrd("state_%s"%sanitize(fact),data[fact],1)
self.files_updated=self.updated
return
################################################
    # PRIVATE - Used to select the right dictionary
def get_factory_dict(self,factory):
if factory in self.states_names:
factories = self.data['states']
else:
factories = self.data['factories']
if not factory in factories:
factories[factory] = {}
return factories[factory]
###############################
# PRIVATE - Used by write_file
# Write one RRD
def write_one_rrd(self,name,data,fact=0):
global monitoringConfig
val_dict={}
if fact==0:
type_strings = {
'Jobs':'Jobs',
'Glideins':'Glidein',
'MatchedJobs':'MatchJob',
'MatchedGlideins':'MatchGlidein',
'MatchedCores':'MatchCore',
'Requested':'Req'
}
else:
type_strings = {
'MatchedJobs':'MatchJob',
'MatchedGlideins':'MatchGlidein',
'MatchedCores':'MatchCore',
'Requested':'Req'
}
#init, so that all get created properly
for tp in self.attributes.keys():
if tp in type_strings.keys():
tp_str=type_strings[tp]
attributes_tp=self.attributes[tp]
for a in attributes_tp:
val_dict["%s%s"%(tp_str,a)]=None
for tp in data:
# type - Jobs,Slots
if not (tp in self.attributes.keys()):
continue
if not (tp in type_strings.keys()):
continue
tp_str=type_strings[tp]
attributes_tp=self.attributes[tp]
fe_el_tp=data[tp]
for a in fe_el_tp.keys():
if a in attributes_tp:
a_el=fe_el_tp[a]
if type(a_el)!=type({}): # ignore subdictionaries
val_dict["%s%s"%(tp_str,a)]=a_el
monitoringConfig.establish_dir("%s"%name)
monitoringConfig.write_rrd_multi("%s/Status_Attributes"%name,
"GAUGE",self.updated,val_dict)
########################################################################
class factoryStats:
def __init__(self):
self.data={}
self.updated=time.time()
self.files_updated=None
self.attributes={'Jobs':("Idle","OldIdle","Running","Total"),
'Matched':("Idle","OldIdle","Running","Total"),
'Requested':("Idle","MaxRun"),
'Slots':("Idle","Running","Total")}
def logJobs(self,client_name,qc_status):
if self.data.has_key(client_name):
t_el=self.data[client_name]
else:
t_el={}
self.data[client_name]=t_el
el={}
t_el['Status']=el
status_pairs=((1,"Idle"), (2,"Running"), (5,"Held"), (1001,"Wait"),(1002,"Pending"),(1010,"StageIn"),(1100,"IdleOther"),(4010,"StageOut"))
for p in status_pairs:
nr, status=p
if qc_status.has_key(nr):
el[status]=int(qc_status[nr])
else:
el[status]=0
self.updated=time.time()
def logRequest(self,client_name,requests,params):
"""
        requests is a dictionary of requests
        params is a dictionary of parameters
At the moment, it looks only for
'IdleGlideins'
'MaxRunningGlideins'
"""
if self.data.has_key(client_name):
t_el=self.data[client_name]
else:
t_el={}
self.data[client_name]=t_el
el={}
t_el['Requested']=el
if requests.has_key('IdleGlideins'):
el['Idle']=int(requests['IdleGlideins'])
if requests.has_key('MaxRunningGlideins'):
el['MaxRun']=int(requests['MaxRunningGlideins'])
el['Parameters']=copy.deepcopy(params)
self.updated=time.time()
def logClientMonitor(self,client_name,client_monitor,client_internals):
"""
        client_monitor is a dictionary of monitoring info
        client_internals is a dictionary of internals
At the moment, it looks only for
'Idle'
'Running'
'GlideinsIdle'
'GlideinsRunning'
'GlideinsTotal'
'LastHeardFrom'
"""
if self.data.has_key(client_name):
t_el=self.data[client_name]
else:
t_el={}
self.data[client_name]=t_el
el={}
t_el['ClientMonitor']=el
for karr in (('Idle','JobsIdle'),('Running','JobsRunning'),('GlideinsIdle','GlideIdle'),('GlideinsRunning','GlideRunning'),('GlideinsTotal','GlideTotal')):
ck,ek=karr
if client_monitor.has_key(ck):
el[ek]=int(client_monitor[ck])
if client_internals.has_key('LastHeardFrom'):
el['InfoAge']=int(time.time()-long(client_internals['LastHeardFrom']))
el['InfoAgeAvgCounter']=1 # used for totals since we need an avg in totals, not absnum
self.updated=time.time()
def get_data(self):
data1=copy.deepcopy(self.data)
for f in data1.keys():
fe=data1[f]
for w in fe.keys():
el=fe[w]
for a in el.keys():
if a[-10:]=='AvgCounter': # do not publish avgcounter fields... they are internals
del el[a]
return data1
def get_xml_data(self,indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=""):
data=self.get_data()
return xmlFormat.dict2string(data,
dict_name="frontends",el_name="frontend",
subtypes_params={"class":{'subclass_params':{'Requested':{'dicts_params':{'Parameters':{'el_name':'Parameter'}}}}}},
indent_tab=indent_tab,leading_tab=leading_tab)
def get_total(self):
total={'Status':None,'Requested':None,'ClientMonitor':None}
numtypes=(type(1),type(1L),type(1.0))
for f in self.data.keys():
fe=self.data[f]
for w in fe.keys():
if total.has_key(w): # ignore eventual not supported classes
el=fe[w]
tel=total[w]
if tel is None:
# first one, just copy over
total[w]={}
tel=total[w]
for a in el.keys():
if type(el[a]) in numtypes: # copy only numbers
tel[a]=el[a]
else:
# successive, sum
for a in el.keys():
if type(el[a]) in numtypes: # consider only numbers
if tel.has_key(a):
tel[a]+=el[a]
# if other frontends did't have this attribute, ignore
# if any attribute from prev. frontends are not in the current one, remove from total
for a in tel.keys():
if not el.has_key(a):
del tel[a]
elif not (type(el[a]) in numtypes):
del tel[a]
for w in total.keys():
if total[w] is None:
del total[w] # remove entry if not defined
else:
tel=total[w]
for a in tel.keys():
if a[-10:]=='AvgCounter':
# this is an average counter, calc the average of the referred element
# like InfoAge=InfoAge/InfoAgeAvgCounter
aorg=a[:-10]
tel[aorg]=tel[aorg]/tel[a]
# the avgcount totals are just for internal purposes
del tel[a]
return total
def get_xml_total(self,indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=""):
total=self.get_total()
return xmlFormat.class2string(total,
inst_name="total",
indent_tab=indent_tab,leading_tab=leading_tab)
def get_xml_updated(self,indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=""):
return xmlFormat.time2xml(self.updated, "updated", indent_tab=xmlFormat.DEFAULT_TAB,leading_tab="")
def write_file(self):
global monitoringConfig
if (self.files_updated is not None) and ((self.updated-self.files_updated)<5):
# files updated recently, no need to redo it
return
        # write snapshot file
xml_str=('<?xml version="1.0" encoding="ISO-8859-1"?>\n\n'+
'<glideFactoryEntryQStats>\n'+
self.get_xml_updated(indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=xmlFormat.DEFAULT_TAB)+"\n"+
self.get_xml_data(indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=xmlFormat.DEFAULT_TAB)+"\n"+
self.get_xml_total(indent_tab=xmlFormat.DEFAULT_TAB,leading_tab=xmlFormat.DEFAULT_TAB)+"\n"+
"</glideFactoryEntryQStats>\n")
monitoringConfig.write_file("schedd_status.xml",xml_str)
data=self.get_data()
total_el=self.get_total()
# update RRDs
type_strings={'Status':'Status','Requested':'Req','ClientMonitor':'Client'}
for fe in [None]+data.keys():
if fe is None: # special key == Total
fe_dir="total"
fe_el=total_el
else:
fe_dir="frontend_"+fe
fe_el=data[fe]
val_dict={}
#init, so that all get created properly
for tp in self.attributes.keys():
tp_str=type_strings[tp]
attributes_tp=self.attributes[tp]
for a in attributes_tp:
val_dict["%s%s"%(tp_str,a)]=None
monitoringConfig.establish_dir(fe_dir)
for tp in fe_el.keys():
# type - Status, Requested or ClientMonitor
if not (tp in self.attributes.keys()):
continue
tp_str=type_strings[tp]
attributes_tp=self.attributes[tp]
fe_el_tp=fe_el[tp]
for a in fe_el_tp.keys():
if a in attributes_tp:
a_el=fe_el_tp[a]
if type(a_el)!=type({}): # ignore subdictionaries
val_dict["%s%s"%(tp_str,a)]=a_el
monitoringConfig.write_rrd_multi("%s/Status_Attributes"%fe_dir,
"GAUGE",self.updated,val_dict)
self.files_updated=self.updated
return
############### P R I V A T E ################
##################################################
def tmp2final(fname):
"""
This exact method is also in glideFactoryMonitoring.py
"""
try:
os.remove(fname+"~")
except:
pass
try:
os.rename(fname,fname+"~")
except:
pass
try:
os.rename(fname+".tmp",fname)
except:
print "Failed renaming %s.tmp into %s"%(fname,fname)
logSupport.log.error("Failed renaming %s.tmp into %s" % (fname,fname))
return
##################################################
def sanitize(name):
good_chars=string.ascii_letters+string.digits+".-"
outarr=[]
for i in range(len(name)):
if name[i] in good_chars:
outarr.append(name[i])
else:
outarr.append("_")
return string.join(outarr,"")
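# Example (comment added for illustration): any character outside letters, digits,
# '.' and '-' becomes an underscore, so a factory key such as
# "CMS_T2@cern.ch/entry1" is written to disk as "CMS_T2_cern.ch_entry1".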
##################################################
# global configuration of the module
monitoringConfig=MonitoringConfig()
def write_frontend_descript_xml(frontendDescript, monitor_dir):
"""
Writes out the frontend descript.xml file in the monitor web area.
@type frontendDescript: FrontendDescript
@param frontendDescript: contains the data in the frontend.descript file in the frontend instance dir
@type monitor_dir: string
    @param monitor_dir: filepath to the monitor dir in the frontend instance dir
"""
frontend_data = copy.deepcopy(frontendDescript.data)
frontend_str = '<frontend FrontendName="%s"' % frontend_data['FrontendName'] + '/>'
dis_link_txt = 'display_txt="%s" href_link="%s"' % (frontend_data['MonitorDisplayText'], frontend_data['MonitorLink'])
footer_str = '<monitor_footer ' + dis_link_txt + '/>'
output = '<?xml version="1.0" encoding="ISO-8859-1"?>\n\n' + \
'<glideinFrontendDescript>\n' \
+ xmlFormat.time2xml(time.time(), "updated", indent_tab=xmlFormat.DEFAULT_TAB, leading_tab=xmlFormat.DEFAULT_TAB) + "\n" \
+ xmlFormat.DEFAULT_TAB + frontend_str + "\n" \
+ xmlFormat.DEFAULT_TAB + footer_str + "\n" \
+ '</glideinFrontendDescript>'
fname = os.path.join(monitor_dir, 'descript.xml')
try:
f = open(fname + '.tmp', 'wb')
try:
f.write(output)
finally:
f.close()
tmp2final(fname)
except IOError:
logSupport.log.exception("Error writing out the frontend descript.xml: ")
| {
"content_hash": "466a10f70924210c5b2642d4b0f7cd5b",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 163,
"avg_line_length": 38.09763313609467,
"alnum_prop": 0.5031063135823561,
"repo_name": "bbockelm/glideinWMS",
"id": "6d8254c191b6246b008aef607515820c7a896062",
"size": "25946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/glideinFrontendMonitoring.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "4617"
},
{
"name": "HTML",
"bytes": "380368"
},
{
"name": "JavaScript",
"bytes": "11648"
},
{
"name": "Python",
"bytes": "2005860"
},
{
"name": "Shell",
"bytes": "239244"
},
{
"name": "XSLT",
"bytes": "4667"
}
],
"symlink_target": ""
} |
"""Example DAG demonstrating the DummyOperator and a custom DummySkipOperator which skips by default."""
import airflow
from airflow.exceptions import AirflowSkipException
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
args = {
'owner': 'Airflow',
'start_date': airflow.utils.dates.days_ago(2),
}
# Create some placeholder operators
class DummySkipOperator(DummyOperator):
"""Dummy operator which always skips the task."""
ui_color = '#e8b7e4'
def execute(self, context):
raise AirflowSkipException
def create_test_pipeline(suffix, trigger_rule, dag_):
"""
Instantiate a number of operators for the given DAG.
:param str suffix: Suffix to append to the operator task_ids
:param str trigger_rule: TriggerRule for the join task
:param DAG dag_: The DAG to run the operators on
"""
skip_operator = DummySkipOperator(task_id='skip_operator_{}'.format(suffix), dag=dag_)
always_true = DummyOperator(task_id='always_true_{}'.format(suffix), dag=dag_)
join = DummyOperator(task_id=trigger_rule, dag=dag_, trigger_rule=trigger_rule)
final = DummyOperator(task_id='final_{}'.format(suffix), dag=dag_)
skip_operator >> join
always_true >> join
join >> final
dag = DAG(dag_id='example_skip_dag', default_args=args)
create_test_pipeline('1', 'all_success', dag)
create_test_pipeline('2', 'one_success', dag)
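# Resulting shape of each sub-pipeline (comment added for illustration):
#
#   skip_operator_N --+
#                     +--> <trigger_rule> --> final_N
#   always_true_N  ---+
#
# The intent is that the 'all_success' join is skipped (one upstream task skips),
# while the 'one_success' join still runs because always_true_2 succeeds.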
| {
"content_hash": "546660f762e2b61bc8ecc034bb76e06d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 104,
"avg_line_length": 32.52272727272727,
"alnum_prop": 0.7120894479385046,
"repo_name": "Fokko/incubator-airflow",
"id": "205cd7a6404f1bd7e7f31e1027fe518fdc339aa0",
"size": "2243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/example_dags/example_skip_dag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
import warnings
if sys.version > '3':
xrange = range
basestring = str
from pyspark import SparkContext
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
class MLUtils(object):
"""
Helper methods to load, save and pre-process data used in MLlib.
"""
@staticmethod
def _parse_libsvm_line(line, multiclass=None):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
if multiclass is not None:
warnings.warn("deprecated", DeprecationWarning)
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in xrange(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values
@staticmethod
def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items)
@staticmethod
def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None, multiclass=None):
"""
Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param numFeatures: number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
                            features may not be present in certain files,
which leads to inconsistent feature
dimensions.
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
"""
from pyspark.mllib.regression import LabeledPoint
if multiclass is not None:
warnings.warn("deprecated", DeprecationWarning)
lines = sc.textFile(path, minPartitions)
parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
if numFeatures <= 0:
parsed.cache()
numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
@staticmethod
def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])), \
LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir)
@staticmethod
def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
@return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])), \
LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
@staticmethod
def appendBias(data):
"""
Returns a new vector with `1.0` (bias) appended to
the end of the input vector.
"""
vec = _convert_to_vector(data)
if isinstance(vec, SparseVector):
newIndices = np.append(vec.indices, len(vec))
newValues = np.append(vec.values, 1.0)
return SparseVector(len(vec) + 1, newIndices, newValues)
else:
return _convert_to_vector(np.append(vec.toArray(), 1.0))
@staticmethod
def loadVectors(sc, path):
"""
Loads vectors saved using `RDD[Vector].saveAsTextFile`
with the default number of partitions.
"""
return callMLlibFunc("loadVectors", sc, path)
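# Example (comment added for illustration) of the parsing done by
# MLUtils._parse_libsvm_line: a line such as "1.0 1:2.5 3:1.5" yields the label 1.0
# with zero-based indices [0, 2] and values [2.5, 1.5], which loadLibSVMFile then
# wraps into LabeledPoint(1.0, SparseVector(numFeatures, [0, 2], [2.5, 1.5])).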
class Saveable(object):
"""
Mixin for models and transformers which may be saved as files.
"""
def save(self, sc, path):
"""
Save this model to the given path.
This saves:
* human-readable (JSON) model metadata to path/metadata/
* Parquet formatted data to path/data/
The model may be loaded using py:meth:`Loader.load`.
:param sc: Spark context used to save model data.
:param path: Path specifying the directory in which to save
this model. If the directory already exists,
this method throws an exception.
"""
raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
"""
Mixin for models that provide save() through their Scala
implementation.
"""
def save(self, sc, path):
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
self._java_model.save(sc._jsc.sc(), path)
class Loader(object):
"""
Mixin for classes which can load saved models from files.
"""
@classmethod
def load(cls, sc, path):
"""
Load a model from the given path. The model should have been
saved using py:meth:`Saveable.save`.
:param sc: Spark context used for loading model files.
:param path: Path specifying the directory to which the model
was saved.
:return: model instance
"""
        raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
"""
Mixin for classes which can load saved models using its Scala
implementation.
"""
@classmethod
def _java_loader_class(cls):
"""
Returns the full class name of the Java loader. The default
implementation replaces "pyspark" by "org.apache.spark" in
the Python full class name.
"""
java_package = cls.__module__.replace("pyspark", "org.apache.spark")
return ".".join([java_package, cls.__name__])
@classmethod
def _load_java(cls, sc, path):
"""
Load a Java model from the given path.
"""
java_class = cls._java_loader_class()
java_obj = sc._jvm
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
return java_obj.load(sc._jsc.sc(), path)
@classmethod
def load(cls, sc, path):
java_model = cls._load_java(sc, path)
return cls(java_model)
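# Example (comment added for illustration) of the name mapping in
# JavaLoader._java_loader_class: a loader class defined in pyspark.mllib.clustering
# resolves to org.apache.spark.mllib.clustering.<ClassName>, whose Scala load()
# method is then invoked by _load_java above.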
class LinearDataGenerator(object):
"""Utils for generating linear data"""
@staticmethod
def generateLinearInput(intercept, weights, xMean, xVariance,
nPoints, seed, eps):
"""
:param: intercept bias factor, the term c in X'w + c
:param: weights feature vector, the term w in X'w + c
:param: xMean Point around which the data X is centered.
:param: xVariance Variance of the given data
:param: nPoints Number of points to be generated
:param: seed Random Seed
:param: eps Used to scale the noise. If eps is set high,
the amount of gaussian noise added is more.
Returns a list of LabeledPoints of length nPoints
"""
weights = [float(weight) for weight in weights]
xMean = [float(mean) for mean in xMean]
xVariance = [float(var) for var in xVariance]
return list(callMLlibFunc(
"generateLinearInputWrapper", float(intercept), weights, xMean,
xVariance, int(nPoints), int(seed), float(eps)))
@staticmethod
def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate a RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept))
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[2]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| {
"content_hash": "d915c2bd0c8ad0dcbf036e9639289ea9",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 98,
"avg_line_length": 36.890965732087224,
"alnum_prop": 0.5971964195237291,
"repo_name": "tophua/spark1.52",
"id": "10a1e4b3eb0fc269373e0525e1d1f0962d7ef623",
"size": "12627",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/mllib/util.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26914"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "15314"
},
{
"name": "Dockerfile",
"bytes": "4597"
},
{
"name": "HiveQL",
"bytes": "2018996"
},
{
"name": "Java",
"bytes": "1763581"
},
{
"name": "JavaScript",
"bytes": "68648"
},
{
"name": "Makefile",
"bytes": "7771"
},
{
"name": "Python",
"bytes": "1552537"
},
{
"name": "R",
"bytes": "452786"
},
{
"name": "Roff",
"bytes": "23131"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "16031983"
},
{
"name": "Shell",
"bytes": "147300"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "154646"
}
],
"symlink_target": ""
} |
from django.db.models import Q
from rest_framework import authentication
from rest_framework import exceptions
from api.models.User import User
from api.models.Organization import Organization
from api.models.OrganizationType import OrganizationType
from api.utils import get_firstname_lastname
class UserAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
header_username = request.META.get('HTTP_SMAUTH_USER', request.META.get(
'HTTP_SMAUTH_UNIVERSALID'))
header_user_guid = request.META.get('HTTP_SMAUTH_USERGUID')
header_user_dir = request.META.get('HTTP_SMAUTH_DIRNAME')
header_user_id = request.META.get('HTTP_SMAUTH_UNIVERSALID')
header_user_email = request.META.get('HTTP_SMAUTH_USEREMAIL')
header_user_displayname = request.META.get(
'HTTP_SMAUTH_USERDISPLAYNAME')
header_user_type = request.META.get('HTTP_SMAUTH_USERTYPE')
if not header_user_guid and not header_user_id:
raise exceptions.AuthenticationFailed('No SiteMinder headers found')
government_user = False
if header_user_type == 'Internal' and header_user_dir == 'IDIR':
government_user = True
gov_organization = Organization.objects.get(
type=OrganizationType.objects.get(type="Government"))
try:
user = User.objects.get(Q(authorization_guid=header_user_guid) |
Q(authorization_id=header_user_id))
# First time logging in, map the GUID to the user and set fname & lname
if user.authorization_guid is None:
user.authorization_guid = header_user_guid
first_name, last_name = get_firstname_lastname(
header_user_displayname, header_user_type)
user.first_name = first_name if first_name else ""
user.last_name = last_name if last_name else ""
user.username = user.username if user.username else header_username
user.authorization_email = header_user_email
user.authorization_id = header_user_id
user.authorization_directory = header_user_dir
user.display_name = header_user_displayname
if government_user:
user.organization = gov_organization
user.save()
except User.DoesNotExist:
# Log this attempt
# raise exceptions.AuthenticationFailed('User is not authorized.')
return (None, None)
return (user, None)
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return (None, None) | {
"content_hash": "81740ba105d1e7a9c5085f056f31f334",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 83,
"avg_line_length": 42.984375,
"alnum_prop": 0.6401308615049073,
"repo_name": "swcurran/tfrs",
"id": "706db968f7a415b5b259c3f74cfd0b5218c77846",
"size": "2797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/api/authentication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1066"
},
{
"name": "CSS",
"bytes": "599020"
},
{
"name": "Groovy",
"bytes": "11736"
},
{
"name": "HTML",
"bytes": "354477"
},
{
"name": "JavaScript",
"bytes": "269355"
},
{
"name": "Python",
"bytes": "441575"
},
{
"name": "Shell",
"bytes": "7363"
}
],
"symlink_target": ""
} |
"""Pluralize English nouns (stage 4)
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
Command line usage:
$ python plural4.py noun
nouns
"""
__author__ = "Mark Pilgrim ([email protected])"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/03/18 16:43:37 $"
__copyright__ = "Copyright (c) 2004 Mark Pilgrim"
__license__ = "Python"
import re
def buildMatchAndApplyFunctions((pattern, search, replace)):
matchFunction = lambda word: re.search(pattern, word)
applyFunction = lambda word: re.sub(search, replace, word)
return (matchFunction, applyFunction)
patterns = \
(
('[sxz]$', '$', 'es'),
('[^aeioudgkprt]h$', '$', 'es'),
('(qu|[^aeiou])y$', 'y$', 'ies'),
('$', '$', 's')
)
rules = map(buildMatchAndApplyFunctions, patterns)
def plural(noun):
for matchesRule, applyRule in rules:
if matchesRule(noun):
return applyRule(noun)
if __name__ == '__main__':
import sys
if sys.argv[1:]:
print plural(sys.argv[1])
else:
print __doc__
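# Examples of the rules above (comment added for illustration):
#   plural('box')     -> 'boxes'      # '[sxz]$'
#   plural('coach')   -> 'coaches'    # '[^aeioudgkprt]h$'
#   plural('vacancy') -> 'vacancies'  # '(qu|[^aeiou])y$'
#   plural('day')     -> 'days'       # catch-all '$' rule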
| {
"content_hash": "a25ecdf7593b902a5aa0f9053a48e68e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 25.75,
"alnum_prop": 0.6248896734333628,
"repo_name": "tapomayukh/projects_in_python",
"id": "9ac406fe64d31fd65c6fada5c679db0a3ebd7675",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/plural/stage4/plural4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "4903"
},
{
"name": "Python",
"bytes": "4451912"
}
],
"symlink_target": ""
} |
from s3 import S3CustomController
THEME = "skeleton"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
self._view(THEME, "index.html")
return dict()
# END =========================================================================
| {
"content_hash": "bac47b089e0d0e12d5c1f57496a59b59",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.35792349726775957,
"repo_name": "gnarula/eden_deployment",
"id": "c3614bbab6844e4e7c9cfad5d82d225240ec4ccf",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "private/templates/skeleton/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1305178"
},
{
"name": "JavaScript",
"bytes": "16338028"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28218113"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2491556"
}
],
"symlink_target": ""
} |
def get_starting_line(logFile, delimiter, skipln):
raw_log = open(logFile) # open file
raw_lines = raw_log.read().split('\n') # split file by line
line_cnt = 0
for line in raw_lines:
if delimeter in str(line): # look for delimiter
line_cnt += skipln # add offset
break;
line_cnt += 1
return raw_lines,line_cnt
# MAIN
#############################
import csv
import datetime
import sys
for logFile in sys.argv[1:]: # iterate through arguments - each of which is a log file
# read lines from the log file and identify the start of the data that will be
# copied into the CSV
log_lines, line_cnt = get_starting_line(logFile,"# Broadband LOG Results",3)
# prepare CSV writer object
dest_name = logFile+'.csv' # append '.csv' to create CSV file name
log_csv = open(dest_name,'wb')
log_writer = csv.writer(log_csv,dialect='excel')
for line in log_lines[line_cnt:]: # iterate through lines in the log file
words = line.split() # split line by whitespace
if len(words) >= 4: # read until a line is unreadable - the end of the data
# parse the line day/date and data
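            # e.g. an assumed line "2016-03-14 12:30:05 LAeq 48.3" would give
            # words[0]='2016-03-14', words[1]='12:30:05', words[3]='48.3'
            # (the label in words[2] here is illustrative only)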
year,month,day = words[0].split('-')
hours,mins,secs = words[1].split(':')
data = words[3]
# convert to numbers
year = int(year)
month = int(month)
day = int(day)
hours = int(hours)
mins = int(mins)
secs = int(secs)
data = float(data)
# write row to CSV
log_writer.writerow([year,month,day,hours,mins,secs,data])
else:
break # break when line is unreadable
print "Decibel readings CSV File created with filename '%s'"%dest_name
| {
"content_hash": "27c4c3229438606ef0dad52bb2a39440",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 86,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.5933257918552036,
"repo_name": "zane-weissman/XL2-analysis",
"id": "a12af44a1dd85829549015ba3457323283cc847f",
"size": "3181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CreateCSV.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "13362"
},
{
"name": "Python",
"bytes": "3181"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class DataBaseConfig(AppConfig):
name = 'data_base'
| {
"content_hash": "e0233a1452b5a69cc76103cafe204315",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 19,
"alnum_prop": 0.7518796992481203,
"repo_name": "xuqiantong/NY_Auto",
"id": "ee4c321e15b826b01f7a2b11b4a8ae6d6428f6ef",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/data_base/apps.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47443"
},
{
"name": "HTML",
"bytes": "11630"
},
{
"name": "JavaScript",
"bytes": "425994674"
},
{
"name": "Python",
"bytes": "42707"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from functools import partial
from itertools import islice
from pathlib import Path
from urllib.parse import urljoin
from typing import NamedTuple, Iterable, Optional
from xml.etree import ElementTree as ET
import gzip
import sys
import mimetypes
DEFAULT = 'http://www.sitemaps.org/schemas/sitemap/0.9'
RS = 'http://www.openarchives.org/rs/terms/'
ET.register_namespace('', DEFAULT)
ET.register_namespace('rs', RS)
def qname(ns, lname):
return str(ET.QName(ns, lname))
def elem(lname, txt=None, ns=DEFAULT, **kws):
el = ET.Element(qname(ns, lname), **kws)
if txt is not None:
el.text = txt
return el, partial(subelem, el, ns=ns)
def subelem(parent, lname, txt=None, ns=DEFAULT, **kws):
el = ET.SubElement(parent, qname(ns, lname), **kws)
if txt is not None:
el.text = txt
return el, partial(subelem, el, ns=ns)
def write_xml(root: ET.Element, outfile: Path, compress=False, size=0):
tree = ET.ElementTree(root)
if compress:
sfx = outfile.suffix
outfile = outfile.with_suffix(f'{sfx}.gz')
note = f' ({size:,} items)' if size else ''
print(f'Writing: {outfile}{note}', file=sys.stderr)
f = gzip.GzipFile(outfile, 'wb') if compress else outfile.open('wb')
with f as fp: # type: ignore
tree.write(fp, encoding='utf-8', xml_declaration=True)
def chunk_by(seq, n):
it = iter(seq)
while True:
chunk = list(islice(it, n))
if not chunk:
return
yield chunk
def normalize_timestamp(s : str) -> str:
s = s.replace('Z', '+00')
dtime, offsign, tz = s.rpartition('+' if '+' in s else '-')
dtime = dtime.replace(' ', 'T')
if '.' in dtime:
whole, pastdot = dtime.split('.', 1)
fraction = pastdot
fraction += '0' * (3 - len(fraction))
dtime = f"{whole}.{fraction}"
else:
dtime += '.0'
tz = 'Z' if tz == '00' else f"{offsign}{tz}"
return f'{dtime}{tz}'
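# For illustration (derived from the function above):
#   normalize_timestamp('2021-01-02 03:04:05+00')    -> '2021-01-02T03:04:05.0Z'
#   normalize_timestamp('2021-01-02 03:04:05.12+01') -> '2021-01-02T03:04:05.120+01'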
class ItemSet(NamedTuple):
url: str
firstmod: str
lastmod: str
created: str
file: str
class Item(NamedTuple):
slug: str
created: str
modified: str
deleted: bool
@classmethod
def parse(cls, l: str) -> 'Item':
slug, created, modified, deleted = l.rstrip().split('\t')
return cls(slug,
normalize_timestamp(created),
normalize_timestamp(modified),
deleted == 't')
class Indexer:
DEFAULT_MAX_ITEMS = 50_000
outdir: Path
def __init__(self,
base_url: str,
sync_dir: str,
outdir: Path,
max_items=0,
compress=True,
representation_templates: list[str]=[],
):
self.outdir = outdir
self.max_items = max_items or Indexer.DEFAULT_MAX_ITEMS
self.compress = compress
        self.caplistfilename = 'capabilitylist.xml'
        self.changelistfilename = 'changelist.xml'
        self.descfilename = 'description.xml'
sync_dir = sync_dir.rstrip('/') + '/' if sync_dir else ''
sync_base_url = urljoin(base_url, sync_dir)
self.base_url = base_url
self.changelist_url = urljoin(sync_base_url, self.changelistfilename)
self.caplist_url = urljoin(sync_base_url, self.caplistfilename)
self.desc_url = urljoin(sync_base_url, self.descfilename)
self.write_sitemap = ItemsetWriter(
base_url,
self.changelist_url,
self.caplist_url,
representation_templates,
compress,
self.outdir)
def index(self, iterable: Iterable[str]):
self.dump_sets(self.write_sitemap(lines, i + 1)
for i, lines in enumerate(
chunk_by(iterable, self.max_items)))
def index_multiproc(self, iterable: Iterable[str], pool):
self.dump_sets(
pool.imap_unordered(self.write_sitemap,
chunk_by(iterable, self.max_items)))
def dump_sets(self, itemsets: Iterable[ItemSet]) -> None:
sorteditemsets = sorted(itemsets, key=lambda iset: iset.lastmod)
if not sorteditemsets:
return
firstmod = sorteditemsets[0].firstmod
lastmod = sorteditemsets[-1].lastmod
sitemapindex, smsub = elem('sitemapindex')
smsub('md', ns=RS, capability='changelist',
**{'from': firstmod, 'until': lastmod})
smsub('ln', ns=RS, rel='self', href=self.changelist_url)
smsub('ln', ns=RS, rel='up', href=self.caplist_url)
for itemset in sorteditemsets:
_, urlsetsub = smsub('sitemap')
urlsetsub('loc', itemset.url)
urlsetsub('md', ns=RS,
**{'from': itemset.firstmod, 'until': itemset.lastmod})
#urlsetsub('lastmod', itemset.modified) # optional
outfile = self.outdir / self.changelistfilename
write_xml(sitemapindex, outfile, self.compress, len(sorteditemsets))
self.write_parents()
def write_parents(self):
self.write_caplist()
self.write_desc()
def write_caplist(self):
root, sub = elem('urlset')
sub('ln', ns=RS, rel='up', href=self.desc_url)
sub('ln', ns=RS, rel='self', href=self.caplist_url)
sub('md', ns=RS, capability='capabilitylist')
_, urlsub = sub('url')
urlsub('loc', self.changelist_url)
urlsub('md', ns=RS, capability='changelist')
write_xml(root, self.outdir / self.caplistfilename, self.compress)
def write_desc(self):
root, sub = elem('urlset')
sub('ln', ns=RS, rel='self', href=self.desc_url)
        # TODO: ds_url
#sub('ln', ns=RS, rel='describedby', href=self.base_url)
sub('md', ns=RS, capability='description')
_, urlsub = sub('url')
urlsub('loc', self.caplist_url)
urlsub('md', ns=RS, capability='capabilitylist')
write_xml(root, self.outdir / self.descfilename, self.compress)
# TODO: Use to enable cheap incremental updates?
#mapcreatedfile = outfile.parent / 'sitemap-created.tsv'
#print(f'Writing: {mapcreatedfile}', file=sys.stderr)
#with open(mapcreatedfile, 'w') as f:
# for itemset in sorteditemsets:
# print(itemset.file, itemset.created, sep='\t', file=f)
class ItemsetWriter(NamedTuple):
base_url: str
changelist_url: str
caplist_url: str
representation_templates: list[str]
compress: bool
outdir: Path
def __call__(self, lines: list[str], seqnum: Optional[int] = None) -> ItemSet:
items = [Item.parse(l) for l in lines]
firstid = items[0].slug
firstcreated = items[0].created
items.sort(key=lambda item: item.modified)
firstmod = items[0].modified
lastmod = items[-1].modified
seqslug = (str(seqnum) if seqnum is not None else
f"{firstcreated.replace(':', '_').replace('.', '-')}-{firstid}")
changelist_nosuffix = self.changelist_url.rsplit('.', 1)[0]
itemlisturl = f'{changelist_nosuffix}-{seqslug}.xml'
filename = itemlisturl.rsplit('/', 1)[-1]
sitemap, smsub = elem('urlset')
smsub('md', ns=RS, capability='changelist',
**{'from': firstmod, 'until': lastmod})
smsub('ln', ns=RS, rel='self', href=itemlisturl)
smsub('ln', ns=RS, rel='index', href=self.changelist_url)
smsub('ln', ns=RS, rel='up', href=self.caplist_url)
for slug, created, modified, deleted in items:
uri = urljoin(self.base_url, slug)
_, urlsub = smsub('url')
urlsub('loc', uri)
if deleted:
urlsub('md', ns=RS, change='deleted', datetime=modified)
else:
#urlsub('lastmod', modified) # optional
#urlsub('md', ns=RS, at=modified) # in plain resourcelist
change = 'created' if created == modified else 'updated'
urlsub('md', ns=RS, change=change, datetime=modified)
self.add_representations(urlsub, uri)
write_xml(sitemap, self.outdir / filename, self.compress, len(items))
return ItemSet(itemlisturl, firstmod, lastmod, firstcreated, filename)
def add_representations(self, urlsub, uri):
for repr_url_tplt in self.representation_templates:
repr_url = repr_url_tplt.format(uri)
mtype, enc = mimetypes.guess_type(repr_url)
urlsub('ln', ns=RS, rel='alternate', href=repr_url, type=mtype)
def main():
"""
This tool generates a set of ResourceSync changeset files from an input
stream of tab-separated values of the form:
<slug>\t<created>\t<modified>\t<deleted>
Where:
| Column | Definition |
| ----------- | ---------- |
| slug | Slug ID resolved against BASE_URL
| created | SQL or W3C datetime value
| modified | SQL or W3C datetime value
| deleted | Boolean hint ('t' is true)
The result is:
- one Source Description,
- one Capability List,
- one Change List index,
- a set of Change Lists.
All entries in a Change List are provided in forward chronological order:
the least recently changed resource at the beginning of the Change List and
the most recently changed resource must be listed at the end.
"""
import argparse
from textwrap import dedent
argp = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=dedent(main.__doc__),
)
argp.add_argument('-b', '--base-url', default='')
argp.add_argument('-s', '--sync-dir', default='resourcesync/')
argp.add_argument('-m', '--max-items', type=int, default=0)
argp.add_argument('-C', '--no-compress', action='store_true')
argp.add_argument('-M', '--no-multiproc', action='store_true')
argp.add_argument('-r', '--representation-templates', nargs='*')
argp.add_argument('outdir', metavar='OUTDIR')
args = argp.parse_args()
outdir = Path(args.outdir)
outdir.mkdir(parents=True, exist_ok=True)
indexer = Indexer(args.base_url,
args.sync_dir,
outdir,
args.max_items,
compress=not args.no_compress,
representation_templates=args.representation_templates)
if args.no_multiproc:
indexer.index(sys.stdin)
else:
from multiprocessing import Pool
indexer.index_multiproc(sys.stdin, Pool())
if __name__ == '__main__':
main()
| {
"content_hash": "bc37d795889b634dfaf496b210f1c0f8",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 82,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.5906531531531531,
"repo_name": "libris/librisxl",
"id": "d42310dc2b87c58e6c352fda81013663d996faee",
"size": "10656",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "librisxl-tools/resourcesync/mkresourcesync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1498"
},
{
"name": "Groovy",
"bytes": "2063396"
},
{
"name": "Java",
"bytes": "798753"
},
{
"name": "PLSQL",
"bytes": "16918"
},
{
"name": "Python",
"bytes": "88819"
},
{
"name": "Shell",
"bytes": "36836"
}
],
"symlink_target": ""
} |
"""
tests.appctx
~~~~~~~~~~~~
Tests the application context.
:copyright: (c) 2016 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
def test_basic_url_generation():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
app.config['PREFERRED_URL_SCHEME'] = 'https'
@app.route('/')
def index():
pass
with app.app_context():
rv = flask.url_for('index')
assert rv == 'https://localhost/'
def test_url_generation_requires_server_name():
app = flask.Flask(__name__)
with app.app_context():
with pytest.raises(RuntimeError):
flask.url_for('index')
def test_url_generation_without_context_fails():
with pytest.raises(RuntimeError):
flask.url_for('index')
def test_request_context_means_app_context():
app = flask.Flask(__name__)
with app.test_request_context():
assert flask.current_app._get_current_object() == app
assert flask._app_ctx_stack.top is None
def test_app_context_provides_current_app():
app = flask.Flask(__name__)
with app.app_context():
assert flask.current_app._get_current_object() == app
assert flask._app_ctx_stack.top is None
def test_app_tearing_down():
cleanup_stuff = []
app = flask.Flask(__name__)
@app.teardown_appcontext
def cleanup(exception):
cleanup_stuff.append(exception)
with app.app_context():
pass
assert cleanup_stuff == [None]
def test_app_tearing_down_with_previous_exception():
cleanup_stuff = []
app = flask.Flask(__name__)
@app.teardown_appcontext
def cleanup(exception):
cleanup_stuff.append(exception)
try:
raise Exception('dummy')
except Exception:
pass
with app.app_context():
pass
assert cleanup_stuff == [None]
def test_app_tearing_down_with_handled_exception():
cleanup_stuff = []
app = flask.Flask(__name__)
@app.teardown_appcontext
def cleanup(exception):
cleanup_stuff.append(exception)
with app.app_context():
try:
raise Exception('dummy')
except Exception:
pass
assert cleanup_stuff == [None]
def test_app_ctx_globals_methods():
app = flask.Flask(__name__)
with app.app_context():
# get
assert flask.g.get('foo') is None
assert flask.g.get('foo', 'bar') == 'bar'
# __contains__
assert 'foo' not in flask.g
flask.g.foo = 'bar'
assert 'foo' in flask.g
# setdefault
flask.g.setdefault('bar', 'the cake is a lie')
flask.g.setdefault('bar', 'hello world')
assert flask.g.bar == 'the cake is a lie'
# pop
assert flask.g.pop('bar') == 'the cake is a lie'
with pytest.raises(KeyError):
flask.g.pop('bar')
assert flask.g.pop('bar', 'more cake') == 'more cake'
# __iter__
assert list(flask.g) == ['foo']
def test_custom_app_ctx_globals_class():
class CustomRequestGlobals(object):
def __init__(self):
self.spam = 'eggs'
app = flask.Flask(__name__)
app.app_ctx_globals_class = CustomRequestGlobals
with app.app_context():
assert flask.render_template_string('{{ g.spam }}') == 'eggs'
def test_context_refcounts():
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_req(error=None):
called.append('request')
@app.teardown_appcontext
def teardown_app(error=None):
called.append('app')
@app.route('/')
def index():
with flask._app_ctx_stack.top:
with flask._request_ctx_stack.top:
pass
env = flask._request_ctx_stack.top.request.environ
assert env['werkzeug.request'] is not None
return u''
c = app.test_client()
res = c.get('/')
assert res.status_code == 200
assert res.data == b''
assert called == ['request', 'app']
| {
"content_hash": "cf8a48dd0a6688152094ac98bea1eefb",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 69,
"avg_line_length": 27.27891156462585,
"alnum_prop": 0.5930174563591022,
"repo_name": "Parkayun/flask",
"id": "31e3b2071729f51db3dd7b2c73a984d6e752bbd2",
"size": "4034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_appctx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "404"
},
{
"name": "Makefile",
"bytes": "1586"
},
{
"name": "Python",
"bytes": "438838"
}
],
"symlink_target": ""
} |
from rx import Observable, AnonymousObservable
from rx.internal.utils import adapt_call
from rx.internal import extensionmethod
@extensionmethod(Observable)
def skip_while(self, predicate):
"""Bypasses elements in an observable sequence as long as a specified
condition is true and then returns the remaining elements. The
element's index is used in the logic of the predicate function.
1 - source.skip_while(lambda value: value < 10)
2 - source.skip_while(lambda value, index: value < 10 or index < 10)
predicate -- A function to test each element for a condition; the
second parameter of the function represents the index of the
source element.
Returns an observable sequence that contains the elements from the
input sequence starting at the first element in the linear series that
does not pass the test specified by predicate.
"""
predicate = adapt_call(predicate)
source = self
def subscribe(observer):
i, running = [0], [False]
def on_next(value):
if not running[0]:
try:
running[0] = not predicate(value, i[0])
except Exception as exn:
observer.on_error(exn)
return
else:
i[0] += 1
if running[0]:
observer.on_next(value)
return source.subscribe(on_next, observer.on_error, observer.on_completed)
return AnonymousObservable(subscribe)
| {
"content_hash": "4dd313bcba7beba7d327dad5270b1625",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 82,
"avg_line_length": 34.45454545454545,
"alnum_prop": 0.6418205804749341,
"repo_name": "Sprytile/Sprytile",
"id": "45870bd1f9abd8db22af122d1860cde38510ee49",
"size": "1516",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rx/linq/observable/skipwhile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "720766"
}
],
"symlink_target": ""
} |
from lxml.builder import ElementMaker
from moai.metadata.mods import NL_MODS, XSI_NS
class DIDL(object):
"""A metadata prefix implementing the DARE DIDL metadata format
this format is registered under the name "didl"
Note that this format re-uses oai_dc and mods formats that come with
MOAI by default
"""
def __init__(self, prefix, config, db):
self.prefix = prefix
self.config = config
self.db = db
self.ns = {'didl': "urn:mpeg:mpeg21:2002:02-DIDL-NS",
'dii': "urn:mpeg:mpeg21:2002:01-DII-NS",
'dip': "urn:mpeg:mpeg21:2005:01-DIP-NS",
'dcterms': "http://purl.org/dc/terms/",
'xsi': "http://www.w3.org/2001/XMLSchema-instance",
'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
'dc': 'http://purl.org/dc/elements/1.1/',
}
self.schemas = {'didl':'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd',
'dii': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd',
'dip': 'http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dip/dip.xsd'}
def get_namespace(self):
return self.ns[self.prefix]
def get_schema_location(self):
return self.schemas[self.prefix]
def __call__(self, element, metadata):
data = metadata.record
DIDL = ElementMaker(namespace=self.ns['didl'], nsmap=self.ns)
DII = ElementMaker(namespace=self.ns['dii'])
DIP = ElementMaker(namespace=self.ns['dip'])
RDF = ElementMaker(namespace=self.ns['rdf'])
DCTERMS = ElementMaker(namespace=self.ns['dcterms'])
oai_url = (self.config.url+'?verb=GetRecord&'
'metadataPrefix=%s&identifier=%s' % (
self.prefix,
data['id']))
id_url = data['metadata'].get('url', [None])[0]
# generate mods for this feed
mods_data = DIDL.Resource(mimeType="application/xml")
NL_MODS('mods', self.config, self.db)(mods_data, metadata)
asset_data = []
descriptive_metadata = RDF.type()
descriptive_metadata.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/descriptiveMetadata')
didl = DIDL.DIDL(
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(
DCTERMS.modified(data['modified'].isoformat().split('.')[0]),
mimeType="application/xml"
)
),
DIDL.Component(
DIDL.Resource(ref=id_url or oai_url,mimeType="application/xml")
),
DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(descriptive_metadata, mimeType="application/xml")
),
DIDL.Component(
DIDL.Descriptor(
DIDL.Statement("mods", mimeType="text/plain")),
mods_data)
),
)
)
object_file = RDF.type()
object_file.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/objectFile')
for root_item in didl:
for asset in data['metadata'].get('asset', []):
url = asset['url']
if not url.startswith('http://'):
url = self.config.url.rstrip('/') + '/' + url.lstrip('/')
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(object_file, mimeType="application/xml")
)
)
access = asset.get('access')
if access == 'open':
access = (
'http://purl.org/eprint/accessRights/OpenAccess')
elif access == 'restricted':
access = (
'http://purl.org/eprint/accessRights/RestrictedAccess')
elif access == 'closed':
access = (
'http://purl.org/eprint/accessRights/ClosedAccess')
if access:
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.accessRights(access),
mimeType="application/xml")))
for modified in asset.get('modified', []):
item.append(
DIDL.Descriptor(
DIDL.Statement(DCTERMS.modified(modified),
mimeType="application/xml")))
item.append(
DIDL.Component(
DIDL.Resource(mimeType=asset['mimetype'],
ref=url)
)
)
root_item.append(item)
break
human_start_page = RDF.type()
human_start_page.attrib['{%s}resource' % self.ns['rdf']] = (
'info:eu-repo/semantics/humanStartPage')
if data['metadata'].get('url'):
item = DIDL.Item(
DIDL.Descriptor(
DIDL.Statement(human_start_page, mimeType="application/xml")
),
DIDL.Component(
DIDL.Resource(mimeType="text/html", ref=data['metadata']['url'][0])
)
)
root_item.append(item)
didl.attrib['{%s}schemaLocation' % XSI_NS] = (
'%s %s %s %s %s %s' % (self.ns['didl'],
self.schemas['didl'],
self.ns['dii'],
self.schemas['dii'],
self.ns['dip'],
self.schemas['dip']))
element.append(didl)
| {
"content_hash": "d7855cac9e2539f08853b9cc3ab02dc3",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 124,
"avg_line_length": 39.98684210526316,
"alnum_prop": 0.4738400789733465,
"repo_name": "infrae/moai",
"id": "f70ac9c888e434188cbef3a72ff9b7b53e5fafd7",
"size": "6079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moai/metadata/didl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "133444"
}
],
"symlink_target": ""
} |
from asyncio import Event
from alembic import config, script, command
from alembic.runtime import migration
from sqlalchemy import engine
from configparser import ConfigParser
from datetime import datetime
from subprocess import check_output, CalledProcessError
from sys import exit, hexversion
from traceback import format_exception, format_exc
import os
import discord
import sys
from discord.ext import commands
from utils.checks import check_staff_id
from utils.manager import WordFilterManager, InviteFilterManager
from utils import models, crud
from utils.models import db
IS_DOCKER = os.environ.get('IS_DOCKER', 0)
# sets working directory to bot's folder
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
# Load config
if IS_DOCKER:
token_file = os.environ.get('KURISU_TOKEN')
if token_file:
with open(token_file, 'r', encoding='utf-8') as f:
TOKEN = f.readline().strip()
else:
sys.exit('Token path needs to be provided in the KURISU_TOKEN environment variable')
db_user_file = os.environ.get('DB_USER')
db_password_file = os.environ.get('DB_PASSWORD')
if db_user_file and db_password_file:
with open(db_user_file, 'r', encoding='utf-8') as f:
db_user = f.readline().strip()
with open(db_password_file, 'r', encoding='utf-8') as f:
db_password = f.readline().strip()
DATABASE_URL = f"postgresql://{db_user}:{db_password}@db/{db_user}"
else:
sys.exit('Database user and database password files paths need to be provided')
else:
kurisu_config = ConfigParser()
kurisu_config.read("data/config.ini")
TOKEN = kurisu_config['Main']['token']
DATABASE_URL = kurisu_config['Main']['database_url']
# loads extensions
cogs = [
'cogs.assistance',
'cogs.blah',
'cogs.events',
'cogs.extras',
'cogs.filters',
'cogs.friendcode',
'cogs.kickban',
'cogs.load',
'cogs.lockdown',
'cogs.logs',
'cogs.loop',
'cogs.memes',
'cogs.helperlist',
'cogs.imgconvert',
'cogs.mod_staff',
'cogs.mod_warn',
'cogs.mod_watch',
'cogs.mod',
'cogs.results',
'cogs.rules',
'cogs.ssnc',
'cogs.xkcdparse',
'cogs.seasonal',
'cogs.newcomers',
]
class CustomContext(commands.Context):
async def get_user(self, userid: int):
if self.guild and (user := self.guild.get_member(userid)):
return user
else:
return await self.bot.fetch_user(userid)
class Kurisu(commands.Bot):
"""Its him!!."""
def __init__(self, *args, commit, branch, **kwargs):
super().__init__(*args, **kwargs)
self.startup = datetime.now()
self.IS_DOCKER = IS_DOCKER
self.commit = commit
self.branch = branch
self.roles = {
'Helpers': None,
'Staff': None,
'HalfOP': None,
'OP': None,
'SuperOP': None,
'Owner': None,
'On-Duty 3DS': None,
'On-Duty Wii U': None,
'On-Duty Switch': None,
'On-Duty Legacy': None,
'Probation': None,
'Retired Staff': None,
'Verified': None,
'Trusted': None,
'Muted': None,
'No-Help': None,
'No-elsewhere': None,
'No-Memes': None,
'No-art': None,
'#art-discussion': None,
'No-Embed': None,
'#elsewhere': None,
'Small Help': None,
'meta-mute': None,
'Nitro Booster': None,
'crc': None,
'No-Tech': None,
}
self.actions = []
self.pruning = False
self.channels = {
'announcements': None,
'welcome-and-rules': None,
'3ds-assistance-1': None,
'3ds-assistance-2': None,
'wiiu-assistance': None,
'switch-assistance-1': None,
'switch-assistance-2': None,
'helpers': None,
'watch-logs': None,
'message-logs': None,
'upload-logs': None,
'hacking-general': None,
'meta': None,
'legacy-systems': None,
'dev': None,
'off-topic': None,
'voice-and-music': None,
'bot-cmds': None,
'mods': None,
'mod-mail': None,
'mod-logs': None,
'server-logs': None,
'bot-err': None,
'elsewhere': None, # I'm a bit worried about how often this changes, shouldn't be a problem tho
'newcomers': None,
}
self.failed_cogs = []
self.exitcode = 0
self._is_all_ready = Event(loop=self.loop)
os.makedirs("data", exist_ok=True)
async def get_context(self, message, *, cls=CustomContext):
return await super().get_context(message, cls=cls)
def upgrade_database_revision(self):
connection = engine.create_engine(DATABASE_URL)
alembic_cfg = config.Config('./alembic.ini', stdout=None)
directory = script.ScriptDirectory.from_config(alembic_cfg)
with connection.begin() as connection:
context = migration.MigrationContext.configure(connection)
if set(context.get_current_heads()) != set(directory.get_heads()):
print('Upgrading database revision')
command.upgrade(alembic_cfg, 'head')
def load_cogs(self):
for extension in cogs:
try:
self.load_extension(extension)
except BaseException as e:
print(f'{extension} failed to load.')
self.failed_cogs.append([extension, type(e).__name__, e])
async def load_channels(self):
for n in self.channels:
if channel := await models.Channel.query.where(models.Channel.name == n).gino.scalar():
self.channels[n] = self.guild.get_channel(channel)
else:
self.channels[n] = discord.utils.get(self.guild.text_channels, name=n)
if not self.channels[n]:
print(f"Failed to find channel {n}")
continue
if db_chan := await crud.get_dbchannel(self.channels[n].id):
await db_chan.update(name=n).apply()
else:
await models.Channel.create(id=self.channels[n].id, name=self.channels[n].name)
async def load_roles(self):
for n in self.roles:
if role := await models.Role.query.where(models.Role.name == n).gino.scalar():
self.roles[n] = self.guild.get_role(role)
else:
self.roles[n] = discord.utils.get(self.guild.roles, name=n)
if not self.roles[n]:
print(f"Failed to find role {n}")
continue
if db_role := await crud.get_dbrole(self.roles[n].id):
await db_role.update(name=n).apply()
else:
await models.Role.create(id=self.roles[n].id, name=self.roles[n].name)
@staticmethod
def escape_text(text):
text = str(text)
return discord.utils.escape_markdown(text)
async def on_ready(self):
guilds = self.guilds
assert len(guilds) == 1
self.guild = guilds[0]
self.upgrade_database_revision()
try:
await db.set_bind(DATABASE_URL)
        except Exception:
sys.exit('Error when connecting to database')
await self.load_channels()
await self.load_roles()
self.assistance_channels = {
self.channels['3ds-assistance-1'],
self.channels['3ds-assistance-2'],
self.channels['wiiu-assistance'],
self.channels['switch-assistance-1'],
self.channels['switch-assistance-2'],
self.channels['hacking-general'],
self.channels['legacy-systems'],
}
self.staff_roles = {'Owner': self.roles['Owner'],
'SuperOP': self.roles['SuperOP'],
'OP': self.roles['OP'],
'HalfOP': self.roles['HalfOP'],
'Staff': self.roles['Staff'],
}
self.helper_roles = {"3DS": self.roles['On-Duty 3DS'],
"WiiU": self.roles['On-Duty Wii U'],
"Switch": self.roles['On-Duty Switch'],
"Legacy": self.roles['On-Duty Legacy']
}
self.wordfilter = WordFilterManager()
await self.wordfilter.load()
self.invitefilter = InviteFilterManager()
await self.invitefilter.load()
startup_message = f'{self.user.name} has started! {self.guild} has {self.guild.member_count:,} members!'
if len(self.failed_cogs) != 0:
startup_message += "\n\nSome addons failed to load:\n"
for f in self.failed_cogs:
startup_message += "\n{}: `{}: {}`".format(*f)
print(startup_message)
await self.channels['helpers'].send(startup_message)
self._is_all_ready.set()
@staticmethod
def format_error(msg):
error_paginator = commands.Paginator()
for chunk in [msg[i:i + 1800] for i in range(0, len(msg), 1800)]:
error_paginator.add_line(chunk)
return error_paginator
async def on_command_error(self, ctx: commands.Context, exc: commands.CommandInvokeError):
author: discord.Member = ctx.author
command: commands.Command = ctx.command or '<unknown cmd>'
exc = getattr(exc, 'original', exc)
channel = self.channels['bot-err'] if self.channels['bot-err'] else ctx.channel
if isinstance(exc, commands.CommandNotFound):
return
elif isinstance(exc, commands.ArgumentParsingError):
await ctx.send_help(ctx.command)
elif isinstance(exc, commands.NoPrivateMessage):
await ctx.send(f'`{command}` cannot be used in direct messages.')
elif isinstance(exc, commands.MissingPermissions):
await ctx.send(f"{author.mention} You don't have permission to use `{command}`.")
elif isinstance(exc, commands.CheckFailure):
await ctx.send(f'{author.mention} You cannot use `{command}`.')
elif isinstance(exc, commands.BadArgument):
await ctx.send(f'{author.mention} A bad argument was given: `{exc}`\n')
await ctx.send_help(ctx.command)
elif isinstance(exc, discord.ext.commands.errors.CommandOnCooldown):
if not await check_staff_id('Helper', author.id):
try:
await ctx.message.delete()
except (discord.errors.NotFound, discord.errors.Forbidden):
pass
await ctx.send(f"{author.mention} This command was used {exc.cooldown.per - exc.retry_after:.2f}s ago and is on cooldown. Try again in {exc.retry_after:.2f}s.", delete_after=10)
else:
await ctx.reinvoke()
elif isinstance(exc, commands.MissingRequiredArgument):
await ctx.send(f'{author.mention} You are missing required argument {exc.param.name}.\n')
await ctx.send_help(ctx.command)
elif isinstance(exc, discord.NotFound):
await ctx.send("ID not found.")
elif isinstance(exc, discord.Forbidden):
await ctx.send(f"💢 I can't help you if you don't let me!\n`{exc.text}`.")
elif isinstance(exc, commands.CommandInvokeError):
await ctx.send(f'{author.mention} `{command}` raised an exception during usage')
msg = "".join(format_exception(type(exc), exc, exc.__traceback__))
error_paginator = self.format_error(msg)
for page in error_paginator.pages:
await channel.send(page)
else:
if not isinstance(command, str):
command.reset_cooldown(ctx)
await ctx.send(f'{author.mention} Unexpected exception occurred while using the command `{command}`')
msg = "".join(format_exception(type(exc), exc, exc.__traceback__))
error_paginator = self.format_error(msg)
for page in error_paginator.pages:
await channel.send(page)
async def on_error(self, event_method, *args, **kwargs):
await self.channels['bot-err'].send(f'Error in {event_method}:')
msg = format_exc()
error_paginator = self.format_error(msg)
for page in error_paginator.pages:
await self.channels['bot-err'].send(page)
def add_cog(self, cog):
super().add_cog(cog)
print(f'Cog "{cog.qualified_name}" loaded')
async def close(self):
print('Kurisu is shutting down')
await db.pop_bind().close()
await super().close()
async def is_all_ready(self):
"""Checks if the bot is finished setting up."""
return self._is_all_ready.is_set()
async def wait_until_all_ready(self):
"""Wait until the bot is finished setting up."""
await self._is_all_ready.wait()
def main():
"""Main script to run the bot."""
if discord.version_info.major < 1:
print(f'discord.py is not at least 1.0.0x. (current version: {discord.__version__})')
return 2
if not hexversion >= 0x30800f0: # 3.8
print('Kurisu requires 3.8 or later.')
return 2
if not IS_DOCKER:
# attempt to get current git information
try:
commit = check_output(['git', 'rev-parse', 'HEAD']).decode('ascii')[:-1]
except CalledProcessError as e:
print(f'Checking for git commit failed: {type(e).__name__}: {e}')
commit = "<unknown>"
try:
branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode()[:-1]
except CalledProcessError as e:
print(f'Checking for git branch failed: {type(e).__name__}: {e}')
branch = "<unknown>"
else:
commit = os.environ.get('COMMIT_SHA')
branch = os.environ.get('COMMIT_BRANCH')
intents = discord.Intents(guilds=True, members=True, bans=True, messages=True)
bot = Kurisu(('.', '!'), description="Kurisu, the bot for Nintendo Homebrew!", allowed_mentions=discord.AllowedMentions(everyone=False, roles=False), commit=commit, branch=branch, intents=intents)
bot.help_command = commands.DefaultHelpCommand(dm_help=None)
print(f'Starting Kurisu on commit {commit} on branch {branch}')
bot.load_cogs()
bot.run(TOKEN)
return bot.exitcode
if __name__ == '__main__':
exit(main())
| {
"content_hash": "d498459285ee1795f8cc49a994a1465c",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 200,
"avg_line_length": 36.37931034482759,
"alnum_prop": 0.5714285714285714,
"repo_name": "ihaveamac/Kurisu",
"id": "9eea228bab18fd53fa819364d8f0c2cd1e0d2da9",
"size": "14911",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "kurisu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "234699"
}
],
"symlink_target": ""
} |
"""Exceptions raised by Apps."""
from functools import wraps
from typing import Callable, List, Union, Any, TypeVar, Optional
from types import TracebackType
import dill
import logging
from tblib import Traceback
from six import reraise
from parsl.data_provider.files import File
logger = logging.getLogger(__name__)
class ParslError(Exception):
"""Base class for all exceptions.
Only to be invoked when a more specific error is not available.
"""
class NotFutureError(ParslError):
"""A non future item was passed to a function that expected a future.
This is basically a type error.
"""
class AppException(ParslError):
"""An error raised during execution of an app.
What this exception contains depends entirely on context
"""
class AppBadFormatting(ParslError):
"""An error raised during formatting of a bash function.
"""
class BashExitFailure(AppException):
"""A non-zero exit code returned from a @bash_app
Contains:
app name (str)
exitcode (int)
"""
def __init__(self, app_name: str, exitcode: int) -> None:
self.app_name = app_name
self.exitcode = exitcode
def __str__(self) -> str:
return f"bash_app {self.app_name} failed with unix exit code {self.exitcode}"
class AppTimeout(AppException):
"""An error raised during execution of an app when it exceeds its allotted walltime.
"""
class BashAppNoReturn(AppException):
"""Bash app returned no string.
Contains:
reason(string)
"""
def __init__(self, reason: str) -> None:
super().__init__(reason)
self.reason = reason
class MissingOutputs(ParslError):
"""Error raised at the end of app execution due to missing output files.
Contains:
reason(string)
outputs(List of strings/files..)
"""
def __init__(self, reason: str, outputs: List[Union[str, File]]) -> None:
super().__init__(reason, outputs)
self.reason = reason
self.outputs = outputs
def __repr__(self) -> str:
return "Missing Outputs: {0}, Reason:{1}".format(self.outputs, self.reason)
class BadStdStreamFile(ParslError):
"""Error raised due to bad filepaths specified for STDOUT/ STDERR.
Contains:
reason(string)
exception object
"""
def __init__(self, reason: str, exception: Exception) -> None:
super().__init__(reason, exception)
self._reason = reason
self._exception = exception
def __repr__(self) -> str:
return "Bad Stream File: {} Exception: {}".format(self._reason, self._exception)
def __str__(self) -> str:
return self.__repr__()
class RemoteExceptionWrapper:
def __init__(self, e_type: type, e_value: BaseException, traceback: Optional[TracebackType]) -> None:
self.e_type = dill.dumps(e_type)
self.e_value = dill.dumps(e_value)
self.e_traceback = None if traceback is None else Traceback(traceback)
if e_value.__cause__ is None:
self.cause = None
else:
cause = e_value.__cause__
self.cause = self.__class__(type(cause), cause, cause.__traceback__)
def reraise(self) -> None:
t = dill.loads(self.e_type)
# the type is logged here before deserialising v and tb
# because occasionally there are problems deserialising the
# value (see #785, #548) and the fix is related to the
# specific exception type.
logger.debug("Reraising exception of type {}".format(t))
v = self.get_exception()
reraise(t, v, v.__traceback__)
def get_exception(self) -> Exception:
v = dill.loads(self.e_value)
if self.cause is not None:
v.__cause__ = self.cause.get_exception()
if self.e_traceback is not None:
tb = self.e_traceback.as_traceback()
return v.with_traceback(tb)
else:
return v
R = TypeVar('R')
# There appears to be no solution to typing this without a mypy plugin.
# The reason is because wrap_error maps a Callable[[X...], R] to a Callable[[X...], Union[R, R2]].
# However, there is no provision in Python typing for pattern matching all possible types of
# callable arguments. This is because Callable[] is, in the infinite wisdom of the typing module,
# only used for callbacks: "There is no syntax to indicate optional or keyword arguments; such
# function types are rarely used as callback types.".
# The alternative supported by the typing module, of saying Callable[..., R] ->
# Callable[..., Union[R, R2]] results in no pattern matching between the first and second
# ellipsis.
# Yet another bogus solution that was here previously would simply define wrap_error as
# wrap_error(T) -> T, where T was a custom TypeVar. This obviously missed the fact that
# the returned function had its return signature modified.
# Ultimately, the best choice appears to be Callable[..., R] -> Callable[..., Union[R, ?Exception]],
# since it results in the correct type specification for the return value(s) while treating the
# arguments as Any.
def wrap_error(func: Callable[..., R]) -> Callable[..., Union[R, RemoteExceptionWrapper]]:
@wraps(func) # type: ignore
def wrapper(*args: object, **kwargs: object) -> Any:
import sys
from parsl.app.errors import RemoteExceptionWrapper
try:
return func(*args, **kwargs) # type: ignore
except Exception:
return RemoteExceptionWrapper(*sys.exc_info())
return wrapper # type: ignore
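# Illustrative use (not part of the original module): the wrapped callable returns
# either its normal result or a RemoteExceptionWrapper carrying the exception,
# which the caller can then re-raise with the original traceback:
#
#   safe_divide = wrap_error(lambda a, b: a / b)
#   outcome = safe_divide(1, 0)
#   if isinstance(outcome, RemoteExceptionWrapper):
#       outcome.reraise()  # re-raises the ZeroDivisionError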
| {
"content_hash": "37f40c18929c98df84bc7a7144028fa8",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 105,
"avg_line_length": 31.269662921348313,
"alnum_prop": 0.6525332375134747,
"repo_name": "Parsl/parsl",
"id": "d524c81b7745759f71fc47b666a5da29027964e8",
"size": "5566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/app/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1263"
},
{
"name": "CSS",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "12706"
},
{
"name": "Makefile",
"bytes": "4908"
},
{
"name": "Python",
"bytes": "1173869"
},
{
"name": "Shell",
"bytes": "12057"
}
],
"symlink_target": ""
} |
from math import log
import numpy as np
from scipy.linalg import pinvh
import pytest
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_less
from sklearn.utils._testing import assert_raise_message
from sklearn.utils import check_random_state
from sklearn.linear_model import BayesianRidge, ARDRegression
from sklearn.linear_model import Ridge
from sklearn import datasets
from sklearn.utils.extmath import fast_logdet
diabetes = datasets.load_diabetes()
def test_n_iter():
"""Check value of n_iter."""
X = np.array([[1], [2], [6], [8], [10]])
y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(n_iter=0)
msg = "n_iter should be greater than or equal to 1."
assert_raise_message(ValueError, msg, clf.fit, X, y)
def test_bayesian_ridge_scores():
"""Check scores attribute shape"""
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
assert clf.scores_.shape == (clf.n_iter_ + 1,)
def test_bayesian_ridge_score_values():
"""Check value of score on toy example.
Compute log marginal likelihood with equation (36) in Sparse Bayesian
Learning and the Relevance Vector Machine (Tipping, 2001):
- 0.5 * (log |Id/alpha + X.X^T/lambda| +
             y^T.(Id/alpha + X.X^T/lambda)^-1.y + n * log(2 * pi))
+ lambda_1 * log(lambda) - lambda_2 * lambda
+ alpha_1 * log(alpha) - alpha_2 * alpha
and check equality with the score computed during training.
"""
X, y = diabetes.data, diabetes.target
n_samples = X.shape[0]
# check with initial values of alpha and lambda (see code for the values)
eps = np.finfo(np.float64).eps
alpha_ = 1. / (np.var(y) + eps)
lambda_ = 1.
# value of the parameters of the Gamma hyperpriors
alpha_1 = 0.1
alpha_2 = 0.1
lambda_1 = 0.1
lambda_2 = 0.1
# compute score using formula of docstring
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
M = 1. / alpha_ * np.eye(n_samples) + 1. / lambda_ * np.dot(X, X.T)
M_inv = pinvh(M)
score += - 0.5 * (fast_logdet(M) + np.dot(y.T, np.dot(M_inv, y)) +
n_samples * log(2 * np.pi))
# compute score with BayesianRidge
clf = BayesianRidge(alpha_1=alpha_1, alpha_2=alpha_2,
lambda_1=lambda_1, lambda_2=lambda_2,
n_iter=1, fit_intercept=False, compute_score=True)
clf.fit(X, y)
assert_almost_equal(clf.scores_[0], score, decimal=9)
def test_bayesian_ridge_parameter():
# Test correctness of lambda_ and alpha_ parameters (GitHub issue #8224)
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
# A Ridge regression model using an alpha value equal to the ratio of
# lambda_ and alpha_ from the Bayesian Ridge model must be identical
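    # (Reason, for illustration: the Bayesian Ridge coefficients minimize
    #  alpha_ * ||y - Xw||^2 + lambda_ * ||w||^2, which is the Ridge objective
    #  with penalty alpha = lambda_ / alpha_.)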
br_model = BayesianRidge(compute_score=True).fit(X, y)
rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)
assert_array_almost_equal(rr_model.coef_, br_model.coef_)
assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_bayesian_sample_weights():
# Test correctness of the sample_weights method
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
w = np.array([4, 3, 3, 1, 1, 2, 3]).T
# A Ridge regression model using an alpha value equal to the ratio of
# lambda_ and alpha_ from the Bayesian Ridge model must be identical
br_model = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)
rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(
X, y, sample_weight=w)
assert_array_almost_equal(rr_model.coef_, br_model.coef_)
assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_bayesian_initial_params():
# Test BayesianRidge with initial values (alpha_init, lambda_init)
X = np.vander(np.linspace(0, 4, 5), 4)
y = np.array([0., 1., 0., -1., 0.]) # y = (x^3 - 6x^2 + 8x) / 3
# In this case, starting from the default initial values will increase
# the bias of the fitted curve. So, lambda_init should be small.
reg = BayesianRidge(alpha_init=1., lambda_init=1e-3)
# Check the R2 score nearly equals to one.
r2 = reg.fit(X, y).score(X, y)
assert_almost_equal(r2, 1.)
def test_prediction_bayesian_ridge_ard_with_constant_input():
# Test BayesianRidge and ARDRegression predictions for edge case of
# constant target vectors
n_samples = 4
n_features = 5
random_state = check_random_state(42)
constant_value = random_state.rand()
X = random_state.random_sample((n_samples, n_features))
y = np.full(n_samples, constant_value,
dtype=np.array(constant_value).dtype)
expected = np.full(n_samples, constant_value,
dtype=np.array(constant_value).dtype)
for clf in [BayesianRidge(), ARDRegression()]:
y_pred = clf.fit(X, y).predict(X)
assert_array_almost_equal(y_pred, expected)
def test_std_bayesian_ridge_ard_with_constant_input():
# Test BayesianRidge and ARDRegression standard dev. for edge case of
# constant target vector
# The standard dev. should be relatively small (< 0.01 is tested here)
n_samples = 10
n_features = 5
random_state = check_random_state(42)
constant_value = random_state.rand()
X = random_state.random_sample((n_samples, n_features))
y = np.full(n_samples, constant_value,
dtype=np.array(constant_value).dtype)
expected_upper_boundary = 0.01
for clf in [BayesianRidge(), ARDRegression()]:
_, y_std = clf.fit(X, y).predict(X, return_std=True)
assert_array_less(y_std, expected_upper_boundary)
def test_update_of_sigma_in_ard():
# Checks that `sigma_` is updated correctly after the last iteration
# of the ARDRegression algorithm. See issue #10128.
X = np.array([[1, 0],
[0, 0]])
y = np.array([0, 0])
clf = ARDRegression(n_iter=1)
clf.fit(X, y)
# With the inputs above, ARDRegression prunes both of the two coefficients
# in the first iteration. Hence, the expected shape of `sigma_` is (0, 0).
assert clf.sigma_.shape == (0, 0)
# Ensure that no error is thrown at prediction stage
clf.predict(X, return_std=True)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
@pytest.mark.parametrize('seed', range(100))
@pytest.mark.parametrize('n_samples, n_features', ((10, 100), (100, 10)))
def test_ard_accuracy_on_easy_problem(seed, n_samples, n_features):
# Check that ARD converges with reasonable accuracy on an easy problem
# (Github issue #14055)
X = np.random.RandomState(seed=seed).normal(size=(250, 3))
y = X[:, 1]
regressor = ARDRegression()
regressor.fit(X, y)
abs_coef_error = np.abs(1 - regressor.coef_[1])
assert abs_coef_error < 1e-10
def test_return_std():
# Test return_std option for both Bayesian regressors
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0]) * noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X_test = np.random.random((n_test, d))
for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
y = f_noise(X, noise_mult)
m1 = BayesianRidge()
m1.fit(X, y)
y_mean1, y_std1 = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
y_mean2, y_std2 = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
@pytest.mark.parametrize('seed', range(10))
def test_update_sigma(seed):
# make sure the two update_sigma() helpers are equivalent. The woodbury
# formula is used when n_samples < n_features, and the other one is used
# otherwise.
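    # (For reference, the Woodbury identity behind the n_samples < n_features
    #  path, with Lambda = diag(lmbda):
    #    (Lambda + alpha * X.T @ X)^-1
    #      = Lambda^-1 - Lambda^-1 @ X.T @ (I/alpha + X @ Lambda^-1 @ X.T)^-1 @ X @ Lambda^-1
    #  -- the exact form inside _update_sigma_woodbury may differ.)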
rng = np.random.RandomState(seed)
# set n_samples == n_features to avoid instability issues when inverting
# the matrices. Using the woodbury formula would be unstable when
# n_samples > n_features
n_samples = n_features = 10
X = rng.randn(n_samples, n_features)
alpha = 1
lmbda = np.arange(1, n_features + 1)
keep_lambda = np.array([True] * n_features)
reg = ARDRegression()
sigma = reg._update_sigma(X, alpha, lmbda, keep_lambda)
sigma_woodbury = reg._update_sigma_woodbury(X, alpha, lmbda, keep_lambda)
np.testing.assert_allclose(sigma, sigma_woodbury)
def test_ard_regression_predict_normalize_true():
"""Check that we can predict with `normalize=True` and `return_std=True`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/18605
"""
clf = ARDRegression(normalize=True)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
clf.predict([[1, 1]], return_std=True)
| {
"content_hash": "795bb2191ddbb5a1a00220be3bf5885c",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 78,
"avg_line_length": 35.38709677419355,
"alnum_prop": 0.6366859110705966,
"repo_name": "ryfeus/lambda-packs",
"id": "529702ff752ace5d3409b4b4672a3dda751c6f6d",
"size": "10014",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Sklearn_x86/source/sklearn/linear_model/tests/test_bayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
"""Ban logic for HTTP component."""
from collections import defaultdict
from datetime import datetime
from ipaddress import ip_address
import logging
from aiohttp.web import middleware
from aiohttp.web_exceptions import HTTPForbidden, HTTPUnauthorized
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.util.yaml import dump
from .const import KEY_REAL_IP
_LOGGER = logging.getLogger(__name__)
KEY_BANNED_IPS = 'ha_banned_ips'
KEY_FAILED_LOGIN_ATTEMPTS = 'ha_failed_login_attempts'
KEY_LOGIN_THRESHOLD = 'ha_login_threshold'
NOTIFICATION_ID_BAN = 'ip-ban'
NOTIFICATION_ID_LOGIN = 'http-login'
IP_BANS_FILE = 'ip_bans.yaml'
ATTR_BANNED_AT = 'banned_at'
SCHEMA_IP_BAN_ENTRY = vol.Schema({
vol.Optional('banned_at'): vol.Any(None, cv.datetime)
})
@callback
def setup_bans(hass, app, login_threshold):
"""Create IP Ban middleware for the app."""
app.middlewares.append(ban_middleware)
app[KEY_FAILED_LOGIN_ATTEMPTS] = defaultdict(int)
app[KEY_LOGIN_THRESHOLD] = login_threshold
async def ban_startup(app):
"""Initialize bans when app starts up."""
app[KEY_BANNED_IPS] = await async_load_ip_bans_config(
hass, hass.config.path(IP_BANS_FILE))
app.on_startup.append(ban_startup)
@middleware
async def ban_middleware(request, handler):
"""IP Ban middleware."""
if KEY_BANNED_IPS not in request.app:
_LOGGER.error("IP Ban middleware loaded but banned IPs not loaded")
return await handler(request)
# Verify if IP is not banned
ip_address_ = request[KEY_REAL_IP]
is_banned = any(ip_ban.ip_address == ip_address_
for ip_ban in request.app[KEY_BANNED_IPS])
if is_banned:
raise HTTPForbidden()
try:
return await handler(request)
except HTTPUnauthorized:
await process_wrong_login(request)
raise
def log_invalid_auth(func):
"""Decorate function to handle invalid auth or failed login attempts."""
async def handle_req(view, request, *args, **kwargs):
"""Try to log failed login attempts if response status >= 400."""
resp = await func(view, request, *args, **kwargs)
if resp.status >= 400:
await process_wrong_login(request)
return resp
return handle_req
async def process_wrong_login(request):
"""Process a wrong login attempt.
    Increase the failed login attempts counter for the remote IP address.
    Add an IP ban entry if the number of failed login attempts exceeds the threshold.
"""
remote_addr = request[KEY_REAL_IP]
msg = ('Login attempt or request with invalid authentication '
'from {}'.format(remote_addr))
_LOGGER.warning(msg)
hass = request.app['hass']
hass.components.persistent_notification.async_create(
msg, 'Login attempt failed', NOTIFICATION_ID_LOGIN)
# Check if ban middleware is loaded
if (KEY_BANNED_IPS not in request.app or
request.app[KEY_LOGIN_THRESHOLD] < 1):
return
request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] += 1
if (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] >=
request.app[KEY_LOGIN_THRESHOLD]):
new_ban = IpBan(remote_addr)
request.app[KEY_BANNED_IPS].append(new_ban)
await hass.async_add_job(
update_ip_bans_config, hass.config.path(IP_BANS_FILE), new_ban)
_LOGGER.warning(
"Banned IP %s for too many login attempts", remote_addr)
hass.components.persistent_notification.async_create(
'Too many login attempts from {}'.format(remote_addr),
'Banning IP address', NOTIFICATION_ID_BAN)
async def process_success_login(request):
"""Process a success login attempt.
Reset failed login attempts counter for remote IP address.
No release IP address from banned list function, it can only be done by
manual modify ip bans config file.
"""
remote_addr = request[KEY_REAL_IP]
# Check if ban middleware is loaded
if (KEY_BANNED_IPS not in request.app or
request.app[KEY_LOGIN_THRESHOLD] < 1):
return
if remote_addr in request.app[KEY_FAILED_LOGIN_ATTEMPTS] and \
request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] > 0:
_LOGGER.debug('Login success, reset failed login attempts counter'
' from %s', remote_addr)
request.app[KEY_FAILED_LOGIN_ATTEMPTS].pop(remote_addr)
class IpBan:
"""Represents banned IP address."""
def __init__(self, ip_ban: str, banned_at: datetime = None) -> None:
"""Initialize IP Ban object."""
self.ip_address = ip_address(ip_ban)
self.banned_at = banned_at or datetime.utcnow()
async def async_load_ip_bans_config(hass: HomeAssistant, path: str):
"""Load list of banned IPs from config file."""
ip_list = []
try:
list_ = await hass.async_add_executor_job(load_yaml_config_file, path)
except FileNotFoundError:
return ip_list
except HomeAssistantError as err:
_LOGGER.error('Unable to load %s: %s', path, str(err))
return ip_list
for ip_ban, ip_info in list_.items():
try:
ip_info = SCHEMA_IP_BAN_ENTRY(ip_info)
ip_list.append(IpBan(ip_ban, ip_info['banned_at']))
except vol.Invalid as err:
_LOGGER.error("Failed to load IP ban %s: %s", ip_info, err)
continue
return ip_list
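# For illustration, an entry appended to ip_bans.yaml by update_ip_bans_config
# below looks roughly like this (address and timestamp are made up):
#
#   127.0.0.1:
#     banned_at: '2019-01-02T03:04:05'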
def update_ip_bans_config(path: str, ip_ban: IpBan):
"""Update config file with new banned IP address."""
with open(path, 'a') as out:
ip_ = {str(ip_ban.ip_address): {
ATTR_BANNED_AT: ip_ban.banned_at.strftime("%Y-%m-%dT%H:%M:%S")
}}
out.write('\n')
out.write(dump(ip_))
| {
"content_hash": "ae499d5f7b872c734adf8d96f6ad5a71",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 78,
"avg_line_length": 32.44808743169399,
"alnum_prop": 0.6588076793533176,
"repo_name": "aequitas/home-assistant",
"id": "1cb610e71a640ebaad46df014d758ab44dbb9274",
"size": "5938",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/http/ban.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
import tweepy
import json
from datetime import datetime
import pytz
class TwitterAPI:
def __init__(self, keys_filename):
self.api = None
self.authenticate_api(keys_filename)
def authenticate_api(self, keys_filename):
# Open the file containing the API keys and authenticate tweepy
try:
with open(keys_filename) as json_data:
keys = json.load(json_data)
auth = tweepy.OAuthHandler(keys["consumer_key"], keys["consumer_secret"])
auth.set_access_token(keys["access_token"], keys["access_token_secret"])
self.api = tweepy.API(auth)
except FileNotFoundError as e:
# Assume mocked data is going to step in as we have no api now
print("No auth found!!")
return
def download_tweets(self, username, start_date, end_date):
tweets = []
# Load in a page of results at a time
for tweet_page in self.tweepy_timeline(username):
print("Processing page")
for tweet in tweet_page:
                # Exit once the tweets have passed our start date (newest first)
if (tweet.created_at < start_date):
return tweets
# Add this tweet to our list if it's within the date range
if (tweet.created_at > start_date and tweet.created_at <= end_date):
tweets.append(tweet)
return tweets
def tweepy_timeline(self, username):
return tweepy.Cursor(self.api.user_timeline, id=username).pages()
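# Illustrative usage sketch (editor's addition, not part of the original file).
# Assumes a keys.json file containing consumer_key, consumer_secret,
# access_token and access_token_secret, and naive UTC datetimes as returned
# by tweepy:
#
#   from datetime import datetime
#   api = TwitterAPI("keys.json")
#   tweets = api.download_tweets("twitter",
#                                start_date=datetime(2017, 1, 1),
#                                end_date=datetime(2017, 2, 1))
#   print(len(tweets))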
| {
"content_hash": "57605fc76d54a5c46f5220812f48052d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 89,
"avg_line_length": 36.55813953488372,
"alnum_prop": 0.594147582697201,
"repo_name": "dsykesturner/TwitterFavouriteFilter",
"id": "73b6f1b9360bfa4b8006f43fe7f77efecb83ae0f",
"size": "1572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TwitterAPI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3288"
}
],
"symlink_target": ""
} |
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from websocket import create_connection
from pysdn.common.status import STATUS
from pysdn.common.utils import load_dict_from_file
from pysdn.controller.controller import Controller
from pysdn.controller.notification import InventoryChangeNotification
def of_demo_30():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
description = (
"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
" This demo illustrates how to use Controller's notification\n"
" subscription service for tracing dynamic changes in the\n"
" the Controller's inventory data store.\n"
"\n"
" It is implied that core network services (OpenFlow, NETCONF)\n"
" are functioning on the Controller.\n"
"\n"
" This script creates an event listener on the Controller and\n"
" establishes permanent connection to the events notification\n"
" stream. Once a data change event in the inventory data store\n"
" (such as add/remove node or flow entry) is detected\n"
" it will be reported to the screen.\n"
"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
)
print "\n".strip()
print description
print "\n".strip()
time.sleep(rundelay)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 30 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
print "\n".strip()
print ("<<< 'Controller': %s" % (ctrlIpAddr))
time.sleep(rundelay)
# Data store for the changes
# Can be one of:
# - CONFIGURATION: Logical data store representing configuration
# state of the system and it's components.
# - OPERATIONAL: Logical data store representing operational
# state of the system and it's components
datastore = "OPERATIONAL"
# Scope of the data changes
# Can be one of:
# - BASE: Represents only a direct change of the node, such as
# replacement, addition or deletion of the node.
# - ONE: Represent a change (addition, replacement, deletion)
# of the node or one of its direct children.
# This scope is a superset of BASE.
# - SUBTREE: Represents a change of the node or any of its child
# nodes, direct and nested.
# This scope is superset of ONE and BASE.
scope = "SUBTREE"
# Path to the inventory data store in the YANG data tree
path = ctrl.get_inventory_nodes_yang_schema_path()
# Create listener on the Controller (if it does already exist Controller
# just returns the stream name to subscribe to)
result = ctrl.create_data_change_event_subscription(datastore, scope, path)
status = result.get_status()
if not status.eq(STATUS.OK):
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(1)
stream_name = result.get_data()
# Subscribe to the stream
result = ctrl.subscribe_to_stream(stream_name)
status = result.get_status()
if not status.eq(STATUS.OK):
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(1)
print "\n".strip()
print " Successfully subscribed for data change notifications"
print " Stream location:"
stream_location = result.get_data()
print " %s" % stream_location
print "\n".strip()
print " Listening ... (CTRL-C to exit)"
print "\n".strip()
# Connect to the notification stream on the Controller
# and start listening for the data change notifications
# (report only events that we are really interested in)
websock = create_connection(stream_location)
try:
while True:
notification = websock.recv()
icn = InventoryChangeNotification(notification)
timestamp = icn.get_time()
l = icn.nodes_added()
if l and len(l):
for i in l:
print " [%s] added node: %s" % (timestamp, i)
l = icn.nodes_removed()
if l and len(l):
for i in l:
print " [%s] removed node: %s" % (timestamp, i)
l = icn.flows_added()
if l and len(l):
for i in l:
print " [%s] added flow entry: %s" % (timestamp,
i.to_string())
l = icn.flows_removed()
if l and len(l):
for i in l:
print " [%s] removed flow entry: %s" % (timestamp,
i.to_string())
except(KeyboardInterrupt):
print "Interrupted from keyboard, exit\n"
websock.close()
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_30()
| {
"content_hash": "4dd464888184389a050214516815f371",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 34.60493827160494,
"alnum_prop": 0.539065287192294,
"repo_name": "brocade/pysdn",
"id": "4a3f074f462483f81b45535355c92df6d2560b6e",
"size": "7180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/sampleopenflow/demos/demo30.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "529708"
}
],
"symlink_target": ""
} |
"""Tool for helpers used in linux building process."""
import os
import SCons.Defaults
import subprocess
def _OutputFromShellCommand(command):
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
return process.communicate()[0].strip()
# This is a pure SCons helper function.
def _InternalBuildDebianPackage(env, debian_files, package_files,
output_dir=None, force_version=None):
"""Creates build rules to build a Debian package from the specified sources.
Args:
env: SCons Environment.
debian_files: Array of the Debian control file sources that should be
copied into the package source tree, e.g., changelog, control, rules,
etc.
package_files: An array of 2-tuples listing the files that should be
copied into the package source tree.
The first element is the path where the file should be placed for the
.install control file to find it, relative to the generated debian
package source directory.
The second element is the file source.
output_dir: An optional directory to place the files in. If omitted, the
current output directory is used.
force_version: Optional. Forces the version of the package to start with
this version string if specified. If the last entry in the changelog
is not for a version that starts with this then a dummy entry is
generated with this version and a ~prerelease suffix (so that the
final version will compare as greater).
Return:
A list of the targets (if any).
"""
if 0 != subprocess.call(['which', 'dpkg-buildpackage']):
print ('dpkg-buildpackage not installed on this system; '
'skipping DEB build stage')
return []
# Read the control file and changelog file to determine the package name,
# version, and arch that the Debian build tools will use to name the
# generated files.
control_file = None
changelog_file = None
for file in debian_files:
if os.path.basename(file) == 'control':
control_file = env.File(file).srcnode().abspath
elif os.path.basename(file) == 'changelog':
changelog_file = env.File(file).srcnode().abspath
if not control_file:
raise Exception('Need to have a control file')
if not changelog_file:
raise Exception('Need to have a changelog file')
source = _OutputFromShellCommand(
"awk '/^Source:/ { print $2; }' " + control_file)
packages = _OutputFromShellCommand(
"awk '/^Package:/ { print $2; }' " + control_file).split('\n')
version = _OutputFromShellCommand(
"sed -nr '1 { s/.*\\((.*)\\).*/\\1/; p }' " + changelog_file)
arch = _OutputFromShellCommand('dpkg --print-architecture')
add_dummy_changelog_entry = False
if force_version and not version.startswith(force_version):
print ('Warning: no entry in ' + changelog_file + ' for version ' +
force_version + ' (last is ' + version +'). A dummy entry will be ' +
'generated. Remember to add the real changelog entry before ' +
'releasing.')
version = force_version + '~prerelease'
add_dummy_changelog_entry = True
source_dir_name = source + '_' + version + '_' + arch
target_file_names = [ source_dir_name + '.changes' ]
for package in packages:
package_file_name = package + '_' + version + '_' + arch + '.deb'
target_file_names.append(package_file_name)
# The targets
if output_dir:
targets = [os.path.join(output_dir, s) for s in target_file_names]
else:
targets = target_file_names
# Path to where we will construct the debian build tree.
deb_build_tree = os.path.join(source_dir_name, 'deb_build_tree')
# First copy the files.
for file in package_files:
env.Command(os.path.join(deb_build_tree, file[0]), file[1],
SCons.Defaults.Copy('$TARGET', '$SOURCE'))
env.Depends(targets, os.path.join(deb_build_tree, file[0]))
# Now copy the Debian metadata sources. We have to do this all at once so
# that we can remove the target directory before copying, because there
# can't be any other stale files there or else dpkg-buildpackage may use
# them and give incorrect build output.
copied_debian_files_paths = []
for file in debian_files:
copied_debian_files_paths.append(os.path.join(deb_build_tree, 'debian',
os.path.basename(file)))
copy_commands = [
"""dir=$$(dirname $TARGET) && \
rm -Rf $$dir && \
mkdir -p $$dir && \
cp $SOURCES $$dir && \
chmod -R u+w $$dir"""
]
if add_dummy_changelog_entry:
copy_commands += [
"""debchange -c $$(dirname $TARGET)/changelog --newversion %s \
--distribution UNRELEASED \
'Developer preview build. (This entry was auto-generated.)'""" %
version
]
env.Command(copied_debian_files_paths, debian_files, copy_commands)
env.Depends(targets, copied_debian_files_paths)
# Must explicitly specify -a because otherwise cross-builds won't work.
# Must explicitly specify -D because -a disables it.
# Must explicitly specify fakeroot because old dpkg tools don't assume that.
env.Command(targets, None,
"""dir=%(dir)s && \
cd $$dir && \
dpkg-buildpackage -b -uc -a%(arch)s -D -rfakeroot && \
cd $$OLDPWD && \
for file in %(targets)s; do \
mv $$dir/../$$file $$(dirname $TARGET) || exit 1; \
done""" %
{'dir':env.Dir(deb_build_tree).path,
'arch':arch,
'targets':' '.join(target_file_names)})
return targets
def BuildDebianPackage(env, debian_files, package_files, force_version=None):
"""Creates build rules to build a Debian package from the specified sources.
This is a Hammer-ified version of _InternalBuildDebianPackage that knows to
put the packages in the Hammer staging dir.
Args:
env: SCons Environment.
debian_files: Array of the Debian control file sources that should be
copied into the package source tree, e.g., changelog, control, rules,
etc.
package_files: An array of 2-tuples listing the files that should be
copied into the package source tree.
The first element is the path where the file should be placed for the
.install control file to find it, relative to the generated debian
package source directory.
The second element is the file source.
force_version: Optional. Forces the version of the package to start with
this version string if specified. If the last entry in the changelog
is not for a version that starts with this then a dummy entry is
generated with this version and a ~prerelease suffix (so that the
final version will compare as greater).
Return:
A list of the targets (if any).
"""
if not env.Bit('host_linux'):
return []
return _InternalBuildDebianPackage(env, debian_files, package_files,
output_dir='$STAGING_DIR', force_version=force_version)
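# Illustrative usage sketch (editor's addition, not part of the original tool).
# After generate(env) has attached the methods below, a SConscript might call
# (the file names and version are hypothetical):
#
#   env.BuildDebianPackage(
#       debian_files=['debian/changelog', 'debian/control', 'debian/rules'],
#       package_files=[('usr/lib/libjingle.so', '$STAGING_DIR/libjingle.so')],
#       force_version='0.6.14')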
def _HavePackage(package):
"""Whether the given pkg-config package name is present on the build system.
Args:
package: The name of the package.
Returns:
True if the package is present, else False
"""
return subprocess.call(['pkg-config', '--exists', package]) == 0
def _GetPackageFlags(flag_type, packages):
"""Get the flags needed to compile/link against the given package(s).
Returns the flags that are needed to compile/link against the given pkg-config
package(s).
Args:
flag_type: The option to pkg-config specifying the type of flags to get.
packages: The list of package names as strings.
Returns:
The flags of the requested type.
"""
process = subprocess.Popen(['pkg-config', flag_type] + packages,
stdout=subprocess.PIPE)
return process.communicate()[0].strip().split(' ')
def GetPackageParams(env, packages):
"""Get the params needed to compile/link against the given package(s).
Returns the params that are needed to compile/link against the given
pkg-config package(s).
Args:
env: The current SCons environment.
packages: The name of the package, or a list of names.
Returns:
A dictionary containing the params.
Raises:
Exception: One or more of the packages is not installed.
"""
if not env.Bit('host_linux'):
return {}
if not SCons.Util.is_List(packages):
packages = [packages]
for package in packages:
if not _HavePackage(package):
raise Exception(('Required package \"%s\" was not found. Please install '
'the package that provides the \"%s.pc\" file.') %
(package, package))
package_ccflags = _GetPackageFlags('--cflags', packages)
package_libs = _GetPackageFlags('--libs', packages)
# Split package_libs into libs, libdirs, and misc. linker flags. (In a perfect
# world we could just leave libdirs in link_flags, but some linkers are
# somehow confused by the different argument order.)
libs = [flag[2:] for flag in package_libs if flag[0:2] == '-l']
libdirs = [flag[2:] for flag in package_libs if flag[0:2] == '-L']
link_flags = [flag for flag in package_libs if flag[0:2] not in ['-l', '-L']]
return {
'ccflags': package_ccflags,
'libs': libs,
'libdirs': libdirs,
'link_flags': link_flags,
'dependent_target_settings' : {
'libs': libs[:],
'libdirs': libdirs[:],
'link_flags': link_flags[:],
},
}
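# Illustrative usage sketch (editor's addition, not part of the original tool);
# 'gtk+-2.0' is just an example pkg-config package name:
#
#   params = env.GetPackageParams('gtk+-2.0')
#   env.Append(CCFLAGS=params['ccflags'],
#              LIBS=params['libs'],
#              LIBPATH=params['libdirs'],
#              LINKFLAGS=params['link_flags'])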
def EnableFeatureWherePackagePresent(env, bit, cpp_flag, package):
"""Enable a feature if a required pkg-config package is present.
Args:
env: The current SCons environment.
bit: The name of the Bit to enable when the package is present.
cpp_flag: The CPP flag to enable when the package is present.
package: The name of the package.
"""
if not env.Bit('host_linux'):
return
if _HavePackage(package):
env.SetBits(bit)
env.Append(CPPDEFINES=[cpp_flag])
else:
print ('Warning: Package \"%s\" not found. Feature \"%s\" will not be '
'built. To build with this feature, install the package that '
'provides the \"%s.pc\" file.') % (package, bit, package)
def generate(env):
if env.Bit('linux'):
env.AddMethod(EnableFeatureWherePackagePresent)
env.AddMethod(GetPackageParams)
env.AddMethod(BuildDebianPackage)
def exists(env):
return 1 # Required by scons
| {
"content_hash": "694a34291233c35f63aa32cf46bf6457",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 80,
"avg_line_length": 38.91760299625468,
"alnum_prop": 0.6641324222885189,
"repo_name": "muzili/libjingle-0.6.14",
"id": "67214c08cc367585a2f67257fc460a1029c58546",
"size": "10498",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "talk/site_scons/site_tools/talk_linux.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "129803"
},
{
"name": "C++",
"bytes": "6658964"
},
{
"name": "Objective-C++",
"bytes": "17548"
},
{
"name": "Python",
"bytes": "30923"
},
{
"name": "Shell",
"bytes": "1960"
}
],
"symlink_target": ""
} |
'''
Created on 25 sty 2015
@author: Adam Polomski
'''
import unittest
from model import TwoCriteriaModelFactory, AgregateMetric
from unittest.mock import MagicMock
from probability.metric import Metric
class TwoCriteriaModelFactoryTest(unittest.TestCase):
def testShouldCalculateValueFrequency(self):
# given
values = [6, 5, 4, 3, 2, 1]
rolls = 3
# when
frequency = TwoCriteriaModelFactory.valueFrequency(values, rolls)
# then
self.assertSequenceEqual(frequency, {1: 1, 2: 7, 3: 19, 4: 37, 5: 61, 6: 91}, "Invalid frequency dictionary.", None)
    def testShouldCalculateValueFrequencyWithDuplicates(self):
# given
values = [6, 5, 4, 3, 3, 1]
rolls = 3
# when
frequency = TwoCriteriaModelFactory.valueFrequency(values, rolls)
# then
self.assertSequenceEqual(frequency, {1: 1, 3: 26, 4: 37, 5: 61, 6: 91}, "Invalid frequency dictionary.", None)
def testShouldFailOnUnorderedValues(self):
# given
values = [5, 6, 4, 3, 2, 1]
rolls = 3
# then
self.assertRaises(ValueError, TwoCriteriaModelFactory.valueFrequency, values, rolls)
class AgregateMetricTest(unittest.TestCase):
def testShouldCalculateMetric(self):
#given
alpha = 5
gain = Metric()
gain.calculate = MagicMock(return_value=5)
risk = Metric()
risk.calculate = MagicMock(return_value=5)
metric = AgregateMetric(alpha, gain, risk)
#when
result = metric.calculate({})
#then
self.assertEqual(result, 20, "Invalid result.")
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| {
"content_hash": "72dac5e49b619d0aed367ae0e5fd8507",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 124,
"avg_line_length": 29.75,
"alnum_prop": 0.5745798319327731,
"repo_name": "adampolomski/decisionmaker",
"id": "5b3f1eed1ef46edda2acb0f37432be782791885a",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/model_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10198"
}
],
"symlink_target": ""
} |
"""Support for Vallox ventilation units."""
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import date
import ipaddress
import logging
from typing import Any, NamedTuple
from uuid import UUID
from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox
from vallox_websocket_api.exceptions import ValloxApiException
from vallox_websocket_api.vallox import (
get_next_filter_change_date as calculate_next_filter_change_date,
get_uuid as calculate_uuid,
)
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME, Platform
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType, StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
DEFAULT_FAN_SPEED_AWAY,
DEFAULT_FAN_SPEED_BOOST,
DEFAULT_FAN_SPEED_HOME,
DEFAULT_NAME,
DOMAIN,
METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
METRIC_KEY_PROFILE_FAN_SPEED_HOME,
STATE_SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
PLATFORMS: list[str] = [
Platform.SENSOR,
Platform.FAN,
Platform.BINARY_SENSOR,
]
ATTR_PROFILE_FAN_SPEED = "fan_speed"
SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema(
{
vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
)
}
)
class ServiceMethodDetails(NamedTuple):
"""Details for SERVICE_TO_METHOD mapping."""
method: str
schema: vol.Schema
SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home"
SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away"
SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost"
SERVICE_TO_METHOD = {
SERVICE_SET_PROFILE_FAN_SPEED_HOME: ServiceMethodDetails(
method="async_set_profile_fan_speed_home",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_AWAY: ServiceMethodDetails(
method="async_set_profile_fan_speed_away",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_BOOST: ServiceMethodDetails(
method="async_set_profile_fan_speed_boost",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
}
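# Editor's note (illustrative, not part of the original integration): assuming
# the integration domain is "vallox", an automation would invoke one of the
# services above roughly like this:
#
#   service: vallox.set_profile_fan_speed_home
#   data:
#     fan_speed: 60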
@dataclass
class ValloxState:
"""Describes the current state of the unit."""
metric_cache: dict[str, Any] = field(default_factory=dict)
profile: VALLOX_PROFILE = VALLOX_PROFILE.NONE
def get_metric(self, metric_key: str) -> StateType:
"""Return cached state value."""
if (value := self.metric_cache.get(metric_key)) is None:
return None
if not isinstance(value, (str, int, float)):
return None
return value
def get_uuid(self) -> UUID | None:
"""Return cached UUID value."""
uuid = calculate_uuid(self.metric_cache)
if not isinstance(uuid, UUID):
raise ValueError
return uuid
def get_next_filter_change_date(self) -> date | None:
"""Return the next filter change date."""
next_filter_change_date = calculate_next_filter_change_date(self.metric_cache)
if not isinstance(next_filter_change_date, date):
return None
return next_filter_change_date
class ValloxDataUpdateCoordinator(DataUpdateCoordinator):
"""The DataUpdateCoordinator for Vallox."""
data: ValloxState
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the integration from configuration.yaml (DEPRECATED)."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the client and boot the platforms."""
host = entry.data[CONF_HOST]
name = entry.data[CONF_NAME]
client = Vallox(host)
async def async_update_data() -> ValloxState:
"""Fetch state update."""
_LOGGER.debug("Updating Vallox state cache")
try:
metric_cache = await client.fetch_metrics()
profile = await client.get_profile()
except (OSError, ValloxApiException) as err:
raise UpdateFailed("Error during state cache update") from err
return ValloxState(metric_cache, profile)
coordinator = ValloxDataUpdateCoordinator(
hass,
_LOGGER,
name=f"{name} DataUpdateCoordinator",
update_interval=STATE_SCAN_INTERVAL,
update_method=async_update_data,
)
await coordinator.async_config_entry_first_refresh()
service_handler = ValloxServiceHandler(client, coordinator)
for vallox_service, service_details in SERVICE_TO_METHOD.items():
hass.services.async_register(
DOMAIN,
vallox_service,
service_handler.async_handle,
schema=service_details.schema,
)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"client": client,
"coordinator": coordinator,
"name": name,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
if hass.data[DOMAIN]:
return unload_ok
for service in SERVICE_TO_METHOD:
hass.services.async_remove(DOMAIN, service)
return unload_ok
class ValloxServiceHandler:
"""Services implementation."""
def __init__(
self, client: Vallox, coordinator: DataUpdateCoordinator[ValloxState]
) -> None:
"""Initialize the proxy."""
self._client = client
self._coordinator = coordinator
async def async_set_profile_fan_speed_home(
self, fan_speed: int = DEFAULT_FAN_SPEED_HOME
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Home fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_HOME: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Home profile: %s", err)
return False
async def async_set_profile_fan_speed_away(
self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY
) -> bool:
"""Set the fan speed in percent for the Away profile."""
_LOGGER.debug("Setting Away fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_AWAY: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Away profile: %s", err)
return False
async def async_set_profile_fan_speed_boost(
self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST
) -> bool:
"""Set the fan speed in percent for the Boost profile."""
_LOGGER.debug("Setting Boost fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_BOOST: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Boost profile: %s", err)
return False
async def async_handle(self, call: ServiceCall) -> None:
"""Dispatch a service call."""
service_details = SERVICE_TO_METHOD.get(call.service)
params = call.data.copy()
if service_details is None:
return
if not hasattr(self, service_details.method):
_LOGGER.error("Service not implemented: %s", service_details.method)
return
result = await getattr(self, service_details.method)(**params)
# This state change affects other entities like sensors. Force an immediate update that can
# be observed by all parties involved.
if result:
await self._coordinator.async_request_refresh()
| {
"content_hash": "0f95c0e7199d9bc223dbf7ad1ea88559",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 99,
"avg_line_length": 30.494915254237288,
"alnum_prop": 0.641951978657181,
"repo_name": "GenericStudent/home-assistant",
"id": "aeb9e59e28682d51937a1aa1fd892ba582f3d583",
"size": "8996",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/vallox/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('pdb_in', default=None, help='Input PDB file')
args = parser.parse_args()
import os, inspect
dirs = {}
dirs['script'] = os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))
execfile(os.path.join(dirs['script'],'_external_paths.py'))
command_paths = findPaths(['pdb2pqr','sander','chimera','dock6','sphgen_cpp'])
dirs['amber'] = os.path.abspath(os.path.dirname(command_paths['sander'])[:-4])
dirs['dock6'] = os.path.abspath(os.path.dirname(command_paths['dock6'])[:-4])
if not os.path.isfile(args.pdb_in):
raise Exception('PDB file not found!')
pdb_path = os.path.dirname(args.pdb_in)
name = os.path.basename(args.pdb_in)[:-4]
surface_script = os.path.join(dirs['script'],'_receptor_surface.chimera.py')
if not os.path.isfile(surface_script):
raise Exception('Chimera script for dms not found!')
pqr2mol2_script = os.path.join(dirs['script'],'_pqr2mol2.chimera.py')
if not os.path.isfile(pqr2mol2_script):
raise Exception('Chimera to convert from pqr to mol2 not found!')
for dirN in ['pqr','amber_prep','../amber_in','dock_prep','../dock_in']:
if not os.path.isdir(dirN):
os.makedirs(dirN)
# Add hydrogens using pdb2pqr
if not os.path.isfile('pqr/{0}.pdb2pqr_amber.pqr'.format(name)):
print '*** Adding hydrogens ***'
os.chdir('pqr')
command = command_paths['pdb2pqr'] + \
" ../{0}/{1}.pdb {1}.pdb2pqr_amber.pqr".format(pdb_path, name) + \
" --ff amber --with-ph=7 --nodebump --ffout=amber"
os.system(command)
os.chdir('..')
# Create AMBER prmtop files
if not (os.path.isfile('../amber_in/{0}.prmtop'.format(name)) and \
os.path.isfile('../amber_in/{0}.inpcrd'.format(name))):
print '\n*** Preparing AMBER input files ***'
tleap_F = open('amber_prep/{0}.tleap'.format(name),'w')
tleap_F.write('''
source leaprc.ff14SB
set default PBRadii bondi
# Receptor
protein = loadpdb pqr/{0}.pdb2pqr_amber.pqr
saveamberparm protein ../amber_in/{0}.prmtop ../amber_in/{0}.inpcrd
savepdb protein amber_prep/{0}.amber.pdb
# savemol2 protein dock_prep/{0}.mol2 0 # default atom types, seems to have a bug
quit
'''.format(name))
tleap_F.close()
command = dirs['amber']+'/bin/tleap -f amber_prep/{0}.tleap; ' + \
'mv leap.log amber_prep/{0}.leaplog'
command = command.format(name)
os.system(command)
# Convert prmtop to other formats
if not os.path.isfile('pqr/{0}.bres.pqr'.format(name)):
print '\n*** Writing standard PQR file (Brookhaven Residue Names) ***'
command = dirs['amber']+'/bin/ambpdb -p ../amber_in/{0}.prmtop -pqr -bres' + \
' < ../amber_in/{0}.inpcrd > pqr/{0}.bres.pqr'
command = command.format(name)
os.system(command)
if not os.path.isfile('dock_prep/{0}.mol2'.format(name)):
print '\n*** Writing mol2 file ***'
command = command_paths['chimera'] + " --nogui --nostatus --script" + \
" '{0} --pqr_in pqr/{1}.bres.pqr --mol2_out dock_prep/{1}.mol2'"
command = command.format(pqr2mol2_script, name)
os.system(command)
if not os.path.isfile('amber_prep/{0}.bres.pdb'.format(name)):
print '\n*** Writing standard PDB file (Brookhaven Residue Names) ***'
command = dirs['amber']+'/bin/ambpdb -p ../amber_in/{0}.prmtop -bres' + \
' < ../amber_in/{0}.inpcrd > amber_prep/{0}.bres.pdb'
command = command.format(name)
os.system(command)
if not os.path.isfile('../amber_in/{0}.pdb'.format(name)):
print '\n*** Writing standard PDB file for fixed atoms (AMBER Residue Names) ***'
command = dirs['amber']+'/bin/ambpdb -p ../amber_in/{0}.prmtop' + \
' < ../amber_in/{0}.inpcrd > ../amber_in/{0}.pdb'
command = command.format(name)
os.system(command)
command = 'python {0}/label_fixed_atoms.py ../amber_in/{1}.pdb'
command = command.format(dirs['script'], name)
os.system(command)
# Create a molecular surface file
if not (os.path.isfile('dock_prep/'+name+'.ms') or \
os.path.isfile('dock_prep/'+name+'.all.sph') or \
os.path.isfile('../dock_in/'+name+'.sph')):
print '\n*** Calculating a molecular surface ***'
command = command_paths['chimera'] + " --nogui --nostatus --script" + \
" '{0} --pdb_in {1}/{2}.pdb --dms_out dock_prep/{2}.ms'"
command = command.format(surface_script, pdb_path, name)
os.system(command)
if not os.path.isfile('dock_prep/'+name+'.ms'):
raise Exception('Surface generation failed!')
else:
print 'Molecular surface already generated'
# Generate spheres
# Usage: sphgen_cpp -i inputfilename [-s surface_topology]
# [-d surfacepoints] [-l min_distance] [-m min_radius]
# [-x max_radius] -o outputfilename
# -i Input file name [required]
# -o Output file name [required]
# -s R: outside of receptor L: inside of receptor [default: R]
# -d X: all surface points [default: X]
# -l Minimum distance between spheres [default: 0.0]
# -m Minimum sphere radius [default: 1.4]
# -x Maximum sphere radius [default: 4.0]
if not (os.path.isfile('dock_prep/'+name+'.all.sph') or \
os.path.isfile('../dock_in/'+name+'.sph')):
print '\n*** Generating all spheres ***'
command = command_paths['sphgen_cpp'] + \
" -i dock_prep/{0}.ms -o dock_prep/{0}.all.sph".format(name) + \
" -s R -d X -l 0.0 -m 1.4 -x 4.0"
os.system(command)
if os.path.isfile('dock_prep/'+name+'.all.sph'):
os.remove('dock_prep/{0}.ms'.format(name))
else:
raise Exception('Sphere generation failed!')
else:
print 'Spheres already generated'
# Load the binding site radius and half edge length
if os.path.isfile('../2-binding_site/measured_binding_site.py'):
execfile('../2-binding_site/measured_binding_site.py')
else:
raise Exception('No binding site information')
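# Editor's note (illustrative, not part of the original script):
# measured_binding_site.py is expected to define at least half_edge_length and
# the com_min/com_max box bounds used below, e.g. (hypothetical values):
#   half_edge_length = 12.0          # half the grid edge length
#   com_min = [10.0, 22.0, -5.0]     # lower corner of the center-of-mass box
#   com_max = [20.0, 32.0, 5.0]      # upper corner of the center-of-mass box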
maxR2 = half_edge_length**2
# Keep spheres that are within the half edge length of the grid center
if not os.path.isfile('../dock_in/'+name+'.sph'):
print '\n*** Selecting spheres ***'
insphF = open('dock_prep/'+name+'.all.sph','r')
insph = insphF.read().strip().split('\n')
insphF.close()
import numpy as np
# Keep clusters which have spheres within the center of mass box
osph = []
keepCluster = False
for line in insph:
if line.startswith('DOCK'):
pass
elif line.startswith('cluster'):
if keepCluster:
osph += osph_c
osph_c = []
keepCluster = False
elif len(line)>40:
osph_c.append(line)
try:
pos = np.array([float(line[5:15]),float(line[15:25]),float(line[25:35])])
except:
print line
raise Exception('Could not convert line!')
if ((pos[0]>com_min[0]) and (pos[0]<com_max[0]) and \
(pos[1]>com_min[1]) and (pos[1]<com_max[1]) and \
(pos[2]>com_min[2]) and (pos[2]<com_max[2])):
keepCluster = True
osphF = open('../dock_in/'+name+'.sph','w')
osphF.write(insph[0]+'\n')
osphF.write('cluster 1 number of spheres in cluster %d\n'%len(osph))
osphF.write('\n'.join(osph))
osphF.flush()
osphF.close()
if os.path.isfile('../dock_in/'+name+'.sph'):
pass
else:
raise Exception('Sphere selection failed!')
else:
print 'Spheres already selected'
# Run showbox
if not os.path.isfile('dock_prep/'+name+'.box.pdb'):
print '\n*** Generating the box ***'
showboxF = open('dock_prep/'+name+'.showbox.in','w')
showboxF.write('''N
U
{0} {0} {0}
{1} {1} {1}
dock_prep/{2}.box.pdb
'''.format(half_edge_length, 2*half_edge_length, name))
showboxF.close()
command = '{0}/bin/showbox < dock_prep/{1}.showbox.in'.format(dirs['dock6'], name)
os.system(command)
if os.path.isfile('dock_prep/'+name+'.box.pdb'):
os.remove('dock_prep/{0}.showbox.in'.format(name))
else:
raise Exception('Box generation failed!')
else:
print 'Box already generated'
# Calculate the grid
if not os.path.isfile('../dock_in/'+name+'.nrg'):
print '\n*** Calculating the DOCK6 grid ***'
F = open('dock_prep/'+name+'.grid.in','w')
F.write('''compute_grids yes
grid_spacing 0.25
output_molecule no
contact_score no
energy_score yes
energy_cutoff_distance 9999
atom_model a
attractive_exponent 6
repulsive_exponent 12
distance_dielectric yes
dielectric_factor 4
bump_filter yes
bump_overlap 0.75
receptor_file dock_prep/{0}.mol2
box_file dock_prep/{0}.box.pdb
vdw_definition_file {1}/parameters/vdw_AMBER_parm99.defn
score_grid_prefix ../dock_in/{0}
'''.format(name, dirs['dock6']))
F.close()
command = '{0}/bin/grid -i dock_prep/{1}.grid.in'.format(dirs['dock6'], name)
os.system(command)
if os.path.isfile('../dock_in/'+name+'.nrg'):
os.remove('dock_prep/{0}.grid.in'.format(name))
else:
raise Exception('Grid calculation failed!')
else:
print 'Grid already calculated' | {
"content_hash": "b49b0eb6bf81b0dc5b7974e6ecf1c14e",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 84,
"avg_line_length": 36.87916666666667,
"alnum_prop": 0.6341656310021466,
"repo_name": "gkumar7/AlGDock",
"id": "fbbe90df0370133364607206faebf5e5c976ca7d",
"size": "8886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pipeline/prep_receptor_for_dock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "127550"
},
{
"name": "CSS",
"bytes": "2136"
},
{
"name": "CoffeeScript",
"bytes": "13826"
},
{
"name": "JavaScript",
"bytes": "240611"
},
{
"name": "Mathematica",
"bytes": "9061"
},
{
"name": "Python",
"bytes": "834939"
},
{
"name": "Shell",
"bytes": "10278"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
from core import log
import threading
import time
logger = log.getLogger("service")
class Service(threading.Thread):
__metaclass__ = ABCMeta
@abstractmethod
def do_action(self):
"""
Implement this method that is called every step.
"""
pass
def __init__(self, name, interval):
super(Service, self).__init__(name=name)
self.__mutex = threading.Lock()
self.__stop = threading.Event()
# Interval/Frequency (in seconds)
self.__interval = interval
self.name = name
self.daemon = True
def __is_stopped(self):
with self.__mutex:
return self.__stop.isSet()
def __close(self):
with self.__mutex:
self.__stop.set()
def start(self):
logger.debug("Start the %s service (frequency=%d)" % (
self.name, self.__interval))
super(Service, self).start()
def stop(self):
timeout = 5 # do not block for a long time!
logger.debug("Stop the %s service" % (self.name,))
self.__close()
try:
if self.is_alive():
logger.debug("Joining %dsecs" % (timeout,))
self.join(timeout=timeout)
logger.info("%s service successfully stopped!" % (
self.name,))
except Exception as e:
logger.error("RunTime error: %s" % (str(e),))
def run(self):
try:
while not self.__is_stopped():
self.do_action()
time.sleep(self.__interval)
except Exception as e:
logger.error("RunTime error: %s" % (str(e),))
def debug(self, msg):
logger.debug("[%s] %s" % (self.name, msg,))
def info(self, msg):
logger.info("[%s] %s" % (self.name, msg,))
def error(self, msg):
logger.error("[%s] %s" % (self.name, msg,))
| {
"content_hash": "cc36b1ed9c23acb5d565c95fc5790f1f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 66,
"avg_line_length": 28.057971014492754,
"alnum_prop": 0.53150826446281,
"repo_name": "ict-felix/stack",
"id": "5c47dd6a42e305d9d4bf542db20dbf06e5fb52dc",
"size": "1936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/resource/orchestrator/src/core/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
} |
import os
from compressor.exceptions import FilterError
def get_class(class_string, exception=FilterError):
"""
Convert a string version of a function name to the callable object.
"""
if not hasattr(class_string, '__bases__'):
try:
class_string = class_string.encode('ascii')
mod_name, class_name = get_mod_func(class_string)
if class_name != '':
cls = getattr(__import__(mod_name, {}, {}, ['']), class_name)
except (ImportError, AttributeError):
pass
else:
return cls
raise exception('Failed to import %s' % class_string)
def get_mod_func(callback):
"""
Converts 'django.views.news.stories.story_detail' to
('django.views.news.stories', 'story_detail')
"""
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
def get_pathext(default_pathext=None):
"""
Returns the path extensions from environment or a default
"""
if default_pathext is None:
default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
return os.environ.get('PATHEXT', default_pathext)
def find_command(cmd, paths=None, pathext=None):
"""
Searches the PATH for the given command and returns its path
"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, basestring):
paths = [paths]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep)]
# don't use extensions if the command ends with one of them
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
return None
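# Illustrative usage sketch (editor's addition, not part of the original
# module); the dotted path is only an example of an importable module.Class
# string:
#
#   java_path = find_command('java')      # e.g. '/usr/bin/java', or None
#   filter_cls = get_class('compressor.filters.css_default.CssAbsoluteFilter')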
| {
"content_hash": "ad81628fe3627a687a971f005d9b2a78",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 77,
"avg_line_length": 32.24285714285714,
"alnum_prop": 0.6030128489144883,
"repo_name": "neumerance/cloudloon2",
"id": "83a1a2a1167a05204a1bf6bc21d3f69bb2970cdb",
"size": "2281",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/compressor/utils/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "178040"
},
{
"name": "JavaScript",
"bytes": "460971"
},
{
"name": "Perl",
"bytes": "1954"
},
{
"name": "Python",
"bytes": "3227734"
},
{
"name": "Ruby",
"bytes": "76"
},
{
"name": "Shell",
"bytes": "14108"
}
],
"symlink_target": ""
} |
from zipline.api import order, order_target_percent, record, symbol, symbols, set_symbol_lookup_date, history, \
get_datetime, schedule_function, get_open_orders
import pprint
from numpy import diff, isnan, arange, insert, sort, array
class DTPortfolio:
def __init__(self, cash):
self.start_cash = cash
self.port = {}
self.cash = cash
self.buy_dates = []
self.sell_dates = []
self.pct = 0
def pre_cache(self, context, data):
for stock in context.stocks:
if stock.symbol not in self.port:
self.port[stock.symbol] = {'pos': 0, 'trades': []}
self.port[stock.symbol]['last_price'] = data[stock].price
def handle(self, context, data):
for stock in context.stocks:
record("value", self.value(data, stock))
record("pct", self.pct)
if 'first_price' not in self.port[stock.symbol]:
self.port[stock.symbol]['first_price'] = data[stock].price
def order_add_percent(self, context, data, stock, pct, quiet=True):
now = get_datetime().date()
new_pct = min(self.pct + pct, 1)
if not quiet:
print("buy", str(now), stock.symbol, new_pct)
self.buy_dates.append(now)
self.order_target_percent(context, data, stock, new_pct)
def order_sub_percent(self, context, data, stock, pct, quiet=True):
now = get_datetime().date()
new_pct = max(self.pct - pct, 0)
if not quiet:
print("sell", str(now), stock.symbol, new_pct)
self.sell_dates.append(now)
self.order_target_percent(context, data, stock, new_pct)
def value(self, data, stock):
price = data[stock].price
value = int((self.cash + price * self.port[stock.symbol]['pos']) * 100.0)/100.0
return value
def order_target_percent(self, context, data, stock, pct):
# quantopian...
# order_target_percent(stock, pct)
# our naive simulation...
now = get_datetime().date()
# pct = min(max(pct, 0), 1)
self.pct = pct
if stock.symbol not in self.port:
self.port[stock.symbol] = {'pos': 0, 'trades': []}
dict = self.port[stock.symbol]
price = int(data[stock].price * 100) / 100.0
value = self.cash + price * dict['pos']
to_invest = value * pct
new_pos = int(to_invest / price)
prev_pos = dict['pos']
diff_pos = new_pos - prev_pos
to_invest = int(price * diff_pos * 100) / 100.0
dict['pos'] = new_pos
self.cash -= to_invest
dict['trades'].append({'date': now, 'cost': to_invest, 'price': price, 'pos': diff_pos, 'value': value})
self.port[stock.symbol] = dict
def dump(self):
pprint.pprint(self.port, width=200)
def performance_csv(self, prefix=""):
for my_stock in self.port:
sp = self.port[my_stock]
st = int((self.cash + sp['pos'] * sp['last_price']) * 100.0) / 100.0
bhpos = self.start_cash / sp['first_price']
bh = int((bhpos * sp['last_price']) * 100.0) / 100.0
print(prefix+",%.2f,%.2f" % (st, bh))
def csv(self):
#print("--- portfolio csv ---")
print("cash,10000")
for my_stock in self.port:
sp = self.port[my_stock]
bhpos = self.start_cash / sp['first_price']
bh = int((bhpos * sp['last_price']) * 100.0) / 100.0
st = int((self.cash + sp['pos'] * sp['last_price']) * 100.0) / 100.0
print("first price,%0.2f" % (sp['first_price']))
print("last price,%0.2f" % (sp['last_price']))
print("Buy&Hold,%0.2f" % (bh))
print("Strategy,%0.2f\n" % (st))
print("cost,date,position,price,st value,bh value")
for trade in sp['trades']:
d = trade['date']
bh = int((bhpos * trade['price'] + 5000) * 100.0) / 100.0
print("%0.2f,%d-%d-%d,%d,%0.2f,%0.2f,%0.2f" %
(trade['cost'], d.year, d.month, d.day, trade['pos'], trade['price'], trade['value'], bh))
print("Strategy,%0.2f" % (st))
print("Buy&Hold,%0.2f" % (bh))
#print("\n--- portfolio csv ---")
def plot_signals(self, ax1):
ymin, ymax = ax1.get_ylim()
ax1.vlines(x=self.sell_dates, ymin=ymin, ymax=ymax, color='r')
ax1.vlines(x=self.buy_dates, ymin=ymin, ymax=ymax, color='b')
# all_dates = pv.axes[0].date
# yx = (ymax - ymin) / 3
# ax1.vlines(x=all_dates, ymin=ymin+yx, ymax=ymax-yx, color='g')
class DTEODChangeTrader:
def __init__(self, buy_threshold, sell_threshold, buy_pct, sell_pct, roc_window=180):
self.buy_threshold = buy_threshold
self.sell_threshold = sell_threshold
self.buy_pct = buy_pct
self.sell_pct = sell_pct
self.roc_window = roc_window
self.name = "EODCT"
self.prices = 0
self.portfolio = DTPortfolio(10000)
@property
def portfolio(self):
return self.__portfolio
@portfolio.setter
def portfolio(self, portfolio):
self.__portfolio = portfolio
def pre_cache(self):
# closing prices for all stocks
self.prices = history(self.roc_window, '1d', 'price')
def handle(self, context, data, stock, quiet=True):
# find the historical daily % changes
# choose the top x% and bellow y% value
# use them as thresholds for sell/buy signals
velocity = self.prices.diff()
rate_of_change = velocity / self.prices
roc_sorted = rate_of_change.sort(stock)
roc_size = len(roc_sorted)
# index of nth element (top/bottom n% roc)
buy_index = roc_size * self.buy_threshold
sell_index = -roc_size * self.sell_threshold
buy_threashold = roc_sorted.values[buy_index][0]
sell_threashold = roc_sorted.values[sell_index][0]
record(self.name + '_buy', buy_threashold)
record(self.name + '_sell', sell_threashold)
# calculate today's (now's) % change (roc)
p_yesterday = self.prices[stock][-2]
p_today = data[stock].price
p_change = 1 - p_yesterday / p_today
if p_change > sell_threashold:
self.portfolio.order_sub_percent(context, data, stock, self.sell_pct, quiet=quiet)
elif p_change < buy_threashold:
self.portfolio.order_add_percent(context, data, stock, self.buy_pct, quiet=quiet)
def plot(self, results, symbol, ax2):
r = results.get(symbol)
self.plot_roc(r, ax2)
# plot threasholds
results.get(self.name + '_buy').plot(ax=ax2, color='g')
results.get(self.name + '_sell').plot(ax=ax2, color='r')
def plot_roc(self, r, ax2):
v = diff(r)
v = insert(v, 0, v[0])
roc = v / r
obj = r.axes[0].date
ax2.plot(obj, roc, 'x-', label='v')
return roc
class DTEODChangeTrader2:
def __init__(self, rate=0.5, roc_window=180):
self.rate = rate
self.roc_window = roc_window
self.name = "EODCT"
self.prices = 0
self.portfolio = DTPortfolio(10000)
self.last_txn = 0
self.setup()
def setup(self):
        # Offsets make the strategy more inclined to trade
        pref_sell = 1 + (max(0, self.portfolio.pct - 0.75) * 3)
        pref_buy = 1 + (max(0, 0.25 - self.portfolio.pct) * 3)
        # threshold 0.05 to 0.4, 0.05 step => 1 to 0.1
        # pct: if threshold is 0.4, pct is 20%; if 0.05, pct is 100%
self.buy_threasholds = arange(0.4, 0.0499, -0.05) * self.rate * pref_buy * (2 - pref_sell)
self.sell_threasholds = arange(0.4, 0.0499, -0.05) * self.rate * pref_sell* (2 - pref_buy)
ranges = len(self.buy_threasholds) - 1
self.buy_pcts = arange(0.2, 1.01, 0.8/ranges)
self.sell_pcts = arange(0.2, 1.01, 0.8/ranges)
@property
def portfolio(self):
return self.__portfolio
@portfolio.setter
def portfolio(self, portfolio):
self.__portfolio = portfolio
def pre_cache(self):
# closing prices for all stocks
self.prices = history(self.roc_window, '1d', 'price')
def handle(self, context, data, stock, quiet=True):
self.setup()
# find the historical daily % changes
        # choose the top x% and below y% values
# use them as thresholds for sell/buy signals
velocity = self.prices.diff().dropna()
rate_of_change = (velocity / self.prices).dropna()
roc_sorted = rate_of_change.sort(stock)
roc_size = len(roc_sorted)
# calculate today's (now's) % change (roc)
p_yesterday = self.prices[stock][-2]
p_today = data[stock].price
p_change = 1 - p_yesterday / p_today
if not quiet:
print("y %.2f, t %.2f, c %.4f" % (p_yesterday, p_today, p_change))
# index of nth element (top/bottom n% roc)
ranges = len(self.buy_threasholds)
done = False
for i in range(ranges-1, -1, -1):
bi = (roc_size - 1) * min(1.0, self.buy_threasholds[i])
bt = roc_sorted.values[bi][0]
si = - (roc_size - 1) * min(1.0, self.sell_threasholds[i])
st = roc_sorted.values[si][0]
if not quiet:
print("* bi %.2f, bi %d, bt %.4f, bp %.4f, si %.2f, si %d, st %.4f, sp %.4f" %
(self.buy_threasholds[i], bi, bt, self.buy_pcts[i],
self.sell_threasholds[i], si, st, self.sell_pcts[i]))
if not done:
if p_change > st:
self.portfolio.order_sub_percent(context, data, stock, self.sell_pcts[i], quiet)
self.last_txn = get_datetime().date()
done = True
elif p_change < bt:
self.portfolio.order_add_percent(context, data, stock, self.buy_pcts[i], quiet)
self.last_txn = get_datetime().date()
done = True
record(self.name + '_buy' + str(i), bt if bt < 0 else 0)
record(self.name + '_sell' + str(i), st if st > 0 else 0)
def plot(self, results, symbol, ax2):
r = results.get(symbol)
self.plot_roc(r, ax2)
# plot threasholds
ranges = len(self.buy_threasholds)
for i in [0, ranges-1]:
results.get(self.name + '_buy' + str(i)).plot(ax=ax2)
results.get(self.name + '_sell' + str(i)).plot(ax=ax2)
# print("b", i, results.get(self.name + '_buy' + str(i)).dropna(),
# "s", i, results.get(self.name + '_sell' + str(i)).dropna())
def plot_roc(self, r, ax2):
v = diff(r)
v = insert(v, 0, v[0])
roc = v / r
obj = r.axes[0].date
ax2.plot(obj, roc, 'x-', label='v')
return roc
| {
"content_hash": "b8adc075bdfc0566c8966f8d4f2f227e",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 112,
"avg_line_length": 37.63793103448276,
"alnum_prop": 0.5464956481905634,
"repo_name": "jdtogni/trader",
"id": "2cc7052560593a715f13d4e571e1a7f36d4e7f9f",
"size": "10915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/dttrader/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import page_objects
def click_button():
page = page_objects.dynamic_controls.DynamicControlsPage()
page.click_button()
return
def click_button_and_wait_for_loading():
page = page_objects.dynamic_controls.DynamicControlsPage()
page.click_button_and_wait_for_loading()
return
def click_checkbox():
page = page_objects.dynamic_controls.DynamicControlsPage()
page.click_checkbox()
return
def wait_for_loading_indicator_to_complete():
page = page_objects.dynamic_controls.DynamicControlsPage()
page.wait_for_loading_indicator_to_complete()
return
| {
"content_hash": "612d8ae5d1c7bad8194774889cd9667f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 62,
"avg_line_length": 28.285714285714285,
"alnum_prop": 0.7323232323232324,
"repo_name": "MooMan272/selenium_the_internet",
"id": "ed94e8c793a4af43e518ef7b607e83d8fd0ede45",
"size": "594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "action/dynamic_controls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34748"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .enums import *
from .errors import *
class RAMONBase(object):
"""
RAMONBase Object for storing neuroscience data
"""
def __init__(self, id=DEFAULT_ID,
confidence=DEFAULT_CONFIDENCE,
kvpairs=DEFAULT_DYNAMIC_METADATA,
status=DEFAULT_STATUS,
author=DEFAULT_AUTHOR):
"""
Initialize a new RAMONBase object with default attributes.
Arguments:
id (int): Unique 32-bit ID value assigned by OCP database
confidence (float): Value 0-1 indicating confidence in annotation
kvpairs (dict): A collection of key-value pairs
status (string): Status of annotation in database
author (string): Username of the person who created the annotation
"""
self.id = id
self.confidence = confidence
self.kvpairs = kvpairs
self.status = status
self.author = author
def __str__(self):
"""
String representation of a RAMON object for convenience.
"""
return "<{} object. id={}>".format(type(self), self.id)
def __repr__(self):
"""
String representation of a RAMON object for convenience.
"""
return "{} object. id={}".format(type(self), self.id)
| {
"content_hash": "15bc6686b8163edeb5e34eb8894dce88",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 32.26190476190476,
"alnum_prop": 0.5852398523985239,
"repo_name": "openconnectome/ndio",
"id": "51fefbd10d20da70df087f1383de95730730491e",
"size": "1355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndio/ramon/RAMONBase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "138605"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.ifconfig', 'sphinxcontrib.httpdomain']
def setup(app):
app.add_config_value('platform_docs', True, 'env')
# Even if it has a default, these options need to be specified
platform_docs = False
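# Editor's note (illustrative, not part of the original config): with
# sphinx.ext.ifconfig enabled above, .rst sources can gate content on this
# flag, for example:
#
#   .. ifconfig:: platform_docs
#
#      This paragraph only appears when platform_docs is True.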
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kafka REST Proxy'
copyright = u'2014, Confluent, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
release = '4.0.0-SNAPSHOT'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'KafkaRESTProxydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'KafkaRESTProxy.tex', u'Kafka REST Proxy Documentation',
u'Confluent, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kafkarestproxy', u'Kafka REST Proxy Documentation',
[u'Confluent, Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KafkaRESTProxy', u'Kafka REST Proxy Documentation',
u'Confluent, Inc.', 'KafkaRESTProxy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "1e7c2655db2c43d9158252035ca7760e",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 32.107142857142854,
"alnum_prop": 0.7078235076010382,
"repo_name": "imdark/kafka-rest",
"id": "47354f338ea1200bc69033ff11cb29614b242021",
"size": "8520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "660603"
},
{
"name": "Shell",
"bytes": "5621"
}
],
"symlink_target": ""
} |
"""
Flask-Kinesis
-------------
With this library, you can simply record events from the flask application to aws kinesis
"""
from distutils.core import setup
setup(
name="Flask-kinesis",
version="0.1.9",
py_modules=["flask_kinesis"],
author="doyoung",
author_email="hdy3405" "@" "gmail.com",
url="https://github.com/iidd0101/flask-kinesis",
license="MIT",
description="Flask plugin for aws kinesis stream",
long_description=__doc__,
platforms="any",
install_requires=[
"Flask",
"boto3"
],
classifiers=[
"Development Status :: 1 - Planning",
"Framework :: Flask",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Natural Language :: Korean",
"Natural Language :: English",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
| {
"content_hash": "dc5b4ed4164737194c5a6117b9edca5e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 89,
"avg_line_length": 27.83783783783784,
"alnum_prop": 0.6019417475728155,
"repo_name": "iidd0101/flask-kinesis",
"id": "00239aaf8acaef88b7dfef9c845d3198e30a6dff",
"size": "1030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2890"
}
],
"symlink_target": ""
} |
try:
from unittest.mock import Mock
from unittest.mock import patch
except ImportError:
from mock import Mock
from mock import patch
from moira_client.client import Client
from moira_client.client import InvalidJSONError
from moira_client.client import ResponseStructureError
from moira_client.models.subscription import SubscriptionManager
from .test_model import ModelTest
class SubscriptionTest(ModelTest):
def test_create(self):
client = Client(self.api_url)
manager = SubscriptionManager(client)
tags = ['server', 'cpu']
contacts = ['acd2db98-1659-4a2f-b227-52d71f6e3ba1']
s = manager.create(tags, contacts)
with patch.object(client, 'put', return_value={"id": "e5cd5d73-d893-42b5-98b5-f9bd6c7bc501"}) as put_mock:
s.save()
self.assertTrue(put_mock.called)
args_ = put_mock.call_args[1]
body_json = args_['json']
# check required fields
self.assertEqual(tags, body_json['tags'])
self.assertEqual(contacts, body_json['contacts'])
# check default values
self.assertEqual(True, body_json['enabled'])
self.assertEqual(True, body_json['throttling'])
self.assertTrue('sched' in body_json)
self.assertEqual(False, body_json['ignore_warnings'])
self.assertEqual(False, body_json['ignore_recoverings'])
self.assertTrue('plotting' in body_json)
self.assertEqual(False, body_json['any_tags'])
def test_fetch_all(self):
client = Client(self.api_url)
subscription_manager = SubscriptionManager(client)
with patch.object(client, 'get', return_value={'list': []}) as get_mock:
subscription_manager.fetch_all()
self.assertTrue(get_mock.called)
get_mock.assert_called_with('subscription')
def test_fetch_all_bad_response(self):
client = Client(self.api_url)
subscription_manager = SubscriptionManager(client)
with patch.object(client, 'get', return_value={}) as get_mock:
with self.assertRaises(ResponseStructureError):
subscription_manager.fetch_all()
self.assertTrue(get_mock.called)
get_mock.assert_called_with('subscription')
def test_delete(self):
client = Client(self.api_url)
subscription_manager = SubscriptionManager(client)
subscription_id = '1'
with patch.object(client, 'delete', new=Mock(side_effect=InvalidJSONError(b''))) as delete_mock:
res = subscription_manager.delete(subscription_id)
self.assertTrue(delete_mock.called)
self.assertTrue(res)
delete_mock.assert_called_with('subscription/' + subscription_id)
def test_delete_fail(self):
client = Client(self.api_url)
subscription_manager = SubscriptionManager(client)
subscription_id = '1'
with patch.object(client, 'delete') as delete_mock:
res = subscription_manager.delete(subscription_id)
self.assertTrue(delete_mock.called)
self.assertFalse(res)
delete_mock.assert_called_with('subscription/' + subscription_id)
def test_test(self):
client = Client(self.api_url)
subscription_manager = SubscriptionManager(client)
subscription_id = '1'
with patch.object(client, 'put', new=Mock(side_effect=InvalidJSONError(b''))) as put_mock:
res = subscription_manager.test(subscription_id)
self.assertTrue(put_mock.called)
self.assertTrue(res)
put_mock.assert_called_with('subscription/' + subscription_id + '/test')
def test_test_fail(self):
client = Client(self.api_url)
subscription_manager = SubscriptionManager(client)
subscription_id = '1'
with patch.object(client, 'put') as put_mock:
res = subscription_manager.test(subscription_id)
self.assertTrue(put_mock.called)
self.assertFalse(res)
put_mock.assert_called_with('subscription/' + subscription_id + '/test')
| {
"content_hash": "c511e349a3acb2f8b144492cf211366c",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 114,
"avg_line_length": 35.56140350877193,
"alnum_prop": 0.6578687715836211,
"repo_name": "moira-alert/python-moira-client",
"id": "07801b0fe8ae77f5c8435428d7af5892e414ca1f",
"size": "4054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models/test_subscription.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "397"
},
{
"name": "Python",
"bytes": "73230"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Series',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('number', models.PositiveSmallIntegerField()),
('year', models.PositiveSmallIntegerField()),
('parent',
models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE,
to='series.Series')),
],
options={
'ordering': ('year',),
'verbose_name_plural': 'series',
},
),
]
| {
"content_hash": "1f384cc6b862ea0924b11347890afa35",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 116,
"avg_line_length": 32.06666666666667,
"alnum_prop": 0.5353430353430353,
"repo_name": "reiniervdwindt/power-rangers-api",
"id": "160d60787e8a8b020eebb7fc21b993698939f39d",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/series/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "617"
},
{
"name": "Python",
"bytes": "70343"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import shlex
import subprocess
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
def run_command(arg):
proc = subprocess.Popen(shlex.split(arg),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
out, err = proc.communicate()
ret = proc.returncode
return ret, out, err
def parse_args():
parser = argparse.ArgumentParser(
description='Check holland backup completion')
parser.add_argument('galera_container_name',
help='Name of the Galera container running holland')
parser.add_argument('holland_binary', nargs='?',
help='Absolute path to the holland binary',
default='/usr/local/bin/holland')
parser.add_argument('holland_backupset', nargs='?',
help='Name of the holland backupset',
default='rpc_support')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
return parser.parse_args()
def print_metrics(name, size):
metric('holland_backup_size', 'double', size, 'Megabytes')
def container_holland_lb_check(container, binary, backupset):
backupsets = []
# Call holland directly inside container
retcode, output, err = run_command('lxc-attach -n %s -- %s lb' %
(container, binary))
if retcode > 0:
status_err('Could not list holland backupsets: %s' % (err),
m_name='maas_holland')
for line in output.split():
if backupset + '/' in line:
backupname = line.split('/')[-1]
disksize = 0
# Determine size of the backup
retcode, output, err = \
run_command('lxc-attach -n %s -- '
'du -ks /var/backup/holland_backups/%s/%s' %
(container, backupset, backupname))
if retcode == 0:
disksize = output.split()[0]
# Populate backupset informations
backupsets.append([backupname, disksize])
return backupsets
def main():
galera_container = args.galera_container_name
holland_bin = args.holland_binary
holland_bs = args.holland_backupset
today = datetime.date.today().strftime('%Y%m%d')
yesterday = (datetime.date.today() -
datetime.timedelta(days=1)).strftime('%Y%m%d')
# Get completed Holland backup set
backupsets = \
container_holland_lb_check(galera_container, holland_bin, holland_bs)
    if len([backup for backup in backupsets
            if yesterday in backup[0] or today in backup[0]]) > 0:
status_ok(m_name='maas_holland')
metric_bool('holland_backup_status', True, m_name='maas_holland')
else:
metric_bool('holland_backup_status', False, m_name='maas_holland')
status_err('Could not find Holland backup from %s or %s'
% (yesterday, today), m_name='maas_holland')
# Print metric about last backup
print_metrics('holland_backup_size', float(backupsets[-1][1]) / 1024)
if __name__ == '__main__':
args = parse_args()
with print_output(print_telegraf=args.telegraf_output):
main()
| {
"content_hash": "1523fe6ff0669bc48dd27f9045de3576",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 33.838095238095235,
"alnum_prop": 0.5817618913594146,
"repo_name": "cloudnull/rpc-maas",
"id": "1aaedb6ab7fafd753191612e2a71a4deea65570b",
"size": "4159",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "playbooks/files/rax-maas/plugins/holland_local_check.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2138"
},
{
"name": "Python",
"bytes": "370758"
},
{
"name": "Shell",
"bytes": "41469"
}
],
"symlink_target": ""
} |
__author__ = 'Pylar'
#!flask/bin/python
from app import create_app
import os
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
app.run(debug = True)
| {
"content_hash": "e0c66ae5501515273424f1e29667c06d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 56,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7142857142857143,
"repo_name": "EricFZ/flask",
"id": "87faa81e7e32a4a991d5544ceef565bfcc8e551e",
"size": "178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10907"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "36180"
}
],
"symlink_target": ""
} |
import numpy as np
import os, pdb
import pytest
from astropy import units as u
from xastropy.igm.abs_sys import abssys_utils as xabsys
from linetools.spectralline import AbsLine
'''
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'files')
return os.path.join(data_dir, filename)
'''
def test_grab_line():
# Init
gensys = xabsys.GenericAbsSystem()
#
few_lines = [1215.6700, 1334.5323]*u.AA
z=1.34
for ilin in few_lines:
gensys.lines.append(AbsLine(ilin,z=z))
# Test grab line
Lya = gensys.grab_line((z,1215.670*u.AA))
assert Lya.trans == 'HI 1215'
| {
"content_hash": "f4040d23b3b65b042232a7ba8632eebb",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 63,
"avg_line_length": 23.444444444444443,
"alnum_prop": 0.665086887835703,
"repo_name": "nhmc/xastropy",
"id": "0cbadaaadb118c2ab00caeb075f67da0c5b4fd23",
"size": "711",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xastropy/igm/abs_sys/tests/test_use_abssys.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "144038"
},
{
"name": "Python",
"bytes": "1007319"
}
],
"symlink_target": ""
} |
"""
Running CLI
"""
from sunhead.cli.entrypoint import main
if __name__ == '__main__':
main()
| {
"content_hash": "1142b9cac2c387b75deb93299963a979",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 11.222222222222221,
"alnum_prop": 0.5742574257425742,
"repo_name": "anti1869/sunhead",
"id": "07d49e245836c468ff01b52d213443ca37ad93f6",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sunhead/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62290"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import os
import subprocess
VERSION = (0, 1, 0, 'rc', 0)
def get_version(version=None):
"Returns a PEP 386-compliant version number from VERSION."
if version is None:
version = VERSION
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
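# Worked examples for get_version() above (illustrative only, derived from the
# logic of the function; they are not part of the original module):
#   get_version((0, 1, 0, 'final', 0))  -> '0.1'       (a zero Z part is dropped)
#   get_version((0, 1, 2, 'beta', 1))   -> '0.1.2b1'
#   get_version((0, 1, 0, 'rc', 0))     -> '0.1c0'     (matches VERSION above)
#   get_version((0, 1, 0, 'alpha', 0))  -> '0.1.dev<YYYYMMDDHHMMSS>' when a git
#                                          checkout is found, otherwise '0.1'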
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S') | {
"content_hash": "ec3554f7e6f8327b1857ca2d43312e3d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 32.089285714285715,
"alnum_prop": 0.6249304396215916,
"repo_name": "kevwilde/cmsplugin-twcarousel",
"id": "607be56e01caa1b4509efb7e6850892b7adeff20",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twcarousel/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "61752"
},
{
"name": "Python",
"bytes": "4381"
}
],
"symlink_target": ""
} |
import sys
if sys.platform not in ['mozilla', 'ie6', 'opera', 'oldmoz', 'safari']:
from __pyjamas__ import doc
from pyjamas import Factory
from pyjamas import DOM
from HTMLTable import HTMLTable
from RowFormatter import RowFormatter
from FlexCellFormatter import FlexCellFormatter
class FlexTable(HTMLTable):
def __init__(self, **kwargs):
if not kwargs.has_key('CellFormatter'):
kwargs['CellFormatter'] = FlexCellFormatter(self)
HTMLTable.__init__(self, **kwargs)
def addCell(self, row):
self.insertCell(row, self.getCellCount(row))
def getCellCount(self, row):
self.checkRowBounds(row)
return self.getDOMCellCount(self.getBodyElement(), row)
def getFlexCellFormatter(self):
return self.getCellFormatter()
def getRowCount(self):
return self.getDOMRowCount()
def removeCells(self, row, column, num):
for i in range(num):
self.removeCell(row, column)
def prepareCell(self, row, column):
self.prepareRow(row)
#if column < 0: throw new IndexOutOfBoundsException("Cannot create a column with a negative index: " + column);
cellCount = self.getCellCount(row)
required = column + 1 - cellCount
if required > 0:
self.addCells(self.getBodyElement(), row, required)
def prepareRow(self, row):
#if row < 0: throw new IndexOutOfBoundsException("Cannot create a row with a negative index: " + row);
rowCount = self.getRowCount()
for i in range(rowCount, row + 1):
self.insertRow(i)
def addCells(self, table, row, num):
rowElem = table.rows.item(row)
for i in range(num):
cell = doc().createElement("td")
rowElem.appendChild(cell)
Factory.registerClass('pyjamas.ui.FlexTable', FlexTable)
| {
"content_hash": "5f6a9b4c125be86c084af65a9f024fb5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 119,
"avg_line_length": 31.82758620689655,
"alnum_prop": 0.6522210184182016,
"repo_name": "jaredly/pyjamas",
"id": "a9bd8cd785505eb3b43b412edd9e77955bfe6435",
"size": "2505",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "library/pyjamas/ui/FlexTable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "411613"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "4243623"
},
{
"name": "Shell",
"bytes": "14781"
}
],
"symlink_target": ""
} |
from kbox import kbox
import unittest
class TestAirML(unittest.TestCase):
def test_list(self):
output = kbox.execute('-list')
self.assertTrue('KBox KNS Resource table list' in output)
def test_info(self):
output = kbox.execute("-info http://purl.org/pcp-on-web/dbpedia")
print(output)
self.assertTrue('KB:http://purl.org/pcp-on-web/dbpedia' in output)
def test_search(self):
output = kbox.execute('-search ontology')
print(output)
self.assertTrue('KBox KNS Resource table list' in output)
def test_dir(self):
output = kbox.execute('-r-dir')
print(output)
self.assertTrue('Your current resource directory is:' in output)
def test_version(self):
output = kbox.execute('-version')
print(output)
self.assertTrue('KBox version v0.0.2-alpha' in output)
def test_invalid_command(self):
output = kbox.execute('lists')
self.assertTrue("KBox.jar <command> [option]" in output)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "ebf4f5f7a288b56ceb69ec89c9dbc5fa",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 29.86111111111111,
"alnum_prop": 0.6288372093023256,
"repo_name": "AKSW/KBox",
"id": "cdd32e8e385ee997fe4532fbe262e4fe9f95b8e8",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kbox.pip/test/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "232"
},
{
"name": "Java",
"bytes": "238025"
},
{
"name": "Python",
"bytes": "3110"
}
],
"symlink_target": ""
} |
"""
pghoard: fixtures for tests
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
import contextlib
import json
import lzma
import os
import random
import re
import signal
import subprocess
import tempfile
import time
from distutils.version import LooseVersion
from unittest import SkipTest
import psycopg2
import pytest
from py import path as py_path # pylint: disable=no-name-in-module
from pghoard import config as pghconfig
from pghoard import logutil, pgutil
from pghoard.pghoard import PGHoard
from pghoard.rohmu.compat import suppress
from pghoard.rohmu.snappyfile import snappy
logutil.configure_logging()
class PGTester:
def __init__(self, pgdata):
pgver = os.getenv("PG_VERSION")
pgbin, ver = pghconfig.find_pg_binary("", versions=[pgver] if pgver else None)
self.pgbin = pgbin
self.ver = ver
self.pgdata = pgdata
self.pg = None
self.user = None
@property
def pgver(self):
with open(os.path.join(self.pgdata, "PG_VERSION"), "r") as fp:
return fp.read().strip()
def run_cmd(self, cmd, *args):
argv = [os.path.join(self.pgbin, cmd)]
argv.extend(args)
subprocess.check_call(argv)
def run_pg(self):
cmd = [
os.path.join(self.pgbin, "postgres"),
"-D",
self.pgdata,
"-k",
self.pgdata,
"-p",
self.user["port"],
"-c",
"listen_addresses=",
]
self.pg = subprocess.Popen(cmd)
time.sleep(1.0) # let pg start
def kill(self, force=True, immediate=True):
if self.pg is None:
return
if force:
os.kill(self.pg.pid, signal.SIGKILL)
elif immediate:
os.kill(self.pg.pid, signal.SIGQUIT)
else:
os.kill(self.pg.pid, signal.SIGTERM)
timeout = time.time() + 10
while (self.pg.poll() is None) and (time.time() < timeout):
time.sleep(0.1)
if not force and self.pg.poll() is None:
raise Exception("PG pid {} not dead".format(self.pg.pid))
@contextlib.contextmanager
def setup_pg():
tmpdir_obj = py_path.local(tempfile.mkdtemp(prefix="pghoard_dbtest_"))
tmpdir = str(tmpdir_obj)
# try to find the binaries for these versions in some path
pgdata = os.path.join(tmpdir, "pgdata")
db = PGTester(pgdata) # pylint: disable=redefined-outer-name
db.run_cmd("initdb", "-D", pgdata, "--encoding", "utf-8")
# NOTE: does not use TCP ports, no port conflicts
db.user = dict(host=pgdata, user="pghoard", password="pghoard", dbname="postgres", port="5432")
# NOTE: point $HOME to tmpdir - $HOME shouldn't affect most tests, but
    # psql tries to find .pgpass file from there as do our functions that
# manipulate pgpass. By pointing $HOME there we make sure we're not
# making persistent changes to the environment.
os.environ["HOME"] = tmpdir
# allow replication connections
with open(os.path.join(pgdata, "pg_hba.conf"), "w") as fp:
fp.write(
"local all disabled reject\n"
"local all passwordy md5\n"
"local all all trust\n"
"local replication disabled reject\n"
"local replication passwordy md5\n"
"local replication all trust\n"
)
# rewrite postgresql.conf
with open(os.path.join(pgdata, "postgresql.conf"), "r+") as fp:
lines = fp.read().splitlines()
fp.seek(0)
fp.truncate()
config = {}
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
key, val = re.split(r"\s*=\s*", line, 1)
config[key] = re.sub(r"\s*(#.*)?$", "", val)
config.update({
"hot_standby": "on",
"logging_collector": "off",
"max_wal_senders": 2,
"wal_keep_segments": 100,
"wal_level": "hot_standby",
# disable fsync and synchronous_commit to speed up the tests a bit
"fsync": "off",
"synchronous_commit": "off",
# don't need to wait for autovacuum workers when shutting down
"autovacuum": "off",
})
        # The setting name changed in PG 13; the set check is needed because "9.6" compares greater than "13" as a string
if db.pgver >= "13" and db.pgver not in {"9.5", "9.6"}:
del config["wal_keep_segments"]
config["wal_keep_size"] = 16 * 100
lines = ["{} = {}\n".format(key, val) for key, val in sorted(config.items())] # noqa
fp.write("".join(lines))
# now start pg and create test users
db.run_pg()
try:
db.run_cmd("createuser", "-h", db.user["host"], "-p", db.user["port"], "disabled")
db.run_cmd("createuser", "-h", db.user["host"], "-p", db.user["port"], "passwordy")
db.run_cmd("createuser", "-h", db.user["host"], "-p", db.user["port"], "-s", db.user["user"])
yield db
finally:
db.kill()
with suppress(Exception):
tmpdir_obj.remove(rec=1)
@pytest.yield_fixture(scope="session", name="db")
def fixture_db():
with setup_pg() as pg:
yield pg
@pytest.yield_fixture(scope="session")
def recovery_db():
with setup_pg() as pg:
# Make sure pgespresso extension is installed before we turn this into a standby
conn_str = pgutil.create_connection_string(pg.user)
conn = psycopg2.connect(conn_str)
cursor = conn.cursor()
cursor.execute("SELECT 1 FROM pg_available_extensions WHERE name = 'pgespresso' AND default_version >= '1.2'")
if cursor.fetchone():
cursor.execute("CREATE EXTENSION pgespresso")
conn.commit()
conn.close()
# Now perform a clean shutdown and restart in recovery
pg.kill(force=False, immediate=False)
recovery_conf = [
"recovery_target_timeline = 'latest'",
"restore_command = 'false'",
]
if LooseVersion(pg.ver) >= "12":
with open(os.path.join(pg.pgdata, "standby.signal"), "w") as fp:
pass
recovery_conf_path = "postgresql.auto.conf"
open_mode = "a" # As it might exist already in some cases
else:
recovery_conf.append("standby_mode = 'on'")
recovery_conf_path = "recovery.conf"
open_mode = "w"
with open(os.path.join(pg.pgdata, recovery_conf_path), open_mode) as fp:
fp.write("\n".join(recovery_conf) + "\n")
pg.run_pg()
yield pg
@pytest.yield_fixture # pylint: disable=redefined-outer-name
def pghoard(db, tmpdir, request): # pylint: disable=redefined-outer-name
yield from pghoard_base(db, tmpdir, request)
@pytest.yield_fixture # pylint: disable=redefined-outer-name
def pghoard_separate_volume(db, tmpdir, request):
tmpfs_volume = os.path.join(str(tmpdir), "tmpfs")
os.makedirs(tmpfs_volume, exist_ok=True)
# Tests that require separate volume with restricted space can only be run in
# environments where sudo can be executed without password prompts.
try:
subprocess.check_call(
["sudo", "-S", "mount", "-t", "tmpfs", "-o", "size=100m", "tmpfs", tmpfs_volume],
stdin=subprocess.DEVNULL,
)
except subprocess.CalledProcessError as ex:
raise SkipTest("Failed to create tmpfs: {!r}".format(ex))
backup_location = os.path.join(tmpfs_volume, "backupspool")
try:
yield from pghoard_base(
db,
tmpdir,
request,
backup_location=backup_location,
pg_receivexlog_config={
"disk_space_check_interval": 0.0001,
"min_disk_free_bytes": 70 * 1024 * 1024,
"resume_multiplier": 1.2,
},
)
finally:
subprocess.check_call(["sudo", "umount", tmpfs_volume])
def pghoard_base(
db,
tmpdir,
request,
compression="snappy", # pylint: disable=redefined-outer-name
transfer_count=None,
metrics_cfg=None,
*,
backup_location=None,
pg_receivexlog_config=None
):
test_site = request.function.__name__
if pg_receivexlog_config:
active_backup_mode = "pg_receivexlog"
elif os.environ.get("pghoard_test_walreceiver"):
active_backup_mode = "walreceiver"
else:
active_backup_mode = "pg_receivexlog"
if compression == "snappy" and not snappy:
compression = "lzma"
backup_location = backup_location or os.path.join(str(tmpdir), "backupspool")
config = {
"alert_file_dir": os.path.join(str(tmpdir), "alerts"),
"backup_location": backup_location,
"backup_sites": {
test_site: {
"active_backup_mode": active_backup_mode,
"basebackup_count": 2,
"basebackup_interval_hours": 24,
"pg_bin_directory": db.pgbin,
"pg_data_directory": db.pgdata,
"pg_receivexlog": pg_receivexlog_config or {},
"nodes": [db.user],
"object_storage": {
"storage_type": "local",
"directory": os.path.join(str(tmpdir), "backups"),
},
},
},
"compression": {
"algorithm": compression,
},
"http_address": "127.0.0.1",
"http_port": random.randint(1024, 32000),
"json_state_file_path": tmpdir.join("pghoard_state.json").strpath,
"maintenance_mode_file": tmpdir.join("maintenance_mode_file").strpath,
# Set process count to 1 to avoid launching subprocesses during basebackup tests.
# The new processes would be created with fork, which doesn't work properly due to
# all the fds and other things that are created during typical test setup. There
# is separate test case that executes the multiprocess version.
"restore_process_count": 1,
"tar_executable": "tar",
}
if metrics_cfg is not None:
config.update(metrics_cfg)
if transfer_count is not None:
config["transfer"] = {"thread_count": transfer_count}
confpath = os.path.join(str(tmpdir), "config.json")
with open(confpath, "w") as fp:
json.dump(config, fp)
backup_site_path = os.path.join(config["backup_location"], test_site)
basebackup_path = os.path.join(backup_site_path, "basebackup")
backup_xlog_path = os.path.join(backup_site_path, "xlog")
backup_timeline_path = os.path.join(backup_site_path, "timeline")
os.makedirs(config["alert_file_dir"])
os.makedirs(basebackup_path)
os.makedirs(backup_xlog_path)
os.makedirs(backup_timeline_path)
pgh = PGHoard(confpath)
pgh.test_site = test_site
pgh.start_threads_on_startup()
if compression == "snappy":
pgh.Compressor = snappy.StreamCompressor
else:
pgh.Compressor = lambda: lzma.LZMACompressor(preset=0) # pylint: disable=redefined-variable-type
time.sleep(0.05) # Hack to give the server time to start up
yield pgh
pgh.quit()
@pytest.yield_fixture # pylint: disable=redefined-outer-name
def pghoard_lzma(db, tmpdir, request): # pylint: disable=redefined-outer-name
yield from pghoard_base(db, tmpdir, request, compression="lzma")
@pytest.yield_fixture # pylint: disable=redefined-outer-name
def pghoard_no_mp(db, tmpdir, request): # pylint: disable=redefined-outer-name
yield from pghoard_base(db, tmpdir, request, transfer_count=1)
@pytest.yield_fixture # pylint: disable=redefined-outer-name
def pghoard_metrics(db, tmpdir, request): # pylint: disable=redefined-outer-name
metrics_cfg = {
"prometheus": {
"tags": {
"foo": "bar",
},
},
}
yield from pghoard_base(db, tmpdir, request, metrics_cfg=metrics_cfg)
| {
"content_hash": "c551c2e1ce842db65cd1e78a588aa1b9",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 123,
"avg_line_length": 35.33038348082596,
"alnum_prop": 0.5983134340819904,
"repo_name": "ohmu/pghoard",
"id": "55cdaa2f3437e7d338410b1cac0caca6697fc876",
"size": "11977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "4937"
},
{
"name": "Makefile",
"bytes": "1765"
},
{
"name": "Python",
"bytes": "467043"
}
],
"symlink_target": ""
} |
import operator
from pyinter.interval_set import IntervalSet
from .extrema import INFINITY, NEGATIVE_INFINITY
def open(lower_value, upper_value):
"""Helper function to construct an interval object with open lower and upper.
For example:
>>> open(100.2, 800.9)
(100.2, 800.9)
"""
return Interval(Interval.OPEN, lower_value, upper_value, Interval.OPEN)
def closed(lower_value, upper_value):
"""Helper function to construct an interval object with closed lower and upper.
For example:
>>> closed(100.2, 800.9)
[100.2, 800.9]
"""
return Interval(Interval.CLOSED, lower_value, upper_value, Interval.CLOSED)
def openclosed(lower_value, upper_value):
"""Helper function to construct an interval object with a open lower and closed upper.
For example:
>>> openclosed(100.2, 800.9)
(100.2, 800.9]
"""
return Interval(Interval.OPEN, lower_value, upper_value, Interval.CLOSED)
def closedopen(lower_value, upper_value):
"""Helper function to construct an interval object with a closed lower and open upper.
For example:
>>> closedopen(100.2, 800.9)
[100.2, 800.9)
"""
return Interval(Interval.CLOSED, lower_value, upper_value, Interval.OPEN)
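# A few illustrative combinations of the helpers above (the expected results are
# inferred from the Interval methods defined below; not part of the original
# module):
#   closed(1, 5).intersect(open(3, 10))  -> (3, 5]
#   closed(1, 5).union(open(3, 10))      -> [1, 10)
#   open(1, 2).overlaps(open(2, 3))      -> False  (open endpoints do not touch)
#   closed(1, 2).overlaps(closed(2, 3))  -> True   (closed endpoints do)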
class Interval(object):
"""An interval class with methods associated with mathematical intervals.
This class can deal with any comparible objects.
*Note: comparison is performed solely on the lower value*
**Examples**
An open interval:
>>> Interval(Interval.OPEN, 100.2, 800.9, Interval.OPEN)
(100.2, 800.9)
A closed interval:
>>> Interval(Interval.CLOSED, 100.2, 800.9, Interval.CLOSED)
[100.2, 800.9]
An open-closed interval:
>>> Interval(Interval.OPEN, 100.2, 800.9, Interval.CLOSED)
(100.2, 800.9]
A closed-open interval:
>>> Interval(Interval.CLOSED, 100.2, 800.9, Interval.OPEN)
[100.2, 800.9)
"""
OPEN = 0
CLOSED = 1
_lower = None
_lower_value = None
_upper_value = None
_upper = None
lower_value = property(fget=lambda self: self._lower_value, doc='This intervals lower value')
upper_value = property(fget=lambda self: self._upper_value, doc='This intervals upper value')
lower = property(fget=lambda self: self._lower, doc='Whether lower is Interval.OPEN or Interval.CLOSED')
upper = property(fget=lambda self: self._upper, doc='Whether upper is Interval.OPEN or Interval.CLOSED')
def __init__(self, lower, lower_value, upper_value, upper):
"""Create a new :class:`~pyinter.Interval` object, lower and upper should be one of
:const:`~pyinter.Interval.OPEN` or :const:`~pyinter.Interval.CLOSED`"""
if lower_value > upper_value:
raise ValueError('lower_value({lower}) must be smaller than upper_value({upper})'.format(lower=lower_value,
upper=upper_value))
self._lower = lower
self._lower_value = lower_value
self._upper_value = upper_value
self._upper = upper
def copy(self):
"""Returns a new :class:`~pyinter.Interval` object with the same bounds and values."""
return Interval(self._lower, self._lower_value, self._upper_value, self._upper)
def replace(self, lower=None, lower_value=None, upper_value=None, upper=None):
interval = self.copy()
if lower is not None:
interval._lower = lower
if lower_value is not None:
interval._lower_value = lower_value
if upper_value is not None:
interval._upper_value = upper_value
if upper is not None:
interval._upper = upper
return interval
def __repr__(self):
lower_string = '(' if self._lower == self.OPEN else '['
upper_string = ')' if self._upper == self.OPEN else ']'
return '{l}{lv}, {uv}{u}'.format(l=lower_string, lv=self.lower_value, uv=self.upper_value, u=upper_string)
def __lt__(self, other):
if hasattr(other, 'lower_value'):
return self.lower_value < other.lower_value
else:
raise NotImplementedError
def __le__(self, other):
if hasattr(other, 'lower_value'):
return self.lower_value <= other.lower_value
else:
raise NotImplementedError
def __eq__(self, other):
if hasattr(other, '_lower') and hasattr(other, 'lower_value') \
and hasattr(other, '_upper_value') and hasattr(other, '_upper'):
return self._lower == other._lower and self.lower_value == other.lower_value \
and self._upper_value == other._upper_value and self._upper == other._upper
else:
raise NotImplementedError
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
if hasattr(other, 'lower_value'):
return self.lower_value > other.lower_value
else:
raise NotImplementedError
def __ge__(self, other):
if hasattr(other, 'lower_value'):
return self.lower_value >= other.lower_value
else:
raise NotImplementedError
def __hash__(self):
return hash(self.lower_value)
def _contains_value(self, value):
"""Helper function for __contains__ to check a single value is contained within the interval"""
g = operator.gt if self._lower is self.OPEN else operator.ge
l = operator.lt if self._upper is self.OPEN else operator.le
return g(value, self.lower_value) and l(value, self._upper_value)
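    # For example, with the operator selection above (illustrative, assumed
    # behaviour):
    #   5 in closed(1, 5)  -> True   (a closed upper bound compares with <=)
    #   5 in open(1, 5)    -> False  (an open upper bound compares with <)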
def __contains__(self, item):
if isinstance(item, Interval):
lower_in = False
upper_in = False
if self._contains_value(item._lower_value):
lower_in = True
elif self._lower == item._lower and self._lower_value == item._lower_value:
lower_in = True
if self._contains_value(item._upper_value):
upper_in = True
elif self._upper == item._upper and self._upper_value == item._upper_value:
upper_in = True
return lower_in and upper_in
else:
return self._contains_value(item)
@classmethod
def _opposite_boundary_type(cls, bound):
return cls.CLOSED if bound == cls.OPEN else cls.OPEN
def _get_new_lower_upper(self, other, operator):
if operator == self.intersect:
if self.lower_value == other.lower_value:
new_lower = self._lower and other._lower
elif self.lower_value < other.lower_value:
new_lower = other._lower
else:
new_lower = self._lower
if self._upper_value == other._upper_value:
new_upper = self._upper and other._upper
elif self._upper_value < other._upper_value:
new_upper = self._upper
else:
new_upper = other._upper
elif operator == self.union:
if self.lower_value == other.lower_value:
new_lower = self._lower or other._lower
elif self.lower_value < other.lower_value:
new_lower = self._lower
else:
new_lower = other._lower
if self._upper_value == other._upper_value:
new_upper = self._upper or other._upper
elif self._upper_value < other._upper_value:
new_upper = other._upper
else:
new_upper = self._upper
return new_lower, new_upper
def empty(self):
return (self._lower_value >= self._upper_value and
(self._lower == self.OPEN or self._upper == self.OPEN))
def overlaps(self, other):
"""If self and other have any overlapping values returns True, otherwise returns False"""
if self > other:
smaller, larger = other, self
else:
smaller, larger = self, other
if larger.empty():
return False
if smaller._upper_value == larger._lower_value:
return smaller._upper == smaller.CLOSED and larger._lower == smaller.CLOSED
return larger._lower_value < smaller._upper_value
def intersect(self, other):
"""Returns a new :class:`~pyinter.Interval` representing the intersection of this :class:`~pyinter.Interval`
with the other :class:`~pyinter.Interval`"""
if self.overlaps(other):
newlower_value = max(self.lower_value, other.lower_value)
new_upper_value = min(self._upper_value, other._upper_value)
new_lower, new_upper = self._get_new_lower_upper(other, self.intersect)
return Interval(new_lower, newlower_value, new_upper_value, new_upper)
else:
return None
__and__ = intersect
def union(self, other):
"""Returns a new Interval or an :class:`~pyinter.IntervalSet` representing the union of this
:class:`~pyinter.Interval` with the other :class:`~pyinter.Interval`.
If the two intervals are overlaping then this will return an :class:`~pyinter.Interval`,
otherwise this returns an :class:`~pyinter.IntervalSet`."""
if self.overlaps(other):
newlower_value = min(self.lower_value, other.lower_value)
new_upper_value = max(self._upper_value, other._upper_value)
new_lower, new_upper = self._get_new_lower_upper(other, self.union)
return Interval(new_lower, newlower_value, new_upper_value, new_upper)
else:
return IntervalSet((self, other))
__or__ = __add__ = union
def difference(self, other):
"""Returns a new Interval or an :class:`~pyinter.IntervalSet` representing the subtraction of this
:class:`~pyinter.Interval` with the other :class:`~pyinter.Interval`.
The result will contain everything that is contained by the left interval but not contained
by the second interval.
If the `other` interval is enclosed in this one then this will return a
:class:`~pyinter.IntervalSet`, otherwise this returns a :class:`~pyinter.Interval`.
"""
if other.empty():
return self
if self in other:
return open(self._lower_value, self._lower_value)
if self._lower == other._lower and self._lower_value == other._lower_value:
return Interval(self._opposite_boundary_type(other._upper), other._upper_value, self._upper_value, self._upper)
if self._upper == other._upper and self._upper_value == other._upper_value:
return Interval(self._lower, self._lower_value, other._lower_value, self._opposite_boundary_type(other._lower))
if other in self:
return IntervalSet([
Interval(self._lower, self._lower_value, other.lower_value, self._opposite_boundary_type(other._lower)),
Interval(self._opposite_boundary_type(other._upper), other._upper_value, self.upper_value, self._upper),
])
if other.lower_value in self:
return Interval(self._lower, self._lower_value, other._lower_value, self._opposite_boundary_type(other._lower))
if other.upper_value in self:
return Interval(self._opposite_boundary_type(other._upper), other._upper_value, self._upper_value, self._upper)
return Interval(self._lower, self._lower_value, self._upper_value, self._upper)
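    # Illustrative results of difference() (inferred from the branches above;
    # not part of the original module):
    #   closed(1, 10) - closed(3, 4)  -> an IntervalSet of [1, 3) and (4, 10]
    #   closed(1, 10) - closed(5, 20) -> [1, 5)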
def complement(self):
return IntervalSet([
Interval(self.OPEN, NEGATIVE_INFINITY, self.lower_value, self._opposite_boundary_type(self._upper)),
Interval(self._opposite_boundary_type(self._lower), self.upper_value, INFINITY, self.OPEN),
])
__sub__ = difference
| {
"content_hash": "99fda78dc499b8a86b1654c1c9f224a1",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 123,
"avg_line_length": 39.635451505016725,
"alnum_prop": 0.6096531938233061,
"repo_name": "intiocean/pyinter",
"id": "20a86636009722f32e3f63c3ef4fdbc57370bf12",
"size": "11851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyinter/interval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46266"
}
],
"symlink_target": ""
} |
module_name = 'example module'
module_version = '0.0'
module_author = 'HydroGen'
def main(message, long_pool_thead):
pass
| {
"content_hash": "2210111d1ee1b02c4bef49c695f7cdc1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 35,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.712,
"repo_name": "ihydrogen/hydrogen-chat-bot-py",
"id": "4bcda1500cd4f95daa1a08f12a0d42d197504623",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/test_module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77801"
}
],
"symlink_target": ""
} |
import argparse
import json
from operator import attrgetter
import os
from pprint import pprint
import subprocess
import sys
from lib import Flow
import utils
OVS_OFCTL = '/usr/bin/ovs-ofctl'
def parse_args():
parser = argparse.ArgumentParser(description='openflow parser')
parser.add_argument('--disable-unicode', action='store_true',
help='do not output unicode characters')
parser.add_argument('--json', action='store_true',
help='output json format')
parser.add_argument('--show-config', action='store_true',
help='display the specified flozer args/config')
parser.add_argument('--conf', metavar='config file',
default='~/.flozer.json',
help='config file if not ~/.flozer.json')
parser.add_argument('--sort', nargs='+',
help='one or more sort keys '
'(ex: --sort table label priority). '
'defaults to not sorting input')
parser.add_argument('-O', '--protocol', metavar='OpenFlow protocol',
help='openflow protocol to use for collecting flows, '
'see the ovs-ofctl man page for more info. '
'flozer defaults to OpenFlow13')
parser.add_argument('bridge', nargs='*', action='store',
help='bridge(s) to dump flows on')
return parser.parse_args()
def parse_config(conf_file):
try:
with open(os.path.expanduser(conf_file)) as f:
config = json.load(f)
except IOError:
return {}
# cookie map is a function so the string needs to be converted
if 'cookie_map' in config:
# get ready for some srs business
f = compile(config['cookie_map'], conf_file, 'eval')
config['cookie_map'] = eval(f, {'__builtins__': {},
'utils': utils})
# integerize the tables
if 'table_map' in config:
config['table_map'] = {int(k): v
for k, v in config['table_map'].iteritems()}
# parse json for boolean
if 'json' in config:
if config['json'] in ('true', 'True', 'yes', 'Yes', '1', 1):
config['json'] = True
else:
config['json'] = False
# parse disable_unicode for boolean
if 'disable_unicode' in config:
if config['disable_unicode'] in ('true', 'True', 'yes', 'Yes', '1', 1):
config['disable_unicode'] = True
else:
config['disable_unicode'] = False
return config
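# An illustrative ~/.flozer.json accepted by parse_config() above. The keys are
# the ones this module reads; the values are made-up examples, not a recommended
# configuration:
# {
#     "protocol": "OpenFlow13",
#     "sort": ["table", "priority"],
#     "json": "no",
#     "disable_unicode": "yes",
#     "table_map": {"0": "classifier", "10": "acl"},
#     "cookie_map": "lambda cookie: cookie"
# }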
def get_stdin():
stdin_lines = []
for line in sys.stdin:
stdin_lines.append(line)
return stdin_lines
def collect_flows(bridges, protocol):
flows = []
for bridge in bridges:
args = [OVS_OFCTL, 'dump-flows', '-O', protocol, bridge]
flows[len(flows):] = subprocess.check_output(args).split('\n')
return flows
def execute():
args = parse_args()
conf = parse_config(args.conf)
# merge args and conf preferring args over conf values
# also set a few defaults
disable_unicode = (args.disable_unicode or
conf.get('disable_unicode', False))
json_output = args.json or conf.get('json', False)
protocol = args.protocol or conf.get('protocol', 'OpenFlow13')
sort = args.sort or conf.get('sort')
if args.show_config:
print 'bridges: %s' % args.bridge
print 'OpenFlow protocol used: %s' % protocol
print 'json output: %s' % json_output
print 'disable unicode: %s' % disable_unicode
print 'sort keys: %s' % sort
print 'conf file: %s' % args.conf
print 'conf file contents:'
pprint(conf)
return
# collect raw flows
if not args.bridge:
flows = get_stdin()
else:
flows = collect_flows(args.bridge, protocol)
# parse flows
kwargs = {'cookie_map': conf.get('cookie_map'),
'match_map': conf.get('match_map'),
'table_map': conf.get('table_map'),
'action_map': conf.get('action_map'),
'disable_unicode': disable_unicode}
flows = [Flow(flow, **kwargs) for flow in flows
if flow and '_FLOW reply' not in flow]
if sort:
flows = sorted(flows, key=attrgetter(*sort))
# output flows
if json_output:
print json.dumps(flows)
else:
for flow in flows:
print flow
print
if __name__ == '__main__':
execute()
| {
"content_hash": "04e15d34434a950cbfe60fe0c1d13574",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 32.75714285714286,
"alnum_prop": 0.5601831661578718,
"repo_name": "tr3buchet/flozer",
"id": "6ef400c606cb829b4859e6a838bc79ab3affa1dd",
"size": "5230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flozer/flozer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20414"
}
],
"symlink_target": ""
} |
import requests
from pagespeed.settings import GOOGLE_API_KEY, GOOGLE_PAGESPEED_URL
def get_results(input_url):
api_key = GOOGLE_API_KEY
test_url = GOOGLE_PAGESPEED_URL
payload = {'url': input_url, 'key': api_key}
r = requests.get(test_url, params=payload)
json = r.json()
results = json["ruleGroups"]["SPEED"]["score"]
return results | {
"content_hash": "c9de0db9a6e4792cabd8cbc2891a8ac6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.674863387978142,
"repo_name": "boomajoom/pagespeed",
"id": "2aad3f33c1f7b0ba462ce31c1b861a6fc7200b42",
"size": "366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pagespeed/module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1671"
}
],
"symlink_target": ""
} |
"""URLs for all views."""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path, reverse_lazy
from django.views.generic import RedirectView, TemplateView
from djpsilobus.core import views
from djauth.views import loggedout
admin.autodiscover()
handler404 = 'djtools.views.errors.four_oh_four_error'
handler500 = 'djtools.views.errors.server_error'
urlpatterns = [
# login as
path('admin/', include('loginas.urls')),
# django admin
path('admin/', admin.site.urls),
# auth
path(
'accounts/login/',
auth_views.LoginView.as_view(),
{'template_name': 'registration/login.html'},
name='auth_login',
),
path(
'accounts/logout/',
auth_views.LogoutView.as_view(),
{'next_page': reverse_lazy('auth_loggedout')},
name='auth_logout',
),
path(
'accounts/loggedout/',
loggedout,
{'template_name': 'registration/logged_out.html'},
name='auth_loggedout',
),
path(
'accounts/',
RedirectView.as_view(url=reverse_lazy('auth_login')),
),
path(
'denied/',
TemplateView.as_view(template_name='denied.html'),
name='access_denied',
),
# downloads
path(
'<str:division>/<str:department>/<str:term>/<int:year>/download/',
views.download,
name='download_department',
),
path(
'<str:division>/<str:department>/download/',
views.download,
name='download_department',
),
path(
'<str:division>/download/',
views.download,
name='download_division',
),
# OpenXML export
path(
'<str:division>/<str:department>/<str:term>/<int:year>/openxml/',
views.openxml,
name='openxml_department',
),
path(
'<str:division>/<str:department>/openxml/',
views.openxml,
name='openxml_department',
),
path(
'<str:division>/openxml/',
views.openxml,
name='openxml_division',
),
# dspace API
path(
'dspace/file/search/',
views.dspace_file_search,
name='dspace_file_search',
),
path(
'dspace/<str:dept>/<str:term>/<int:year>/courses/',
views.dspace_dept_courses,
name='dspace_dept_courses',
),
# home, with and without department code, term, year.
path('<str:dept>/<str:term>/<int:year>/', views.home, name='home_all'),
path('<str:dept>', views.home, name='home_dept'),
path('', views.home, name='home'),
]
| {
"content_hash": "cec87392ab20b1f58b0933c9e7db48c8",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 75,
"avg_line_length": 26.612244897959183,
"alnum_prop": 0.584739263803681,
"repo_name": "carthage-college/django-djpsilobus",
"id": "4c11e25e6649c70c9c917b012e6bb320768b9140",
"size": "2633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djpsilobus/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "25749"
},
{
"name": "Python",
"bytes": "63960"
},
{
"name": "Shell",
"bytes": "7521"
}
],
"symlink_target": ""
} |
__author__ = 'yinjun'
class Solution:
"""
@param nums: The integer array
@return: The length of LIS (longest increasing subsequence)
"""
def longestIncreasingSubsequence(self, nums):
# write your code here
if nums == None or nums == []:
return 0
l = len(nums)
        length = [0 for i in range(l)]
maxLength = 0
for i in range(l):
length[i] = 1
for j in range(0, i):
if nums[j] <= nums[i]:
length[i] = max(length[i], length[j] + 1)
maxLength = max(maxLength, length[i])
return maxLength | {
"content_hash": "820102fc106c22380882f4ba73b7a1b6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 23.071428571428573,
"alnum_prop": 0.5030959752321982,
"repo_name": "shootsoft/practice",
"id": "ab0543b51f101d74cea6db70c15c18eaa1821522",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lintcode/NineChapters/04/longest-increasing-subsequence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "722333"
}
],
"symlink_target": ""
} |
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.data.vision import CIFAR10
IMAGE_SIZE = 64
def transformer(data, label):
""" data preparation """
data = mx.image.imresize(data, IMAGE_SIZE, IMAGE_SIZE)
data = mx.nd.transpose(data, (2, 0, 1))
data = data.astype(np.float32) / 128.0 - 1
return data, label
def get_training_data(batch_size):
""" helper function to get dataloader"""
return gluon.data.DataLoader(
CIFAR10(train=True, transform=transformer),
batch_size=batch_size, shuffle=True, last_batch='discard')
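# Minimal usage sketch (assumed, not part of the original module):
#   train_data = get_training_data(batch_size=64)
#   for data, label in train_data:
#       pass  # each batch has shape (64, 3, 64, 64), values scaled to [-1, 1)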
| {
"content_hash": "ed837ae92198d257e36f93eb75228561",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 28.19047619047619,
"alnum_prop": 0.6858108108108109,
"repo_name": "dmlc/mxnet",
"id": "782f74ffca5d7c5fb54efa6ce86a24f610792b91",
"size": "1621",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "example/gluon/sn_gan/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "85580"
},
{
"name": "C++",
"bytes": "3227650"
},
{
"name": "CMake",
"bytes": "48546"
},
{
"name": "Cuda",
"bytes": "567360"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "16368"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40096"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "615878"
},
{
"name": "Perl 6",
"bytes": "21993"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "3164782"
},
{
"name": "R",
"bytes": "284084"
},
{
"name": "Scala",
"bytes": "862528"
},
{
"name": "Shell",
"bytes": "110890"
}
],
"symlink_target": ""
} |
"""
Tests for Block Device utility functions.
"""
from oslo_utils.fixture import uuidsentinel as uuids
import six
from nova import block_device
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import matchers
class BlockDeviceTestCase(test.NoDBTestCase):
def setUp(self):
super(BlockDeviceTestCase, self).setUp()
BDM = block_device.BlockDeviceDict
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'volume_size': 1,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'volume_size': 10,
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'}),
]
def test_properties(self):
root_device0 = '/dev/sda'
root_device1 = '/dev/sdb'
mappings = [{'virtual': 'root',
'device': root_device0}]
properties0 = {'mappings': mappings}
properties1 = {'mappings': mappings,
'root_device_name': root_device1}
self.assertIsNone(block_device.properties_root_device_name({}))
self.assertEqual(root_device0,
block_device.properties_root_device_name(properties0))
self.assertEqual(root_device1,
block_device.properties_root_device_name(properties1))
def test_ephemeral(self):
self.assertFalse(block_device.is_ephemeral('ephemeral'))
self.assertTrue(block_device.is_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_ephemeral('ephemeral11'))
self.assertFalse(block_device.is_ephemeral('root'))
self.assertFalse(block_device.is_ephemeral('swap'))
self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
self.assertEqual(0, block_device.ephemeral_num('ephemeral0'))
self.assertEqual(1, block_device.ephemeral_num('ephemeral1'))
self.assertEqual(11, block_device.ephemeral_num('ephemeral11'))
self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
self.assertFalse(block_device.is_swap_or_ephemeral('root'))
self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
def test_mappings_prepend_dev(self):
mapping = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': 'sdb'},
{'virtual': 'swap', 'device': 'sdc'},
{'virtual': 'ephemeral1', 'device': 'sdd'},
{'virtual': 'ephemeral2', 'device': 'sde'}]
expected = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': '/dev/sdb'},
{'virtual': 'swap', 'device': '/dev/sdc'},
{'virtual': 'ephemeral1', 'device': '/dev/sdd'},
{'virtual': 'ephemeral2', 'device': '/dev/sde'}]
prepended = block_device.mappings_prepend_dev(mapping)
self.assertEqual(sorted(expected, key=lambda v: v['virtual']),
sorted(prepended, key=lambda v: v['virtual']))
def test_strip_dev(self):
self.assertEqual('sda', block_device.strip_dev('/dev/sda'))
self.assertEqual('sda', block_device.strip_dev('sda'))
self.assertIsNone(block_device.strip_dev(None))
def test_strip_prefix(self):
self.assertEqual('a', block_device.strip_prefix('/dev/sda'))
self.assertEqual('a', block_device.strip_prefix('a'))
self.assertEqual('a', block_device.strip_prefix('xvda'))
self.assertEqual('a', block_device.strip_prefix('vda'))
self.assertEqual('a', block_device.strip_prefix('hda'))
self.assertIsNone(block_device.strip_prefix(None))
def test_get_device_letter(self):
self.assertEqual('', block_device.get_device_letter(''))
self.assertEqual('a', block_device.get_device_letter('/dev/sda1'))
self.assertEqual('b', block_device.get_device_letter('/dev/xvdb'))
self.assertEqual('d', block_device.get_device_letter('/dev/d'))
self.assertEqual('a', block_device.get_device_letter('a'))
self.assertEqual('b', block_device.get_device_letter('sdb2'))
self.assertEqual('c', block_device.get_device_letter('vdc'))
self.assertEqual('c', block_device.get_device_letter('hdc'))
self.assertIsNone(block_device.get_device_letter(None))
def test_generate_device_name(self):
expected = (
('vda', ("vd", 0)),
('vdaa', ("vd", 26)),
('vdabc', ("vd", 730)),
('vdidpok', ("vd", 4194304)),
('sdc', ("sd", 2)),
('sdaa', ("sd", 26)),
('sdiw', ("sd", 256)),
('hdzz', ("hd", 701))
)
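        # Suffixes count like spreadsheet columns, as the expected values
        # above show: index 0 -> 'a', 26 -> 'aa', 730 -> 'abc'.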
for res, args in expected:
self.assertEqual(res, block_device.generate_device_name(*args))
def test_volume_in_mapping(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1},
{'num': 2,
'virtual_name': 'ephemeral2',
'device_name': '/dev/sdd',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
def _assert_volume_in_mapping(device_name, true_or_false):
in_mapping = block_device.volume_in_mapping(
device_name, block_device_info)
self.assertEqual(true_or_false, in_mapping)
_assert_volume_in_mapping('sda', False)
_assert_volume_in_mapping('sdb', True)
_assert_volume_in_mapping('sdc1', True)
_assert_volume_in_mapping('sdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
_assert_volume_in_mapping('sdh1', False)
def test_get_root_bdm(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vdd'}]
self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
self.assertIsNone(block_device.get_root_bdm([]))
def test_get_bdm_ephemeral_disk_size(self):
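        # Only BDM 2 (blank/local without guest_format 'swap') counts as
        # ephemeral, so the expected total is its volume_size of 10.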
size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
self.assertEqual(10, size)
def test_get_bdm_swap_list(self):
swap_list = block_device.get_bdm_swap_list(self.new_mapping)
self.assertEqual(1, len(swap_list))
self.assertEqual(1, swap_list[0].get('id'))
def test_get_bdm_local_disk_num(self):
size = block_device.get_bdm_local_disk_num(self.new_mapping)
self.assertEqual(2, size)
def test_new_format_is_swap(self):
expected_results = [True, False, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_swap(bdm)
self.assertEqual(expected, res)
def test_new_format_is_ephemeral(self):
expected_results = [False, True, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_ephemeral(bdm)
self.assertEqual(expected, res)
def test_validate_device_name(self):
for value in [' ', 10, None, 'a' * 260]:
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_device_name,
value)
def test_validate_and_default_volume_size(self):
bdm = {}
for value in [-1, 'a', 2.5]:
bdm['volume_size'] = value
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_and_default_volume_size,
bdm)
def test_get_bdms_to_connect(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vde', 'boot_index': None},
{'device_name': 'vdd'}]
self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
exclude_root_mapping=True))
self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
class TestBlockDeviceDict(test.NoDBTestCase):
def setUp(self):
super(TestBlockDeviceDict, self).setUp()
BDM = block_device.BlockDeviceDict
self.api_mapping = [
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1},
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1},
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'boot_index': 0},
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'uuid': 'fake-snapshot-id-1',
'boot_index': -1},
{'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'}),
]
self.legacy_mapping = [
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'delete_on_termination': True,
'virtual_name': 'swap'},
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'delete_on_termination': True,
'virtual_name': 'ephemeral0'},
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}"},
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2'},
{'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping_source_image = [
BDM({'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda3',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3',
'boot_index': -1}),
BDM({'id': 7, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda4',
'source_type': 'image',
'destination_type': 'local',
'connection_info': "{'fake': 'connection_info'}",
'image_id': 'fake-image-id-2',
'boot_index': -1}),
]
self.legacy_mapping_source_image = [
{'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda3',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3'},
]
def test_init(self):
def fake_validate(obj, dct):
pass
self.stub_out('nova.block_device.BlockDeviceDict._fields',
set(['field1', 'field2']))
self.stub_out('nova.block_device.BlockDeviceDict._db_only_fields',
set(['db_field1', 'db_field2']))
self.stub_out('nova.block_device.BlockDeviceDict._validate',
fake_validate)
# Make sure db fields are not picked up if they are not
# in the original dict
dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
'field2': 'bar',
'db_field1': 'baz'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Make sure all expected fields are defaulted
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Unless they are not meant to be
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
do_not_default=set(['field2']))
self.assertIn('field1', dev_dict)
self.assertNotIn('field2', dev_dict)
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Passing kwargs to constructor works
dev_dict = block_device.BlockDeviceDict(field1='foo')
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
dev_dict = block_device.BlockDeviceDict(
{'field1': 'foo'}, field2='bar')
self.assertEqual('foo', dev_dict['field1'])
self.assertEqual('bar', dev_dict['field2'])
def test_init_prepend_dev_to_device_name(self):
bdm = {'id': 3, 'instance_uuid': uuids.instance,
'device_name': 'vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vda', bdm_dict['device_name'])
bdm['device_name'] = '/dev/vdb'
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vdb', bdm_dict['device_name'])
bdm['device_name'] = None
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertIsNone(bdm_dict['device_name'])
def test_init_boolify_delete_on_termination(self):
# Make sure that when delete_on_termination is not passed it's
# still set to False and not None
bdm = {'id': 3, 'instance_uuid': uuids.instance,
'device_name': 'vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertFalse(bdm_dict['delete_on_termination'])
def test_validate(self):
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
{'bogus_field': 'lame_val'})
lame_bdm = dict(self.new_mapping[2])
del lame_bdm['source_type']
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_bdm)
lame_bdm['no_device'] = True
block_device.BlockDeviceDict(lame_bdm)
lame_dev_bdm = dict(self.new_mapping[2])
lame_dev_bdm['device_name'] = "not a valid name"
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
lame_dev_bdm['device_name'] = ""
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
cool_volume_size_bdm = dict(self.new_mapping[2])
cool_volume_size_bdm['volume_size'] = '42'
cool_volume_size_bdm = block_device.BlockDeviceDict(
cool_volume_size_bdm)
self.assertEqual(42, cool_volume_size_bdm['volume_size'])
lame_volume_size_bdm = dict(self.new_mapping[2])
lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_volume_size_bdm)
truthy_bdm = dict(self.new_mapping[2])
truthy_bdm['delete_on_termination'] = '1'
truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
self.assertTrue(truthy_bdm['delete_on_termination'])
verbose_bdm = dict(self.new_mapping[2])
verbose_bdm['boot_index'] = 'first'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
verbose_bdm)
def test_from_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
block_device.BlockDeviceDict.from_legacy(legacy),
matchers.IsSubDictOf(new))
def test_from_legacy_mapping(self):
def _get_image_bdms(bdms):
return [bdm for bdm in bdms if bdm['source_type'] == 'image']
def _get_bootable_bdms(bdms):
return [bdm for bdm in bdms
if (bdm['boot_index'] is not None and
bdm['boot_index'] >= 0)]
new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
self.assertEqual(0, len(_get_image_bdms(new_no_img)))
for new, expected in zip(new_no_img, self.new_mapping):
self.assertThat(new, matchers.IsSubDictOf(expected))
new_with_img = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref')
image_bdms = _get_image_bdms(new_with_img)
boot_bdms = _get_bootable_bdms(new_with_img)
self.assertEqual(1, len(image_bdms))
self.assertEqual(1, len(boot_bdms))
self.assertEqual(0, image_bdms[0]['boot_index'])
self.assertEqual('image', boot_bdms[0]['source_type'])
new_with_img_and_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1')
image_bdms = _get_image_bdms(new_with_img_and_root)
boot_bdms = _get_bootable_bdms(new_with_img_and_root)
self.assertEqual(0, len(image_bdms))
self.assertEqual(1, len(boot_bdms))
self.assertEqual(0, boot_bdms[0]['boot_index'])
self.assertEqual('volume', boot_bdms[0]['source_type'])
new_no_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
self.assertEqual(0, len(_get_image_bdms(new_no_root)))
self.assertEqual(0, len(_get_bootable_bdms(new_no_root)))
def test_from_api(self):
for api, new in zip(self.api_mapping, self.new_mapping):
new['connection_info'] = None
if new['snapshot_id']:
new['volume_id'] = None
self.assertThat(
block_device.BlockDeviceDict.from_api(api, False),
matchers.IsSubDictOf(new))
def test_from_api_invalid_blank_id(self):
api_dict = {'id': 1,
'source_type': 'blank',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'delete_on_termination': True,
'boot_index': -1}
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api, api_dict,
False)
def test_from_api_invalid_source_to_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'uuid': 'fake-volume-id-1'}
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api, api_dict,
False)
def test_from_api_valid_source_to_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'uuid': 1,
'boot_index': 0}
retexp = block_device.BlockDeviceDict(
{'id': 1,
'source_type': 'image',
'image_id': 1,
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'boot_index': 0})
self.assertEqual(retexp,
block_device.BlockDeviceDict.from_api(api_dict, True))
def test_from_api_valid_source_to_local_mapping_with_string_bi(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'uuid': 1,
'boot_index': '0'}
retexp = block_device.BlockDeviceDict(
{'id': 1,
'source_type': 'image',
'image_id': 1,
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'boot_index': 0})
self.assertEqual(retexp,
block_device.BlockDeviceDict.from_api(api_dict, True))
def test_from_api_invalid_image_to_destination_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'uuid': 'fake-volume-id-1',
'volume_type': 'fake-lvm-1',
'boot_index': 1}
ex = self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api,
api_dict, False)
self.assertIn('Mapping image to local is not supported',
six.text_type(ex))
def test_from_api_invalid_volume_type_to_destination_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'volume',
'destination_type': 'local',
'uuid': 'fake-volume-id-1',
'volume_type': 'fake-lvm-1'}
ex = self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api,
api_dict, False)
self.assertIn('Specifying a volume_type with destination_type=local '
'is not supported', six.text_type(ex))
def test_from_api_invalid_specify_volume_type_with_source_volume_mapping(
self):
api_dict = {'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'volume_type': 'fake-lvm-1'}
ex = self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api,
api_dict, False)
self.assertIn('Specifying volume type to existing volume is '
'not supported', six.text_type(ex))
def test_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
legacy,
matchers.IsSubDictOf(new.legacy()))
def test_legacy_mapping(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_source_image(self):
for legacy, new in zip(self.legacy_mapping_source_image,
self.new_mapping_source_image):
if new['destination_type'] == 'volume':
self.assertThat(legacy, matchers.IsSubDictOf(new.legacy()))
else:
self.assertRaises(exception.InvalidBDMForLegacy, new.legacy)
def test_legacy_mapping_source_image(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_mapping_from_object_list(self):
bdm1 = objects.BlockDeviceMapping()
bdm1 = objects.BlockDeviceMapping._from_db_object(
None, bdm1, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[0]))
bdm2 = objects.BlockDeviceMapping()
bdm2 = objects.BlockDeviceMapping._from_db_object(
None, bdm2, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[1]))
bdmlist = objects.BlockDeviceMappingList()
bdmlist.objects = [bdm1, bdm2]
block_device.legacy_mapping(bdmlist)
def test_image_mapping(self):
removed_fields = ['id', 'instance_uuid', 'connection_info',
'created_at', 'updated_at', 'deleted_at', 'deleted']
for bdm in self.new_mapping:
mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
bdm).get_image_mapping()
for fld in removed_fields:
self.assertNotIn(fld, mapping_bdm)
def _test_snapshot_from_bdm(self, template):
snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
self.assertEqual('new-snapshot-id', snapshot['snapshot_id'])
self.assertEqual('snapshot', snapshot['source_type'])
self.assertEqual('volume', snapshot['destination_type'])
self.assertEqual(template.volume_size, snapshot['volume_size'])
self.assertEqual(template.delete_on_termination,
snapshot['delete_on_termination'])
self.assertEqual(template.device_name, snapshot['device_name'])
for key in ['disk_bus', 'device_type', 'boot_index']:
self.assertEqual(template[key], snapshot[key])
def test_snapshot_from_bdm(self):
for bdm in self.new_mapping:
self._test_snapshot_from_bdm(objects.BlockDeviceMapping(**bdm))
def test_snapshot_from_object(self):
for bdm in self.new_mapping[:-1]:
obj = objects.BlockDeviceMapping()
obj = objects.BlockDeviceMapping._from_db_object(
None, obj, fake_block_device.FakeDbBlockDeviceDict(
bdm))
self._test_snapshot_from_bdm(obj)
| {
"content_hash": "a2be6e3a2d0c2ad86bdf4ac4ffed99be",
"timestamp": "",
"source": "github",
"line_count": 710,
"max_line_length": 79,
"avg_line_length": 43.45211267605634,
"alnum_prop": 0.5361252471556838,
"repo_name": "rahulunair/nova",
"id": "abf265818a32083da47c69534fe376dbd2312ca1",
"size": "31481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/test_block_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22804450"
},
{
"name": "Shell",
"bytes": "41649"
},
{
"name": "Smarty",
"bytes": "472764"
}
],
"symlink_target": ""
} |
"""engine.SCons.Variables.ListVariable
This file defines the option type for SCons implementing 'lists'.
A 'list' option may either be 'all', 'none' or a list of names
separated by comma. After the option has been processed, the option
value holds either the named list elements, all list elements or no
list elements at all.
Usage example:
list_of_libs = Split('x11 gl qt ical')
opts = Variables()
opts.Add(ListVariable('shared',
'libraries to build as shared libraries',
'all',
elems = list_of_libs))
...
for lib in list_of_libs:
if lib in env['shared']:
env.SharedObject(...)
else:
env.Object(...)
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/ListVariable.py 3603 2008/10/10 05:46:45 scons"
# Known Bug: This should behave like a Set-Type, but does not really,
# since elements can occur twice.
__all__ = ['ListVariable',]
import string
import UserList
import SCons.Util
class _ListVariable(UserList.UserList):
def __init__(self, initlist=[], allowedElems=[]):
UserList.UserList.__init__(self, filter(None, initlist))
self.allowedElems = allowedElems[:]
self.allowedElems.sort()
def __cmp__(self, other):
raise NotImplementedError
def __eq__(self, other):
raise NotImplementedError
def __ge__(self, other):
raise NotImplementedError
def __gt__(self, other):
raise NotImplementedError
def __le__(self, other):
raise NotImplementedError
def __lt__(self, other):
raise NotImplementedError
def __str__(self):
if len(self) == 0:
return 'none'
self.data.sort()
if self.data == self.allowedElems:
return 'all'
else:
return string.join(self, ',')
def prepare_to_store(self):
return self.__str__()
def _converter(val, allowedElems, mapdict):
"""
"""
if val == 'none':
val = []
elif val == 'all':
val = allowedElems
else:
val = filter(None, string.split(val, ','))
val = map(lambda v, m=mapdict: m.get(v, v), val)
notAllowed = filter(lambda v, aE=allowedElems: not v in aE, val)
if notAllowed:
raise ValueError("Invalid value(s) for option: %s" %
string.join(notAllowed, ','))
return _ListVariable(val, allowedElems)
## def _validator(key, val, env):
## """
## """
## # todo: write validator for pkg list
## return 1
def ListVariable(key, help, default, names, map={}):
"""
The input parameters describe a 'package list' option, thus they
    are returned with the correct converter and validator appended. The
    result is usable as input to opts.Add().
    A 'package list' option may either be 'all', 'none' or a
    comma-separated list of package names.
"""
names_str = 'allowed names: %s' % string.join(names, ' ')
if SCons.Util.is_List(default):
default = string.join(default, ',')
help = string.join(
(help, '(all|none|comma-separated list of names)', names_str),
'\n ')
return (key, help, default,
None, #_validator,
lambda val, elems=names, m=map: _converter(val, elems, m))
| {
"content_hash": "aad5badd4bf9db34384e19f7c7ff39ba",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 90,
"avg_line_length": 33.45112781954887,
"alnum_prop": 0.6414924702180266,
"repo_name": "frew/simpleproto",
"id": "4dcaa1c95c53fe6efec56826c2a24cf2873cfb69",
"size": "4449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scons-local-1.1.0/SCons/Variables/ListVariable.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "30217"
},
{
"name": "Protocol Buffer",
"bytes": "1960"
},
{
"name": "Python",
"bytes": "1704215"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from collections import namedtuple
from collections import OrderedDict
import copy
import re
import time
from oslo_config import cfg
from oslo_log import log
from vitrage.common.constants import EdgeProperties as EProps
from vitrage.common.constants import VertexProperties as VProps
from vitrage.common.utils import md5
from vitrage.common.utils import recursive_keypairs
from vitrage.entity_graph.mappings.datasource_info_mapper \
import DatasourceInfoMapper
from vitrage.evaluator.actions.action_executor import ActionExecutor
from vitrage.evaluator.actions.base import ActionMode
from vitrage.evaluator.actions.base import ActionType
import vitrage.evaluator.actions.priority_tools as pt
from vitrage.evaluator.template_data import ActionSpecs
from vitrage.evaluator.template_data import EdgeDescription
from vitrage.evaluator.template_functions.function_resolver import is_function
from vitrage.evaluator.template_schema_factory import TemplateSchemaFactory
from vitrage.graph.algo_driver.algorithm import Mapping
from vitrage.graph.algo_driver.sub_graph_matching import \
NEG_CONDITION
from vitrage.graph.driver import Vertex
from vitrage import storage
from vitrage.storage.sqlalchemy import models
from vitrage.utils.datetime import utcnow
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Entry containing action info.
# specs - ActionSpecs
# mode - DO or UNDO (the action)
# action_id - the action id in scenario_repository
# Trigger_id - a unique identifier per match in graph (i.e., the subgraph
# that matched the action in the spec) for the specific action.
ActionInfo = \
namedtuple('ActionInfo', ['specs', 'mode', 'action_id', 'trigger_id'])
TARGET = 'target'
SOURCE = 'source'
class ScenarioEvaluator(object):
def __init__(self,
e_graph,
scenario_repo,
actions_callback,
enabled=False):
self._entity_graph = e_graph
self._db = storage.get_connection_from_config()
self._scenario_repo = scenario_repo
self._action_executor = ActionExecutor(actions_callback)
self._entity_graph.subscribe(self.process_event)
self.enabled = enabled
self.connected_component_cache = defaultdict(dict)
@property
def scenario_repo(self):
return self._scenario_repo
@scenario_repo.setter
def scenario_repo(self, scenario_repo):
self._scenario_repo = scenario_repo
def run_evaluator(self, action_mode=ActionMode.DO):
self.enabled = True
vertices = self._entity_graph.get_vertices()
start_time = time.time()
for vertex in vertices:
if action_mode == ActionMode.DO:
self.process_event(None, vertex, True)
elif action_mode == ActionMode.UNDO:
self.process_event(vertex, None, True)
LOG.info(
'Run %s Evaluator on %s items - took %s',
action_mode, len(vertices), (time.time() - start_time))
def process_event(self, before, current, is_vertex, *args, **kwargs):
"""Notification of a change in the entity graph.
        :param is_vertex: True if the element is a vertex, False for an edge
:param before: The graph element (vertex or edge) prior to the
change that happened. None if the element was just created.
:param current: The graph element (vertex or edge) after the
change that happened. Deleted elements should arrive with the
vitrage_is_deleted property set to True
"""
if not self.enabled:
LOG.debug("Process event disabled")
return
LOG.debug('Process event - starting')
LOG.debug("Element before event: %s, Current element: %s",
before,
current)
before_scenarios = self._get_element_scenarios(before, is_vertex)
current_scenarios = self._get_element_scenarios(current, is_vertex)
before_scenarios, current_scenarios = \
self._remove_overlap_scenarios(before_scenarios, current_scenarios)
if len(before_scenarios) + len(current_scenarios):
LOG.debug("Number of relevant scenarios found: undo = %s, do = %s",
len(before_scenarios),
len(current_scenarios))
actions = self._process_and_get_actions(before,
before_scenarios,
ActionMode.UNDO)
actions.extend(self._process_and_get_actions(current,
current_scenarios,
ActionMode.DO))
        actions_to_perform = []
try:
            actions_to_perform = self._analyze_and_filter_actions(actions)
except Exception:
LOG.exception("Evaluator error, will not execute actions %s",
actions)
        self._action_executor.execute(actions_to_perform)
LOG.debug('Process event - completed')
def _get_element_scenarios(self, element, is_vertex):
if not element \
or element.get(VProps.VITRAGE_IS_DELETED) \
or element.get(EProps.VITRAGE_IS_DELETED):
return []
elif is_vertex:
return self._scenario_repo.get_scenarios_by_vertex(element)
else: # is edge
edge_desc = self._get_edge_description(element)
return self._scenario_repo.get_scenarios_by_edge(edge_desc)
def _get_edge_description(self, element):
source = self._entity_graph.get_vertex(element.source_id)
target = self._entity_graph.get_vertex(element.target_id)
edge_desc = EdgeDescription(element, source, target)
return edge_desc
@staticmethod
def _remove_overlap_scenarios(before, current):
intersection = list(filter(lambda x: x in before, current))
before = list(filter(lambda x: x not in intersection, before))
current = list(filter(lambda x: x not in intersection, current))
return before, current
def _process_and_get_actions(self, element, triggered_scenarios, mode):
actions = []
for triggered_scenario in triggered_scenarios:
LOG.debug("Processing: %s", triggered_scenario)
scenario_element = triggered_scenario[0]
scenario = triggered_scenario[1]
actions.extend(self._process_scenario(element,
scenario,
scenario_element,
mode))
return actions
def _process_scenario(self, element, scenario, scenario_elements, mode):
if not isinstance(scenario_elements, list):
scenario_elements = [scenario_elements]
actions = []
for action in scenario.actions:
for scenario_element in scenario_elements:
matches = self._evaluate_subgraphs(scenario.subgraphs,
element,
scenario_element,
action.targets[TARGET])
actions.extend(self._get_actions_from_matches(scenario.version,
matches,
mode,
action))
return actions
def _evaluate_subgraphs(self,
subgraphs,
element,
scenario_element,
action_target):
if isinstance(element, Vertex):
return self._find_vertex_subgraph_matching(subgraphs,
action_target,
element,
scenario_element)
else:
return self._find_edge_subgraph_matching(subgraphs,
action_target,
element,
scenario_element)
def _get_actions_from_matches(self,
scenario_version,
combined_matches,
mode,
action_spec):
actions = []
for is_switch_mode, matches in combined_matches:
new_mode = mode
if is_switch_mode:
new_mode = ActionMode.UNDO \
if mode == ActionMode.DO else ActionMode.DO
template_schema = \
TemplateSchemaFactory().template_schema(scenario_version)
for match in matches:
match_action_spec = self._get_action_spec(action_spec, match)
items_ids = \
[match_item[1].vertex_id for match_item in match.items()]
match_hash = md5(tuple(sorted(items_ids)))
self._evaluate_property_functions(template_schema, match,
match_action_spec.properties)
actions.append(ActionInfo(match_action_spec, new_mode,
match_action_spec.id, match_hash))
return actions
def _evaluate_property_functions(self, template_schema, match,
action_props):
"""Evaluate the action properties, in case they contain functions
In template version 2 we introduced functions, and specifically the
get_attr function. This method evaluate its value and updates the
action properties, before the action is being executed.
Example:
- action:
action_type: execute_mistral
properties:
workflow: evacuate_vm
input:
vm_name: get_attr(instance1,name)
force: false
In this example, the method will iterate over 'properties', and then
recursively over 'input', and for 'vm_name' it will replace the
call for get_attr with the actual name of the VM. The input for the
Mistral workflow will then be:
vm_name: vm_1
force: false
"""
for key, value in action_props.items():
if isinstance(value, dict):
# Recursive call for a dictionary
self._evaluate_property_functions(template_schema,
match, value)
elif value is not None and is_function(value):
# The value is a function
func_and_args = re.split('[(),]', value)
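                # e.g. re.split('[(),]', 'get_attr(instance1,name)') returns
                # ['get_attr', 'instance1', 'name', '']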
func_name = func_and_args.pop(0)
args = [arg.strip() for arg in func_and_args if len(arg) > 0]
# Get the function, execute it and update the property value
func = template_schema.functions.get(func_name)
action_props[key] = func(match, *args)
LOG.debug('Changed property %s value from %s to %s', key,
value, action_props[key])
@staticmethod
def _get_action_spec(action_spec, match):
targets = action_spec.targets
real_items = {
target: match[target_id] for target, target_id in targets.items()
}
return ActionSpecs(action_spec.id,
action_spec.type,
real_items,
copy.deepcopy(action_spec.properties))
@staticmethod
def _generate_action_id(action_spec):
"""Generate a unique action id for the action
BEWARE: The value created here should not be stored in database,
as in python3, the hash function seed changes after program restart
"""
targets = [(k, v.vertex_id) for k, v in action_spec.targets.items()]
return hash(
(action_spec.type,
tuple(sorted(targets)),
tuple(sorted(recursive_keypairs(action_spec.properties))))
)
def _analyze_and_filter_actions(self, actions):
LOG.debug("Actions before filtering: %s", actions)
if not actions:
return []
active_actions = ActiveActionsTracker(self._db, actions)
for action_info in actions:
if action_info.mode == ActionMode.DO:
active_actions.calc_do_action(action_info)
elif action_info.mode == ActionMode.UNDO:
active_actions.calc_undo_action(action_info)
active_actions.flush_db_updates()
unique_ordered_actions = OrderedDict()
for action in active_actions.actions_to_perform:
if isinstance(action, models.ActiveAction):
action = self._db_action_to_action_info(action)
id_ = self._generate_action_id(action.specs)
unique_ordered_actions[id_] = action
return unique_ordered_actions.values()
def _find_vertex_subgraph_matching(self,
subgraphs,
action_target,
vertex,
scenario_vertex):
"""calculates subgraph matching for vertex
iterates over all the subgraphs, and checks if the triggered vertex is
in the same connected component as the action then run subgraph
matching on the vertex and return its result, otherwise return an
empty list of matches.
"""
matches = []
for subgraph in subgraphs:
connected_component = self.get_connected_component(subgraph,
action_target)
is_switch_mode = \
connected_component.get_vertex(scenario_vertex.vertex_id)
if is_switch_mode:
initial_map = Mapping(scenario_vertex, vertex, True)
mat = self._entity_graph.algo.sub_graph_matching(subgraph,
initial_map)
matches.append((False, mat))
else:
matches.append((True, []))
return matches
def _find_edge_subgraph_matching(self,
subgraphs,
action_target,
edge,
scenario_edge):
"""calculates subgraph matching for edge
iterates over all the subgraphs, and checks if the triggered edge is a
negative edge then mark it as deleted=false and negative=false so that
subgraph matching on that edge will work correctly. after running
subgraph matching, we need to remove the negative vertices that were
added due to the change above.
"""
matches = []
for subgraph in subgraphs:
subgraph_edge = subgraph.get_edge(scenario_edge.source.vertex_id,
scenario_edge.target.vertex_id,
scenario_edge.edge.label)
if not subgraph_edge:
continue
is_switch_mode = subgraph_edge.get(NEG_CONDITION, False)
connected_component = self.get_connected_component(subgraph,
action_target)
# change the vitrage_is_deleted and negative_condition props to
# false when is_switch_mode=true so that when we have an event on a
# negative_condition=true edge it will find the correct subgraph
self._switch_edge_negative_props(is_switch_mode, scenario_edge,
subgraph, False)
initial_map = Mapping(scenario_edge.edge, edge, False)
curr_matches = \
self._entity_graph.algo.sub_graph_matching(subgraph,
initial_map)
# switch back to the original values
self._switch_edge_negative_props(is_switch_mode, scenario_edge,
subgraph, True)
self._remove_negative_vertices_from_matches(curr_matches,
connected_component)
matches.append((is_switch_mode, curr_matches))
return matches
def get_connected_component(self, subgraph, target):
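        # Results are cached per (subgraph, target) object identity so that
        # repeated evaluations reuse the graph query result.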
connected_component = self.connected_component_cache.get(
id(subgraph), {}).get(id(target))
if not connected_component:
connected_component = subgraph.algo.graph_query_vertices(
root_id=target,
edge_query_dict={'!=': {NEG_CONDITION: True}})
self.connected_component_cache[id(subgraph)][id(target)] = \
connected_component
return connected_component
def _db_action_to_action_info(self, db_action):
target = self._entity_graph.get_vertex(db_action.target_vertex_id)
targets = {TARGET: target}
if db_action.source_vertex_id:
source = self._entity_graph.get_vertex(db_action.source_vertex_id)
targets[SOURCE] = source
scenario_action = self._scenario_repo.actions.get(db_action.action_id)
properties = copy.copy(scenario_action.properties)
action_specs = ActionSpecs(
id=db_action.action_id,
type=db_action.action_type,
targets=targets,
properties=properties,
)
action_info = ActionInfo(
specs=action_specs,
mode=ActionMode.DO,
action_id=db_action.action_id,
trigger_id=db_action.trigger,
)
return action_info
@staticmethod
def _switch_edge_negative_props(is_switch_mode,
scenario_edge,
subgraph,
status):
if is_switch_mode:
scenario_edge.edge[NEG_CONDITION] = status
scenario_edge.edge[EProps.VITRAGE_IS_DELETED] = status
subgraph.update_edge(scenario_edge.edge)
@staticmethod
def _remove_negative_vertices_from_matches(matches, connected_component):
for match in matches:
ver_ids = [v.vertex_id for v in connected_component.get_vertices()]
ver_to_remove = [id for id in match.keys() if id not in ver_ids]
for v_id in ver_to_remove:
del match[v_id]
class ActiveActionsTracker(object):
"""Keeps track of all active actions and relative dominance/priority.
Actions are organized according to resource-id and action details.
Examples:
    - all set_state actions on a given resource are considered similar
      actions, regardless of state
    - all raise_alarm actions of type alarm_name on a given resource are
      considered similar actions, regardless of severity
Each action is assigned a score by mapping the value property to the
priority defined in datasource values config.
- Alarm: severity
- Resource: state
    The score is used to determine which action in each group of similar
    actions should be executed next.
"""
action_tools = None
def __init__(self, db, actions):
self.db = db
self.data = defaultdict(set)
self.actions_to_create = {}
self.actions_to_remove = set()
self.actions_to_perform = [] # use a list to keep the insertion order
self._init_action_tools()
# Query DB for all actions with same properties
actions_keys = set([self._get_key(action) for action in actions])
db_rows = self.db.active_actions.query_similar(actions_keys) or []
for db_row in db_rows:
self.data[(db_row.source_vertex_id, db_row.target_vertex_id,
db_row.extra_info, db_row.action_type)].add(db_row)
@classmethod
def _init_action_tools(cls):
if cls.action_tools:
return
info_mapper = DatasourceInfoMapper()
alarms_score = info_mapper.get_datasource_priorities('vitrage')
all_scores = info_mapper.get_datasource_priorities()
cls.action_tools = {
ActionType.SET_STATE: pt.SetStateTools(all_scores),
ActionType.RAISE_ALARM: pt.RaiseAlarmTools(alarms_score),
ActionType.ADD_CAUSAL_RELATIONSHIP: pt.BaselineTools,
ActionType.MARK_DOWN: pt.BaselineTools,
ActionType.EXECUTE_MISTRAL: pt.BaselineTools
}
def calc_do_action(self, action_info):
"""Add this action to active_actions table, if not exists
return value to help decide if action should be performed
Only a top scored action that is new should be performed
:return: (is top score, is it already existing)
"""
similar_actions = self._get_similar(action_info)
exists = any(
a.action_id == action_info.action_id and
a.trigger == action_info.trigger_id for a in similar_actions)
if not exists:
self._add(action_info)
if not exists and self._is_highest_score(similar_actions, action_info):
self.actions_to_perform.append(action_info)
def calc_undo_action(self, action_info):
"""Delete this action form active_actions table, if exists
decide if action should be performed
A top scored action should be 'undone' if there is not a second action.
If there is a second, it should now be 'done' and become the dominant
:param action_info: action to delete
"""
similar_actions = self._get_similar(action_info)
if not self._is_highest_score(similar_actions, action_info):
self._remove(action_info)
return
second_highest = self._sort_db_actions(similar_actions)[1]\
if len(similar_actions) > 1 else None
# We should 'DO' the Second highest scored action so
# to override the existing dominant action.
# or, if there is no second highest scored action
# So we just 'UNDO' the existing dominant action
if second_highest:
self.actions_to_perform.append(second_highest)
else:
self.actions_to_perform.append(action_info)
self._remove(action_info)
def flush_db_updates(self):
self.db.active_actions.bulk_create(self.actions_to_create.values())
self.db.active_actions.bulk_delete(self.actions_to_remove)
def _add(self, action_info):
db_row = self._to_db_row(action_info)
self._get_similar(action_info).add(db_row)
id_ = ScenarioEvaluator._generate_action_id(action_info.specs)
if id_ not in self.actions_to_create:
self.actions_to_create[id_] = db_row
def _remove(self, action_info):
similar_actions = self._get_similar(action_info)
for action in similar_actions:
if action.trigger == action_info.trigger_id and \
action.action_id == action_info.action_id:
similar_actions.remove(action)
break
self.actions_to_remove.add(
(action_info.trigger_id, action_info.action_id))
def _get_similar(self, action_info):
return self.data.get(self._get_key(action_info), set())
def _get_key(self, action_info):
src = action_info.specs.targets.get(SOURCE, {}).get(VProps.VITRAGE_ID)
trg = action_info.specs.targets.get(TARGET, {}).get(VProps.VITRAGE_ID)
extra_info = self.action_tools[action_info.specs.type].get_extra_info(
action_info.specs)
action_type = action_info.specs.type
return src, trg, extra_info, action_type
def _to_db_row(self, action_info):
source = action_info.specs.targets.get(SOURCE, {})
target = action_info.specs.targets.get(TARGET, {})
action_score = self.action_tools[action_info.specs.type]. \
get_score(action_info)
extra_info = self.action_tools[action_info.specs.type]. \
get_extra_info(action_info.specs)
return storage.sqlalchemy.models.ActiveAction(
action_type=action_info.specs.type,
extra_info=extra_info,
source_vertex_id=source.get(VProps.VITRAGE_ID),
target_vertex_id=target.get(VProps.VITRAGE_ID),
action_id=action_info.action_id,
trigger=action_info.trigger_id,
score=action_score)
@classmethod
def _is_highest_score(cls, db_actions, action_info):
"""Get the top action from the list and compare to action_info
Actions are sorted according to:
        score - primary, descending (highest score first)
        created_at - secondary, ascending (earliest first)
"""
if not db_actions:
return True
highest_score_action = min(
db_actions, key=lambda action: (-action.score, action.created_at
or utcnow(False)))
return highest_score_action.trigger == action_info.trigger_id and \
highest_score_action.action_id == action_info.action_id
@staticmethod
def _sort_db_actions(db_actions):
"""Sort ActiveAction items by two fields
        score - primary, descending (highest score first)
        created_at - secondary, ascending (earliest first)
"""
return sorted(
db_actions,
key=lambda action: (-action.score, action.created_at),
reverse=False)
| {
"content_hash": "30b5ec7b4d0d0ab7b10b6f2a3850645a",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 79,
"avg_line_length": 41.85598705501618,
"alnum_prop": 0.575984845556114,
"repo_name": "openstack/vitrage",
"id": "eee52d5307986b73ba189f5f3bb871f43c85f131",
"size": "26439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrage/evaluator/scenario_evaluator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26541"
},
{
"name": "Mako",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2074427"
},
{
"name": "Shell",
"bytes": "17668"
}
],
"symlink_target": ""
} |
from __future__ import with_statement # needed for python 2.5
import os
from fabric.api import *
def blef_fr():
"""Use the actual webserver"""
env.project_name = 'moon'
env.hosts = ['blef.fr']
env.user = 'root'
env.key_filename = '~/.ssh/id_rsa'
env.path = '/var/www/%(project_name)s' % env
env.virtualhost_path = '/var/www/%(project_name)s/.venv/%(project_name)s' % env
def deploy(branch='master'):
"""
    Deploy the latest version of the site to the servers: pull the requested
    branch, install any required third-party modules, run database migrations,
    build and upload the front-end assets, collect the static files and then
    reload the application server.
"""
require('hosts', provided_by=[blef_fr])
require('path')
with cd(env.path):
run('git checkout %s' % branch)
run('git pull origin %s' % branch)
run('%(virtualhost_path)s/bin/pip install -r requirements.txt' % env)
run('%(virtualhost_path)s/bin/python manage.py migrate' % env)
with lcd(os.getcwd()):
local('git checkout %s' % branch)
local('webpack -p --config webpack-prod.config.js')
run('mkdir -p %(path)s/static/dist;' % env)
put('website/static/dist/*', '%(path)s/static/dist/' % env)
with cd(env.path):
run('%(virtualhost_path)s/bin/python manage.py collectstatic --noinput' % env)
run('supervisorctl reload')
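# Typical invocation with the Fabric 1.x CLI (host and branch are examples):
#   fab blef_fr deploy:branch=master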
| {
"content_hash": "c54fe654341cd20a3b8eca0778c2eadf",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 86,
"avg_line_length": 31.023255813953487,
"alnum_prop": 0.618440779610195,
"repo_name": "Frky/moon",
"id": "b685681e7aa971c7a2ad8d2e5e8ef3ca7dfec6be",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4973"
},
{
"name": "HTML",
"bytes": "2377"
},
{
"name": "Java",
"bytes": "1228"
},
{
"name": "JavaScript",
"bytes": "29475"
},
{
"name": "Objective-C",
"bytes": "4392"
},
{
"name": "Python",
"bytes": "41561"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
} |
from swproject.settings import *
DATABASES['default']['NAME'] = 'hudson'
DATABASES['default']['USER'] = 'hudson'
DATABASES['default']['HOST'] = 'localhost'
CELERY_ALWAYS_EAGER = True
HAYSTACK_SEARCH_ENGINE = 'simple'
| {
"content_hash": "482a0b1ae039878a10be9991dcfc0993",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 22.1,
"alnum_prop": 0.7013574660633484,
"repo_name": "snswa/swsites",
"id": "f706eba5f491cc1833e996e746a919dad8e0d10d",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swproject/settings_hudson.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "338722"
},
{
"name": "Python",
"bytes": "301670"
},
{
"name": "Shell",
"bytes": "2618"
}
],
"symlink_target": ""
} |
"""
This script tests functions to compute different Power Spectra from complex SH coefficients
"""
# standard imports:
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# import shtools:
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
import pyshtools as shtools
# set shtools plot style:
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
mpl.rcParams.update(style_shtools)
#==== MAIN FUNCTION ====
def main():
test_ComplexSpectralAnalysis()
example()
def test_ComplexSpectralAnalysis():
#---- input parameters ----
lmax = 5
ls = np.arange(lmax + 1)
mask = np.zeros((2, lmax + 1, lmax + 1), dtype=np.bool)
for l in np.arange(lmax + 1):
mask[:, l, :l + 1] = True
mask[1, :, 0] = False
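    # restrict to valid (l, m) entries: m <= l, keeping the m = 0 term
    # in the first slice only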
print 'creating {:d} random coefficients'.format(2 * (lmax + 1) * (lmax + 1))
print '\n---- testing SHPower/DensityLC, SHPowerSpectrum/DensityC ----'
    print 'generating normally distributed complex coefficients with variance 1...'
coeffs1 = np.random.normal( loc=0., scale=1., size=2 * (lmax + 1) * (lmax + 1) ) + \
1j * np.random.normal(loc=0., scale=1., size=2 * (lmax + 1) * (lmax + 1))
coeffs1 = coeffs1.reshape(2, lmax + 1, lmax + 1)
coeffs1[np.invert(mask)] = 0.
spec1 = np.array([shtools.SHPowerLC(coeffs1, l) for l in ls])
spec2 = shtools.SHPowerSpectrumC(coeffs1)
print 'tot power computed with SHPowerL={:2.2f}'.format(np.sum(spec1))
print 'tot power computed with SHPowerSpectrum={:2.2f}'.format(np.sum(spec2))
spec1 = np.array([shtools.SHPowerDensityLC(coeffs1, l) for l in ls])
spec2 = shtools.SHPowerSpectrumDensityC(coeffs1)
print 'tot power computed with SHPowerDensityL={:2.2f}'.format(np.sum(spec1 * (2 * ls + 1)))
print 'tot power computed with SHPowerSpectrumDensity={:2.2f}'.format(np.sum(spec2 * (2 * ls + 1)))
print '\n---- testing SHCrossPower/DensityLC, SHCrossCrossPowerSpectrum/DensityC ----'
    print 'generating two sets of normally distributed complex coefficients with variance 1...'
coeffs2 = np.random.normal( loc=0., scale=1., size=2 * (lmax + 1) * (lmax + 1) ) + \
1j * np.random.normal(loc=0., scale=1., size=2 * (lmax + 1) * (lmax + 1))
coeffs2 = coeffs2.reshape(2, lmax + 1, lmax + 1)
coeffs2[np.invert(mask)] = 0.
spec1 = np.array([shtools.SHCrossPowerLC(coeffs1, coeffs2, l) for l in ls])
spec2 = shtools.SHCrossPowerSpectrumC(coeffs1, coeffs2)
print 'tot cpower computed with SHCrossPowerL={:2.2f}'.format(np.sum(spec1))
print 'tot cpower computed with SHCrossPowerSpectrum={:2.2f}'.format(np.sum(spec2))
spec1 = np.array([shtools.SHCrossPowerDensityLC(coeffs1, coeffs2, l) for l in ls])
spec2 = shtools.SHCrossPowerSpectrumDensityC(coeffs1, coeffs2)
print 'tot cpower computed with SHCrossPowerDensityL={:2.2f}'.format(np.sum(spec1 * (2 * ls + 1)))
print 'tot cpower computed with SHCrossPowerSpectrumDensity={:2.2f}'.format(np.sum(spec2 * (2 * ls + 1)))
#==== PLOT POWER SPECTRA ====
def example():
"""
example that plots the power spectrum of Mars topography data
"""
#--- input data filename ---
infile = '../../ExampleDataFiles/MarsTopo719.shape'
coeffs, lmax = shtools.SHRead(infile, 719)
lmax = coeffs.shape[1] - 1
#--- plot grid ---
grid = shtools.MakeGridDH(coeffs, lmax, csphase=-1)
fig_map = plt.figure()
plt.imshow(grid)
#---- compute spectrum ----
ls = np.arange(lmax + 1)
pspectrum = shtools.SHPowerSpectrum(coeffs)
pdensity = shtools.SHPowerSpectrumDensity(coeffs)
#---- plot spectrum ----
fig_spectrum, ax = plt.subplots(1, 1)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('degree l')
ax.grid(True, which='both')
ax.plot(ls[1:], pspectrum[1:], label='power per degree l')
ax.plot(ls[1:], pdensity[1:], label='power per degree l and order m')
ax.legend()
fig_map.savefig('SHCtopography_mars.png')
fig_spectrum.savefig('SHCspectrum_mars.png')
print 'mars topography and spectrum saved'
# plt.show()
#==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| {
"content_hash": "d1a437d114f1ba85cad5c6c39f367338",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 109,
"avg_line_length": 36.46551724137931,
"alnum_prop": 0.6515366430260048,
"repo_name": "pochoi/SHTOOLS",
"id": "7638b5218de80af908aab50b8d3cc21461af0b85",
"size": "4252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/python/GlobalSpectralAnalysis/SHComplexSpectralAnalysis.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "FORTRAN",
"bytes": "1345156"
},
{
"name": "HTML",
"bytes": "5467"
},
{
"name": "Makefile",
"bytes": "23331"
},
{
"name": "Python",
"bytes": "36215"
}
],
"symlink_target": ""
} |
import datetime
import h5py
import os
import random
import sys
import time
import timeit
import numpy as np
DTYPE = np.dtype({'names': ('time', 'price', 'volume', 'amount'),
'formats': ('i4', 'f4', 'f4', 'f4')})
def bench_ds():
filename = '/tmp/bench-%d.h5' % int(time.time())
symbols = ["SH%.6d" % i for i in xrange(10000)]
f = h5py.File(filename)
for symbol in symbols:
f.create_dataset(symbol, (240, ), DTYPE)
f.close()
for x in xrange(10):
# open for bench again
f = h5py.File(filename)
random.shuffle(symbols)
for symbol in symbols:
ds = f[symbol]
f.close()
def require_dataset(handle, symbol):
gid = symbol[:3]
group = handle.require_group(gid)
try:
ds = group[symbol]
except KeyError:
ds = group.create_dataset(symbol, (240, ), DTYPE)
return ds
def dataset(handle, symbol):
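    # e.g. symbol 'SH000123' resolves to the HDF5 path 'SH0/SH000123'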
path = "%s/%s" % (symbol[:3], symbol)
return handle[path]
def bench_grouped_ds():
filename = '/tmp/bench-%d.h5' % int(time.time())
symbols = ["SH%.6d" % i for i in xrange(10000)]
f = h5py.File(filename)
for symbol in symbols:
require_dataset(f, symbol)
f.close()
for x in xrange(10):
# open for bench again
f = h5py.File(filename)
random.shuffle(symbols)
for symbol in symbols:
ds = dataset(f, symbol)
f.close()
if __name__ == '__main__':
d = 1
ds_timer = timeit.Timer(stmt='bench_ds()',
setup="from __main__ import bench_ds")
ds_result = ds_timer.timeit(number=d)
print ds_result
grouped_ds_timer = timeit.Timer(stmt='bench_grouped_ds()',
setup="from __main__ import bench_grouped_ds")
grouped_ds_result = grouped_ds_timer.timeit(number=d)
print grouped_ds_result
| {
"content_hash": "8e3a12680e62a3153e24c79f1967ac62",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 82,
"avg_line_length": 24.346153846153847,
"alnum_prop": 0.5624012638230648,
"repo_name": "yinhm/datafeed",
"id": "312d00b36fa4bdf349a107c305527d232f2609db",
"size": "1970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/bench_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "203178"
},
{
"name": "Shell",
"bytes": "3121"
}
],
"symlink_target": ""
} |
from ducktape.services.service import Service
from kafkatest.services.kafka.directory import kafka_dir
import subprocess
import time
class ZookeeperService(Service):
logs = {
"zk_log": {
"path": "/mnt/zk.log",
"collect_default": True},
"zk_data": {
"path": "/mnt/zookeeper",
"collect_default": True}
}
def __init__(self, context, num_nodes):
"""
        :param context: the test context in which this service runs
"""
super(ZookeeperService, self).__init__(context, num_nodes)
def start_node(self, node):
idx = self.idx(node)
self.logger.info("Starting ZK node %d on %s", idx, node.account.hostname)
node.account.ssh("mkdir -p /mnt/zookeeper")
node.account.ssh("echo %d > /mnt/zookeeper/myid" % idx)
config_file = self.render('zookeeper.properties')
self.logger.info("zookeeper.properties:")
self.logger.info(config_file)
node.account.create_file("/mnt/zookeeper.properties", config_file)
start_cmd = "/opt/%s/bin/zookeeper-server-start.sh " % kafka_dir(node)
start_cmd += "/mnt/zookeeper.properties 1>> %(path)s 2>> %(path)s &" % self.logs["zk_log"]
node.account.ssh(start_cmd)
time.sleep(5) # give it some time to start
def pids(self, node):
try:
cmd = "ps ax | grep -i zookeeper | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError) as e:
return []
def alive(self, node):
return len(self.pids(node)) > 0
def stop_node(self, node):
idx = self.idx(node)
self.logger.info("Stopping %s node %d on %s" % (type(self).__name__, idx, node.account.hostname))
node.account.kill_process("zookeeper", allow_fail=False)
def clean_node(self, node):
self.logger.info("Cleaning ZK node %d on %s", self.idx(node), node.account.hostname)
if self.alive(node):
self.logger.warn("%s %s was still alive at cleanup time. Killing forcefully..." %
(self.__class__.__name__, node.account))
node.account.kill_process("zookeeper", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf /mnt/zookeeper /mnt/zookeeper.properties /mnt/zk.log", allow_fail=False)
def connect_setting(self):
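        # e.g. 'worker1:2181,worker2:2181,worker3:2181' (hostnames are illustrative)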
return ','.join([node.account.hostname + ':2181' for node in self.nodes])
| {
"content_hash": "5f7462036ef42afd6bacb1b537b0c142",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 105,
"avg_line_length": 37.08695652173913,
"alnum_prop": 0.5982805783509183,
"repo_name": "junrao/kafka",
"id": "a1f999ea9ee1e10d1a3c36179d6ab882317ff500",
"size": "3341",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/services/zookeeper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "20422"
},
{
"name": "HTML",
"bytes": "5443"
},
{
"name": "Java",
"bytes": "3227522"
},
{
"name": "Python",
"bytes": "265552"
},
{
"name": "Scala",
"bytes": "2752216"
},
{
"name": "Shell",
"bytes": "43740"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
import random
import hashlib
challengelen = 2
chain = []
def mine(lastblock, txns):
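# A block is the tuple (height, prev_hash, txns, nonce, hash). Mining simply increments
# the nonce until the raw MD5 digest of "prev_hash|txns|nonce" happens to start with
# challengelen literal '0' characters -- a toy proof-of-work.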
hash = '1'*challengelen
nonce = 0
while hash[:challengelen] != '0'*challengelen:
hash = hashlib.md5(lastblock[4] + '|' + txns + '|' + str(nonce)).digest()
nonce += 1
#print(txns, nonce, hash[:challengelen])
return (lastblock[0]+1, lastblock[4], txns, nonce, hash)
# Genesis Block
genlasthash = 'Genesis'
gentxns = '<3 R4gul'
genhash = '1'*challengelen
gennonce = 0
while genhash[:challengelen] != '0'*challengelen:
genhash = hashlib.md5(genlasthash + '|' + gentxns + '|' + str(gennonce)).digest()
gennonce += 1
genblock = (0, 'Genesis', '<3 R4gul', gennonce, genhash)
chain.append(genblock)
print('Genesis Block', genblock)
# Chain is born.
# Fly Free!
while True: #Forever
#for i in range(1000):
block = mine(chain[-1], 'Rn' + str(random.random())[2:])
chain.append(block)
print(block)
#print(chain)
| {
"content_hash": "17ef8953b638b55df82a9675699fe900",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 82,
"avg_line_length": 20.75,
"alnum_prop": 0.664841182913472,
"repo_name": "ragulbalaji/Random-Cool-Things",
"id": "79b58189e496cdf0ccea23dcce46a35c55da942a",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Math/blockchainBAREBONES.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1686"
},
{
"name": "HTML",
"bytes": "3551"
},
{
"name": "PHP",
"bytes": "491"
},
{
"name": "Python",
"bytes": "8404"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <[email protected]>
"""
import time
import calendar
from xml.etree import ElementTree
from twisted.internet import defer
from twisted.web import client
from twisted.python import log
from smap import util
from smap.driver import SmapDriver
class CuriosityWeather(SmapDriver):
"""Periodically scrape data from Mars
"""
FIELDS = {
'min_temp': ("C"),
'max_temp': ("C"),
'pressure': ('hPa'),
'abs_humidity': ("%"),
'wind_speed': ('m/s'),
# 'sol': ('days'),
}
@defer.inlineCallbacks
def scrape(self):
try:
page = yield client.getPage("http://cab.inta-csic.es/rems/rems_weather.xml")
except:
log.err()  # logs the current exception (the failed fetch)
return
root = ElementTree.fromstring(page)
magnitudes = root.find("magnitudes")
date = time.strptime(root.find("terrestrial_date").text, "%b %d, %Y")
date = calendar.timegm(date)
for stream in self.FIELDS.iterkeys():
self._add('/' + stream, date, float(magnitudes.find(stream).text))
def setup(self, opts):
for stream, meta in self.FIELDS.iteritems():
self.add_timeseries('/' + stream, meta[0],
data_type='double',
timezone='Utc')
def start(self):
self.scraper = util.PeriodicCaller(self.scrape, (), where="reactor")
self.scraper.start(900)
def stop(self):
self.scraper.stop()
| {
"content_hash": "7ba340c907ba854ae0cbd7e10448941e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 88,
"avg_line_length": 34.345238095238095,
"alnum_prop": 0.6779896013864818,
"repo_name": "Alwnikrotikz/smap-data",
"id": "65367df7681cc25e524bce67fa54d548d0f8c091",
"size": "2885",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/smap/drivers/mars.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "301328"
},
{
"name": "HTML",
"bytes": "7902"
},
{
"name": "Makefile",
"bytes": "5268"
},
{
"name": "Python",
"bytes": "1394465"
},
{
"name": "R",
"bytes": "23461"
},
{
"name": "Shell",
"bytes": "1273"
},
{
"name": "TeX",
"bytes": "40212"
},
{
"name": "XSLT",
"bytes": "5081"
}
],
"symlink_target": ""
} |
import json
import logging
import re
import time
import unittest
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group
from desktop.models import Document
from hadoop import cluster
from hadoop.conf import YARN_CLUSTERS
from hadoop.yarn import resource_manager_api, mapreduce_api, history_server_api
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.models import Workflow
from jobbrowser import models, views
from jobbrowser.conf import SHARE_JOBS
from jobbrowser.models import can_view_job, can_modify_job, Job
LOG = logging.getLogger(__name__)
_INITIALIZED = False
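# Module-level guard so the Oozie examples are installed only once per test run
# (see TestJobBrowserWithHadoop.install_examples).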
class TestBrowser():
def test_dots_to_camel_case(self):
assert_equal("fooBar", models.dots_to_camel_case("foo.bar"))
assert_equal("fooBarBaz", models.dots_to_camel_case("foo.bar.baz"))
assert_equal("foo", models.dots_to_camel_case("foo"))
assert_equal("foo.", models.dots_to_camel_case("foo."))
def test_get_path(self):
assert_equal("/foo/bar", models.get_path("hdfs://host/foo/bar"))
def test_format_counter_name(self):
assert_equal("Foo Bar", views.format_counter_name("fooBar"))
assert_equal("Foo Bar Baz", views.format_counter_name("fooBarBaz"))
assert_equal("Foo", views.format_counter_name("foo"))
assert_equal("Foo.", views.format_counter_name("foo."))
assert_equal("A Bbb Ccc", views.format_counter_name("A_BBB_CCC"))
def get_hadoop_job_id(oozie_api, oozie_jobid, action_index=1, timeout=60, step=5):
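# Poll Oozie every `step` seconds until the workflow action exposes its external
# (Hadoop) job id, or fail with the Oozie job log after `timeout` seconds.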
hadoop_job_id = None
start = time.time()
while not hadoop_job_id and time.time() - start < timeout:
time.sleep(step)
hadoop_job_id = oozie_api.get_job(oozie_jobid).actions[action_index].externalId
if not hadoop_job_id:
logs = OozieServerProvider.oozie.get_job_log(oozie_jobid)
msg = "[%d] %s took more than %d to create a job: %s" % (time.time(), oozie_jobid, timeout, logs)
LOG.info(msg)
raise Exception(msg)
return hadoop_job_id
class TestJobBrowserWithHadoop(unittest.TestCase, OozieServerProvider):
requires_hadoop = True
user_count = 0
@classmethod
def setup_class(cls):
OozieServerProvider.setup_class()
def setUp(self):
"""
To clean: creating test1, test2, test3...users
"""
TestJobBrowserWithHadoop.user_count += 1
self.username = 'test' + str(TestJobBrowserWithHadoop.user_count)
self.home_dir = '/user/%s' % self.username
self.cluster.fs.do_as_user(self.username, self.cluster.fs.create_home_dir, self.home_dir)
self.client = make_logged_in_client(username=self.username, is_superuser=False, groupname='test')
self.user = User.objects.get(username=self.username)
grant_access(self.username, 'test', 'jobsub')
grant_access(self.username, 'test', 'jobbrowser')
grant_access(self.username, 'test', 'oozie')
add_to_group(self.username)
self.prev_user = self.cluster.fs.user
self.cluster.fs.setuser(self.username)
self.install_examples()
self.design = self.create_design()
# Run the sleep example, since it doesn't require user home directory
design_id = self.design.id
response = self.client.post(reverse('oozie:submit_workflow',
args=[design_id]),
data={u'form-MAX_NUM_FORMS': [u''],
u'form-INITIAL_FORMS': [u'1'],
u'form-0-name': [u'REDUCER_SLEEP_TIME'],
u'form-0-value': [u'1'],
u'form-TOTAL_FORMS': [u'1']},
follow=True)
oozie_jobid = response.context['oozie_workflow'].id
OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
self.hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
self.hadoop_job_id_short = views.get_shorter_id(self.hadoop_job_id)
def tearDown(self):
try:
Document.objects.all().delete()
Workflow.objects.all().delete()
# Remove user home directories.
self.cluster.fs.do_as_superuser(self.cluster.fs.rmtree, self.home_dir)
except:
pass
self.cluster.fs.setuser(self.prev_user)
def create_design(self):
if not Document.objects.available_docs(Workflow, self.user).filter(name='sleep_job').exists():
response = self.client.post(reverse('jobsub.views.new_design',
kwargs={'node_type': 'mapreduce'}),
data={'name': 'sleep_job',
'description': '',
'node_type': 'mapreduce',
'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
'prepares': '[]',
'files': '[]',
'archives': '[]',
'job_properties': '[{\"name\":\"mapred.reduce.tasks\",\"value\":\"1\"},{\"name\":\"mapred.mapper.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.reducer.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.mapoutput.key.class\",\"value\":\"org.apache.hadoop.io.IntWritable\"},{\"name\":\"mapred.mapoutput.value.class\",\"value\":\"org.apache.hadoop.io.NullWritable\"},{\"name\":\"mapred.output.format.class\",\"value\":\"org.apache.hadoop.mapred.lib.NullOutputFormat\"},{\"name\":\"mapred.input.format.class\",\"value\":\"org.apache.hadoop.examples.SleepJob$SleepInputFormat\"},{\"name\":\"mapred.partitioner.class\",\"value\":\"org.apache.hadoop.examples.SleepJob\"},{\"name\":\"mapred.speculative.execution\",\"value\":\"false\"},{\"name\":\"sleep.job.map.sleep.time\",\"value\":\"0\"},{\"name\":\"sleep.job.reduce.sleep.time\",\"value\":\"${REDUCER_SLEEP_TIME}\"}]'
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert_equal(response.status_code, 200)
return Document.objects.available_docs(Workflow, self.user).get(name='sleep_job').content_object
def install_examples(self):
global _INITIALIZED
if _INITIALIZED:
return
self.client.post(reverse('oozie:install_examples'))
self.cluster.fs.do_as_user(self.username, self.cluster.fs.create_home_dir, self.home_dir)
self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, self.home_dir, 0777, True)
_INITIALIZED = True
def test_uncommon_views(self):
"""
These views exist, but tend not to be ever called, because they're not in the normal UI.
"""
raise SkipTest
self.client.get("/jobbrowser/clusterstatus")
self.client.get("/jobbrowser/queues")
self.client.get("/jobbrowser/jobbrowser")
def test_failed_jobs(self):
"""
Test jobs with genuine failure, not just killed
"""
# Create design that will fail because the script file isn't there
INPUT_DIR = self.home_dir + '/input'
OUTPUT_DIR = self.home_dir + '/output'
try:
self.cluster.fs.mkdir(self.home_dir + "/jt-test_failed_jobs")
self.cluster.fs.mkdir(INPUT_DIR)
self.cluster.fs.rmtree(OUTPUT_DIR)
except:
pass
response = self.client.post(reverse('jobsub.views.new_design', kwargs={'node_type': 'mapreduce'}), {
'name': ['test_failed_jobs-1'],
'description': ['description test_failed_jobs-1'],
'args': '',
'jar_path': '/user/hue/oozie/workspaces/lib/hadoop-examples.jar',
'prepares': '[]',
'archives': '[]',
'files': '[]',
'job_properties': ['[{"name":"mapred.input.dir","value":"%s"},\
{"name":"mapred.output.dir","value":"%s"},\
{"name":"mapred.mapper.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.combiner.class","value":"org.apache.hadoop.mapred.lib.dne"},\
{"name":"mapred.reducer.class","value":"org.apache.hadoop.mapred.lib.dne"}]' % (INPUT_DIR, OUTPUT_DIR)]
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', follow=True)
# Submit the job
design_dict = json.loads(response.content)
design_id = int(design_dict['id'])
response = self.client.post(reverse('oozie:submit_workflow',
args=[design_id]),
data={u'form-MAX_NUM_FORMS': [u''],
u'form-INITIAL_FORMS': [u'1'],
u'form-0-name': [u'REDUCER_SLEEP_TIME'],
u'form-0-value': [u'1'],
u'form-TOTAL_FORMS': [u'1']},
follow=True)
oozie_jobid = response.context['oozie_workflow'].id
job = OozieServerProvider.wait_until_completion(oozie_jobid, timeout=120, step=1)
self.hadoop_job_id = get_hadoop_job_id(self.oozie, oozie_jobid, 1)
self.hadoop_job_id_short = views.get_shorter_id(self.hadoop_job_id)
# Select only killed jobs (should be absent)
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/?format=json&state=killed')
assert_false(self.hadoop_job_id_short in response.content)
# Select only failed jobs (should be present)
# Map job should succeed. Reduce job should fail.
response = self.client.get('/jobbrowser/jobs/?format=json&state=failed')
assert_true(self.hadoop_job_id_short in response.content)
raise SkipTest # Not compatible with MR2
# The single job view should have the failed task table
response = self.client.get('/jobbrowser/jobs/%s' % (self.hadoop_job_id,))
html = response.content.lower()
assert_true('failed task' in html, html)
# The map task should say success (empty input)
map_task_id = self.hadoop_job_id.replace('job', 'task') + '_m_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (self.hadoop_job_id, map_task_id))
assert_true('succeed' in response.content)
assert_true('failed' not in response.content)
# The reduce task should say failed
reduce_task_id = self.hadoop_job_id.replace('job', 'task') + '_r_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (self.hadoop_job_id, reduce_task_id))
assert_true('succeed' not in response.content)
assert_true('failed' in response.content)
# Selecting by failed state should include the failed map
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=failed' % (self.hadoop_job_id,))
assert_true('r_000000' in response.content)
assert_true('m_000000' not in response.content)
def test_jobs_page(self):
# All jobs page and fetch job ID
# Taking advantage of the fact new jobs are at the top of the list!
response = self.client.get('/jobbrowser/jobs/?format=json')
assert_true(self.hadoop_job_id_short in response.content, response.content)
# Make sure job succeeded
response = self.client.get('/jobbrowser/jobs/?format=json&state=completed')
assert_true(self.hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?format=json&state=failed')
assert_false(self.hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?format=json&state=running')
assert_false(self.hadoop_job_id_short in response.content)
response = self.client.get('/jobbrowser/jobs/?format=json&state=killed')
assert_false(self.hadoop_job_id_short in response.content)
def test_tasks_page(self):
raise SkipTest
# Test tracker page
early_task_id = self.hadoop_job_id.replace('job', 'task') + '_m_000000'
response = self.client.get('/jobbrowser/jobs/%s/tasks/%s' % (self.hadoop_job_id, early_task_id))
tracker_url = re.search('<a href="(/jobbrowser/trackers/.+?)"', response.content).group(1)
response = self.client.get(tracker_url)
assert_true('Tracker at' in response.content)
def test_job_permissions(self):
# Login as ourself
finish = SHARE_JOBS.set_for_testing(True)
try:
response = self.client.get('/jobbrowser/jobs/?format=json&user=')
assert_true(self.hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = self.client.get('/jobbrowser/jobs/?format=json&user=')
assert_true(self.hadoop_job_id_short in response.content)
finally:
finish()
# Login as someone else
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "jobbrowser")
finish = SHARE_JOBS.set_for_testing(True)
try:
response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
assert_true(self.hadoop_job_id_short in response.content)
finally:
finish()
finish = SHARE_JOBS.set_for_testing(False)
try:
response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
assert_false(self.hadoop_job_id_short in response.content)
finally:
finish()
def test_job_counter(self):
raise SkipTest
# Single job page
response = self.client.get('/jobbrowser/jobs/%s' % self.hadoop_job_id)
# Check some counters for single job.
counters = response.context['job'].counters
counters_file_bytes_written = counters['org.apache.hadoop.mapreduce.FileSystemCounter']['counters']['FILE_BYTES_WRITTEN']
assert_true(counters_file_bytes_written['map'] > 0)
assert_true(counters_file_bytes_written['reduce'] > 0)
def test_task_page(self):
raise SkipTest
response = self.client.get('/jobbrowser/jobs/%s/tasks' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by tasktype
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktype=reduce' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
# Select by taskstate
response = self.client.get('/jobbrowser/jobs/%s/tasks?taskstate=succeeded' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 4)
# Select by text
response = self.client.get('/jobbrowser/jobs/%s/tasks?tasktext=clean' % (self.hadoop_job_id,))
assert_true(len(response.context['page'].object_list), 1)
def test_job_single_logs_page(self):
raise SkipTest
response = self.client.get('/jobbrowser/jobs/%s/single_logs' % (self.hadoop_job_id))
assert_true('syslog' in response.content, response.content)
assert_true('<div class="tab-pane active" id="logsSysLog">' in response.content or
'<div class="tab-pane active" id="logsStdErr">' in response.content or # Depending on Hadoop
'<div class="tab-pane active" id="logsStdOut">' in response.content, # For jenkins
response.content)
class TestMapReduce1NoHadoop:
def test_acls_job(self):
job = MockMr1Job()
assert_true(can_view_job('test', job))
assert_true(can_modify_job('test', job))
assert_false(can_view_job('test2', job))
assert_false(can_modify_job('test2', job))
class MockMr1Job(Job):
def __init__(self):
self.is_mr2 = False
self._full_job_conf = {
'mapreduce.cluster.acls.enabled': True,
'mapreduce.job.acl-modify-job': 'test',
'mapreduce.job.acl-view-job': 'test'
}
class TestMapReduce2NoHadoop:
def setUp(self):
# Beware: Monkey patching
if not hasattr(resource_manager_api, 'old_get_resource_manager'):
resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
if not hasattr(history_server_api, 'old_get_history_server_api'):
history_server_api.old_get_history_server_api = history_server_api.get_history_server_api
resource_manager_api.get_resource_manager = lambda: MockResourceManagerApi()
mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
history_server_api.get_history_server_api = lambda: HistoryServerApi()
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "jobbrowser")
self.user = User.objects.get(username='test')
self.c2 = make_logged_in_client(is_superuser=False, username="test2")
grant_access("test2", "test2", "jobbrowser")
self.user2 = User.objects.get(username='test2')
self.finish = [
YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
SHARE_JOBS.set_for_testing(False)
]
assert_true(cluster.is_yarn())
def tearDown(self):
resource_manager_api.get_resource_manager = getattr(resource_manager_api, 'old_get_resource_manager')
mapreduce_api.get_mapreduce_api = getattr(mapreduce_api, 'old_get_mapreduce_api')
history_server_api.get_history_server_api = getattr(history_server_api, 'old_get_history_server_api')
for f in self.finish:
f()
def test_jobs(self):
response = self.c.get('/jobbrowser/?format=json')
response_content = json.loads(response.content)
assert_equal(len(response_content['jobs']), 2)
response = self.c.get('/jobbrowser/jobs/?format=json&text=W=MapReduce-copy2')
response_content = json.loads(response.content)
assert_equal(len(response_content['jobs']), 1)
def test_running_job(self):
response = self.c.get('/jobbrowser/jobs/application_1356251510842_0054')
assert_true('job_1356251510842_0054' in response.content)
assert_true('RUNNING' in response.content)
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054')
assert_true('job_1356251510842_0054' in response.content)
assert_true('RUNNING' in response.content)
def test_finished_job(self):
response = self.c.get('/jobbrowser/jobs/application_1356251510842_0009')
assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009')
assert_equal(response.context['job'].jobId, 'job_1356251510842_0009')
def job_not_assigned(self):
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url')
assert_equal(response.context['jobid'], 'job_1356251510842_0009')
assert_equal(response.context['path'], '/my_url')
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0009/job_not_assigned//my_url?format=json')
result = json.loads(response.content)
assert_equal(result['status'], 0)
def test_acls_job(self):
response = self.c.get('/jobbrowser/jobs/job_1356251510842_0054') # Check in perm decorator
assert_true(can_view_job('test', response.context['job']))
assert_true(can_modify_job('test', response.context['job']))
response2 = self.c2.get('/jobbrowser/jobs/job_1356251510842_0054')
assert_true("don't have permission to access job" in response2.content, response2.content)
assert_false(can_view_job('test2', response.context['job']))
assert_false(can_modify_job('test2', response.context['job']))
def test_kill_job(self):
job_id = 'application_1356251510842_0054'
try:
response = self.c.post('/jobbrowser/jobs/%s/kill?format=json' % job_id)
assert_equal(json.loads(response.content), {"status": 0})
finally:
MockResourceManagerApi.APPS[job_id]['state'] = 'RUNNING'
class MockResourceManagerApi:
APPS = {
'application_1356251510842_0054': {
u'finishedTime': 1356961070119, u'name': u'oozie:launcher:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
u'amContainerLogs': u'http://runreal:8042/node/containerlogs/container_1356251510842_0054_01_000001/romain', u'clusterId': 1356251510842,
u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0054/jobhistory/job/job_1356251510842_0054', u'amHostHttpAddress': u'runreal:8042',
u'startedTime': 1356961057225, u'queue': u'default', u'state': u'RUNNING', u'elapsedTime': 12894, u'finalStatus': u'UNDEFINED', u'diagnostics': u'',
u'progress': 100.0, u'trackingUI': u'History', u'id': u'application_1356251510842_0054', u'user': u'test',
# For when the job is KILLED
'startTime': 1356961057226, 'finishTime': 1356961057226,
'applicationType': 'MAPREDUCE'
},
'application_1356251510842_0009': {
u'finishedTime': 1356467118570, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy2:A=Sleep:ID=0000002-121223003201296-oozie-oozi-W',
u'amContainerLogs': u'http://runreal:8042/node/containerlogs/container_1356251510842_0009_01_000001/romain', u'clusterId': 1356251510842,
u'trackingUrl': u'http://localhost:8088/proxy/application_1356251510842_0009/jobhistory/job/job_1356251510842_0009', u'amHostHttpAddress': u'runreal:8042',
u'startedTime': 1356467081121, u'queue': u'default', u'state': u'FINISHED', u'elapsedTime': 37449, u'finalStatus': u'SUCCEEDED', u'diagnostics': u'',
u'progress': 100.0, u'trackingUI': u'History', u'id': u'application_1356251510842_0009', u'user': u'test',
'applicationType': 'MAPREDUCE'
}
}
def __init__(self, oozie_url=None): pass
def apps(self, **kwargs):
return {
'apps': {
'app': [
# RUNNING
MockResourceManagerApi.APPS['application_1356251510842_0054'],
# FINISHED
MockResourceManagerApi.APPS['application_1356251510842_0009'],
]
}
}
def app(self, job_id):
return {
u'app': MockResourceManagerApi.APPS[job_id]
}
class MockMapreduce2Api(object):
"""
MockMapreduceApi and HistoryServerApi are very similar and inherit from it.
"""
def __init__(self, oozie_url=None): pass
def tasks(self, job_id):
return {
u'tasks': {
u'task': [{
u'finishTime': 1357153330271, u'successfulAttempt': u'attempt_1356251510842_0062_m_000000_0', u'elapsedTime': 1901, u'state': u'SUCCEEDED',
u'startTime': 1357153328370, u'progress': 100.0, u'type': u'MAP', u'id': u'task_1356251510842_0062_m_000000'},
{
u'finishTime': 0, u'successfulAttempt': u'', u'elapsedTime': 0, u'state': u'SCHEDULED', u'startTime': 1357153326322, u'progress': 0.0,
u'type': u'REDUCE', u'id': u'task_1356251510842_0062_r_000000'}
]
}
}
def conf(self, job_id):
return {
"conf" : {
"path" : "hdfs://host.domain.com:9000/user/user1/.staging/job_1326232085508_0004/job.xml",
"property" : [
{
"name" : "dfs.datanode.data.dir",
"value" : "/home/hadoop/hdfs/data",
}, {
"name" : "mapreduce.job.acl-modify-job",
"value" : "test",
}, {
"name" : "mapreduce.job.acl-view-job",
"value" : "test",
}
]
}
}
def job_attempts(self, job_id):
return {
"jobAttempts" : {
"jobAttempt" : [
{
"nodeId" : "host.domain.com:8041",
"nodeHttpAddress" : "host.domain.com:8042",
"startTime" : 1326238773493,
"id" : 1,
"logsLink" : "http://host.domain.com:8042/node/containerlogs/container_1326232085508_0004_01_000001",
"containerId" : "container_1326232085508_0004_01_000001"
}
]
}
}
def task_attempts(self, job_id, task_id):
return {
"taskAttempts" : {
"taskAttempt" : [
{
"elapsedMergeTime" : 47,
"shuffleFinishTime" : 1326238780052,
"assignedContainerId" : "container_1326232085508_0004_01_000003",
"progress" : 100,
"elapsedTime" : 0,
"state" : "RUNNING",
"elapsedShuffleTime" : 2592,
"mergeFinishTime" : 1326238780099,
"rack" : "/98.139.92.0",
"elapsedReduceTime" : 0,
"nodeHttpAddress" : "host.domain.com:8042",
"type" : "REDUCE",
"startTime" : 1326238777460,
"id" : "attempt_1326232085508_4_4_r_0_0",
"finishTime" : 0
}
]
}
}
def counters(self, job_id):
return {
"jobCounters" : {
"id" : "job_1326232085508_4_4",
"counterGroup" : [
{
"counterGroupName" : "org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter",
"counter" : [
{
"reduceCounterValue" : 0,
"mapCounterValue" : 0,
"totalCounterValue" : 0,
"name" : "BYTES_READ"
}
]
},
{
"counterGroupName" : "org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter",
"counter" : [
{
"reduceCounterValue" : 0,
"mapCounterValue" : 0,
"totalCounterValue" : 0,
"name" : "BYTES_WRITTEN"
}
]
}
]
}
}
def kill(self, job_id):
job_id = job_id.replace('job', 'application')
MockResourceManagerApi.APPS[job_id]['state'] = 'KILLED'
return {}
class MockMapreduceApi(MockMapreduce2Api):
def job(self, user, job_id):
if '1356251510842_0009' not in job_id:
job = {
u'job': {
u'reducesCompleted': 0, u'mapsRunning': 1, u'id': u'job_1356251510842_0054', u'successfulReduceAttempts': 0, u'successfulMapAttempts': 0,
u'uberized': False, u'reducesTotal': 1, u'elapsedTime': 3426, u'mapsPending': 0, u'state': u'RUNNING', u'failedReduceAttempts': 0,
u'mapsCompleted': 0, u'killedMapAttempts': 0, u'killedReduceAttempts': 0, u'runningReduceAttempts': 0, u'failedMapAttempts': 0, u'mapsTotal': 1,
u'user': u'test', u'startTime': 1357152972886, u'reducesPending': 1, u'reduceProgress': 0.0, u'finishTime': 0,
u'name': u'select avg(salary) from sample_07(Stage-1)', u'reducesRunning': 0, u'newMapAttempts': 0, u'diagnostics': u'', u'mapProgress': 0.0,
u'runningMapAttempts': 1, u'newReduceAttempts': 1,
# Does not seem to exist in the API; we actually skip it in that case.
"acls" : [{
"value" : "test",
"name" : "mapreduce.job.acl-modify-job"
}, {
"value" : "test",
"name" : "mapreduce.job.acl-view-job"
}
],
}
}
job['job']['id'] = job_id
return job
class HistoryServerApi(MockMapreduce2Api):
def __init__(self, oozie_url=None): pass
def job(self, user, job_id):
if '1356251510842_0054' == job_id:
return {
u'job': {
u'reducesCompleted': 1, u'avgMapTime': 1798, u'avgMergeTime': 1479, u'id': job_id,
u'successfulReduceAttempts': 1, u'successfulMapAttempts': 2, u'uberized': False, u'reducesTotal': 1,
u'state': u'KILLED', u'failedReduceAttempts': 0, u'mapsCompleted': 2,
u'killedMapAttempts': 0, u'diagnostics': u'', u'mapsTotal': 2, u'user': u'test',
u'startTime': 1357151916268, u'avgReduceTime': 137,
u'finishTime': 1357151923925, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
u'avgShuffleTime': 1421, u'queue': u'default', u'killedReduceAttempts': 0, u'failedMapAttempts': 0
}
}
else:
return {
u'job': {
u'reducesCompleted': 1, u'avgMapTime': 1798, u'avgMergeTime': 1479, u'id': u'job_1356251510842_0009',
u'successfulReduceAttempts': 1, u'successfulMapAttempts': 2, u'uberized': False, u'reducesTotal': 1,
u'state': u'SUCCEEDED', u'failedReduceAttempts': 0, u'mapsCompleted': 2,
u'killedMapAttempts': 0, u'diagnostics': u'', u'mapsTotal': 2, u'user': u'test',
u'startTime': 1357151916268, u'avgReduceTime': 137,
u'finishTime': 1357151923925, u'name': u'oozie:action:T=map-reduce:W=MapReduce-copy:A=Sleep:ID=0000004-121223003201296-oozie-oozi-W',
u'avgShuffleTime': 1421, u'queue': u'default', u'killedReduceAttempts': 0, u'failedMapAttempts': 0
}
}
| {
"content_hash": "22aeb4fc6c350e932f2aaf83afad2cc6",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 937,
"avg_line_length": 43.012084592145015,
"alnum_prop": 0.6302943035751915,
"repo_name": "erickt/hue",
"id": "8d9df27fb438137348507f6cfd9a5551cb4d8f6b",
"size": "29267",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/jobbrowser/src/jobbrowser/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "9315"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10789304"
},
{
"name": "C++",
"bytes": "178518"
},
{
"name": "CSS",
"bytes": "501761"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gettext Catalog",
"bytes": "13534784"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21550731"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2675808"
},
{
"name": "Makefile",
"bytes": "86291"
},
{
"name": "Mako",
"bytes": "2035662"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "31452674"
},
{
"name": "Scala",
"bytes": "60295"
},
{
"name": "Shell",
"bytes": "48346"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "100994"
},
{
"name": "XSLT",
"bytes": "342237"
}
],
"symlink_target": ""
} |
"""Symbols exported by this module are imported into the puls namespace. Usage:
from puls import <symbol>
"""
from __future__ import absolute_import, unicode_literals, division
from puls.compat import range
import math
class AttributeDict(dict):
def __getattr__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise AttributeError(key)
__setattr__ = dict.__setattr__
__delattr__ = dict.__delattr__
class Pagination(object):
def __init__(self, queryset, page, per_page):
self.current = page
self.per_page = per_page
self.total = queryset.count()
self.start = (page - 1) * per_page
self.end = min(self.total, page * per_page)
self.items = queryset[self.start:self.end]
def __iter__(self):
return iter(self.items)
@property
def last(self):
return int(math.ceil(self.total / float(self.per_page)))
@property
def prev(self):
return self.current - 1
@property
def has_prev(self):
return self.current > 1
@property
def has_next(self):
return self.current < self.last
@property
def next(self):
"""Number of the next page"""
return self.current + 1
def all(self, left_edge=2, left_current=2, right_current=2, right_edge=2):
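# Yield the page numbers worth rendering: the first left_edge pages, a window around
# the current page, and the last right_edge pages, emitting a single None wherever a
# run of pages is skipped so templates can print an ellipsis.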
last = 0
for num in range(1, self.last + 1):
if \
num <= left_edge or (
num > self.current - left_current - 1 and
num < self.current + right_current + 1
) or \
num > self.last - right_edge:
if last + 1 != num:
yield None
yield num
last = num
def paginate(queryset, page, per_page=20):
return Pagination(queryset, page, per_page)
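# Usage sketch: paginate() wraps any queryset that supports count() and slicing, e.g.
#   page = paginate(SomeModel.objects, page=2, per_page=20)   # SomeModel is illustrative
#   for num in page.all(): render num, or an ellipsis when num is None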
| {
"content_hash": "d69a6d14fd3b4aa0ac7512d1cce08f7a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 25.675675675675677,
"alnum_prop": 0.5505263157894736,
"repo_name": "za-creature/puls",
"id": "06d44d2710983e679a3e4acd86a93ef162bf5113",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puls/globals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "96485"
},
{
"name": "HTML",
"bytes": "486320"
},
{
"name": "JavaScript",
"bytes": "272151"
},
{
"name": "Python",
"bytes": "119466"
},
{
"name": "XML",
"bytes": "225"
}
],
"symlink_target": ""
} |
"""
Support for EBox.
Get data from 'My Usage Page' page: https://client.ebox.ca/myusage
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ebox/
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD,
CONF_NAME, CONF_MONITORED_VARIABLES)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyebox==0.1.0']
_LOGGER = logging.getLogger(__name__)
GIGABITS = 'Gb' # type: str
PRICE = 'CAD' # type: str
DAYS = 'days' # type: str
PERCENT = '%' # type: str
DEFAULT_NAME = 'EBox'
REQUESTS_TIMEOUT = 15
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
SENSOR_TYPES = {
'usage': ['Usage',
PERCENT, 'mdi:percent'],
'balance': ['Balance',
PRICE, 'mdi:square-inc-cash'],
'limit': ['Data limit',
GIGABITS, 'mdi:download'],
'days_left': ['Days left',
DAYS, 'mdi:calendar-today'],
'before_offpeak_download': ['Download before offpeak',
GIGABITS, 'mdi:download'],
'before_offpeak_upload': ['Upload before offpeak',
GIGABITS, 'mdi:upload'],
'before_offpeak_total': ['Total before offpeak',
GIGABITS, 'mdi:download'],
'offpeak_download': ['Offpeak download',
GIGABITS, 'mdi:download'],
'offpeak_upload': ['Offpeak Upload',
GIGABITS, 'mdi:upload'],
'offpeak_total': ['Offpeak Total',
GIGABITS, 'mdi:download'],
'download': ['Download',
GIGABITS, 'mdi:download'],
'upload': ['Upload',
GIGABITS, 'mdi:upload'],
'total': ['Total',
GIGABITS, 'mdi:download'],
}
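# Each SENSOR_TYPES entry maps a pyebox data key to (friendly name, unit, mdi icon);
# only the keys listed under monitored_variables in the platform config become sensors.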
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MONITORED_VARIABLES):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the EBox sensor."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
ebox_data = EBoxData(username, password)
ebox_data.update()
except requests.exceptions.HTTPError as error:
_LOGGER.error("Failt login: %s", error)
return False
name = config.get(CONF_NAME)
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
sensors.append(EBoxSensor(ebox_data, variable, name))
add_devices(sensors, True)
class EBoxSensor(Entity):
"""Implementation of a EBox sensor."""
def __init__(self, ebox_data, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.ebox_data = ebox_data
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
def update(self):
"""Get the latest data from EBox and update the state."""
self.ebox_data.update()
if self.type in self.ebox_data.data:
self._state = round(self.ebox_data.data[self.type], 2)
class EBoxData(object):
"""Get data from Ebox."""
def __init__(self, username, password):
"""Initialize the data object."""
from pyebox import EboxClient
self.client = EboxClient(username, password, REQUESTS_TIMEOUT)
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Ebox."""
from pyebox.client import PyEboxError
try:
self.client.fetch_data()
except PyEboxError as exp:
_LOGGER.error("Error on receive last EBox data: %s", exp)
return
# Update data
self.data = self.client.get_data()
| {
"content_hash": "038a64427b92c65679f6e955e96f796e",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 74,
"avg_line_length": 30.83225806451613,
"alnum_prop": 0.610797237915882,
"repo_name": "shaftoe/home-assistant",
"id": "3cfa5ef9ff4682264e199ab7a838c6c114d655de",
"size": "4779",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/ebox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1584258"
},
{
"name": "Python",
"bytes": "5479272"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15017"
}
],
"symlink_target": ""
} |
import datetime
import json
from decimal import Decimal
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator, validate_slug
from django import forms
from django.core.urlresolvers import reverse
from django.db.models import ProtectedError
from django.forms.util import ErrorList
from django.template.loader import render_to_string
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_noop, ugettext as _, ugettext
from crispy_forms.bootstrap import FormActions, StrictButton, InlineField
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from django_countries.data import COUNTRIES
from corehq import privileges, toggles
from corehq.apps.accounting.exceptions import CreateAccountingAdminError
from corehq.apps.accounting.invoicing import DomainInvoiceFactory
from corehq.apps.accounting.tasks import send_subscription_reminder_emails
from corehq.apps.users.models import WebUser
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.django.email import send_HTML_email
from django_prbac.models import Role, Grant, UserRole
from corehq.apps.accounting.async_handlers import (
FeatureRateAsyncHandler,
SoftwareProductRateAsyncHandler,
)
from corehq.apps.accounting.utils import (
is_active_subscription, has_subscription_already_ended, get_money_str,
get_first_last_days, make_anchor_tag
)
from corehq.apps.hqwebapp.crispy import BootstrapMultiField, TextField
from corehq.apps.domain.models import Domain
from corehq.apps.accounting.models import (
BillingAccount,
BillingContactInfo,
BillingRecord,
CreditAdjustment,
CreditAdjustmentReason,
CreditLine,
Currency,
EntryPoint,
Feature,
FeatureRate,
FeatureType,
Invoice,
ProBonoStatus,
SoftwarePlan,
SoftwarePlanEdition,
SoftwarePlanVersion,
SoftwarePlanVisibility,
SoftwareProduct,
SoftwareProductRate,
SoftwareProductType,
Subscription,
SubscriptionType,
WireBillingRecord,
)
class BillingAccountBasicForm(forms.Form):
name = forms.CharField(label="Name")
salesforce_account_id = forms.CharField(label=_("Salesforce Account ID"),
max_length=80,
required=False)
currency = forms.ChoiceField(label="Currency")
emails = forms.CharField(
label=_('Client Contact Emails'),
widget=forms.Textarea,
max_length=BillingContactInfo._meta.get_field('emails').max_length,
)
is_active = forms.BooleanField(
label=_("Account is Active"),
required=False,
initial=True,
)
active_accounts = forms.IntegerField(
label=_("Transfer Subscriptions To"),
help_text=_("Transfer any existing subscriptions to the "
"Billing Account specified here."),
required=False,
)
dimagi_contact = forms.EmailField(
label=_("Dimagi Contact Email"),
max_length=BillingAccount._meta.get_field('dimagi_contact').max_length,
required=False,
)
entry_point = forms.ChoiceField(
label=_("Entry Point"),
choices=EntryPoint.CHOICES,
)
def __init__(self, account, *args, **kwargs):
self.account = account
if account is not None:
contact_info, _ = BillingContactInfo.objects.get_or_create(account=account)
kwargs['initial'] = {
'name': account.name,
'salesforce_account_id': account.salesforce_account_id,
'currency': account.currency.code,
'emails': contact_info.emails,
'is_active': account.is_active,
'dimagi_contact': account.dimagi_contact,
'entry_point': account.entry_point,
}
else:
kwargs['initial'] = {
'currency': Currency.get_default().code,
'entry_point': EntryPoint.CONTRACTED,
}
super(BillingAccountBasicForm, self).__init__(*args, **kwargs)
self.fields['currency'].choices =\
[(cur.code, cur.code) for cur in Currency.objects.order_by('code')]
self.helper = FormHelper()
self.helper.form_id = "account-form"
self.helper.form_class = "form-horizontal"
additional_fields = []
if account is not None:
additional_fields.append(crispy.Field(
'is_active',
data_bind="checked: is_active",
))
if account.subscription_set.count() > 0:
additional_fields.append(crispy.Div(
crispy.Field(
'active_accounts',
css_class="input-xxlarge",
placeholder="Select Active Account",
),
data_bind="visible: showActiveAccounts"
))
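# The data_bind attributes above are knockout-style bindings: the transfer-subscriptions
# picker is presumably only shown client-side once "Account is Active" is unchecked.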
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Basic Information',
'name',
crispy.Field('emails', css_class='input-xxlarge'),
'dimagi_contact',
'salesforce_account_id',
'currency',
'entry_point',
crispy.Div(*additional_fields),
),
FormActions(
crispy.ButtonHolder(
crispy.Submit(
'account_basic',
'Update Basic Information'
if account is not None else 'Add New Account'
)
)
)
)
def clean_name(self):
name = self.cleaned_data['name']
conflicting_named_accounts = BillingAccount.objects.filter(name=name)
if self.account:
conflicting_named_accounts = conflicting_named_accounts.exclude(name=self.account.name)
if conflicting_named_accounts.exists():
raise ValidationError(_("Name '%s' is already taken.") % name)
return name
def clean_emails(self):
account_contact_emails = self.cleaned_data['emails']
if account_contact_emails != '':
invalid_emails = []
for email in account_contact_emails.split(','):
email_no_whitespace = email.strip()
# TODO - validate emails
if len(invalid_emails) != 0:
raise ValidationError(
_("Invalid emails: %s") % ', '.join(invalid_emails)
)
return account_contact_emails
def clean_active_accounts(self):
transfer_subs = self.cleaned_data['active_accounts']
if (not self.cleaned_data['is_active'] and self.account is not None
and self.account.subscription_set.count() > 0
and not transfer_subs
):
raise ValidationError(
_("This account has subscriptions associated with it. "
"Please specify a transfer account before deactivating.")
)
if self.account is not None and transfer_subs == self.account.id:
raise ValidationError(
_("The transfer account can't be the same one you're trying "
"to deactivate.")
)
return transfer_subs
def create_account(self):
name = self.cleaned_data['name']
salesforce_account_id = self.cleaned_data['salesforce_account_id']
currency, _ = Currency.objects.get_or_create(
code=self.cleaned_data['currency']
)
account = BillingAccount(
name=name,
salesforce_account_id=salesforce_account_id,
currency=currency,
entry_point=self.cleaned_data['entry_point'],
)
account.save()
contact_info, _ = BillingContactInfo.objects.get_or_create(
account=account,
)
contact_info.emails = self.cleaned_data['emails']
contact_info.save()
return account
def update_basic_info(self, account):
account.name = self.cleaned_data['name']
account.is_active = self.cleaned_data['is_active']
transfer_id = self.cleaned_data['active_accounts']
if transfer_id:
transfer_account = BillingAccount.objects.get(id=transfer_id)
for sub in account.subscription_set.all():
sub.account = transfer_account
sub.save()
account.salesforce_account_id = \
self.cleaned_data['salesforce_account_id']
account.currency, _ = Currency.objects.get_or_create(
code=self.cleaned_data['currency'],
)
account.dimagi_contact = self.cleaned_data['dimagi_contact']
account.entry_point = self.cleaned_data['entry_point']
account.save()
contact_info, _ = BillingContactInfo.objects.get_or_create(
account=account,
)
contact_info.emails = self.cleaned_data['emails']
contact_info.save()
class BillingAccountContactForm(forms.ModelForm):
class Meta:
model = BillingContactInfo
fields = [
'first_name',
'last_name',
'company_name',
'phone_number',
'first_line',
'second_line',
'city',
'state_province_region',
'postal_code',
'country',
]
def __init__(self, account, *args, **kwargs):
contact_info, _ = BillingContactInfo.objects.get_or_create(
account=account,
)
super(BillingAccountContactForm, self).__init__(instance=contact_info,
*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = "form-horizontal"
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Contact Information',
'first_name',
'last_name',
'company_name',
'phone_number',
'first_line',
'second_line',
'city',
'state_province_region',
'postal_code',
crispy.Field(
'country',
css_class="input-xlarge",
data_countryname=COUNTRIES.get(
args[0].get('country') if len(args) > 0
else account.billingcontactinfo.country,
''
)
),
),
FormActions(
crispy.ButtonHolder(
crispy.Submit(
'account_contact',
'Update Contact Information'
)
)
),
)
class SubscriptionForm(forms.Form):
account = forms.IntegerField(
label=_("Billing Account")
)
start_date = forms.DateField(
label=_("Start Date"), widget=forms.DateInput()
)
end_date = forms.DateField(
label=_("End Date"), widget=forms.DateInput(), required=False
)
delay_invoice_until = forms.DateField(
label=_("Delay Invoice Until"), widget=forms.DateInput(), required=False
)
plan_product = forms.ChoiceField(
label=_("Core Product"), initial=SoftwareProductType.COMMCARE,
choices=SoftwareProductType.CHOICES,
)
plan_edition = forms.ChoiceField(
label=_("Edition"), initial=SoftwarePlanEdition.ENTERPRISE,
choices=SoftwarePlanEdition.CHOICES,
)
plan_version = forms.IntegerField(label=_("Software Plan"))
domain = forms.CharField(label=_("Project Space"))
salesforce_contract_id = forms.CharField(
label=_("Salesforce Deployment ID"), max_length=80, required=False
)
do_not_invoice = forms.BooleanField(
label=_("Do Not Invoice"), required=False
)
auto_generate_credits = forms.BooleanField(
label=_("Auto-generate Plan Credits"), required=False
)
active_accounts = forms.IntegerField(
label=_("Transfer Subscription To"),
required=False,
)
service_type = forms.ChoiceField(
label=_("Type"),
choices=SubscriptionType.CHOICES,
initial=SubscriptionType.CONTRACTED,
)
pro_bono_status = forms.ChoiceField(
label=_("Pro-Bono"),
choices=ProBonoStatus.CHOICES,
initial=ProBonoStatus.NO,
)
def __init__(self, subscription, account_id, web_user, *args, **kwargs):
# account_id is not referenced if subscription is not None
super(SubscriptionForm, self).__init__(*args, **kwargs)
self.subscription = subscription
self.is_existing = subscription is not None
self.web_user = web_user
today = datetime.date.today()
start_date_field = crispy.Field('start_date', css_class="date-picker")
end_date_field = crispy.Field('end_date', css_class="date-picker")
delay_invoice_until_field = crispy.Field('delay_invoice_until',
css_class="date-picker")
if self.is_existing:
# circular import
from corehq.apps.accounting.views import (
EditSoftwarePlanView, ManageBillingAccountView
)
from corehq.apps.domain.views import DefaultProjectSettingsView
self.fields['account'].initial = subscription.account.id
account_field = TextField(
'account',
'<a href="%(account_url)s">%(account_name)s</a>' % {
'account_url': reverse(ManageBillingAccountView.urlname,
args=[subscription.account.id]),
'account_name': subscription.account.name,
}
)
self.fields['plan_version'].initial = subscription.plan_version.id
plan_version_field = TextField(
'plan_version',
'<a href="%(plan_version_url)s">%(plan_name)s</a>' % {
'plan_version_url': reverse(
EditSoftwarePlanView.urlname,
args=[subscription.plan_version.plan.id]),
'plan_name': subscription.plan_version,
})
try:
plan_product = subscription.plan_version.product_rates.all()[0].product.product_type
self.fields['plan_product'].initial = plan_product
except (IndexError, SoftwarePlanVersion.DoesNotExist):
plan_product = (
'<i class="icon-alert-sign"></i> No Product Exists for '
'the Plan (update required)'
)
plan_product_field = TextField(
'plan_product',
plan_product,
)
self.fields['plan_edition'].initial = subscription.plan_version.plan.edition
plan_edition_field = TextField(
'plan_edition',
self.fields['plan_edition'].initial
)
self.fields['domain'].choices = [
(subscription.subscriber.domain, subscription.subscriber.domain)
]
self.fields['domain'].initial = subscription.subscriber.domain
domain_field = TextField(
'domain',
'<a href="%(project_url)s">%(project_name)s</a>' % {
'project_url': reverse(DefaultProjectSettingsView.urlname,
args=[subscription.subscriber.domain]),
'project_name': subscription.subscriber.domain,
})
self.fields['start_date'].initial = subscription.date_start.isoformat()
self.fields['end_date'].initial = (
subscription.date_end.isoformat()
if subscription.date_end is not None else subscription.date_end
)
self.fields['delay_invoice_until'].initial = subscription.date_delay_invoicing
self.fields['domain'].initial = subscription.subscriber.domain
self.fields['salesforce_contract_id'].initial = subscription.salesforce_contract_id
self.fields['do_not_invoice'].initial = subscription.do_not_invoice
self.fields['auto_generate_credits'].initial = subscription.auto_generate_credits
self.fields['service_type'].initial = subscription.service_type
self.fields['pro_bono_status'].initial = subscription.pro_bono_status
if (subscription.date_start is not None
and subscription.date_start <= today):
start_date_field = TextField(
'start_date',
"%(start_date)s (already started)" % {
'start_date': self.fields['start_date'].initial,
})
if has_subscription_already_ended(subscription):
end_date_field = TextField(
'end_date',
"%(end_date)s (already ended)" % {
'end_date': self.fields['end_date'].initial,
})
if (subscription.date_delay_invoicing is not None
and subscription.date_delay_invoicing <= today):
delay_invoice_until_field = TextField(
'delay_invoice_until',
"%(delay_date)s (date has already passed)" % {
'delay_date': self.fields['delay_invoice_until'].initial,
})
self.fields['plan_version'].required = False
self.fields['domain'].required = False
else:
account_field = crispy.Field(
'account', css_class="input-xxlarge",
placeholder="Search for Billing Account"
)
if account_id is not None:
self.fields['account'].initial = account_id
domain_field = crispy.Field(
'domain', css_class="input-xxlarge",
placeholder="Search for Project Space"
)
plan_product_field = crispy.Field('plan_product')
plan_edition_field = crispy.Field('plan_edition')
plan_version_field = crispy.Field(
'plan_version', css_class="input-xxlarge",
placeholder="Search for Software Plan"
)
self.helper = FormHelper()
self.helper.form_text_inline = True
transfer_fields = []
if self.is_existing:
transfer_fields.extend([
crispy.Field(
'active_accounts',
css_class='input-xxlarge',
placeholder="Select Active Account",
),
])
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'%s Subscription' % ('Edit' if self.is_existing
else 'New'),
account_field,
crispy.Div(*transfer_fields),
start_date_field,
end_date_field,
delay_invoice_until_field,
plan_product_field,
plan_edition_field,
plan_version_field,
domain_field,
'salesforce_contract_id',
'do_not_invoice',
'auto_generate_credits',
'service_type',
'pro_bono_status'
),
FormActions(
crispy.ButtonHolder(
crispy.Submit('set_subscription',
'%s Subscription' % ('Update' if self.is_existing else 'Create'))
)
)
)
def clean_domain(self):
domain_name = self.cleaned_data['domain']
if self.fields['domain'].required:
domain = Domain.get_by_name(domain_name)
if domain is None:
raise forms.ValidationError(_("A valid project space is required."))
return domain_name
def clean(self):
account_id = self.cleaned_data['active_accounts'] or self.cleaned_data['account']
if account_id:
account = BillingAccount.objects.get(id=account_id)
if (
not self.cleaned_data['do_not_invoice']
and (
not BillingContactInfo.objects.filter(account=account).exists()
or not account.billingcontactinfo.emails
)
):
from corehq.apps.accounting.views import ManageBillingAccountView
raise forms.ValidationError(mark_safe(_(
"Please update 'Client Contact Emails' "
'<strong><a href=%(link)s target="_blank">here</a></strong> '
"before using Billing Account <strong>%(account)s</strong>."
) % {
'link': reverse(ManageBillingAccountView.urlname, args=[account.id]),
'account': account.name,
}))
start_date = self.cleaned_data.get('start_date') or self.subscription.date_start
if (self.cleaned_data['end_date'] is not None
and start_date > self.cleaned_data['end_date']):
raise ValidationError(_("End date must be after start date."))
if self.cleaned_data['end_date'] and self.cleaned_data['end_date'] <= datetime.date.today():
raise ValidationError(_("End date must be in the future."))
return self.cleaned_data
def create_subscription(self):
account = BillingAccount.objects.get(id=self.cleaned_data['account'])
domain = self.cleaned_data['domain']
plan_version = SoftwarePlanVersion.objects.get(id=self.cleaned_data['plan_version'])
date_start = self.cleaned_data['start_date']
date_end = self.cleaned_data['end_date']
date_delay_invoicing = self.cleaned_data['delay_invoice_until']
salesforce_contract_id = self.cleaned_data['salesforce_contract_id']
do_not_invoice = self.cleaned_data['do_not_invoice']
auto_generate_credits = self.cleaned_data['auto_generate_credits']
service_type = self.cleaned_data['service_type']
pro_bono_status = self.cleaned_data['pro_bono_status']
sub = Subscription.new_domain_subscription(
account, domain, plan_version,
date_start=date_start,
date_end=date_end,
date_delay_invoicing=date_delay_invoicing,
salesforce_contract_id=salesforce_contract_id,
do_not_invoice=do_not_invoice,
auto_generate_credits=auto_generate_credits,
web_user=self.web_user,
service_type=service_type,
pro_bono_status=pro_bono_status,
internal_change=True,
)
return sub
def clean_active_accounts(self):
transfer_account = self.cleaned_data.get('active_accounts')
if transfer_account and transfer_account == self.subscription.account.id:
raise ValidationError(_("Please select an account other than the "
"current account to transfer to."))
return transfer_account
def update_subscription(self):
self.subscription.update_subscription(
date_start=self.cleaned_data['start_date'],
date_end=self.cleaned_data['end_date'],
date_delay_invoicing=self.cleaned_data['delay_invoice_until'],
do_not_invoice=self.cleaned_data['do_not_invoice'],
auto_generate_credits=self.cleaned_data['auto_generate_credits'],
salesforce_contract_id=self.cleaned_data['salesforce_contract_id'],
web_user=self.web_user,
service_type=self.cleaned_data['service_type'],
pro_bono_status=self.cleaned_data['pro_bono_status'],
)
transfer_account = self.cleaned_data.get('active_accounts')
if transfer_account:
acct = BillingAccount.objects.get(id=transfer_account)
self.subscription.account = acct
self.subscription.save()
class ChangeSubscriptionForm(forms.Form):
subscription_change_note = forms.CharField(
label=_("Note"),
required=True,
widget=forms.Textarea,
)
new_plan_product = forms.ChoiceField(
label=_("Core Product"), initial=SoftwareProductType.COMMCARE,
choices=SoftwareProductType.CHOICES,
)
new_plan_edition = forms.ChoiceField(
label=_("Edition"), initial=SoftwarePlanEdition.ENTERPRISE,
choices=SoftwarePlanEdition.CHOICES,
)
new_plan_version = forms.CharField(label=_("New Software Plan"))
new_date_end = forms.DateField(
label=_("End Date"), widget=forms.DateInput(), required=False
)
service_type = forms.ChoiceField(
label=_("Type"),
choices=SubscriptionType.CHOICES,
initial=SubscriptionType.CONTRACTED,
)
pro_bono_status = forms.ChoiceField(
label=_("Pro-Bono"),
choices=ProBonoStatus.CHOICES,
initial=ProBonoStatus.NO,
)
def __init__(self, subscription, web_user, *args, **kwargs):
self.subscription = subscription
self.web_user = web_user
super(ChangeSubscriptionForm, self).__init__(*args, **kwargs)
if self.subscription.date_end is not None:
self.fields['new_date_end'].initial = subscription.date_end
self.helper = FormHelper()
self.helper.form_class = "form-horizontal"
self.helper.layout = crispy.Layout(
crispy.Fieldset(
"Change Subscription",
crispy.Field('new_date_end', css_class="date-picker"),
'new_plan_product',
'new_plan_edition',
crispy.Field(
'new_plan_version', css_class="input-xxlarge",
placeholder="Search for Software Plan"
),
'service_type',
'pro_bono_status',
'subscription_change_note',
),
FormActions(
StrictButton(
"Change Subscription",
type="submit",
css_class="btn-primary",
),
),
)
def change_subscription(self):
new_plan_version = SoftwarePlanVersion.objects.get(id=self.cleaned_data['new_plan_version'])
return self.subscription.change_plan(
new_plan_version,
date_end=self.cleaned_data['new_date_end'],
web_user=self.web_user,
service_type=self.cleaned_data['service_type'],
pro_bono_status=self.cleaned_data['pro_bono_status'],
internal_change=True,
)
class CreditForm(forms.Form):
amount = forms.DecimalField(label="Amount (USD)")
note = forms.CharField(required=True)
rate_type = forms.ChoiceField(
label=_("Rate Type"),
choices=(
('', 'Any'),
('Product', 'Product'),
('Feature', 'Feature'),
),
required=False,
)
product_type = forms.ChoiceField(required=False, label=_("Product Type"))
feature_type = forms.ChoiceField(required=False, label=_("Feature Type"))
def __init__(self, account, subscription, *args, **kwargs):
self.account = account
self.subscription = subscription
super(CreditForm, self).__init__(*args, **kwargs)
product_choices = [('', 'Any')]
product_choices.extend(SoftwareProductType.CHOICES)
self.fields['product_type'].choices = product_choices
feature_choices = [('', 'Any')]
feature_choices.extend(FeatureType.CHOICES)
self.fields['feature_type'].choices = feature_choices
self.helper = FormHelper()
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Add Credit',
'amount',
'note',
crispy.Field('rate_type', data_bind="value: rateType"),
crispy.Div('product_type', data_bind="visible: showProduct"),
crispy.Div('feature_type', data_bind="visible: showFeature"),
),
FormActions(
crispy.ButtonHolder(
crispy.Submit('adjust_credit', 'Update Credit')
)
)
)
def clean_amount(self):
amount = self.cleaned_data['amount']
field_metadata = CreditAdjustment._meta.get_field('amount')
if amount >= 10 ** (field_metadata.max_digits - field_metadata.decimal_places):
raise ValidationError(mark_safe(_(
'Amount over maximum size. If you need support for '
'quantities this large, please <a data-toggle="modal" '
'data-target="#reportIssueModal" href="#reportIssueModal">'
'Report an Issue</a>.'
)))
return amount
def adjust_credit(self, web_user=None):
amount = self.cleaned_data['amount']
note = self.cleaned_data['note']
product_type = (self.cleaned_data['product_type']
if self.cleaned_data['rate_type'] == 'Product' else None)
feature_type = (self.cleaned_data['feature_type']
if self.cleaned_data['rate_type'] == 'Feature' else None)
CreditLine.add_credit(
amount,
account=self.account,
subscription=self.subscription,
feature_type=feature_type,
product_type=product_type,
note=note,
web_user=web_user,
)
return True
class CancelForm(forms.Form):
note = forms.CharField(
widget=forms.TextInput,
)
def __init__(self, *args, **kwargs):
super(CancelForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Cancel Subscription',
'note',
),
FormActions(
StrictButton(
'CANCEL SUBSCRIPTION',
css_class='btn-danger',
name='cancel_subscription',
type='submit',
)
),
)
class PlanInformationForm(forms.Form):
name = forms.CharField(max_length=80)
description = forms.CharField(required=False)
edition = forms.ChoiceField(choices=SoftwarePlanEdition.CHOICES)
visibility = forms.ChoiceField(choices=SoftwarePlanVisibility.CHOICES)
def __init__(self, plan, *args, **kwargs):
self.plan = plan
if plan is not None:
kwargs['initial'] = {
'name': plan.name,
'description': plan.description,
'edition': plan.edition,
'visibility': plan.visibility,
}
else:
kwargs['initial'] = {
'edition': SoftwarePlanEdition.ENTERPRISE,
'visibility': SoftwarePlanVisibility.INTERNAL,
}
super(PlanInformationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Plan Information',
'name',
'description',
'edition',
'visibility',
),
FormActions(
crispy.ButtonHolder(
crispy.Submit('plan_information',
'%s Software Plan' % ('Update' if plan is not None else 'Create'))
)
)
)
def clean_name(self):
name = self.cleaned_data['name']
        if (SoftwarePlan.objects.filter(name=name).exists()
                and (self.plan is None or self.plan.name != name)):
raise ValidationError(_('Name already taken. Please enter a new name.'))
return name
def create_plan(self):
name = self.cleaned_data['name']
description = self.cleaned_data['description']
edition = self.cleaned_data['edition']
visibility = self.cleaned_data['visibility']
plan = SoftwarePlan(name=name,
description=description,
edition=edition,
visibility=visibility)
plan.save()
return plan
def update_plan(self, plan):
plan.name = self.cleaned_data['name']
plan.description = self.cleaned_data['description']
plan.edition = self.cleaned_data['edition']
plan.visibility = self.cleaned_data['visibility']
plan.save()
class SoftwarePlanVersionForm(forms.Form):
"""
A form for updating the software plan
"""
update_version = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
feature_id = forms.CharField(
required=False,
label="Search for or Create Feature"
)
new_feature_type = forms.ChoiceField(
required=False,
choices=FeatureType.CHOICES,
)
feature_rates = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
product_id = forms.CharField(
required=False,
label="Search for or Create Product"
)
new_product_type = forms.ChoiceField(
required=False,
choices=SoftwareProductType.CHOICES,
)
product_rates = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
privileges = forms.MultipleChoiceField(
required=False,
label="Privileges",
validators=[MinLengthValidator(1)]
)
role_slug = forms.ChoiceField(
required=False,
label="Role"
)
role_type = forms.ChoiceField(
required=True,
choices=(
('existing', "Use Existing Role"),
('new', "Create New Role"),
)
)
create_new_role = forms.BooleanField(
required=False,
widget=forms.HiddenInput,
)
new_role_slug = forms.CharField(
required=False,
max_length=256,
label="New Role Slug",
)
new_role_name = forms.CharField(
required=False,
max_length=256,
label="New Role Name",
)
new_role_description = forms.CharField(
required=False,
label="New Role Description",
widget=forms.Textarea,
)
def __init__(self, plan, plan_version, *args, **kwargs):
self.plan = plan
self.plan_version = plan_version
self.is_update = False
super(SoftwarePlanVersionForm, self).__init__(*args, **kwargs)
self.fields['privileges'].choices = list(self.available_privileges)
self.fields['role_slug'].choices = [(r['slug'], "%s (%s)" % (r['name'], r['slug'])) for r in self.existing_roles]
self.helper = FormHelper()
self.helper.form_class = 'form form-horizontal'
self.helper.form_method = 'POST'
self.helper.layout = crispy.Layout(
'update_version',
crispy.Fieldset(
"Permissions",
BootstrapMultiField(
"Role Type",
crispy.Div(
data_bind="template: {"
" name: 'select-role-type-template', "
" data: role"
"}, "
),
),
crispy.Div(
BootstrapMultiField(
'Role',
InlineField('role_slug',
data_bind="value: role.existing.roleSlug",
css_class="input-xxlarge"),
crispy.Div(
data_bind="template: {"
" name: 'selected-role-privileges-template', "
" data: {"
" privileges: role.existing.selectedPrivileges,"
" hasNoPrivileges: role.existing.hasNoPrivileges"
" }"
"}, "
),
data_bind="visible: role.isRoleTypeExisting",
),
),
crispy.Div(
BootstrapMultiField(
"Privileges",
InlineField('privileges', data_bind="selectedOptions: role.new.privileges"),
crispy.Div(
data_bind="template: {"
" name: 'privileges-match-role-template', "
" data: {"
" role: role.new.matchingRole"
" },"
" if: role.new.hasMatchingRole"
"}, "
),
),
crispy.Field('create_new_role', data_bind="value: role.new.allowCreate"),
crispy.Div(
'new_role_slug',
'new_role_name',
'new_role_description',
data_bind="visible: role.new.allowCreate",
css_class="well",
),
data_bind="visible: role.isRoleTypeNew",
),
),
crispy.Fieldset(
"Features",
InlineField('feature_rates', data_bind="value: featureRates.ratesString"),
BootstrapMultiField(
"Add Feature",
InlineField('feature_id', css_class="input-xxlarge",
data_bind="value: featureRates.select2.value"),
StrictButton(
"Select Feature",
css_class="btn-primary",
data_bind="event: {click: featureRates.apply}, "
"visible: featureRates.select2.isExisting",
style="margin-left: 5px;"
),
),
crispy.Div(
css_class="alert alert-error",
data_bind="text: featureRates.error, visible: featureRates.showError"
),
BootstrapMultiField(
"Feature Type",
InlineField(
'new_feature_type',
data_bind="value: featureRates.rateType",
),
crispy.Div(
StrictButton(
"Create Feature",
css_class="btn-success",
data_bind="event: {click: featureRates.createNew}",
),
style="margin: 10px 0;"
),
data_bind="visible: featureRates.select2.isNew",
),
crispy.Div(
data_bind="template: {"
"name: 'feature-rate-form-template', foreach: featureRates.rates"
"}",
),
),
crispy.Fieldset(
"Products",
InlineField('product_rates', data_bind="value: productRates.ratesString"),
BootstrapMultiField(
"Add Product",
InlineField('product_id', css_class="input-xxlarge",
data_bind="value: productRates.select2.value"),
StrictButton(
"Select Product",
css_class="btn-primary",
data_bind="event: {click: productRates.apply}, "
"visible: productRates.select2.isExisting",
style="margin-left: 5px;"
),
),
crispy.Div(
css_class="alert alert-error",
data_bind="text: productRates.error, visible: productRates.showError",
),
BootstrapMultiField(
"Product Type",
InlineField(
'new_product_type',
data_bind="value: productRates.rateType",
),
crispy.Div(
StrictButton(
"Create Product",
css_class="btn-success",
data_bind="event: {click: productRates.createNew}",
),
style="margin: 10px 0;"
),
data_bind="visible: productRates.select2.isNew",
),
crispy.Div(
data_bind="template: {"
"name: 'product-rate-form-template', foreach: productRates.rates"
"}",
),
),
FormActions(
StrictButton(
'Update Plan Version',
css_class='btn-primary',
type="submit",
),
)
)
@property
def available_privileges(self):
for priv in privileges.MAX_PRIVILEGES:
role = Role.objects.get(slug=priv)
yield (role.slug, role.name)
@property
def existing_roles(self):
roles = set([r['role'] for r in SoftwarePlanVersion.objects.values('role').distinct()])
grant_roles = set([r['from_role'] for r in Grant.objects.filter(
to_role__slug__in=privileges.MAX_PRIVILEGES).values('from_role').distinct()])
roles = roles.union(grant_roles)
roles = [Role.objects.get(pk=r) for r in roles]
for role in roles:
yield {
'slug': role.slug,
'name': role.name,
'description': role.description,
'privileges': [(grant.to_role.slug, grant.to_role.name) for grant in role.memberships_granted.all()]
}
@property
def feature_rates_dict(self):
return {
'currentValue': self['feature_rates'].value(),
'handlerSlug': FeatureRateAsyncHandler.slug,
'select2Options': {
'fieldName': 'feature_id',
}
}
@property
def product_rates_dict(self):
return {
'currentValue': self['product_rates'].value(),
'handlerSlug': SoftwareProductRateAsyncHandler.slug,
'select2Options': {
'fieldName': 'product_id',
}
}
@property
def role_dict(self):
return {
'currentValue': self['privileges'].value(),
'multiSelectField': 'privileges',
'existingRoles': list(self.existing_roles),
'roleType': self['role_type'].value() or 'existing',
'newPrivileges': self['privileges'].value(),
'currentRoleSlug': self.plan_version.role.slug if self.plan_version is not None else None,
}
@property
@memoized
def current_features_to_rates(self):
if self.plan_version is not None:
return dict([(r.feature.id, r) for r in self.plan_version.feature_rates.all()])
else:
return {}
@property
@memoized
def current_products_to_rates(self):
if self.plan_version is not None:
return dict([(r.product.id, r) for r in self.plan_version.product_rates.all()])
else:
return {}
def _get_errors_from_subform(self, form_name, subform):
for field, field_errors in subform._errors.items():
for field_error in field_errors:
error_message = "%(form_name)s > %(field_name)s: %(error)s" % {
'form_name': form_name,
'error': field_error,
'field_name': subform[field].label,
}
yield error_message
def _retrieve_feature_rate(self, rate_form):
feature = Feature.objects.get(id=rate_form['feature_id'].value())
new_rate = rate_form.get_instance(feature)
if rate_form.is_new():
# a brand new rate
self.is_update = True
return new_rate
if feature.id not in self.current_features_to_rates.keys():
# the plan does not have this rate yet, compare any changes to the feature's current latest rate
# also mark the form as updated
current_rate = feature.get_rate(default_instance=False)
if current_rate is None:
return new_rate
self.is_update = True
else:
current_rate = self.current_features_to_rates[feature.id]
# note: custom implementation of FeatureRate.__eq__ here...
if not current_rate == new_rate:
self.is_update = True
return new_rate
return current_rate
def _retrieve_product_rate(self, rate_form):
product = SoftwareProduct.objects.get(id=rate_form['product_id'].value())
new_rate = rate_form.get_instance(product)
if rate_form.is_new():
# a brand new rate
self.is_update = True
return new_rate
if product.id not in self.current_products_to_rates.keys():
# the plan does not have this rate yet, compare any changes to the feature's current latest rate
# also mark the form as updated
current_rate = product.get_rate(default_instance=False)
if current_rate is None:
return new_rate
self.is_update = True
else:
current_rate = self.current_products_to_rates[product.id]
# note: custom implementation of SoftwareProductRate.__eq__ here...
if not current_rate == new_rate:
self.is_update = True
return new_rate
return current_rate
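    # Note on _retrieve_feature_rate / _retrieve_product_rate above: both follow
    # the same pattern. A rate submitted without a rate_id is brand new, so the
    # form is marked as an update and the new instance is returned. If the
    # feature/product is not yet on this plan version, the new instance is
    # compared to that object's current latest rate; otherwise it is compared to
    # the rate already attached to the plan version. Whenever the comparison
    # differs (via the model's custom __eq__), is_update is set so save() creates
    # a new SoftwarePlanVersion instead of keeping the current one.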
def clean_feature_rates(self):
original_data = self.cleaned_data['feature_rates']
rates = json.loads(original_data)
rate_instances = []
errors = ErrorList()
for rate_data in rates:
rate_form = FeatureRateForm(rate_data)
if not rate_form.is_valid():
errors.extend(list(self._get_errors_from_subform(rate_data['name'], rate_form)))
else:
rate_instances.append(self._retrieve_feature_rate(rate_form))
if errors:
self._errors.setdefault('feature_rates', errors)
required_types = dict(FeatureType.CHOICES).keys()
feature_types = [r.feature.feature_type for r in rate_instances]
if any([feature_types.count(t) != 1 for t in required_types]):
raise ValidationError(_(
"You must specify exactly one rate per feature type "
"(SMS, USER, etc.)"
))
self.new_feature_rates = rate_instances
rate_ids = lambda x: set([r.id for r in x])
if (not self.is_update
and (self.plan_version is None
or rate_ids(rate_instances).symmetric_difference(rate_ids(self.plan_version.feature_rates.all())))):
self.is_update = True
return original_data
def clean_product_rates(self):
original_data = self.cleaned_data['product_rates']
rates = json.loads(original_data)
rate_instances = []
errors = ErrorList()
if not rates:
raise ValidationError(_("You must specify at least one product rate."))
for rate_data in rates:
rate_form = ProductRateForm(rate_data)
if not rate_form.is_valid():
errors.extend(list(self._get_errors_from_subform(rate_data['name'], rate_form)))
else:
rate_instances.append(self._retrieve_product_rate(rate_form))
if errors:
self._errors.setdefault('product_rates', errors)
available_types = dict(SoftwareProductType.CHOICES).keys()
product_types = [r.product.product_type for r in rate_instances]
if any([product_types.count(p) > 1 for p in available_types]):
raise ValidationError(_(
"You may have at most ONE rate per product type "
"(CommCare, CommCare Supply, etc.)"
))
self.new_product_rates = rate_instances
rate_ids = lambda x: set([r.id for r in x])
if (not self.is_update
and (self.plan_version is None
or rate_ids(rate_instances).symmetric_difference(rate_ids(self.plan_version.product_rates.all())))):
self.is_update = True
return original_data
def clean_create_new_role(self):
val = self.cleaned_data['create_new_role']
if val:
self.is_update = True
return val
def clean_role_slug(self):
role_slug = self.cleaned_data['role_slug']
if self.plan_version is None or role_slug != self.plan_version.role.slug:
self.is_update = True
return role_slug
def clean_new_role_slug(self):
val = self.cleaned_data['new_role_slug']
if self.cleaned_data['create_new_role'] and not val:
raise ValidationError(_("A slug is required for this new role."))
if val:
validate_slug(val)
if Role.objects.filter(slug=val).count() != 0:
raise ValidationError(_("Enter a unique role slug."))
return val
def clean_new_role_name(self):
val = self.cleaned_data['new_role_name']
if self.cleaned_data['create_new_role'] and not val:
raise ValidationError(_("A name is required for this new role."))
return val
def save(self, request):
if not self.is_update:
messages.info(request, "No changes to rates and roles were present, so the current version was kept.")
return
if self.cleaned_data['create_new_role']:
role = Role.objects.create(
slug=self.cleaned_data['new_role_slug'],
name=self.cleaned_data['new_role_name'],
description=self.cleaned_data['new_role_description'],
)
for privilege in self.cleaned_data['privileges']:
privilege = Role.objects.get(slug=privilege)
Grant.objects.create(
from_role=role,
to_role=privilege,
)
else:
role = Role.objects.get(slug=self.cleaned_data['role_slug'])
new_version = SoftwarePlanVersion(
plan=self.plan,
role=role
)
new_version.save()
for feature_rate in self.new_feature_rates:
feature_rate.save()
new_version.feature_rates.add(feature_rate)
for product_rate in self.new_product_rates:
product_rate.save()
new_version.product_rates.add(product_rate)
new_version.save()
messages.success(request, 'The version for %s Software Plan was successfully updated.' % new_version.plan.name)
class FeatureRateForm(forms.ModelForm):
"""
A form for creating a new FeatureRate.
"""
# feature id will point to a select2 field, hence the CharField here.
feature_id = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
rate_id = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
class Meta:
model = FeatureRate
fields = ['monthly_fee', 'monthly_limit', 'per_excess_fee']
def __init__(self, data=None, *args, **kwargs):
super(FeatureRateForm, self).__init__(data, *args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = crispy.Layout(
crispy.HTML("""
<h4><span data-bind="text: name"></span>
<span class="label"
style="display: inline-block; margin: 0 10px;"
data-bind="text: feature_type"></span></h4>
<hr />
"""),
crispy.Field('feature_id', data_bind="value: feature_id"),
crispy.Field('rate_id', data_bind="value: rate_id"),
crispy.Field('monthly_fee', data_bind="value: monthly_fee"),
crispy.Field('monthly_limit', data_bind="value: monthly_limit"),
crispy.Div(
crispy.Field('per_excess_fee',
data_bind="value: per_excess_fee"),
data_bind="visible: isPerExcessVisible",
),
)
def is_new(self):
return not self['rate_id'].value()
def get_instance(self, feature):
instance = self.save(commit=False)
instance.feature = feature
return instance
class ProductRateForm(forms.ModelForm):
"""
A form for creating a new ProductRate.
"""
# product id will point to a select2 field, hence the CharField here.
product_id = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
rate_id = forms.CharField(
required=False,
widget=forms.HiddenInput,
)
class Meta:
model = SoftwareProductRate
fields = ['monthly_fee']
def __init__(self, data=None, *args, **kwargs):
super(ProductRateForm, self).__init__(data, *args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = crispy.Layout(
crispy.HTML("""
<h4><span data-bind="text: name"></span>
<span class="label"
style="display: inline-block; margin: 0 10px;"
data-bind="text: product_type"></span></h4>
<hr />
"""),
crispy.Field('monthly_fee', data_bind="value: monthly_fee"),
)
def is_new(self):
return not self['rate_id'].value()
def get_instance(self, product):
instance = self.save(commit=False)
instance.product = product
return instance
class EnterprisePlanContactForm(forms.Form):
name = forms.CharField(
label=ugettext_noop("Name")
)
company_name = forms.CharField(
required=False,
label=ugettext_noop("Company / Organization")
)
message = forms.CharField(
required=False,
label=ugettext_noop("Message"),
widget=forms.Textarea
)
def __init__(self, domain, web_user, data=None, *args, **kwargs):
self.domain = domain
self.web_user = web_user
super(EnterprisePlanContactForm, self).__init__(data, *args, **kwargs)
from corehq.apps.domain.views import SelectPlanView
self.helper = FormHelper()
self.helper.form_class = "form form-horizontal"
self.helper.layout = crispy.Layout(
'name',
'company_name',
'message',
FormActions(
StrictButton(
_("Request Quote"),
type="submit",
css_class="btn-primary",
),
crispy.HTML('<a href="%(url)s" class="btn">%(title)s</a>' % {
'url': reverse(SelectPlanView.urlname, args=[self.domain]),
'title': ugettext("Select different plan"),
}),
)
)
def send_message(self):
subject = "[Enterprise Plan Request] %s" % self.domain
context = {
'name': self.cleaned_data['name'],
'company': self.cleaned_data['company_name'],
'message': self.cleaned_data['message'],
'domain': self.domain,
'email': self.web_user.email
}
html_content = render_to_string('accounting/enterprise_request_email.html', context)
text_content = """
Email: %(email)s
Name: %(name)s
Company: %(company)s
Domain: %(domain)s
Message:
%(message)s
""" % context
send_HTML_email(subject, settings.BILLING_EMAIL, html_content, text_content,
email_from=settings.DEFAULT_FROM_EMAIL)
class TriggerInvoiceForm(forms.Form):
month = forms.ChoiceField(label="Invoice Month")
year = forms.ChoiceField(label="Invoice Year")
domain = forms.CharField(label="Invoiced Project")
def __init__(self, *args, **kwargs):
super(TriggerInvoiceForm, self).__init__(*args, **kwargs)
today = datetime.date.today()
self.fields['month'].initial = today.month
self.fields['month'].choices = MONTHS.items()
self.fields['year'].initial = today.year
self.fields['year'].choices = [
(y, y) for y in range(today.year, 2012, -1)
]
self.helper = FormHelper()
self.helper.form_class = 'form form-horizontal'
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Trigger Invoice Details',
crispy.Field('month', css_class="input-large"),
crispy.Field('year', css_class="input-large"),
crispy.Field('domain', css_class="input-xxlarge",
placeholder="Search for Project")
),
FormActions(
StrictButton(
"Trigger Invoice",
css_class="btn-primary",
type="submit",
),
)
)
def trigger_invoice(self):
year = int(self.cleaned_data['year'])
month = int(self.cleaned_data['month'])
invoice_start, invoice_end = get_first_last_days(year, month)
domain = Domain.get_by_name(self.cleaned_data['domain'])
self.clean_previous_invoices(invoice_start, invoice_end, domain.name)
invoice_factory = DomainInvoiceFactory(invoice_start, invoice_end, domain)
invoice_factory.create_invoices()
def clean_previous_invoices(self, invoice_start, invoice_end, domain_name):
last_generated_invoices = Invoice.objects.filter(
date_start__lte=invoice_end, date_end__gte=invoice_start,
subscription__subscriber__domain=domain_name
).all()
for invoice in last_generated_invoices:
for record in invoice.billingrecord_set.all():
record.pdf.delete()
record.delete()
invoice.subscriptionadjustment_set.all().delete()
invoice.creditadjustment_set.all().delete()
try:
invoice.lineitem_set.all().delete()
except ProtectedError:
# this will happen if there were any credits generated.
# Leave in for now, as it's just for testing purposes.
pass
try:
# we want to get rid of as many old community subscriptions from that month
# as testing will allow.
if invoice.subscription.plan_version.plan.edition == SoftwarePlanEdition.COMMUNITY:
community_sub = invoice.subscription
community_sub.subscriptionadjustment_set.all().delete()
community_sub.subscriptionadjustment_related.all().delete()
community_sub.creditline_set.all().delete()
invoice.delete()
try:
community_sub.delete()
except ProtectedError:
pass
else:
invoice.delete()
except ProtectedError:
# this will happen for credit lines applied to invoices' line items. We don't
# want to throw away the credit lines, as that will affect testing totals
invoice.is_hidden = True
invoice.save()
class TriggerBookkeeperEmailForm(forms.Form):
month = forms.ChoiceField(label="Invoice Month")
year = forms.ChoiceField(label="Invoice Year")
emails = forms.CharField(label="Email To")
def __init__(self, *args, **kwargs):
super(TriggerBookkeeperEmailForm, self).__init__(*args, **kwargs)
today = datetime.date.today()
self.fields['month'].initial = today.month
self.fields['month'].choices = MONTHS.items()
self.fields['year'].initial = today.year
self.fields['year'].choices = [
(y, y) for y in range(today.year, 2012, -1)
]
self.helper = FormHelper()
self.helper.form_class = 'form form-horizontal'
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Trigger Bookkeeper Email Details',
crispy.Field('emails', css_class='input-xxlarge'),
crispy.Field('month', css_class="input-large"),
crispy.Field('year', css_class="input-large"),
),
FormActions(
StrictButton(
"Trigger Bookkeeper Email",
css_class="btn-primary",
type="submit",
),
)
)
def trigger_email(self):
from corehq.apps.accounting.tasks import send_bookkeeper_email
send_bookkeeper_email(
month=int(self.cleaned_data['month']),
year=int(self.cleaned_data['year']),
emails=self.cleaned_data['emails'].split(',')
)
class TestReminderEmailFrom(forms.Form):
days = forms.ChoiceField(
label="Days Until Subscription Ends",
choices=(
(1, 1),
(10, 10),
(30, 30),
)
)
def __init__(self, *args, **kwargs):
super(TestReminderEmailFrom, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form form-horizontal'
self.helper.layout = crispy.Layout(
crispy.Fieldset(
"Test Subscription Reminder Emails",
'days',
),
crispy.Div(
crispy.HTML(
"Note that this will ONLY send emails to a billing admin "
"for a domain IF the billing admin is an Accounting "
"Previewer."
),
css_class="alert alert-info"
),
FormActions(
StrictButton(
"Send Reminder Emails",
type="submit",
css_class='btn-primary'
)
)
)
def send_emails(self):
send_subscription_reminder_emails(int(self.cleaned_data['days']))
class AdjustBalanceForm(forms.Form):
adjustment_type = forms.ChoiceField(
widget=forms.RadioSelect,
)
custom_amount = forms.DecimalField(
required=False,
)
method = forms.ChoiceField(
choices=CreditAdjustmentReason.CHOICES,
)
note = forms.CharField(
required=False,
widget=forms.Textarea,
)
invoice_id = forms.CharField(
widget=forms.HiddenInput(),
)
def __init__(self, invoice, *args, **kwargs):
self.invoice = invoice
super(AdjustBalanceForm, self).__init__(*args, **kwargs)
self.fields['adjustment_type'].choices = (
('current', 'Add Credit of Current Balance: %s' %
get_money_str(self.invoice.balance)),
('credit', 'Add CREDIT of Custom Amount'),
('debit', 'Add DEBIT of Custom Amount'),
)
self.fields['invoice_id'].initial = invoice.id
self.helper = FormHelper()
self.helper.form_class = "form-horizontal"
self.helper.form_action = reverse('invoice_summary', args=[self.invoice.id])
self.helper.layout = crispy.Layout(
crispy.Div(
crispy.Field(
'adjustment_type',
data_bind="checked: adjustmentType",
),
crispy.HTML('''
<div id="div_id_custom_amount" class="control-group"
data-bind="visible: showCustomAmount">
<label for="id_custom_amount" class="control-label">
Custom amount
</label>
<div class="controls">
<input class="textinput textInput"
id="id_custom_amount" name="custom_amount"
type="number" step="any">
</div>
</div>
'''),
crispy.Field('method'),
crispy.Field('note'),
crispy.Field('invoice_id'),
css_class='modal-body',
css_id="adjust-balance-form-%d" % invoice.id
),
FormActions(
crispy.ButtonHolder(
crispy.Submit(
'adjust_balance',
'Apply',
data_loading_text='Submitting...',
),
crispy.Button(
'close',
'Close',
data_dismiss='modal',
),
),
css_class='modal-footer',
),
)
@property
@memoized
def amount(self):
adjustment_type = self.cleaned_data['adjustment_type']
if adjustment_type == 'current':
return self.invoice.balance
elif adjustment_type == 'credit':
return Decimal(self.cleaned_data['custom_amount'])
elif adjustment_type == 'debit':
return -Decimal(self.cleaned_data['custom_amount'])
else:
raise ValidationError(_("Received invalid adjustment type: %s")
% adjustment_type)
def adjust_balance(self, web_user=None):
CreditLine.add_credit(
-self.amount,
account=self.invoice.subscription.account,
subscription=self.invoice.subscription,
note=self.cleaned_data['note'],
invoice=self.invoice,
reason=self.cleaned_data['method'],
web_user=web_user,
)
self.invoice.update_balance()
self.invoice.save()
class InvoiceInfoForm(forms.Form):
subscription = forms.CharField()
project = forms.CharField()
account = forms.CharField()
current_balance = forms.CharField()
def __init__(self, invoice, *args, **kwargs):
self.invoice = invoice
subscription = invoice.subscription if not invoice.is_wire else None
super(InvoiceInfoForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
from corehq.apps.accounting.views import (
EditSubscriptionView,
ManageBillingAccountView,
)
if not invoice.is_wire:
subscription_link = mark_safe(make_anchor_tag(
reverse(EditSubscriptionView.urlname, args=(subscription.id,)),
u'{plan_name} ({start_date} - {end_date})'.format(
plan_name=subscription.plan_version,
start_date=subscription.date_start,
end_date=subscription.date_end,
)
))
else:
subscription_link = 'N/A'
self.helper.layout = crispy.Layout(
crispy.Fieldset(
                '{}Invoice #{}'.format('Wire ' if invoice.is_wire else '', invoice.invoice_number),
TextField(
'subscription',
subscription_link
),
TextField(
'project',
invoice.get_domain(),
),
TextField(
'account',
mark_safe(
'<a href="%(account_link)s">'
'%(account_name)s'
'</a>' % {
'account_link': reverse(
ManageBillingAccountView.urlname,
args=(invoice.account.id,)
),
'account_name': invoice.account.name,
}
),
),
TextField(
'current_balance',
get_money_str(invoice.balance),
),
crispy.ButtonHolder(
crispy.Button(
'submit',
'Adjust Balance',
data_toggle='modal',
data_target='#adjustBalanceModal-%d' % invoice.id,
css_class='disabled' if invoice.is_wire else '',
),
),
),
)
class ResendEmailForm(forms.Form):
additional_recipients = forms.CharField(
label="Additional Recipients:",
required=False,
)
def __init__(self, invoice, *args, **kwargs):
self.invoice = invoice
super(ResendEmailForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.layout = crispy.Layout(
crispy.Div(
crispy.HTML(
'This will send an email to: %s.' %
', '.join(invoice.email_recipients)
),
crispy.Field('additional_recipients'),
css_class='modal-body',
),
FormActions(
crispy.ButtonHolder(
crispy.Submit(
'resend_email',
'Send Email',
data_loading_text='Submitting...',
),
crispy.Button(
'close',
'Close',
data_dismiss='modal',
),
),
css_class='modal-footer',
),
)
def clean_additional_recipients(self):
        return [
            email.strip()
            for email in self.cleaned_data['additional_recipients'].split(',')
            if email.strip()
        ]
def resend_email(self):
contact_emails = self.invoice.email_recipients
contact_emails += self.cleaned_data['additional_recipients']
if self.invoice.is_wire:
record = WireBillingRecord.generate_record(self.invoice)
else:
record = BillingRecord.generate_record(self.invoice)
record.send_email(contact_emails=contact_emails)
class SuppressInvoiceForm(forms.Form):
submit_kwarg = 'suppress_invoice'
def __init__(self, invoice, *args, **kwargs):
self.invoice = invoice
super(SuppressInvoiceForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.layout = crispy.Layout(
crispy.Fieldset(
'Suppress invoice from all reports and user-facing statements',
crispy.Div(
crispy.HTML('Warning: this can only be undone by a developer.'),
css_class='alert alert-error',
)
),
FormActions(
StrictButton(
'Suppress Invoice',
css_class='btn-danger',
name=self.submit_kwarg,
type='submit',
),
),
)
def suppress_invoice(self):
self.invoice.is_hidden_to_ops = True
self.invoice.save()
class CreateAdminForm(forms.Form):
username = forms.CharField(
required=False,
)
def __init__(self, *args, **kwargs):
super(CreateAdminForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
        self.helper.form_show_labels = False
self.helper.form_style = 'inline'
self.helper.layout = crispy.Layout(
InlineField(
'username',
css_id="select-admin-username",
),
StrictButton(
mark_safe('<i class="icon-plus"></i> %s' % "Add Admin"),
css_class="btn-success",
type="submit",
)
)
def add_admin_user(self):
# create UserRole for user
username = self.cleaned_data['username']
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CreateAccountingAdminError(
"User '%s' does not exist" % username
)
web_user = WebUser.get_by_username(username)
if not web_user or not web_user.is_superuser:
raise CreateAccountingAdminError(
"The user '%s' is not a superuser." % username,
)
try:
user_role = UserRole.objects.get(user=user)
except UserRole.DoesNotExist:
user_privs = Role.objects.get_or_create(
name="Privileges for %s" % user.username,
slug="%s_privileges" % user.username,
)[0]
user_role = UserRole.objects.create(
user=user,
role=user_privs,
)
ops_role = Role.objects.get(slug=privileges.OPERATIONS_TEAM)
if not user_role.role.has_privilege(ops_role):
Grant.objects.create(from_role=user_role.role, to_role=ops_role)
return user
| {
"content_hash": "5db95d00814d2d42bbb9b2f82461ac7f",
"timestamp": "",
"source": "github",
"line_count": 1962,
"max_line_length": 121,
"avg_line_length": 37.95514780835882,
"alnum_prop": 0.5321211795670624,
"repo_name": "puttarajubr/commcare-hq",
"id": "b0834745ec6789ebf4986f2d0484a02bb50cc99c",
"size": "74468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/accounting/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
from datetime import date, datetime, time, timedelta
from django.contrib.auth.models import User
from django_lean.lean_retention.models import DailyActivity
def sort_retention_periods(retention_periods):
result = list(sorted(set(p + 0 for p in retention_periods)))
if result and result[0] < 1:
        raise ValueError('retention_periods must be at least one day, '
                         'not %s' % result[0])
return result
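# Illustrative behaviour of sort_retention_periods (not part of the original
# module): duplicates are dropped and the result is sorted ascending, e.g.
# sort_retention_periods([30, 7, 7, 1]) would be expected to return [1, 7, 30],
# while sort_retention_periods([0, 7]) raises ValueError.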
class Period(object):
def __init__(self, cohort, start_day, end_day):
self.cohort = cohort
self.start_day = start_day
self.end_day = end_day
if self.start_day < 1:
raise ValueError("start day '%s' must be >= 1" % self.start_day)
if self.start_day >= self.end_day:
raise ValueError("start day '%s' must be before end day '%s'" %
(self.start_day, self.end_day))
self._activities = None
self._users = None
def length(self):
return self.end_day - self.start_day
@property
def activities(self):
if self._activities is None:
self._activities = DailyActivity.objects.filter(
user__in=self.cohort.users,
days__range=(self.start_day, self.end_day - 1)
)
return self._activities
@property
def users(self):
if self._users is None:
self._users = User.objects.filter(
id__in=self.activities.values('user')
)
return self._users
@classmethod
def periods(cls, cohort, retention_periods):
last = 1
for period in sort_retention_periods(retention_periods):
yield cls(cohort=cohort, start_day=last, end_day=period)
last = period
class Cohort(object):
def __init__(self, start_date, end_date, retention_periods,
period_class=Period):
if hasattr(start_date, 'date'):
start_date = start_date.date() # Convert to a datetime.date
if hasattr(end_date, 'date'):
end_date = end_date.date() # Convert to a datetime.date
self.start_date = start_date
self.end_date = end_date
if self.start_date > self.end_date:
raise ValueError("start date '%s' cannot be after end date '%s'" %
(self.start_date, self.end_date))
self.retention_periods = sort_retention_periods(retention_periods)
        self._Period = period_class
self._periods = None
self._users = None
@property
def periods(self):
if self._periods is None:
self._periods = list(
self._Period.periods(cohort=self,
retention_periods=self.retention_periods)
)
return self._periods
@property
def users(self):
if self._users is None:
start = datetime.combine(self.start_date, time(0, 0, 0))
end = datetime.combine(self.end_date, time(23, 59, 59))
self._users = User.objects.filter(date_joined__range=(start, end))
return self._users
@classmethod
def cohorts(cls, end_date, length, retention_periods,
period_class=Period):
if hasattr(end_date, 'date'):
end_date = end_date.date() # Convert to a datetime.date
# The end date falls on the last day of the shortest retention period
retention_periods = sort_retention_periods(retention_periods)
min_period = retention_periods[0] if retention_periods else 0
end_date -= timedelta(days=min_period)
# The start date
one_day = timedelta(days=1)
start_date = end_date - timedelta(days=length) + one_day
# Generate the stream of cohorts, walking backwards
try:
while True:
                yield cls(start_date=start_date, end_date=end_date,
                          retention_periods=retention_periods,
                          period_class=period_class)
# Walk backwards
start_date -= one_day
end_date -= one_day
        except OverflowError:
            # We cannot go further back in time; simply end the generator
            # (raising StopIteration inside a generator is treated as an error
            # under PEP 479 on newer Pythons).
            return
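# Illustrative usage sketch (not part of the original module); the numbers
# below are examples only:
#
#     from itertools import islice
#     recent = list(islice(
#         Cohort.cohorts(end_date=date.today(), length=7,
#                        retention_periods=[1, 7, 30]),
#         30))
#
# would yield the 30 most recent week-long cohorts, each exposing .users and
# .periods for retention reporting.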
| {
"content_hash": "64439eeeb578bd928d02c5f3ef81150b",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 37.3125,
"alnum_prop": 0.5793251974156497,
"repo_name": "e-loue/django-lean",
"id": "e9473cf77a6cf64fe4f57c61e49427213984b433",
"size": "4179",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django_lean/lean_retention/reports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2013"
},
{
"name": "Python",
"bytes": "300892"
}
],
"symlink_target": ""
} |
import abc
from oslo.config import cfg
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as qexception
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.service_base import ServicePluginBase
LOG = logging.getLogger(__name__)
# Firewall Exceptions
class FirewallNotFound(qexception.NotFound):
message = _("Firewall %(firewall_id)s could not be found.")
class FirewallInUse(qexception.InUse):
message = _("Firewall %(firewall_id)s is still active.")
class FirewallInPendingState(qexception.Conflict):
message = _("Operation cannot be performed since associated Firewall "
"%(firewall_id)s is in %(pending_state)s.")
class FirewallPolicyNotFound(qexception.NotFound):
message = _("Firewall Policy %(firewall_policy_id)s could not be found.")
class FirewallPolicyInUse(qexception.InUse):
message = _("Firewall Policy %(firewall_policy_id)s is being used.")
class FirewallRuleNotFound(qexception.NotFound):
message = _("Firewall Rule %(firewall_rule_id)s could not be found.")
class FirewallRuleInUse(qexception.InUse):
message = _("Firewall Rule %(firewall_rule_id)s is being used.")
class FirewallRuleNotAssociatedWithPolicy(qexception.InvalidInput):
message = _("Firewall Rule %(firewall_rule_id)s is not associated "
" with Firewall Policy %(firewall_policy_id)s.")
class FirewallRuleInvalidProtocol(qexception.InvalidInput):
message = _("Firewall Rule protocol %(protocol)s is not supported. "
"Only protocol values %(values)s and their integer "
"representation (0 to 255) are supported.")
class FirewallRuleInvalidAction(qexception.InvalidInput):
message = _("Firewall rule action %(action)s is not supported. "
"Only action values %(values)s are supported.")
class FirewallRuleWithPortWithoutProtocolInvalid(qexception.InvalidInput):
message = _("Source/destination port requires a protocol")
class FirewallInvalidPortValue(qexception.InvalidInput):
message = _("Invalid value for port %(port)s.")
class FirewallRuleInfoMissing(qexception.InvalidInput):
message = _("Missing rule info argument for insert/remove "
"rule operation.")
class FirewallInternalDriverError(qexception.NeutronException):
"""Fwaas exception for all driver errors.
On any failure or exception in the driver, driver should log it and
raise this exception to the agent
"""
message = _("%(driver)s: Internal driver error.")
fw_valid_protocol_values = [None, constants.TCP, constants.UDP, constants.ICMP]
fw_valid_action_values = [constants.FWAAS_ALLOW, constants.FWAAS_DENY]
def convert_protocol(value):
if value is None:
return
if value.isdigit():
val = int(value)
if 0 <= val <= 255:
return val
else:
            raise FirewallRuleInvalidProtocol(
                protocol=value, values=fw_valid_protocol_values)
elif value.lower() in fw_valid_protocol_values:
return value.lower()
else:
        raise FirewallRuleInvalidProtocol(
            protocol=value, values=fw_valid_protocol_values)
def convert_action_to_case_insensitive(value):
if value is None:
return
else:
return value.lower()
def convert_port_to_string(value):
if value is None:
return
else:
return str(value)
def _validate_port_range(data, key_specs=None):
if data is None:
return
data = str(data)
ports = data.split(':')
for p in ports:
try:
val = int(p)
except (ValueError, TypeError):
msg = _("Port '%s' is not a valid number") % p
LOG.debug(msg)
return msg
if val <= 0 or val > 65535:
msg = _("Invalid port '%s'") % p
LOG.debug(msg)
return msg
def _validate_ip_or_subnet_or_none(data, valid_values=None):
if data is None:
return None
msg_ip = attr._validate_ip_address(data, valid_values)
if not msg_ip:
return
msg_subnet = attr._validate_subnet(data, valid_values)
if not msg_subnet:
return
return _("%(msg_ip)s and %(msg_subnet)s") % {'msg_ip': msg_ip,
'msg_subnet': msg_subnet}
attr.validators['type:port_range'] = _validate_port_range
attr.validators['type:ip_or_subnet_or_none'] = _validate_ip_or_subnet_or_none
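# For reference, based on the validators defined above: 'type:port_range'
# accepts None, a single port such as '80', or a 'low:high' range such as
# '1000:2000' (each value must be 1-65535); 'type:ip_or_subnet_or_none'
# accepts None, an IP address such as '10.0.0.1', or a CIDR such as
# '10.0.0.0/24'.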
RESOURCE_ATTRIBUTE_MAP = {
'firewall_rules': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True, 'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'firewall_policy_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True},
'shared': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'protocol': {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'convert_to': convert_protocol,
'validate': {'type:values': fw_valid_protocol_values}},
'ip_version': {'allow_post': True, 'allow_put': True,
'default': 4, 'convert_to': attr.convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'source_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_or_subnet_or_none': None},
'is_visible': True, 'default': None},
'destination_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_or_subnet_or_none':
None},
'is_visible': True, 'default': None},
'source_port': {'allow_post': True, 'allow_put': True,
'validate': {'type:port_range': None},
'convert_to': convert_port_to_string,
'default': None, 'is_visible': True},
'destination_port': {'allow_post': True, 'allow_put': True,
'validate': {'type:port_range': None},
'convert_to': convert_port_to_string,
'default': None, 'is_visible': True},
'position': {'allow_post': False, 'allow_put': False,
'default': None, 'is_visible': True},
'action': {'allow_post': True, 'allow_put': True,
'convert_to': convert_action_to_case_insensitive,
'validate': {'type:values': fw_valid_action_values},
'is_visible': True, 'default': 'deny'},
'enabled': {'allow_post': True, 'allow_put': True,
'default': True, 'convert_to': attr.convert_to_boolean,
'is_visible': True},
},
'firewall_policies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'shared': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'firewall_rules': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': attr.convert_none_to_empty_list,
'default': None, 'is_visible': True},
'audited': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True},
},
'firewalls': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'shared': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': False, 'required_by_policy': True,
'enforce_policy': True},
'firewall_policy_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True},
},
}
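# The attribute map above is what validates API payloads for these resources.
# As an illustrative (not exhaustive) example, a firewall_rule create body
# could look like:
#
#     {"firewall_rule": {"name": "allow-http", "protocol": "tcp",
#                        "destination_port": "80", "action": "allow"}}
#
# Attributes omitted from the request fall back to the defaults declared above.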
firewall_quota_opts = [
cfg.IntOpt('quota_firewall',
default=1,
help=_('Number of firewalls allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_policy',
default=1,
help=_('Number of firewall policies allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_rule',
default=-1,
help=_('Number of firewall rules allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')
class Firewall(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Firewall service"
@classmethod
def get_alias(cls):
return "fwaas"
@classmethod
def get_description(cls):
return "Extension for Firewall service"
@classmethod
def get_namespace(cls):
return "http://wiki.openstack.org/Neutron/FWaaS/API_1.0"
@classmethod
def get_updated(cls):
return "2013-02-25T10:00:00-00:00"
@classmethod
def get_resources(cls):
special_mappings = {'firewall_policies': 'firewall_policy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
action_map = {'firewall_policy': {'insert_rule': 'PUT',
'remove_rule': 'PUT'}}
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.FIREWALL,
action_map=action_map)
@classmethod
def get_plugin_interface(cls):
return FirewallPluginBase
def update_attributes_map(self, attributes):
super(Firewall, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class FirewallPluginBase(ServicePluginBase):
def get_plugin_name(self):
return constants.FIREWALL
def get_plugin_type(self):
return constants.FIREWALL
def get_plugin_description(self):
return 'Firewall service plugin'
@abc.abstractmethod
def get_firewalls(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_firewall(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_firewall(self, context, firewall):
pass
@abc.abstractmethod
def update_firewall(self, context, id, firewall):
pass
@abc.abstractmethod
def delete_firewall(self, context, id):
pass
@abc.abstractmethod
def get_firewall_rules(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_firewall_rule(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_firewall_rule(self, context, firewall_rule):
pass
@abc.abstractmethod
def update_firewall_rule(self, context, id, firewall_rule):
pass
@abc.abstractmethod
def delete_firewall_rule(self, context, id):
pass
@abc.abstractmethod
def get_firewall_policy(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_policies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def create_firewall_policy(self, context, firewall_policy):
pass
@abc.abstractmethod
def update_firewall_policy(self, context, id, firewall_policy):
pass
@abc.abstractmethod
def delete_firewall_policy(self, context, id):
pass
@abc.abstractmethod
def insert_rule(self, context, id, rule_info):
pass
@abc.abstractmethod
def remove_rule(self, context, id, rule_info):
pass
| {
"content_hash": "90d0a181809dc1e96d9e27282f91b9b8",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 79,
"avg_line_length": 36.37772397094431,
"alnum_prop": 0.5561102236421726,
"repo_name": "sajuptpm/neutron-ipam",
"id": "b7ee906fd58667b8f4c5c454f63586edd3af5e55",
"size": "15793",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/icehouse",
"path": "neutron/extensions/firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "9102565"
},
{
"name": "Shell",
"bytes": "9603"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import argparse
import tempfile
import os
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from urllib.parse import urlparse
from prepare_release_candidate import run
from http.client import HTTPConnection
DEFAULT_PLUGINS = ["analysis-icu",
"analysis-kuromoji",
"analysis-phonetic",
"analysis-smartcn",
"analysis-stempel",
"discovery-azure-classic",
"discovery-ec2",
"discovery-file",
"discovery-gce",
"ingest-attachment",
"ingest-geoip",
"lang-javascript",
"lang-python",
"mapper-attachments",
"mapper-murmur3",
"mapper-size",
"repository-azure",
"repository-gcs",
"repository-hdfs",
"repository-s3",
"store-smb"]
try:
JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""")
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
def sha1(file):
with open(file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def read_fully(file):
with open(file, encoding='utf-8') as f:
return f.read()
def wait_for_node_startup(es_dir, timeout=60, header={}):
print(' Waiting until node becomes available for at most %s seconds' % timeout)
for _ in range(timeout):
conn = None
try:
time.sleep(1)
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=1)
conn.request('GET', '/', headers=header)
res = conn.getresponse()
if res.status == 200:
return True
        except IOError:
            # that is ok, it might not be there yet
            pass
finally:
if conn:
conn.close()
return False
def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS):
print('Downloading and verifying release %s from %s' % (version, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
print(' ' + '*' * 80)
for file in files:
name = os.path.basename(file)
print(' Smoketest file: %s' % name)
url = '%s/%s' % (base_url, file)
print(' Downloading %s' % (url))
artifact_path = os.path.join(tmp_dir, file)
downloaded_files.append(artifact_path)
current_artifact_dir = os.path.dirname(artifact_path)
os.makedirs(current_artifact_dir)
urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
sha1_url = ''.join([url, '.sha1'])
checksum_file = artifact_path + ".sha1"
print(' Downloading %s' % (sha1_url))
urllib.request.urlretrieve(sha1_url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
expected = read_fully(checksum_file)
actual = sha1(artifact_path)
            if expected != actual:
raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
gpg_url = ''.join([url, '.asc'])
gpg_file = artifact_path + ".asc"
print(' Downloading %s' % (gpg_url))
urllib.request.urlretrieve(gpg_url, gpg_file)
print(' Verifying gpg signature %s' % (gpg_file))
            # here we create a temp gpg home and download the release key into it as the only key.
            # verification will then fail if the signing key is not in that keystore, and this
            # way we keep the executing host unmodified since we don't have to import the key into the default keystore
gpg_home_dir = os.path.join(current_artifact_dir, "gpg_home_dir")
os.makedirs(gpg_home_dir, 0o700)
run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
print(' ' + '*' * 80)
print()
smoke_test_release(version, downloaded_files, hash, plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def get_host_from_ports_file(es_dir):
return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]
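# The ports file written by the node (logs/http.ports) is expected to contain
# one "host:port" entry per line, e.g. something like '127.0.0.1:9200'; only
# the first entry is used.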
def smoke_test_release(release, files, expected_hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
print(' Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release))
es_run_path = os.path.join(es_dir, 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin')
plugin_names = {}
for plugin in plugins:
print(' Install plugin [%s]' % (plugin))
run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), expected_hash, es_plugin_path, 'install -b', plugin))
plugin_names[plugin] = True
if 'x-pack' in plugin_names:
headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
print(" Install dummy shield user")
run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
else:
headers = {}
        print(' Starting elasticsearch daemon from [%s]' % es_dir)
try:
run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Escript.inline=true -Escript.stored=true -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true'
% (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid')))
if not wait_for_node_startup(es_dir, header=headers):
print("elasticsearch logs:")
print('*' * 80)
logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log'))
print(logs)
print('*' * 80)
raise RuntimeError('server didn\'t start up')
            try: # we now get / and /_nodes to fetch basic info like hashes etc. and the installed plugins
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=20)
conn.request('GET', '/', headers=headers)
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
if expected_hash != version['build_hash'].strip():
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true', headers=headers)
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'].strip(), False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.close()
finally:
pid_path = os.path.join(es_dir, 'es-smoke.pid')
if os.path.exists(pid_path): # try reading the pid and kill the node
pid = int(read_fully(pid_path))
os.kill(pid, signal.SIGKILL)
shutil.rmtree(tmp_dir)
print(' ' + '*' * 80)
print()
def parse_list(string):
return [x.strip() for x in string.split(',')]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
parser.add_argument('--version', '-v', dest='version', default=None,
help='The Elasticsearch version to smoke-test', required=True)
parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
help='The sha1 short hash of the git commit to smoketest')
parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
help='A list of additional plugins to smoketest')
parser.add_argument('--fetch_url', '-u', dest='url', default=None,
help='Fetch artifacts from the specified URL')
parser.set_defaults(hash=None)
parser.set_defaults(plugins=[])
parser.set_defaults(version=None)
parser.set_defaults(url=None)
args = parser.parse_args()
plugins = args.plugins
version = args.version
hash = args.hash
url = args.url
files = [ x % {'version': version} for x in [
'org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz',
'org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip',
'org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb',
'org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm'
]]
verify_java_version('1.8')
if url:
download_url = url
else:
download_url = '%s/%s-%s' % ('http://download.elasticsearch.org/elasticsearch/staging', version, hash)
download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
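# Example invocation (hypothetical version, hash, and plugin values; the flags are the ones
# defined by the argparse configuration above):
#   python smoke_test_rc.py --version 5.0.0 --hash bca0c8d --plugins analysis-icu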
| {
"content_hash": "9dda529059d03722bd2ad1e7f4fde22d",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 208,
"avg_line_length": 42.45418326693227,
"alnum_prop": 0.6145833333333334,
"repo_name": "strahanjen/strahanjen.github.io",
"id": "883a62210c9b5061580773a7db28a0eafe823abb",
"size": "12499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elasticsearch-master/dev-tools/smoke_test_rc.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "10561"
},
{
"name": "Batchfile",
"bytes": "13128"
},
{
"name": "CSS",
"bytes": "87761"
},
{
"name": "Emacs Lisp",
"bytes": "3341"
},
{
"name": "FreeMarker",
"bytes": "45"
},
{
"name": "Groovy",
"bytes": "262539"
},
{
"name": "HTML",
"bytes": "145224"
},
{
"name": "Java",
"bytes": "38582417"
},
{
"name": "JavaScript",
"bytes": "255577"
},
{
"name": "Perl",
"bytes": "7271"
},
{
"name": "Python",
"bytes": "78733"
},
{
"name": "Ruby",
"bytes": "9921"
},
{
"name": "Shell",
"bytes": "105322"
}
],
"symlink_target": ""
} |
from crudns.settings.prod import * # NOQA (ignore all errors on this line)
| {
"content_hash": "1c7d5f834f79d489b24ab6d6a7339f2c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 75,
"avg_line_length": 76,
"alnum_prop": 0.75,
"repo_name": "CRUDNS/CRUDNS",
"id": "539a1361be0049ac48ade92655eaa332446c7178",
"size": "76",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/crudns/settings/staging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10915"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "158967"
},
{
"name": "Python",
"bytes": "63832"
},
{
"name": "Shell",
"bytes": "5443"
}
],
"symlink_target": ""
} |
import github.GithubObject
class Status(github.GithubObject.NonCompletableGithubObject):
"""
This class represents status as defined in https://status.github.com/api
"""
@property
def status(self):
"""
:type: string
"""
return self._status.value
@property
def last_updated(self):
"""
:type: datetime.datetime
"""
return self._last_updated.value
def _initAttributes(self):
self._status = github.GithubObject.NotSet
self._last_updated = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "status" in attributes: # pragma no branch
self._status = self._makeStringAttribute(attributes["status"])
if "last_updated" in attributes: # pragma no branch
self._last_updated = self._makeDatetimeAttribute(attributes["last_updated"])
| {
"content_hash": "5dc52c9e32383b580a6e3aafbf08988b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 29.096774193548388,
"alnum_prop": 0.6308203991130821,
"repo_name": "ArcherSys/ArcherSys",
"id": "46e3921263a811cc4a03339ae051568384366ab7",
"size": "2548",
"binary": false,
"copies": "73",
"ref": "refs/heads/master",
"path": "Lib/site-packages/github/Status.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import datetime as dt
import json
import os
import re
from mock import MagicMock
from pyVmomi import vim
from requests import Response
from six import iteritems
from tests.common import HERE, VSPHERE_VERSION
from datadog_checks.vsphere.api import VersionInfo
class MockedCounter(object):
def __init__(self, counter):
self.key = counter['key']
self.groupInfo = MagicMock(key=counter['groupInfo.key'])
self.nameInfo = MagicMock(key=counter['nameInfo.key'])
self.rollupType = counter['rollup']
class MockedAPI(object):
def __init__(self, config, _=None):
self.config = config
self.infrastructure_data = {}
self.metrics_data = []
self.mock_events = []
self.server_time = dt.datetime.now()
def get_current_time(self):
return self.server_time
def get_version(self):
about = MagicMock(
version=VSPHERE_VERSION,
build='123456789',
fullName='VMware vCenter Server {} build-14792544'.format(VSPHERE_VERSION),
apiType='VirtualCenter',
)
return VersionInfo(about)
def recursive_parse_topology(self, subtree, parent=None):
current_mor = MagicMock(spec=getattr(vim, subtree['spec']), _moId=subtree['mo_id'])
children = subtree.get('children', [])
self.infrastructure_data[current_mor] = {'name': subtree['name'], 'parent': parent}
if subtree.get('runtime.powerState') == 'on':
self.infrastructure_data[current_mor]['runtime.powerState'] = vim.VirtualMachinePowerState.poweredOn
if 'runtime.host' in subtree:
# Temporarily set 'runtime.host_moid' to the host's _moId.
# This will be used later to make 'runtime.host' a pointer to the runtime.host mor instance.
self.infrastructure_data[current_mor]['runtime.host_moid'] = subtree['runtime.host']
if 'guest.hostName' in subtree:
self.infrastructure_data[current_mor]['guest.hostName'] = subtree['guest.hostName']
if self.config.should_collect_attributes and 'customValue' in subtree:
mor_attr = []
for key_name, value in iteritems(subtree['customValue']):
mor_attr.append('{}{}:{}'.format(self.config.attr_prefix, key_name, value))
self.infrastructure_data[current_mor]['attributes'] = mor_attr
for c in children:
self.recursive_parse_topology(c, parent=current_mor)
if parent is not None:
return
# Resolve the runtime.host_moId into pointers to the mocked mors.
for _, props in iteritems(self.infrastructure_data):
if 'runtime.host_moid' in props:
hosts = [m for m, p in iteritems(self.infrastructure_data) if p['name'] == props['runtime.host_moid']]
props['runtime.host'] = hosts[0] if hosts else object()
del props['runtime.host_moid']
def smart_connect(self):
pass
def get_perf_counter_by_level(self, _):
with open(os.path.join(HERE, 'fixtures', 'counters.json')) as f:
file_data = json.load(f)
return [MockedCounter(m) for m in file_data]
def get_infrastructure(self):
if not self.infrastructure_data:
with open(os.path.join(HERE, 'fixtures', 'topology.json')) as f:
file_data = json.load(f)
self.recursive_parse_topology(file_data)
return self.infrastructure_data
def query_metrics(self, query_specs):
if not self.metrics_data:
metrics_filename = 'metrics_{}.json'.format(self.config.collection_type)
with open(os.path.join(HERE, 'fixtures', metrics_filename)) as f:
file_data = json.load(f)
for el in file_data:
mocked = MagicMock(
entity=el['entity'], value=el['value'], counterId=el['counterId'], instance=el['instance']
)
self.metrics_data.append(mocked)
data = []
for spec in query_specs:
entity_name = self.infrastructure_data.get(spec.entity)['name']
counter_ids = [i.counterId for i in spec.metricId]
results = [m for m in self.metrics_data if m.entity == entity_name and m.counterId in counter_ids]
values = []
for r in results:
values.append(MagicMock(id=MagicMock(counterId=r.counterId, instance=r.instance), value=r.value))
if results:
data.append(MagicMock(entity=spec.entity, value=values))
return data
def get_max_query_metrics(self):
return 256
def get_new_events(self, start_time):
return self.mock_events
class MockResponse(Response):
def __init__(self, json_data, status_code):
super(MockResponse, self).__init__()
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
def mock_http_rest_api_v6(method, url, *args, **kwargs):
if '/api/' in url:
return MockResponse({}, 404)
if method == 'get':
if re.match(r'.*/category/id:.*$', url):
parts = url.split('_')
num = parts[len(parts) - 1]
return MockResponse(
{
"value": {
"name": "my_cat_name_{}".format(num),
"description": "",
"id": "cat_id_{}".format(num),
"used_by": [],
"cardinality": "SINGLE",
}
},
200,
)
elif re.match(r'.*/tagging/tag/id:.*$', url):
parts = url.split('_')
num = parts[len(parts) - 1]
return MockResponse(
{
"value": {
"category_id": "cat_id_{}".format(num),
"name": "my_tag_name_{}".format(num),
"description": "",
"id": "xxx",
"used_by": [],
}
},
200,
)
elif method == 'post':
assert kwargs['headers']['Content-Type'] == 'application/json'
if re.match(r'.*/session$', url):
return MockResponse(
{"value": "dummy-token"},
200,
)
elif re.match(r'.*/tagging/tag-association\?~action=list-attached-tags-on-objects$', url):
return MockResponse(
{
"value": [
{"object_id": {"id": "VM4-4-1", "type": "VirtualMachine"}, "tag_ids": ["tag_id_1", "tag_id_2"]},
{"object_id": {"id": "10.0.0.104-1", "type": "HostSystem"}, "tag_ids": ["tag_id_2"]},
{"object_id": {"id": "NFS-Share-1", "type": "Datastore"}, "tag_ids": ["tag_id_2"]},
]
},
200,
)
raise Exception("Rest api mock request not matched: method={}, url={}".format(method, url))
def mock_http_rest_api_v7(method, url, *args, **kwargs):
if method == 'get':
if re.match(r'.*/category/.*$', url):
parts = url.split('_')
num = parts[len(parts) - 1]
return MockResponse(
{
'name': 'my_cat_name_{}'.format(num),
'description': 'VM category description',
'id': 'cat_id_{}'.format(num),
'used_by': [],
'cardinality': 'SINGLE',
},
200,
)
elif re.match(r'.*/tagging/tag/.*$', url):
parts = url.split('_')
num = parts[len(parts) - 1]
return MockResponse(
{
'category_id': 'cat_id_{}'.format(num),
'name': 'my_tag_name_{}'.format(num),
'description': '',
'id': 'tag_id_{}'.format(num),
'used_by': [],
},
200,
)
elif method == 'post':
assert kwargs['headers']['Content-Type'] == 'application/json'
if re.match(r'.*/session$', url):
return MockResponse(
"dummy-token",
200,
)
elif re.match(r'.*/tagging/tag-association\?action=list-attached-tags-on-objects$', url):
return MockResponse(
[
{'tag_ids': ['tag_id_1', 'tag_id_2'], 'object_id': {'id': 'VM4-4-1', 'type': 'VirtualMachine'}},
{'tag_ids': ['tag_id_2'], 'object_id': {'id': 'NFS-Share-1', 'type': 'Datastore'}},
{'tag_ids': ['tag_id_2'], 'object_id': {'id': '10.0.0.104-1', 'type': 'HostSystem'}},
],
200,
)
raise Exception("Rest api mock request not matched: method={}, url={}".format(method, url))
| {
"content_hash": "dda271e6f3debce4ea428627273fc9b6",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 120,
"avg_line_length": 38.858369098712444,
"alnum_prop": 0.5115970841616965,
"repo_name": "DataDog/integrations-core",
"id": "dccd4b1cd05fda86cd01d196d5293b99f824e1eb",
"size": "9163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vsphere/tests/mocked_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NewsPortal.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "d1e87b7e158ecc5142490703d5f9b019",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7142857142857143,
"repo_name": "Rahul91/Exotel_NewsPortal",
"id": "29040c66b160ef9d1346dc8151cdb138e086d117",
"size": "253",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42513"
},
{
"name": "HTML",
"bytes": "20394"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "17904"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import sys
# AC_tools modules
from . AC_time import *
from . core import *
from . utils import *
from . GEOSChem_nc import *
from . GEOS import *
from . HEMCO import *
from . KPP import *
from . mask import *
from . observations import *
from . planeflight import *
from . plotting import *
# from . SMVGEAR import *
from . variables import *
# include the redundant files for now
from . GEOSChem_bpch import *
from . obsolete.plotting_REDUNDANT import *
from . obsolete.variables_REDUNDANT import *
from . obsolete.misc_REDUNDANT import *
from . obsolete.SMVGEAR_REDUNDANT import *
"""
AC_tools is a module of functions started by Tomas, contributed to by others in the
York Atmospheric Modelling group, and hopefully maintained by the group.
To access the help, from python or ipython, type help(AC_tools) to get general help.
To get more detailed help for a submodule, type, for example, help(AC_tools.AC_time).
If you find documentation missing or anything unclear, please request a change via GitHub.
"""
# Setup logging for module
import logging
level = logging.DEBUG
FORMAT = "%(levelname)8s - %(message)s @---> %(filename)s:%(lineno)s %(funcName)s()"
logging.basicConfig(filename='AC_tools.log', filemode='w', level=level,
format=FORMAT)
logging.getLogger().setLevel(level)
# Import submodules here for easier access
| {
"content_hash": "72ab1109e44a0456acdf024d7443cdc9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 160,
"avg_line_length": 34.80487804878049,
"alnum_prop": 0.7372109320252278,
"repo_name": "tsherwen/AC_tools",
"id": "7e19dcd60fc7fdd6546814e8561bfc235dcb5528",
"size": "1468",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "AC_tools/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1010174"
},
{
"name": "Shell",
"bytes": "363"
}
],
"symlink_target": ""
} |
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets
from rest_framework.pagination import PageNumberPagination
from image_app.serializers import ImageSerializer
from image_app.models import Image, Label
from image_app.filters import LabelsFilter
class StandardResultsSetPagination(PageNumberPagination):
"""
"""
page_size = 50
page_size_query_param = 'page_size'
max_page_size = 1000
class ImageViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows images to be viewed or edited.
"""
pagination_class = StandardResultsSetPagination
serializer_class = ImageSerializer
queryset = Image.objects.all()
paginate_by_param = 'page_size'
filter_backends = (DjangoFilterBackend,)
filter_class = LabelsFilter
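# Example request shape (hypothetical host and router prefix; the exact filter query
# parameters are defined by LabelsFilter in image_app.filters):
#   GET /api/images/?page_size=100  ->  one page of serialized Image objects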
| {
"content_hash": "d639669f189a8968bfd7440e6e8ef139",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.7533742331288343,
"repo_name": "pstrinkle/drf-image-app",
"id": "f17c969f43bd893647a05064c9060dab5bef1c05",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/image_app/views/image/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14358"
},
{
"name": "JavaScript",
"bytes": "34645"
},
{
"name": "Makefile",
"bytes": "93"
},
{
"name": "Python",
"bytes": "33752"
},
{
"name": "Shell",
"bytes": "1138"
}
],
"symlink_target": ""
} |
import flask
import tempfile
import flask_sqlalchemy
import flask_praetorian
import flask_cors
db = flask_sqlalchemy.SQLAlchemy()
guard = flask_praetorian.Praetorian()
cors = flask_cors.CORS()
# A generic user model that might be used by an app powered by flask-praetorian
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.Text, unique=True)
hashed_password = db.Column(db.Text)
roles = db.Column(db.Text)
is_active = db.Column(db.Boolean, default=True, server_default="true")
@property
def identity(self):
"""
*Required Attribute or Property*
flask-praetorian requires that the user class has an ``identity`` instance
attribute or property that provides the unique id of the user instance
"""
return self.id
@property
def rolenames(self):
"""
*Required Attribute or Property*
flask-praetorian requires that the user class has a ``rolenames`` instance
attribute or property that provides a list of strings that describe the roles
attached to the user instance
"""
try:
return self.roles.split(",")
except Exception:
return []
@property
def password(self):
"""
*Required Attribute or Property*
flask-praetorian requires that the user class has a ``password`` instance
attribute or property that provides the hashed password assigned to the user
instance
"""
return self.hashed_password
@classmethod
def lookup(cls, username):
"""
*Required Method*
flask-praetorian requires that the user class implements a ``lookup()``
class method that takes a single ``username`` argument and returns a user
instance if there is one that matches or ``None`` if there is not.
"""
return cls.query.filter_by(username=username).one_or_none()
@classmethod
def identify(cls, id):
"""
*Required Method*
flask-praetorian requires that the user class implements an ``identify()``
class method that takes a single ``id`` argument and returns user instance if
there is one that matches or ``None`` if there is not.
"""
return cls.query.get(id)
def is_valid(self):
return self.is_active
# Initialize flask app for the example
app = flask.Flask(__name__)
app.debug = True
app.config["SECRET_KEY"] = "top secret"
app.config["JWT_ACCESS_LIFESPAN"] = {"hours": 24}
app.config["JWT_REFRESH_LIFESPAN"] = {"days": 30}
# Initialize the flask-praetorian instance for the app
guard.init_app(app, User)
# Initialize a local database for the example
local_database = tempfile.NamedTemporaryFile(prefix="local", suffix=".db")
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///{}".format(local_database)
db.init_app(app)
# Initializes CORS so that the api_tool can talk to the example app
cors.init_app(app)
# Add users for the example
with app.app_context():
db.create_all()
db.session.add(
User(
username="TheDude",
hashed_password=guard.hash_password("abides"),
)
)
db.session.add(
User(
username="Walter",
hashed_password=guard.hash_password("calmerthanyouare"),
roles="admin",
)
)
db.session.add(
User(
username="Donnie",
hashed_password=guard.hash_password("iamthewalrus"),
roles="operator",
)
)
db.session.add(
User(
username="Maude",
hashed_password=guard.hash_password("andthorough"),
roles="operator,admin",
)
)
db.session.commit()
# Set up some routes for the example
@app.route("/login", methods=["POST"])
def login():
"""
Logs a user in by parsing a POST request containing user credentials and
issuing a JWT token.
.. example::
$ curl http://localhost:5000/login -X POST \
-d '{"username":"Walter","password":"calmerthanyouare"}'
"""
req = flask.request.get_json(force=True)
username = req.get("username", None)
password = req.get("password", None)
user = guard.authenticate(username, password)
ret = {"access_token": guard.encode_jwt_token(user)}
return (flask.jsonify(ret), 200)
@app.route("/protected")
@flask_praetorian.auth_required
def protected():
"""
A protected endpoint. The auth_required decorator will require a header
containing a valid JWT
.. example::
$ curl http://localhost:5000/protected -X GET \
-H "Authorization: Bearer <your_token>"
"""
return flask.jsonify(
message="protected endpoint (allowed user {})".format(
flask_praetorian.current_user().username,
)
)
@app.route("/protected_admin_required")
@flask_praetorian.roles_required("admin")
def protected_admin_required():
"""
A protected endpoint that requires a role. The roles_required decorator
will require that the supplied JWT includes the required roles
.. example::
$ curl http://localhost:5000/protected_admin_required -X GET \
-H "Authorization: Bearer <your_token>"
"""
return flask.jsonify(
message="protected_admin_required endpoint (allowed user {})".format(
flask_praetorian.current_user().username,
)
)
@app.route("/protected_operator_accepted")
@flask_praetorian.roles_accepted("operator", "admin")
def protected_operator_accepted():
"""
A protected endpoint that accepts any of the listed roles. The
roles_accepted decorator will require that the supplied JWT includes at
least one of the accepted roles
.. example::
$ curl http://localhost/protected_operator_accepted -X GET \
-H "Authorization: Bearer <your_token>"
"""
return flask.jsonify(
message="protected_operator_accepted endpoint (allowed usr {})".format(
flask_praetorian.current_user().username,
)
)
# Run the example
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
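# Hypothetical client-side sketch (not part of the original example) showing how the
# /login and /protected routes above could be exercised with the `requests` library
# instead of curl; host and port match the app.run() call above.
#
#   import requests
#   token = requests.post(
#       "http://localhost:5000/login",
#       json={"username": "Walter", "password": "calmerthanyouare"},
#   ).json()["access_token"]
#   resp = requests.get(
#       "http://localhost:5000/protected",
#       headers={"Authorization": "Bearer {}".format(token)},
#   )
#   print(resp.json()["message"])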
| {
"content_hash": "033ed738f419cecf76e69ff1c93c2e2d",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 85,
"avg_line_length": 29.99514563106796,
"alnum_prop": 0.6368344392296488,
"repo_name": "dusktreader/flask-praetorian",
"id": "37ae6ed1032b9ceb74dd1e002a1c0ccd1b57e135",
"size": "6179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13428"
},
{
"name": "Python",
"bytes": "161693"
}
],
"symlink_target": ""
} |
"""QuickLangevin.py - test the Langevin dynamics.
Tests Langevin dynamics using the EMT Copper potential.
"""
import sys, time
from numpy import *
import asap3
from asap3.testtools import ReportTest
from asap3.md.langevin import Langevin
from asap3.md.verlet import VelocityVerlet
from ase.lattice.cubic import FaceCenteredCubic
nequil = 50
nequilprint = 10
nsteps = 500
nprint = 100
tolerance = 0.1
nminor = 25
timestep = 0.5
# Set up atoms in a regular face-centered cubic lattice.
atoms = FaceCenteredCubic(size=(5,5,5), symbol="Cu", pbc=False,
latticeconstant = 3.5)
atoms.set_calculator(asap3.EMT())
ReportTest("Number of atoms", len(atoms), 500, 0)
# Make a small perturbation of the momenta
atoms.set_momenta(1e-6 * random.random([len(atoms), 3]))
print "Initializing ..."
predyn = VelocityVerlet(atoms, 0.5)
predyn.run(2500)
initr = atoms.get_positions()
initp = atoms.get_momenta()
def targetfunc(params, x):
return params[0] * exp(-params[1] * x) + params[2]
output = file("Langevin.dat", "w")
for temp, frict in ((0.01, 0.01),):
dyn = Langevin(atoms, timestep, temp, frict)
print ""
print "Testing Langevin dynamics with T = %f eV and lambda = %f" % (temp, frict)
ekin = atoms.get_kinetic_energy()/len(atoms)
print ekin
output.write("%.8f\n" % ekin)
temperatures = [(0, 2.0 / 3.0 * ekin)]
a = 0.1
b = frict
c = temp
print "Equilibrating ..."
tstart = time.time()
for i in xrange(1,nequil+1):
dyn.run(nminor)
ekin = atoms.get_kinetic_energy() / len(atoms)
if i % nequilprint == 0:
print "%.6f T = %.6f (goal: %f)" % \
(ekin, 2./3. * ekin, temp)
output.write("%.8f\n" % ekin)
tequil = time.time() - tstart
print "This took %s minutes." % (tequil / 60)
output.write("&\n")
temperatures = []
print "Taking data - this takes", nsteps/nequil, "times longer!"
tstart = time.time()
for i in xrange(1,nsteps+1):
dyn.run(nminor)
ekin = atoms.get_kinetic_energy() / len(atoms)
temperatures.append(2.0/3.0 * ekin)
if i % nprint == 0:
tnow = time.time() - tstart
tleft = (nsteps-i) * tnow / i
print "%.6f (time left: %.1f minutes)" % (ekin, tleft/60)
output.write("%.8f\n" % ekin)
output.write("&\n")
temperatures = array(temperatures)
mean = sum(temperatures) / len(temperatures)
print "Mean temperature:", mean, "eV"
print ""
print "This test is statistical, and may in rare cases fail due to a"
print "statistical fluctuation."
print ""
ReportTest("Mean temperature:", mean, temp, tolerance*temp)
output.close()
| {
"content_hash": "ecd10a01b92444a0f5919096417d68a7",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 84,
"avg_line_length": 30.233333333333334,
"alnum_prop": 0.6181550900404263,
"repo_name": "auag92/n2dm",
"id": "e6f843b93b79c02363160db0fae1dae94f3f2399",
"size": "2745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Asap-3.8.4/Test/QuickLangevin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4529"
},
{
"name": "C++",
"bytes": "1472384"
},
{
"name": "CSS",
"bytes": "5059"
},
{
"name": "Jupyter Notebook",
"bytes": "7328"
},
{
"name": "Makefile",
"bytes": "86067"
},
{
"name": "Matlab",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1232765"
},
{
"name": "Shell",
"bytes": "13226"
},
{
"name": "Smarty",
"bytes": "4212"
},
{
"name": "TeX",
"bytes": "5561"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("site", "0014_handle_taxes")]
operations = [
migrations.AddField(
model_name="sitesettings",
name="track_inventory_by_default",
field=models.BooleanField(default=True),
)
]
| {
"content_hash": "6bdfa14deb153b5288544dcacb26978c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 52,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.6153846153846154,
"repo_name": "mociepka/saleor",
"id": "96100d5372f763366775b3ea13795c3b7d4dd82d",
"size": "387",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "saleor/site/migrations/0015_sitesettings_handle_stock_by_default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
from discord import Embed, Color, Game
from utils import functions
description = "Add roles wich languages you write in."
perm = 2
customenabled = False
async def ex(message, client):
author = message.author
content = message.content
channel = message.channel
global customenabled
args = " ".join(content.split()[1:])
if customenabled and (args.startswith("off") or args.startswith("disable")):
customenabled = False
await client.send_message(channel, embed=Embed(description="Disbaled custom botmessage."))
await client.change_presence(game=Game(name=functions.get_members_msg(client)))
else:
await client.send_message(channel, embed=Embed(description="%s changed bot message to `%s`." % (author.mention, args), color=Color.blue()))
await client.change_presence(game=Game(name=args + " | !help"))
customenabled = True
| {
"content_hash": "5c3a0cc13739b7783c83ff5295eae08c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 147,
"avg_line_length": 34.69230769230769,
"alnum_prop": 0.6951219512195121,
"repo_name": "zekroTJA/regiusBot",
"id": "e0283dc78331371e37a18dbb38841335e16c7aaa",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/cmd_botmsg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76868"
},
{
"name": "Shell",
"bytes": "12"
}
],
"symlink_target": ""
} |
"""
ROS service source code generation for Ruby.
Converts ROS .srv files into Ruby source code implementations.
"""
import os
import sys
import genrb.generator
import genrb.genrb_main
if __name__ == "__main__":
genrb.genrb_main.genmain(sys.argv, 'gensrv_rb.py', genrb.generator.SrvGenerator())
| {
"content_hash": "9f7cc59bad7f5544e1d1b9a0b2f41f53",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 21.857142857142858,
"alnum_prop": 0.7320261437908496,
"repo_name": "OTL/genrb",
"id": "229d4c891bc88c9b5f4e566cbea362a0219e0819",
"size": "1933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/gensrv_rb.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "52646"
}
],
"symlink_target": ""
} |
version = '0.5'
def main():
from tippet import baker
baker.run()
| {
"content_hash": "eee3bf0d4ccec4f474d2d612bd391d0a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 28,
"avg_line_length": 14.8,
"alnum_prop": 0.6081081081081081,
"repo_name": "kizbitz/tippet",
"id": "7c8f4160b0a9b5ad8a2920e80bb2031ae00b121a",
"size": "74",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tippet/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25145"
},
{
"name": "Shell",
"bytes": "6710"
}
],
"symlink_target": ""
} |
import os
import logging
logger = logging.getLogger("eapptools")
RUNMODE_DEV = 'DEV'
RUNMODE_PROD = 'PROD'
if 'SERVER_SOFTWARE' not in os.environ or os.environ['SERVER_SOFTWARE'].startswith('Dev'):
RUNMODE = RUNMODE_DEV
else:
RUNMODE = RUNMODE_PROD
logger.info("RUNMODE=%s", RUNMODE)
# config parameter names
CFG_GLOBAL_APP_DIR = 'global_app_dir'
CFG_HTML_DIR = 'html_dir'
CFG_DEFAULT_PAGE = 'default_page'
CFG_RESOURCE_MAPPING = 'resource_mapping'
CFG_PAGE_MAPPING = 'page_mapping'
CFG_DEV_JS = "dev_js"
CFG_PROD_JS = "prod_js"
_config = {
CFG_GLOBAL_APP_DIR : '/',
CFG_HTML_DIR : 'html',
CFG_DEFAULT_PAGE : '/index.html'
}
# Access levels
ACCESS_NONE = 0
ACCESS_ALL = 1
ACCESS_ADMIN = 2
ACCESS_USER = 3
def get_config():
return _config
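# Minimal usage sketch (hypothetical caller code, not part of this module):
#   import eapptools
#   cfg = eapptools.get_config()
#   cfg[eapptools.CFG_DEFAULT_PAGE]   # -> '/index.html'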
| {
"content_hash": "bd8f259b3c9939368892dc96b67b5ffd",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 90,
"avg_line_length": 18.4390243902439,
"alnum_prop": 0.6944444444444444,
"repo_name": "oliverbo/eapptools",
"id": "2e4d66dde40a17be40dac00d7d302dc808af2863",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22312"
}
],
"symlink_target": ""
} |
from proxy import Proxy, proxied_attr
from proxy import proxied_attr_get as pag, proxied_attr_set as pas, proxied_attr_getset as pags
from arc_utils import arcmethod
import errors
from errors import not_implemented, fail
import urllib
class group(Proxy):
name = property(pag("name"))
features = property(*pags("features"))
parameters = property(*pags("parameters"))
# alias for backwards-compatibility
params = property(pag("parameters"))
modifyFeatures = arcmethod(*pags("features"), heterogeneous=True, preserve_order=True)
def getConfig(self, **options):
if len(options) > 0:
not_implemented()
return self.cm.fetch_json_resource("/config/group/%s" % urllib.quote_plus(self.name))
def explain(self):
not_implemented()
def modifyParams(self, command, params, **options):
command = command.upper()
if command == "ADD":
for k, v in params.iteritems():
self.parameters[k] = v
elif command == "REMOVE":
for k in [k for k in params if k in self.parameters]:
del self.parameters[k]
elif command == "REPLACE":
self.parameters = params
else:
fail(errors.make(errors.BAD_COMMAND, errors.GROUP), "Invalid command %s" % command)
self.update()
def members(self):
all_nodes = [self.cm.make_proxy_object("node", node, True) for node in self.cm.list_objects("node")]
return [node.name for node in all_nodes if self.name in node.memberships]
membership = property(members)
proxied_attr(group, "name")
proxied_attr(group, "features")
proxied_attr(group, "parameters")
| {
"content_hash": "1df635e11f9ab12a2a3464f8e5701566",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 108,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.6336633663366337,
"repo_name": "willb/wallaroo",
"id": "fd9512e98f4b7eeb0ac8347a3a48ad3e017e110a",
"size": "2344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clients/python-wallaroo/wallaroo/client/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "168563"
},
{
"name": "Python",
"bytes": "49696"
},
{
"name": "Ruby",
"bytes": "183608"
},
{
"name": "Shell",
"bytes": "192"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import enum
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Callable, List, Tuple
if TYPE_CHECKING:
from minihack import MiniHack
from nle.nethack import Command, CompassDirection
Y_cmd = CompassDirection.NW
class EventType(enum.IntEnum):
MESSAGE = 0
LOC_ACTION = 1
COORD = 2
LOC = 3
COMESTIBLES = [
"orange",
"meatball",
"meat ring",
"meat stick",
"kelp frond",
"eucalyptus leaf",
"clove of garlic",
"sprig of wolfsbane",
"carrot",
"egg",
"banana",
"melon",
"candy bar",
"lump of royal jelly",
]
class Event(ABC):
"""An event which can occur in a MiniHack episode.
This is the base class of all other events.
"""
def __init__(
self,
reward: float,
repeatable: bool,
terminal_required: bool,
terminal_sufficient: bool,
):
"""Initialise the Event.
Args:
reward (float):
The reward for the event occurring.
repeatable (bool):
Whether the event can occur repeatedly (i.e. whether the reward can
be collected repeatedly).
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
self.reward = reward
self.repeatable = repeatable
self.terminal_required = terminal_required
self.terminal_sufficient = terminal_sufficient
self.achieved = False
@abstractmethod
def check(self, env, previous_observation, action, observation) -> float:
"""Check whether the environment is in the state such that this event
has occured.
Args:
env (MiniHack):
The MiniHack environment in question.
previous_observation (tuple):
The previous state observation.
action (int):
The action taken.
observation (tuple):
The current observation.
Returns:
float: The reward.
"""
pass
def reset(self):
"""Reset the event, if there is any state necessary."""
self.achieved = False
def _set_achieved(self) -> float:
if not self.repeatable:
self.achieved = True
return self.reward
def _standing_on_top(env, location):
return not env.screen_contains(location)
class LocActionEvent(Event):
"""An event which checks whether an action is performed at a specified
location.
"""
def __init__(
self,
*args,
loc: str,
action: Command,
):
"""Initialise the Event.
Args:
loc (str):
The name of the location to reach.
action (int):
The action to perform.
reward (float):
The reward for the event occurring.
repeatable (bool):
Whether the event can occur repeatedly (i.e. whether the reward can
be collected repeatedly).
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
super().__init__(*args)
self.loc = loc
self.action = action
self.status = False
def check(self, env, previous_observation, action, observation) -> float:
del previous_observation, observation
if env._actions[action] == self.action and _standing_on_top(
env, self.loc
):
self.status = True
elif env._actions[action] == Y_cmd and self.status:
return self._set_achieved()
else:
self.status = False
return 0
def reset(self):
super().reset()
self.status = False
class LocEvent(Event):
"""An event which checks whether a specified location is reached."""
def __init__(self, *args, loc: str):
super().__init__(*args)
"""Initialise the Event.
Args:
loc (str):
The name of the location to reach.
reward (float):
The reward for the event occurring.
repeatable (bool):
Whether the event can occur repeatedly (i.e. whether the reward can
be collected repeatedly).
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
self.loc = loc
def check(self, env, previous_observation, action, observation) -> float:
del previous_observation, action, observation
if _standing_on_top(env, self.loc):
return self._set_achieved()
return 0.0
class CoordEvent(Event):
"""An event which occurs when reaching certain coordinates."""
def __init__(self, *args, coordinates: Tuple[int, int]):
"""Initialise the Event.
Args:
coordinates (tuple):
The coordinates to reach for the event.
reward (float):
The reward for the event occuring
repeatable (bool):
Whether the event can occur repeated (i.e. if the reward can be
collected repeatedly
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
super().__init__(*args)
self.coordinates = coordinates
def check(self, env, previous_observation, action, observation) -> float:
coordinates = tuple(observation[env._blstats_index][:2])
if self.coordinates == coordinates:
return self._set_achieved()
return 0.0
class MessageEvent(Event):
"""An event which occurs when any of the `messages` appear."""
def __init__(self, *args, messages: List[str]):
"""Initialise the Event.
Args:
messages (list):
The messages to be seen to trigger the event.
reward (float):
The reward for the event occurring.
repeatable (bool):
Whether the event can occur repeatedly (i.e. whether the reward can
be collected repeatedly).
terminal_required (bool):
Whether this event is required for the episode to terminate.
terminal_sufficient (bool):
Whether this event causes the episode to terminate on its own.
"""
super().__init__(*args)
self.messages = messages
def check(self, env, previous_observation, action, observation) -> float:
del previous_observation, action
curr_msg = (
observation[env._original_observation_keys.index("message")]
.tobytes()
.decode("utf-8")
)
for msg in self.messages:
if msg in curr_msg:
return self._set_achieved()
return 0.0
class AbstractRewardManager(ABC):
"""This is the abstract base class for the ``RewardManager`` that is used
for defining custom reward functions.
"""
def __init__(self):
self.terminal_sufficient = None
self.terminal_required = None
@abstractmethod
def collect_reward(self) -> float:
"""Return reward calculated and accumulated in check_episode_end_call,
and then reset it.
Returns:
float: The reward.
"""
raise NotImplementedError
@abstractmethod
def check_episode_end_call(
self, env, previous_observation, action, observation
) -> bool:
"""Check if the task has ended, and accumulate any reward from the
transition in ``self._reward``.
Args:
env (MiniHack):
The MiniHack environment in question.
previous_observation (tuple):
The previous state observation.
action (int):
The action taken.
observation (tuple):
The current observation.
Returns:
bool: Boolean whether the episode has ended.
"""
raise NotImplementedError
@abstractmethod
def reset(self) -> None:
"""Reset all events, to be called when a new episode occurs."""
raise NotImplementedError
class RewardManager(AbstractRewardManager):
"""This class is used for managing rewards, events and termination for
MiniHack tasks.
Some notes on the ordering of calls in the MiniHack/NetHack base class:
- ``step(action)`` is called on the environment
- Within ``step``, first a copy of the last observation is made, and then the
underlying NetHack game is stepped
- Then ``_is_episode_end(observation)`` is called to check whether the
episode has ended (this is overridden if we've gone over our max_steps,
or the underlying NetHack game says we're done, i.e. we died)
- Then ``_reward_fn(last_observation, observation)`` is called to calculate
the reward at this time-step
- if ``end_status`` tells us the game is done, we quit the game
- then ``step`` returns the observation, calculated reward, done, and some
statistics.
All this means that we need to check whether an observation is terminal in
``_is_episode_end`` before we're calculating the reward function.
The call of ``_is_episode_end`` in ``MiniHack`` will call
``check_episode_end_call`` in this class, which checks for termination and
accumulates any reward, which is returned and zeroed in ``collect_reward``.
"""
def __init__(self):
self.events: List[Event] = []
self.custom_reward_functions: List[
Callable[[MiniHack, Any, int, Any], float]
] = []
self._reward = 0.0
# Only used for GroupedRewardManager
self.terminal_sufficient = None
self.terminal_required = None
def add_custom_reward_fn(
self, reward_fn: Callable[[MiniHack, Any, int, Any], float]
) -> None:
"""Add a custom reward function which is called every after step to
calculate reward.
The function should be a callable which takes the environment, previous
observation, action and current observation and returns a float reward.
Args:
reward_fn (Callable[[MiniHack, Any, int, Any], float]):
A reward function which takes an environment, previous
observation, action, next observation and returns a reward.
"""
self.custom_reward_functions.append(reward_fn)
def add_event(self, event: Event):
"""Add an event to be managed by the reward manager.
Args:
event (Event):
The event to be added.
"""
self.events.append(event)
def _add_message_event(
self, msgs, reward, repeatable, terminal_required, terminal_sufficient
):
self.add_event(
MessageEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
messages=msgs,
)
)
def _add_loc_action_event(
self,
loc,
action,
reward,
repeatable,
terminal_required,
terminal_sufficient,
):
try:
action = Command[action.upper()]
except KeyError:
raise KeyError(
"Action {} is not in the action space.".format(action.upper())
)
self.add_event(
LocActionEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
loc=loc.lower(),
action=action,
)
)
def add_eat_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add an event which is triggered when `name` is eaten.
Args:
name (str):
The name of the object being eaten.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
msgs = [
f"This {name} is delicious",
"Blecch! Rotten food!",
"last bite of your meal",
]
if name == "apple":
msgs.append("Delicious! Must be a Macintosh!")
msgs.append("Core dumped.")
if name == "pear":
msgs.append("Core dumped.")
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_wield_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when a specific weapon is wielded.
Args:
name (str):
The name of the weapon to be wielded.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
msgs = [
f"{name} wields itself to your hand!",
f"{name} (weapon in hand)",
]
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_wear_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when a specific armor is worn.
Args:
name (str):
The name of the armor to be worn.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
msgs = [f"You are now wearing a {name}"]
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_amulet_event(
self,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when an amulet is worn.
Args:
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_message_event(
["amulet (being worn)."],
reward,
repeatable,
terminal_required,
terminal_sufficient,
)
def add_kill_event(
self,
name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when a specified monster is killed.
Args:
name (str):
The name of the monster to be killed.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_message_event(
[f"You kill the {name}"],
reward,
repeatable,
terminal_required,
terminal_sufficient,
)
def add_message_event(
self,
msgs: List[str],
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered when any of the given messages are seen.
Args:
msgs (List[str]):
The messages, any of which triggers the event when seen.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_message_event(
msgs, reward, repeatable, terminal_required, terminal_sufficient
)
def add_positional_event(
self,
place_name: str,
action_name: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered on taking a given action at a given place.
Args:
place_name (str):
The name of the place to trigger the event.
action_name (str):
The name of the action to trigger the event.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self._add_loc_action_event(
place_name,
action_name,
reward,
repeatable,
terminal_required,
terminal_sufficient,
)
def add_coordinate_event(
self,
coordinates: Tuple[int, int],
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered on when reaching the specified
coordinates.
Args:
coordinates (Tuple[int, int]):
The coordinates to be reached (tuple of ints).
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self.add_event(
CoordEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
coordinates=coordinates,
)
)
def add_location_event(
self,
location: str,
reward=1,
repeatable=False,
terminal_required=True,
terminal_sufficient=False,
):
"""Add event which is triggered on reaching a specified location.
Args:
location (str):
The name of the location to be reached.
reward (float):
The reward for this event. Defaults to 1.
repeatable (bool):
Whether this event can be triggered multiple times. Defaults to
False.
terminal_required (bool):
Whether this event is required for termination. Defaults to
True.
terminal_sufficient (bool):
Whether this event is sufficient for termination. Defaults to
False.
"""
self.add_event(
LocEvent(
reward,
repeatable,
terminal_required,
terminal_sufficient,
loc=location,
)
)
def _set_achieved(self, event: Event) -> float:
if not event.repeatable:
event.achieved = True
return event.reward
def _standing_on_top(self, env, name):
"""Returns whether the agents is standing on top of the given object.
The object name (e.g. altar, sink, fountain) must exist on the map.
Args:
env (MiniHack):
The environment object.
name (str):
The name of the object.
Returns:
bool: True if the object name is not in the screen descriptions
(when the agent stands on the object, the agent's info takes the
space of the corresponding tile rather than the object's).
"""
return not env.screen_contains(name)
def check_episode_end_call(
self, env, previous_observation, action, observation
) -> bool:
reward = 0.0
for event in self.events:
if event.achieved:
continue
reward += event.check(
env, previous_observation, action, observation
)
for custom_reward_function in self.custom_reward_functions:
reward += custom_reward_function(
env, previous_observation, action, observation
)
self._reward += reward
return self._check_complete()
def _check_complete(self) -> bool:
"""Checks whether the episode is complete.
Requires any event which is sufficient to be achieved, OR all required
events to be achieved."""
result = True
for event in self.events:
# This event is enough, we're done
if event.achieved and event.terminal_sufficient:
return True
# We need this event and we haven't done it, we're not done
if not event.achieved and event.terminal_required:
result = False
# We've achieved all terminal_required events, we're done
return result
def collect_reward(self) -> float:
result = self._reward
self._reward = 0.0
return result
def reset(self):
self._reward = 0.0
for event in self.events:
event.reset()
class SequentialRewardManager(RewardManager):
"""A reward manager that ignores ``terminal_required`` and
``terminal_sufficient``, and just require every event is completed in the
order it is added to the reward manager.
"""
def __init__(self):
self.current_event_idx = 0
super().__init__()
def check_episode_end_call(
self, env, previous_observation, action, observation
):
event = self.events[self.current_event_idx]
reward = event.check(env, previous_observation, action, observation)
if event.achieved:
self.current_event_idx += 1
self._reward += reward
return self._check_complete()
def _check_complete(self) -> bool:
return self.current_event_idx == len(self.events)
class GroupedRewardManager(AbstractRewardManager):
"""Operates as a collection of reward managers.
The rewards from each reward manager are summed, and termination can be
specified by ``terminal_sufficient`` and ``terminal_required`` on each
reward manager.
Given this can be nested arbitrarily deeply (as each reward manager could
itself be a GroupedRewardManager), this enables complex specification of
groups of rewards.
"""
def __init__(self):
self.reward_managers: List[AbstractRewardManager] = []
def check_episode_end_call(
self, env, previous_observation, action, observation
) -> bool:
for reward_manager in self.reward_managers:
result = reward_manager.check_episode_end_call(
env, previous_observation, action, observation
)
# This reward manager has completed and it's sufficient so we're
# done
if reward_manager.terminal_sufficient and result:
return True
# This reward manager is required and hasn't completed, so we're
# not done
if reward_manager.terminal_required and not result:
return False
# If we've got here we've completed all required reward managers, so
# we're done
return True
def add_reward_manager(
self,
reward_manager: AbstractRewardManager,
terminal_required: bool,
terminal_sufficient: bool,
) -> None:
"""Add a new reward manager, with ``terminal_sufficient`` and
``terminal_required`` acting as for individual events.
Args:
reward_manager (RewardManager):
The reward manager to be added.
terminal_required (bool):
Whether this reward manager terminating is required for the
episode to terminate.
terminal_sufficient:
Whether this reward manager terminating is sufficient for the
episode to terminate.
"""
reward_manager.terminal_required = terminal_required
reward_manager.terminal_sufficient = terminal_sufficient
self.reward_managers.append(reward_manager)
def collect_reward(self):
reward = 0.0
for reward_manager in self.reward_managers:
reward += reward_manager.collect_reward()
return reward
def reset(self):
self._reward = 0.0
for reward_manager in self.reward_managers:
reward_manager.reset()
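# Minimal, self-contained usage sketch (not part of the original module). It exercises the
# terminal_required / terminal_sufficient logic above without a MiniHack environment by
# marking events as achieved by hand; event names and rewards are made up for illustration.
if __name__ == "__main__":
    manager = RewardManager()
    manager.add_message_event(["You kill the jackal"], reward=1.0)  # required by default
    manager.add_location_event("staircase down", reward=2.0, terminal_sufficient=True)
    assert not manager._check_complete()  # nothing achieved yet
    manager.events[1].achieved = True     # reaching the staircase alone is sufficient
    assert manager._check_complete()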
| {
"content_hash": "feea26f63a8787e9c49efa97a7c59d20",
"timestamp": "",
"source": "github",
"line_count": 860,
"max_line_length": 82,
"avg_line_length": 32.205813953488374,
"alnum_prop": 0.563093475827707,
"repo_name": "facebookresearch/minihack",
"id": "cca9fb7c50d20e2927711445f0278850348bc6d7",
"size": "27748",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "minihack/reward_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5309"
},
{
"name": "Jupyter Notebook",
"bytes": "5525"
},
{
"name": "Python",
"bytes": "427522"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
} |
import random  # used by MegaTable.AppendRow below
import wx
import wx.grid as Grid
import images
#---------------------------------------------------------------------------
class MegaTable(Grid.GridTableBase):
"""
A custom wx.Grid Table using user supplied data
"""
def __init__(self, data, colnames, plugins):
"""data is a list of the form
[(rowname, dictionary),
dictionary.get(colname, None) returns the data for column
colname
"""
# The base class must be initialized *first*
Grid.GridTableBase.__init__(self)
self.data = data
self.colnames = colnames
self.plugins = plugins or {}
# XXX
# we need to store the row length and column length to
# see if the table has changed size
self._rows = self.GetNumberRows()
self._cols = self.GetNumberCols()
def GetNumberCols(self):
return len(self.colnames)
def GetNumberRows(self):
return len(self.data)
def GetColLabelValue(self, col):
return self.colnames[col]
def GetRowLabelValue(self, row):
return "row %03d" % int(self.data[row][0])
def GetValue(self, row, col):
return str(self.data[row][1].get(self.GetColLabelValue(col), ""))
def GetRawValue(self, row, col):
return self.data[row][1].get(self.GetColLabelValue(col), "")
def SetValue(self, row, col, value):
self.data[row][1][self.GetColLabelValue(col)] = value
def ResetView(self, grid):
"""
(Grid) -> Reset the grid view. Call this to
update the grid if rows and columns have been added or deleted
"""
grid.BeginBatch()
for current, new, delmsg, addmsg in [
(self._rows, self.GetNumberRows(), Grid.GRIDTABLE_NOTIFY_ROWS_DELETED, Grid.GRIDTABLE_NOTIFY_ROWS_APPENDED),
(self._cols, self.GetNumberCols(), Grid.GRIDTABLE_NOTIFY_COLS_DELETED, Grid.GRIDTABLE_NOTIFY_COLS_APPENDED),
]:
if new < current:
msg = Grid.GridTableMessage(self,delmsg,new,current-new)
grid.ProcessTableMessage(msg)
elif new > current:
msg = Grid.GridTableMessage(self,addmsg,new-current)
grid.ProcessTableMessage(msg)
self.UpdateValues(grid)
grid.EndBatch()
self._rows = self.GetNumberRows()
self._cols = self.GetNumberCols()
# update the column rendering plugins
self._updateColAttrs(grid)
# update the scrollbars and the displayed part of the grid
grid.AdjustScrollbars()
grid.ForceRefresh()
def UpdateValues(self, grid):
"""Update all displayed values"""
# This sends an event to the grid table to update all of the values
msg = Grid.GridTableMessage(self, Grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
grid.ProcessTableMessage(msg)
def _updateColAttrs(self, grid):
"""
wx.Grid -> update the column attributes to add the
appropriate renderer given the column name. (renderers
are stored in the self.plugins dictionary)
Otherwise default to the default renderer.
"""
col = 0
for colname in self.colnames:
attr = Grid.GridCellAttr()
if colname in self.plugins:
renderer = self.plugins[colname](self)
if renderer.colSize:
grid.SetColSize(col, renderer.colSize)
if renderer.rowSize:
grid.SetDefaultRowSize(renderer.rowSize)
attr.SetReadOnly(True)
attr.SetRenderer(renderer)
grid.SetColAttr(col, attr)
col += 1
# ------------------------------------------------------
# begin the added code to manipulate the table (non wx related)
def AppendRow(self, row):
#print('append')
entry = {}
for name in self.colnames:
entry[name] = "Appended_%i"%row
        # XXX Hack
        # entry["A"] is an integer in 0..3; the image renderer maps it
        # onto one of its bitmaps
        entry["A"] = random.choice(range(4))
self.data.insert(row, ["Append_%i"%row, entry])
def DeleteCols(self, cols):
"""
cols -> delete the columns from the dataset
cols hold the column indices
"""
# we'll cheat here and just remove the name from the
# list of column names. The data will remain but
# it won't be shown
deleteCount = 0
cols = cols[:]
cols.sort()
for i in cols:
self.colnames.pop(i-deleteCount)
# we need to advance the delete count
# to make sure we delete the right columns
deleteCount += 1
if not len(self.colnames):
self.data = []
def DeleteRows(self, rows):
"""
rows -> delete the rows from the dataset
rows hold the row indices
"""
deleteCount = 0
rows = rows[:]
rows.sort()
for i in rows:
self.data.pop(i-deleteCount)
# we need to advance the delete count
# to make sure we delete the right rows
deleteCount += 1
def SortColumn(self, col):
"""
col -> sort the data based on the column indexed by col
"""
name = self.colnames[col]
_data = []
for row in self.data:
rowname, entry = row
_data.append((entry.get(name, None), row))
        # Sort on the cell value only; sorting the (value, row) tuples
        # directly would compare the row payloads (lists holding dicts)
        # whenever two values tie, which raises TypeError under Python 3.
        _data.sort(key=lambda item: item[0])
self.data = []
for sortvalue, row in _data:
self.data.append(row)
# end table manipulation code
# ----------------------------------------------------------
# --------------------------------------------------------------------
# Sample wx.Grid renderers
class MegaImageRenderer(Grid.GridCellRenderer):
def __init__(self, table):
"""
Image Renderer Test. This just places an image in a cell
based on the row index. There are N choices and the
choice is made by choice[row%N]
"""
Grid.GridCellRenderer.__init__(self)
self.table = table
self._choices = [images.Smiles.GetBitmap,
images.Mondrian.GetBitmap,
images.WXPdemo.GetBitmap,
]
self.colSize = None
self.rowSize = None
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
choice = self.table.GetRawValue(row, col)
bmp = self._choices[ choice % len(self._choices)]()
image = wx.MemoryDC()
image.SelectObject(bmp)
# clear the background
dc.SetBackgroundMode(wx.SOLID)
if isSelected:
dc.SetBrush(wx.Brush(wx.BLUE, wx.BRUSHSTYLE_SOLID))
dc.SetPen(wx.Pen(wx.BLUE, 1, wx.PENSTYLE_SOLID))
else:
dc.SetBrush(wx.Brush(wx.WHITE, wx.BRUSHSTYLE_SOLID))
dc.SetPen(wx.Pen(wx.WHITE, 1, wx.PENSTYLE_SOLID))
dc.DrawRectangle(rect)
# copy the image but only to the size of the grid cell
width, height = bmp.GetWidth(), bmp.GetHeight()
if width > rect.width-2:
width = rect.width-2
if height > rect.height-2:
height = rect.height-2
dc.Blit(rect.x+1, rect.y+1, width, height,
image,
0, 0, wx.COPY, True)
class MegaFontRenderer(Grid.GridCellRenderer):
def __init__(self, table, color="blue", font="ARIAL", fontsize=8):
"""Render data in the specified color and font and fontsize"""
Grid.GridCellRenderer.__init__(self)
self.table = table
self.color = color
self.font = wx.Font(fontsize, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, 0, font)
self.selectedBrush = wx.Brush("blue", wx.BRUSHSTYLE_SOLID)
self.normalBrush = wx.Brush(wx.WHITE, wx.BRUSHSTYLE_SOLID)
self.colSize = None
self.rowSize = 50
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
# Here we draw text in a grid cell using various fonts
# and colors. We have to set the clipping region on
# the grid's DC, otherwise the text will spill over
# to the next cell
dc.SetClippingRegion(rect)
# clear the background
dc.SetBackgroundMode(wx.SOLID)
if isSelected:
dc.SetBrush(wx.Brush(wx.BLUE, wx.BRUSHSTYLE_SOLID))
dc.SetPen(wx.Pen(wx.BLUE, 1, wx.PENSTYLE_SOLID))
else:
dc.SetBrush(wx.Brush(wx.WHITE, wx.BRUSHSTYLE_SOLID))
dc.SetPen(wx.Pen(wx.WHITE, 1, wx.PENSTYLE_SOLID))
dc.DrawRectangle(rect)
text = self.table.GetValue(row, col)
dc.SetBackgroundMode(wx.SOLID)
# change the text background based on whether the grid is selected
# or not
if isSelected:
dc.SetBrush(self.selectedBrush)
dc.SetTextBackground("blue")
else:
dc.SetBrush(self.normalBrush)
dc.SetTextBackground("white")
dc.SetTextForeground(self.color)
dc.SetFont(self.font)
dc.DrawText(text, rect.x+1, rect.y+1)
# Okay, now for the advanced class :)
# Let's add three dots "..."
        # to indicate that there is more text to be read
# when the text is larger than the grid cell
width, height = dc.GetTextExtent(text)
if width > rect.width-2:
width, height = dc.GetTextExtent("...")
x = rect.x+1 + rect.width-2 - width
dc.DrawRectangle(x, rect.y+1, width+1, height)
dc.DrawText("...", x, rect.y+1)
dc.DestroyClippingRegion()
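# Hedged sketch (added for illustration, not part of the original demo). The
# "plugin" protocol used by MegaTable._updateColAttrs only needs a callable
# that takes the table and returns an object exposing ``colSize``, ``rowSize``
# and a GridCellRenderer-style ``Draw`` method. The hypothetical renderer
# below follows the same pattern as the two renderers above and simply shades
# the cell background without drawing any text.
class ShadedCellRenderer(Grid.GridCellRenderer):
    def __init__(self, table):
        Grid.GridCellRenderer.__init__(self)
        self.table = table
        self.colSize = None   # keep the grid's default column width
        self.rowSize = None   # keep the grid's default row height
    def Draw(self, grid, attr, dc, rect, row, col, isSelected):
        # Fill the whole cell with a solid colour that reflects selection.
        dc.SetBackgroundMode(wx.SOLID)
        colour = wx.BLUE if isSelected else wx.LIGHT_GREY
        dc.SetBrush(wx.Brush(colour, wx.BRUSHSTYLE_SOLID))
        dc.SetPen(wx.Pen(colour, 1, wx.PENSTYLE_SOLID))
        dc.DrawRectangle(rect)
# Passing {"Is": ShadedCellRenderer} as a plugins entry (our own column
# choice, not the demo's) would apply it to the "Is" column in the same way
# as the factories used by TestFrame below.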
# --------------------------------------------------------------------
# Sample Grid using a specialized table and renderers that can
# be plugged in based on column names
class MegaGrid(Grid.Grid):
def __init__(self, parent, data, colnames, plugins=None):
"""parent, data, colnames, plugins=None
Initialize a grid using the data defined in data and colnames
(see MegaTable for a description of the data format)
plugins is a dictionary of columnName -> column renderers.
"""
# The base class must be initialized *first*
Grid.Grid.__init__(self, parent, -1)
self._table = MegaTable(data, colnames, plugins)
self.SetTable(self._table)
self._plugins = plugins
self.Bind(Grid.EVT_GRID_LABEL_RIGHT_CLICK, self.OnLabelRightClicked)
def Reset(self):
"""reset the view based on the data in the table. Call
this when rows are added or destroyed"""
self._table.ResetView(self)
def OnLabelRightClicked(self, evt):
# Did we click on a row or a column?
row, col = evt.GetRow(), evt.GetCol()
if row == -1: self.colPopup(col, evt)
elif col == -1: self.rowPopup(row, evt)
def rowPopup(self, row, evt):
"""(row, evt) -> display a popup menu when a row label is right clicked"""
appendID = wx.NewId()
deleteID = wx.NewId()
x = self.GetRowSize(row)/2
if not self.GetSelectedRows():
self.SelectRow(row)
menu = wx.Menu()
xo, yo = evt.GetPosition()
menu.Append(appendID, "Append Row")
menu.Append(deleteID, "Delete Row(s)")
def append(event, self=self, row=row):
self._table.AppendRow(row)
self.Reset()
def delete(event, self=self, row=row):
rows = self.GetSelectedRows()
self._table.DeleteRows(rows)
self.Reset()
self.Bind(wx.EVT_MENU, append, id=appendID)
self.Bind(wx.EVT_MENU, delete, id=deleteID)
self.PopupMenu(menu)
menu.Destroy()
return
def colPopup(self, col, evt):
"""(col, evt) -> display a popup menu when a column label is
right clicked"""
x = self.GetColSize(col)/2
menu = wx.Menu()
id1 = wx.NewId()
sortID = wx.NewId()
xo, yo = evt.GetPosition()
self.SelectCol(col)
cols = self.GetSelectedCols()
self.Refresh()
menu.Append(id1, "Delete Col(s)")
menu.Append(sortID, "Sort Column")
def delete(event, self=self, col=col):
cols = self.GetSelectedCols()
self._table.DeleteCols(cols)
self.Reset()
def sort(event, self=self, col=col):
self._table.SortColumn(col)
self.Reset()
self.Bind(wx.EVT_MENU, delete, id=id1)
if len(cols) == 1:
self.Bind(wx.EVT_MENU, sort, id=sortID)
self.PopupMenu(menu)
menu.Destroy()
return
# -----------------------------------------------------------------
# Test data
# data is in the form
# [rowname, dictionary]
# where dictionary.get(colname, None) -> returns the value for the cell
#
# the colname must also be supplied
import random
colnames = ["Row", "This", "Is", "A", "Test"]
data = []
for row in range(1000):
d = {}
for name in ["This", "Test", "Is"]:
d[name] = random.random()
d["Row"] = len(data)
# XXX
# the "A" column can only be between one and 4
d["A"] = random.choice(range(4))
data.append((str(row), d))
class MegaFontRendererFactory:
def __init__(self, color, font, fontsize):
"""
(color, font, fontsize) -> set of a factory to generate
renderers when called.
func = MegaFontRenderFactory(color, font, fontsize)
renderer = func(table)
"""
self.color = color
self.font = font
self.fontsize = fontsize
def __call__(self, table):
return MegaFontRenderer(table, self.color, self.font, self.fontsize)
#---------------------------------------------------------------------------
class TestFrame(wx.Frame):
def __init__(self, parent, plugins={"This":MegaFontRendererFactory("red", "ARIAL", 8),
"A":MegaImageRenderer,
"Test":MegaFontRendererFactory("orange", "TIMES", 24),}):
wx.Frame.__init__(self, parent, -1,
"Test Frame", size=(640,480))
grid = MegaGrid(self, data, colnames, plugins)
grid.Reset()
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, "Show the MegaGrid", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
def OnButton(self, evt):
win = TestFrame(self)
win.Show(True)
#---------------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
overview = """Mega Grid Example
This example attempts to show many examples and tricks of
using a virtual grid object. Hopefully the source isn't too jumbled.
Features:
<ol>
<li>Uses a virtual grid
<li>Columns and rows have popup menus (right click on labels)
<li>Columns and rows can be deleted (i.e. table can be
resized)
<li>Dynamic renderers. Renderers are plugins based on
column header name. Shows a simple Font Renderer and
an Image Renderer.
</ol>
Look for 'XXX' in the code to indicate some workarounds for non-obvious
behavior and various hacks.
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| {
"content_hash": "e890e959245f6e6d7869e50d96ed7d87",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 120,
"avg_line_length": 31.409274193548388,
"alnum_prop": 0.5581231144489377,
"repo_name": "dnxbjyj/python-basic",
"id": "332693b50c15f32456bdde5116cb735a9cd195a3",
"size": "15602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/wxpython/wxPython-demo-4.0.1/demo/Grid_MegaExample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70"
},
{
"name": "HTML",
"bytes": "274934"
},
{
"name": "Jupyter Notebook",
"bytes": "868723"
},
{
"name": "Python",
"bytes": "4032747"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
} |