code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M)
---|---|---|---|---|---|
from tcp_ip_raw_socket import *
def main():
fd = createSocket()
pkt = buildPacket("10.1.1.2", "10.1.1.1", 54321, 80, "Hello, how are you?")
try:
print "Starting flood"
while True:
sendPacket(fd, pkt, "10.1.1.2")
except KeyboardInterrupt:
print "Closing..."
if __name__ == "__main__":
    main()
| Digoss/funny_python | syn_flood.py | Python | bsd-3-clause | 310 |
__author__ = 'Bohdan Mushkevych'
from threading import Thread
from werkzeug.wrappers import Request
from werkzeug.wsgi import ClosingIterator
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.serving import run_simple
from synergy.conf import settings
from synergy.system.system_logger import get_logger
from synergy.scheduler.scheduler_constants import PROCESS_MX
from synergy.mx.utils import STATIC_PATH, local, local_manager, url_map, jinja_env
from synergy.mx import views
from flow.mx import views as flow_views
from flow.mx import STATIC_FLOW_ENDPOINT, STATIC_FLOW_PATH
import socket
socket.setdefaulttimeout(10.0) # set default socket timeout at 10 seconds
class MX(object):
""" MX stands for Management Extension and represents HTTP server serving UI front-end for Synergy Scheduler """
def __init__(self, mbean):
local.application = self
self.mx_thread = None
self.mbean = mbean
jinja_env.globals['mbean'] = mbean
self.dispatch = SharedDataMiddleware(self.dispatch, {
f'/scheduler/static': STATIC_PATH,
f'/{STATIC_FLOW_ENDPOINT}': STATIC_FLOW_PATH,
})
# during the get_logger call a 'werkzeug' logger will be created
# later, werkzeug._internal.py -> _log() will assign the logger to global _logger variable
self.logger = get_logger(PROCESS_MX)
def dispatch(self, environ, start_response):
local.application = self
request = Request(environ)
local.url_adapter = adapter = url_map.bind_to_environ(environ)
local.request = request
try:
endpoint, values = adapter.match()
# first - try to read from synergy.mx.views
handler = getattr(views, endpoint, None)
if not handler:
# otherwise - read from flow.mx.views
handler = getattr(flow_views, endpoint)
response = handler(request, **values)
except NotFound:
response = views.not_found(request)
response.status_code = 404
except HTTPException as e:
response = e
return ClosingIterator(response(environ, start_response),
[local_manager.cleanup])
def __call__(self, environ, start_response):
return self.dispatch(environ, start_response)
def start(self, hostname=None, port=None):
""" Spawns a new HTTP server, residing on defined hostname and port
:param hostname: the default hostname the server should listen on.
:param port: the default port of the server.
"""
if hostname is None:
hostname = settings.settings['mx_host']
if port is None:
port = settings.settings['mx_port']
reloader = False # use_reloader: the default setting for the reloader.
        debugger = False  # use_debugger: should the werkzeug debugging system be used?
evalex = True # should the exception evaluation feature be enabled?
threaded = False # True if each request is handled in a separate thread
processes = 1 # if greater than 1 then handle each request in a new process
reloader_interval = 1 # the interval for the reloader in seconds.
static_files = None # static_files: optional dict of static files.
extra_files = None # extra_files: optional list of extra files to track for reloading.
ssl_context = None # ssl_context: optional SSL context for running server in HTTPS mode.
        # pass run_simple as the thread target; calling it directly here would block
        # the current thread instead of starting the server in mx_thread
        self.mx_thread = Thread(target=run_simple,
                                kwargs=dict(hostname=hostname,
                                            port=port,
                                            application=self,
                                            use_debugger=debugger,
                                            use_evalex=evalex,
                                            extra_files=extra_files,
                                            use_reloader=reloader,
                                            reloader_interval=reloader_interval,
                                            threaded=threaded,
                                            processes=processes,
                                            static_files=static_files,
                                            ssl_context=ssl_context))
self.mx_thread.daemon = True
self.mx_thread.start()
def stop(self):
""" method stops currently running HTTP server, if any
:see: `werkzeug.serving.make_environ`
http://flask.pocoo.org/snippets/67/ """
func = jinja_env.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('MX Error: no Shutdown Function registered for the Werkzeug Server')
func()
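    # For reference, the Flask snippet linked above reads the shutdown callable from
    # the WSGI environ of an active request. A hedged sketch of that pattern (the view
    # name and wiring below are assumptions, not part of this module):
    #
    #     def shutdown_view(request, **values):
    #         func = request.environ.get('werkzeug.server.shutdown')
    #         if func is None:
    #             raise RuntimeError('Not running with the Werkzeug Server')
    #         func()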
if __name__ == '__main__':
from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER
from synergy.scheduler.synergy_scheduler import Scheduler
scheduler = Scheduler(PROCESS_SCHEDULER)
app = MX(scheduler)
app.start()
| mushkevych/scheduler | synergy/mx/synergy_mx.py | Python | bsd-3-clause | 5,162 |
from datetime import datetime
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
assert msg in str(m[0].message)
# convert to utc
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_assign_columns(self, float_frame):
float_frame["hi"] = "there"
df = float_frame.copy()
df.columns = ["foo", "bar", "baz", "quux", "foo2"]
tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {
"self",
"mapper",
"index",
"columns",
"axis",
"inplace",
"copy",
"level",
"errors",
}
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {
"self",
"labels",
"index",
"columns",
"axis",
"limit",
"copy",
"level",
"method",
"fill_value",
"tolerance",
}
class TestIntervalIndex:
def test_setitem(self):
df = DataFrame({"A": range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df["B"] = s
df["C"] = np.array(s)
df["D"] = s.values
df["E"] = np.array(s.values)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
assert is_object_dtype(df["C"])
assert is_object_dtype(df["E"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"], check_names=False)
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"], check_names=False)
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_set_reset_index(self):
df = DataFrame({"A": range(10)})
s = cut(df.A, 5)
df["B"] = s
df = df.set_index("B")
df = df.reset_index()
| TomAugspurger/pandas | pandas/tests/frame/test_alter_axes.py | Python | bsd-3-clause | 8,801 |
# -*- coding: utf-8 -*-
import os.path
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.server.handler.threadedhandler import WebSocketHandler, EchoWebSocketHandler
class BroadcastWebSocketHandler(WebSocketHandler):
def received_message(self, m):
cherrypy.engine.publish('websocket-broadcast', str(m))
class Root(object):
@cherrypy.expose
def index(self):
return """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>WebSocket example displaying Android device sensors</title>
<link rel="stylesheet" href="/css/style.css" type="text/css" />
<script type="application/javascript" src="/js/jquery-1.6.2.min.js"> </script>
<script type="application/javascript" src="/js/jcanvas.min.js"> </script>
<script type="application/javascript" src="/js/droidsensor.js"> </script>
<script type="application/javascript">
$(document).ready(function() {
initWebSocket();
drawAll();
});
</script>
</head>
<body>
<section id="content" class="body">
<canvas id="canvas" width="900" height="620"></canvas>
</section>
</body>
</html>
"""
@cherrypy.expose
def ws(self):
cherrypy.log("Handler created: %s" % repr(cherrypy.request.ws_handler))
if __name__ == '__main__':
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': 9000,
'tools.staticdir.root': os.path.abspath(os.path.join(os.path.dirname(__file__), 'static'))
}
)
    print(os.path.abspath(os.path.join(__file__, 'static')))
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
cherrypy.quickstart(Root(), '', config={
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'
},
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'css'
},
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'images'
},
'/ws': {
'tools.websocket.on': True,
'tools.websocket.handler_cls': BroadcastWebSocketHandler
}
}
)
| progrium/WebSocket-for-Python | example/droid_sensor_cherrypy_server.py | Python | bsd-3-clause | 2,324 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
This example demonstrates isocurve for triangular mesh with vertex data.
"""
import numpy as np
from vispy import app, scene
from vispy.geometry.generation import create_sphere
import sys
# Create a canvas with a 3D viewport
canvas = scene.SceneCanvas(keys='interactive',
title='Isocurve for Triangular Mesh Example')
canvas.show()
view = canvas.central_widget.add_view()
cols = 10
rows = 10
radius = 2
nbr_level = 20
mesh = create_sphere(cols, rows, radius=radius)
vertices = mesh.get_vertices()
tris = mesh.get_faces()
cl = np.linspace(-radius, radius, nbr_level+2)[1:-1]
scene.visuals.Isoline(vertices=vertices, tris=tris, data=vertices[:, 2],
levels=cl, color_lev='winter', parent=view.scene)
# Add a 3D axis to keep us oriented
scene.visuals.XYZAxis(parent=view.scene)
view.camera = scene.TurntableCamera()
view.camera.set_range((-1, 1), (-1, 1), (-1, 1))
if __name__ == '__main__' and sys.flags.interactive == 0:
app.run()
| Eric89GXL/vispy | examples/basics/scene/isocurve_for_trisurface.py | Python | bsd-3-clause | 1,317 |
"""
Forest of trees-based ensemble methods.
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
import numbers
from warnings import catch_warnings, simplefilter, warn
import threading
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from joblib import Parallel
from ..base import is_classifier
from ..base import ClassifierMixin, RegressorMixin, MultiOutputMixin
from ..metrics import accuracy_score, r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, compute_sample_weight, deprecated
from ..exceptions import DataConversionWarning
from ._base import BaseEnsemble, _partition_estimators
from ..utils.fixes import delayed
from ..utils.fixes import _joblib_parallel_args
from ..utils.multiclass import check_classification_targets, type_of_target
from ..utils.validation import check_is_fitted, _check_sample_weight
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0.0, 1.0]`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, numbers.Integral):
if not (1 <= max_samples <= n_samples):
msg = "`max_samples` must be in range 1 to {} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, numbers.Real):
if not (0 < max_samples <= 1):
msg = "`max_samples` must be in range (0.0, 1.0] but got value {}"
raise ValueError(msg.format(max_samples))
return round(n_samples * max_samples)
msg = "`max_samples` should be int or float, but got type '{}'"
raise TypeError(msg.format(type(max_samples)))
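# Illustration of the rules above (hypothetical values):
#   _get_n_samples_bootstrap(n_samples=100, max_samples=None) -> 100
#   _get_n_samples_bootstrap(n_samples=100, max_samples=25)   -> 25
#   _get_n_samples_bootstrap(n_samples=100, max_samples=0.5)  -> 50   (round(100 * 0.5))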
def _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):
"""
    Private function used by the _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples_bootstrap)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):
"""
    Private function used by the forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples,
n_samples_bootstrap)
sample_counts = np.bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
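# Illustration (hypothetical values): with n_samples=5 and a bootstrap draw of
# sample_indices = [0, 0, 3, 4, 4], np.bincount(sample_indices, minlength=5) is
# [2, 0, 0, 1, 2], so the unsampled (out-of-bag) indices are [1, 2].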
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None,
n_samples_bootstrap=None):
"""
Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples,
n_samples_bootstrap)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with catch_warnings():
simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y,
indices=indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y,
indices=indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
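# Note on the bootstrap handling above (hypothetical values): for n_samples=4 and
# drawn indices=[1, 1, 3, 0], np.bincount(indices, minlength=4) is [1, 2, 0, 1];
# multiplying the sample weights by these counts fits the tree on the full X while
# weighting it exactly like a sample drawn with replacement (sample 2 gets weight 0).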
class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):
"""
Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=100, *,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
max_samples=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
self.max_samples = max_samples
def apply(self, X):
"""
Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : ndarray of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer="threads"))(
delayed(tree.apply)(X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""
Return the decision path in the forest.
.. versionadded:: 0.18
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse matrix of shape (n_samples, n_nodes)
Return a node indicator matrix where non zero elements indicates
that the samples goes through the nodes. The matrix is of CSR
format.
n_nodes_ptr : ndarray of shape (n_estimators + 1,)
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer='threads'))(
delayed(tree.decision_path)(X, check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""
Build a forest of trees from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
"""
# Validate or convert input data
if issparse(y):
raise ValueError(
"sparse multilabel-indicator for y is not supported."
)
X, y = self._validate_data(X, y, multi_output=True,
accept_sparse="csc", dtype=DTYPE)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # indexing with [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
if self.criterion == "poisson":
if np.any(y < 0):
raise ValueError("Some value(s) of y are negative which is "
"not allowed for Poisson regression.")
if np.sum(y) <= 0:
raise ValueError("Sum of y is not strictly positive which "
"is necessary for Poisson regression.")
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Get bootstrap sample size
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples=X.shape[0],
max_samples=self.max_samples
)
# Check parameters
self._validate_estimator()
# TODO: Remove in v1.2
if isinstance(self, (RandomForestRegressor, ExtraTreesRegressor)):
if self.criterion == "mse":
warn(
"Criterion 'mse' was deprecated in v1.0 and will be "
"removed in version 1.2. Use `criterion='squared_error'` "
"which is equivalent.",
FutureWarning
)
elif self.criterion == "mae":
warn(
"Criterion 'mae' was deprecated in v1.0 and will be "
"removed in version 1.2. Use `criterion='absolute_error'` "
"which is equivalent.",
FutureWarning
)
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = [self._make_estimator(append=False,
random_state=random_state)
for i in range(n_more_estimators)]
# Parallel loop: we prefer the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading more efficient than multiprocessing in
# that case. However, for joblib 0.12+ we respect any
# parallel_backend contexts set at a higher level,
# since correctness does not rely on using threads.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer='threads'))(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight,
n_samples_bootstrap=n_samples_bootstrap)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
y_type = type_of_target(y)
if y_type in ("multiclass-multioutput", "unknown"):
# FIXME: we could consider to support multiclass-multioutput if
# we introduce or reuse a constructor parameter (e.g.
# oob_score) allowing our user to pass a callable defining the
# scoring strategy on OOB sample.
raise ValueError(
f"The type of target cannot be used to compute OOB "
f"estimates. Got {y_type} while only the following are "
f"supported: continuous, continuous-multioutput, binary, "
f"multiclass, multilabel-indicator."
)
self._set_oob_score_and_attributes(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score_and_attributes(self, X, y):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
"""
def _compute_oob_predictions(self, X, y):
"""Compute and set the OOB score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
Returns
-------
oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or \
(n_samples, 1, n_outputs)
The OOB predictions.
"""
X = self._validate_data(X, dtype=DTYPE, accept_sparse='csr',
reset=False)
n_samples = y.shape[0]
n_outputs = self.n_outputs_
if is_classifier(self) and hasattr(self, "n_classes_"):
# n_classes_ is a ndarray at this stage
# all the supported type of target will have the same number of
# classes in all outputs
oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs)
else:
# for regression, n_classes_ does not exist and we create an empty
# axis to be consistent with the classification case and make
# the array operations compatible with the 2 settings
oob_pred_shape = (n_samples, 1, n_outputs)
oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64)
n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64)
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples, self.max_samples,
)
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples, n_samples_bootstrap,
)
y_pred = self._get_oob_predictions(
estimator, X[unsampled_indices, :]
)
oob_pred[unsampled_indices, ...] += y_pred
n_oob_pred[unsampled_indices, :] += 1
for k in range(n_outputs):
if (n_oob_pred == 0).any():
warn(
"Some inputs do not have OOB scores. This probably means "
"too few trees were used to compute any reliable OOB "
"estimates.", UserWarning
)
n_oob_pred[n_oob_pred == 0] = 1
oob_pred[..., k] /= n_oob_pred[..., [k]]
return oob_pred
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""
Validate X whenever one tries to predict, apply, predict_proba."""
check_is_fitted(self)
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
"""
check_is_fitted(self)
all_importances = Parallel(n_jobs=self.n_jobs,
**_joblib_parallel_args(prefer='threads'))(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_ if tree.tree_.node_count > 1)
if not all_importances:
return np.zeros(self.n_features_in_, dtype=np.float64)
all_importances = np.mean(all_importances,
axis=0, dtype=np.float64)
return all_importances / np.sum(all_importances)
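    # Illustration of the averaging above (hypothetical values): with two trees whose
    # per-tree importances are [0.2, 0.8] and [0.6, 0.4], the mean is [0.4, 0.6]; it
    # already sums to 1, so normalisation returns it unchanged.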
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute n_features_ was deprecated in version 1.0 and will be "
"removed in 1.2. Use 'n_features_in_' instead."
)
@property
def n_features_(self):
return self.n_features_in_
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):
"""
Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=100, *,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
max_samples=None):
super().__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight,
max_samples=max_samples)
@staticmethod
def _get_oob_predictions(tree, X):
"""Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeClassifier object
A single decision tree classifier.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, n_classes, n_outputs)
The OOB associated predictions.
"""
y_pred = tree.predict_proba(X, check_input=False)
y_pred = np.array(y_pred, copy=False)
if y_pred.ndim == 2:
# binary and multiclass
y_pred = y_pred[..., np.newaxis]
else:
# Roll the first `n_outputs` axis to the last axis. We will reshape
# from a shape of (n_outputs, n_samples, n_classes) to a shape of
# (n_samples, n_classes, n_outputs).
y_pred = np.rollaxis(y_pred, axis=0, start=3)
return y_pred
def _set_oob_score_and_attributes(self, X, y):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
"""
self.oob_decision_function_ = super()._compute_oob_predictions(X, y)
if self.oob_decision_function_.shape[-1] == 1:
# drop the n_outputs axis if there is a single output
self.oob_decision_function_ = self.oob_decision_function_.squeeze(
axis=-1
)
self.oob_score_ = accuracy_score(
y, np.argmax(self.oob_decision_function_, axis=1)
)
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = \
np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, str):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample".'
'Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or '
'"balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight '
'("balanced", classes, y). In place of y you can use '
'a large enough sample of the full training set '
'target to properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""
Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
# all dtypes should be the same, so just take the first
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_),
dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""
Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
The class probability of a single tree is the fraction of samples of
the same class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
all_proba = [np.zeros((X.shape[0], j), dtype=np.float64)
for j in np.atleast_1d(self.n_classes_)]
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(_accumulate_prediction)(e.predict_proba, X, all_proba,
lock)
for e in self.estimators_)
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
def predict_log_proba(self, X):
"""
Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta):
"""
Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=100, *,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
max_samples=None):
super().__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
max_samples=max_samples)
def predict(self, X):
"""
Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self)
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
if self.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
else:
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
for e in self.estimators_)
y_hat /= len(self.estimators_)
return y_hat
@staticmethod
def _get_oob_predictions(tree, X):
"""Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeRegressor object
A single decision tree regressor.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, 1, n_outputs)
The OOB associated predictions.
"""
y_pred = tree.predict(X, check_input=False)
if y_pred.ndim == 1:
# single output regression
y_pred = y_pred[:, np.newaxis, np.newaxis]
else:
# multioutput regression
y_pred = y_pred[:, np.newaxis, :]
return y_pred
def _set_oob_score_and_attributes(self, X, y):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
"""
self.oob_prediction_ = super()._compute_oob_predictions(X, y).squeeze(
axis=1
)
if self.oob_prediction_.shape[-1] == 1:
# drop the n_outputs axis if there is a single output
self.oob_prediction_ = self.oob_prediction_.squeeze(axis=-1)
self.oob_score_ = r2_score(y, self.oob_prediction_)
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order='C')
averaged_predictions = np.zeros(shape=grid.shape[0],
dtype=np.float64, order='C')
for tree in self.estimators_:
# Note: we don't sum in parallel because the GIL isn't released in
# the fast method.
tree.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions)
# Average over the forest
averaged_predictions /= len(self.estimators_)
return averaged_predictions
class RandomForestClassifier(ForestClassifier):
"""
A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is controlled with the `max_samples` parameter if
`bootstrap=True` (default), otherwise the whole dataset is used to build
each tree.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : int, default=100
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : {"gini", "entropy"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`round(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
bootstrap : bool, default=True
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate the generalization score.
Only available if bootstrap=True.
n_jobs : int, default=None
The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors. See :term:`Glossary
<n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls both the randomness of the bootstrapping of the samples used
when building trees (if ``bootstrap=True``) and the sampling of the
features to consider when looking for the best split at each node
(if ``max_features < n_features``).
See :term:`Glossary <random_state>` for details.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
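        For illustration (hypothetical values): with 10 samples, 8 of class 0 and
        2 of class 1, the "balanced" mode yields weights ``10 / (2 * 8) = 0.625``
        for class 0 and ``10 / (2 * 2) = 2.5`` for class 1.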
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
max_samples : int or float, default=None
If bootstrap is True, the number of samples to draw from X
to train each base estimator.
- If None (default), then draw `X.shape[0]` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples. Thus,
`max_samples` should be in the interval `(0.0, 1.0]`.
.. versionadded:: 0.22
Attributes
----------
base_estimator_ : DecisionTreeClassifier
The child estimator template used to create the collection of fitted
sub-estimators.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,) or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \
(n_samples, n_classes, n_outputs)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute exists
only when ``oob_score`` is True.
See Also
--------
DecisionTreeClassifier, ExtraTreesClassifier
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
Examples
--------
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = RandomForestClassifier(max_depth=2, random_state=0)
>>> clf.fit(X, y)
RandomForestClassifier(...)
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
"""
def __init__(self,
n_estimators=100, *,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None):
super().__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state", "ccp_alpha"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight,
max_samples=max_samples)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.ccp_alpha = ccp_alpha
class RandomForestRegressor(ForestRegressor):
"""
A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is controlled with the `max_samples` parameter if
`bootstrap=True` (default), otherwise the whole dataset is used to build
each tree.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : int, default=100
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : {"squared_error", "mse", "absolute_error", "poisson"}, \
default="squared_error"
The function to measure the quality of a split. Supported criteria
are "squared_error" for the mean squared error, which is equal to
variance reduction as feature selection criterion, "absolute_error"
for the mean absolute error, and "poisson" which uses reduction in
Poisson deviance to find splits.
Training using "absolute_error" is significantly slower
than when using "squared_error".
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. versionadded:: 1.0
Poisson criterion.
.. deprecated:: 1.0
Criterion "mse" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="squared_error"` which is equivalent.
.. deprecated:: 1.0
Criterion "mae" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="absolute_error"` which is equivalent.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`round(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
bootstrap : bool, default=True
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate the generalization score.
Only available if bootstrap=True.
n_jobs : int, default=None
The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors. See :term:`Glossary
<n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls both the randomness of the bootstrapping of the samples used
when building trees (if ``bootstrap=True``) and the sampling of the
features to consider when looking for the best split at each node
(if ``max_features < n_features``).
See :term:`Glossary <random_state>` for details.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
max_samples : int or float, default=None
If bootstrap is True, the number of samples to draw from X
to train each base estimator.
- If None (default), then draw `X.shape[0]` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples. Thus,
`max_samples` should be in the interval `(0.0, 1.0]`.
.. versionadded:: 0.22
Attributes
----------
base_estimator_ : DecisionTreeRegressor
The child estimator template used to create the collection of fitted
sub-estimators.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Prediction computed with out-of-bag estimate on the training set.
This attribute exists only when ``oob_score`` is True.
See Also
--------
DecisionTreeRegressor, ExtraTreesRegressor
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
The default value ``max_features="auto"`` uses ``n_features``
rather than ``n_features / 3``. The latter was originally suggested in
[1], whereas the former was more recently justified empirically in [2].
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
.. [2] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
trees", Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = RandomForestRegressor(max_depth=2, random_state=0)
>>> regr.fit(X, y)
RandomForestRegressor(...)
>>> print(regr.predict([[0, 0, 0, 0]]))
[-8.32987858]
"""
def __init__(self,
n_estimators=100, *,
criterion="squared_error",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
ccp_alpha=0.0,
max_samples=None):
super().__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state", "ccp_alpha"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
max_samples=max_samples)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.ccp_alpha = ccp_alpha
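# Editor's note -- a hedged sketch (not part of the upstream module) showing
# the ``max_samples`` sub-sampling and the ``feature_importances_`` attribute
# documented above; the regression problem below is an illustrative assumption.
def _demo_random_forest_regressor_subsample():
    from sklearn.datasets import make_regression

    X, y = make_regression(n_samples=400, n_features=6, n_informative=2,
                           random_state=0)
    # Each tree sees a bootstrap sample of half the rows (max_samples=0.5).
    reg = RandomForestRegressor(n_estimators=50, max_samples=0.5,
                                random_state=0)
    reg.fit(X, y)
    # Impurity-based importances are normalized to sum to 1.0.
    return reg.feature_importances_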
class ExtraTreesClassifier(ForestClassifier):
"""
An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : int, default=100
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : {"gini", "entropy"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`round(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
bootstrap : bool, default=False
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate the generalization score.
Only available if bootstrap=True.
n_jobs : int, default=None
The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors. See :term:`Glossary
<n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls 3 sources of randomness:
- the bootstrapping of the samples used when building trees
(if ``bootstrap=True``)
- the sampling of the features to consider when looking for the best
split at each node (if ``max_features < n_features``)
- the draw of the splits for each of the `max_features`
See :term:`Glossary <random_state>` for details.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
max_samples : int or float, default=None
If bootstrap is True, the number of samples to draw from X
to train each base estimator.
- If None (default), then draw `X.shape[0]` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples. Thus,
`max_samples` should be in the interval `(0.0, 1.0]`.
.. versionadded:: 0.22
Attributes
----------
    base_estimator_ : ExtraTreeClassifier
The child estimator template used to create the collection of fitted
sub-estimators.
    estimators_ : list of ExtraTreeClassifier
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,) or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \
(n_samples, n_classes, n_outputs)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute exists
only when ``oob_score`` is True.
See Also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
trees", Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.ensemble import ExtraTreesClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = ExtraTreesClassifier(n_estimators=100, random_state=0)
>>> clf.fit(X, y)
ExtraTreesClassifier(random_state=0)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
"""
def __init__(self,
n_estimators=100, *,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None):
super().__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state", "ccp_alpha"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight,
max_samples=max_samples)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.ccp_alpha = ccp_alpha
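# Editor's note -- a hedged sketch (not part of the upstream module):
# extra-trees default to ``bootstrap=False`` (the whole dataset per tree), so
# the out-of-bag estimate documented above requires opting into bootstrapping
# explicitly. The toy dataset is an illustrative assumption.
def _demo_extra_trees_classifier_oob():
    from sklearn.datasets import make_classification

    X, y = make_classification(n_samples=300, n_features=6, random_state=0)
    clf = ExtraTreesClassifier(n_estimators=50, bootstrap=True,
                               oob_score=True, random_state=0)
    clf.fit(X, y)
    return clf.oob_score_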
class ExtraTreesRegressor(ForestRegressor):
"""
An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : int, default=100
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : {"squared_error", "mse", "absolute_error", "mae"}, \
default="squared_error"
The function to measure the quality of a split. Supported criteria
are "squared_error" for the mean squared error, which is equal to
variance reduction as feature selection criterion, and "absolute_error"
for the mean absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. deprecated:: 1.0
Criterion "mse" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="squared_error"` which is equivalent.
.. deprecated:: 1.0
Criterion "mae" was deprecated in v1.0 and will be removed in
version 1.2. Use `criterion="absolute_error"` which is equivalent.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : {"auto", "sqrt", "log2"}, int or float, default="auto"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`round(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
bootstrap : bool, default=False
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate the generalization score.
Only available if bootstrap=True.
n_jobs : int, default=None
The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors. See :term:`Glossary
<n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls 3 sources of randomness:
- the bootstrapping of the samples used when building trees
(if ``bootstrap=True``)
- the sampling of the features to consider when looking for the best
split at each node (if ``max_features < n_features``)
- the draw of the splits for each of the `max_features`
See :term:`Glossary <random_state>` for details.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
max_samples : int or float, default=None
If bootstrap is True, the number of samples to draw from X
to train each base estimator.
- If None (default), then draw `X.shape[0]` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples. Thus,
`max_samples` should be in the interval `(0.0, 1.0]`.
.. versionadded:: 0.22
Attributes
----------
base_estimator_ : ExtraTreeRegressor
The child estimator template used to create the collection of fitted
sub-estimators.
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_ : int
The number of features.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_prediction_ : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Prediction computed with out-of-bag estimate on the training set.
This attribute exists only when ``oob_score`` is True.
See Also
--------
sklearn.tree.ExtraTreeRegressor : Base estimator for this ensemble.
RandomForestRegressor : Ensemble regressor using trees with optimal splits.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.ensemble import ExtraTreesRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> reg = ExtraTreesRegressor(n_estimators=100, random_state=0).fit(
... X_train, y_train)
>>> reg.score(X_test, y_test)
0.2708...
"""
def __init__(self,
n_estimators=100, *,
criterion="squared_error",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
ccp_alpha=0.0,
max_samples=None):
super().__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state", "ccp_alpha"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
max_samples=max_samples)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.ccp_alpha = ccp_alpha
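# Editor's note -- a hedged sketch (not part of the upstream module) of the
# ``warm_start`` behaviour documented above: growing the ensemble in two steps
# instead of refitting from scratch. The tree counts are illustrative
# assumptions.
def _demo_extra_trees_regressor_warm_start():
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    reg = ExtraTreesRegressor(n_estimators=50, warm_start=True, random_state=0)
    reg.fit(X, y)                # fits the first 50 trees
    reg.n_estimators = 100
    reg.fit(X, y)                # adds 50 more trees, keeping the first batch
    return len(reg.estimators_)  # -> 100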
class RandomTreesEmbedding(BaseForest):
"""
An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int, default=100
Number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
max_depth : int, default=5
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 1.0 (renaming of 0.25).
Use ``min_impurity_decrease`` instead.
sparse_output : bool, default=True
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : int, default=None
The number of jobs to run in parallel. :meth:`fit`, :meth:`transform`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors. See :term:`Glossary
<n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the generation of the random `y` used to fit the trees
and the draw of the splits for each feature at the trees' nodes.
See :term:`Glossary <random_state>` for details.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
Attributes
----------
    base_estimator_ : ExtraTreeRegressor instance
The child estimator template used to create the collection of fitted
sub-estimators.
    estimators_ : list of ExtraTreeRegressor instances
The collection of fitted sub-estimators.
feature_importances_ : ndarray of shape (n_features,)
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_outputs_ : int
The number of outputs when ``fit`` is performed.
one_hot_encoder_ : OneHotEncoder instance
One-hot encoder used to create the sparse embedding.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
Examples
--------
>>> from sklearn.ensemble import RandomTreesEmbedding
>>> X = [[0,0], [1,0], [0,1], [-1,0], [0,-1]]
>>> random_trees = RandomTreesEmbedding(
... n_estimators=5, random_state=0, max_depth=1).fit(X)
>>> X_sparse_embedding = random_trees.transform(X)
>>> X_sparse_embedding.toarray()
array([[0., 1., 1., 0., 1., 0., 0., 1., 1., 0.],
[0., 1., 1., 0., 1., 0., 0., 1., 1., 0.],
[0., 1., 0., 1., 0., 1., 0., 1., 0., 1.],
[1., 0., 1., 0., 1., 0., 1., 0., 1., 0.],
[0., 1., 1., 0., 1., 0., 0., 1., 1., 0.]])
"""
criterion = "squared_error"
max_features = 1
def __init__(self,
n_estimators=100, *,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
sparse_output=True,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
super().__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"min_impurity_decrease", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
max_samples=None)
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score_and_attributes(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""
Fit estimator and transform dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix of shape (n_samples, n_out)
Transformed dataset.
"""
X = self._validate_data(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super().fit(X, y, sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""
Transform dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix of shape (n_samples, n_out)
Transformed dataset.
"""
check_is_fitted(self)
return self.one_hot_encoder_.transform(self.apply(X))
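# Editor's note -- a hedged sketch (not part of the upstream module)
# illustrating the dimensionality bound documented above: with
# ``max_leaf_nodes=None`` the embedding has at most
# ``n_estimators * 2 ** max_depth`` columns. The numbers are illustrative
# assumptions.
def _demo_random_trees_embedding_width():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.uniform(size=(100, 3))
    embedder = RandomTreesEmbedding(n_estimators=5, max_depth=2,
                                    random_state=0)
    X_embedded = embedder.fit_transform(X)
    # At most 5 * 2 ** 2 = 20 one-hot columns; possibly fewer if some leaves
    # are never reached.
    assert X_embedded.shape[1] <= 5 * 2 ** 2
    return X_embedded.shape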
| kevin-intel/scikit-learn | sklearn/ensemble/_forest.py | Python | bsd-3-clause | 102,940 |
from __future__ import unicode_literals
from celery_longterm_scheduler import get_scheduler
from celery_longterm_scheduler.conftest import CELERY
import mock
import pendulum
@CELERY.task
def echo(arg):
return arg
def test_should_store_all_arguments_needed_for_send_task(celery_worker):
    # Cannot do this with a Mock, since Mock (technically correctly) records
    # positional and keyword arguments separately, so a call
    # `send_task(1, 2, 3)` is not considered equal to
    # `send_task(1, args=2, kwargs=3)`, although semantically it is the same.
def record_task(
name, args=None, kwargs=None, countdown=None, eta=None,
task_id=None, producer=None, connection=None, router=None,
result_cls=None, expires=None, publisher=None, link=None,
link_error=None, add_to_parent=True, group_id=None, retries=0,
chord=None, reply_to=None, time_limit=None, soft_time_limit=None,
root_id=None, parent_id=None, route_name=None, shadow=None,
chain=None, task_type=None, **options):
options.update(dict(
args=args, kwargs=kwargs, countdown=countdown,
eta=eta, task_id=task_id, producer=producer, connection=connection,
router=router, result_cls=result_cls, expires=expires,
publisher=publisher, link=link, link_error=link_error,
add_to_parent=add_to_parent, group_id=group_id, retries=retries,
chord=chord, reply_to=reply_to, time_limit=time_limit,
soft_time_limit=soft_time_limit, root_id=root_id,
parent_id=parent_id, route_name=route_name, shadow=shadow,
chain=chain, task_type=task_type
))
calls.append((name, options))
calls = []
with mock.patch.object(CELERY, 'send_task', new=record_task):
result = echo.apply_async(('foo',), eta=pendulum.now())
task = get_scheduler(CELERY).backend.get(result.id)
args = task[0]
kw = task[1]
# schedule() always generates an ID itself (to reuse it for the
# scheduler storage), while the normal apply_async() defers that to
# send_task(). We undo this here for comparison purposes.
kw['task_id'] = None
CELERY.send_task(*args, **kw)
scheduled_call = calls[0]
echo.apply_async(('foo',))
normal_call = calls[1]
# Special edge case, see Task._schedule() for an explanation
normal_call[1]['result_cls'] = None
assert scheduled_call == normal_call
def test_should_bypass_if_no_eta_given():
with mock.patch(
'celery_longterm_scheduler.task.Task._schedule') as schedule:
result = echo.apply_async(('foo',))
assert schedule.call_count == 0
result.get() # Be careful about test isolation
result = echo.apply_async(('foo',), eta=None)
assert schedule.call_count == 0
result.get() # Be careful about test isolation
echo.apply_async(('foo',), eta=pendulum.now())
assert schedule.call_count == 1
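# Editor's note -- a hedged illustration (not part of the test suite):
# scheduling the ``echo`` task one day ahead and reading the stored call back
# from the scheduler backend, using only names already imported in this module.
def _example_schedule_echo_tomorrow():
    result = echo.apply_async(('hello',), eta=pendulum.now().add(days=1))
    # schedule() stores the (args, kwargs) pair under the task id it generated.
    return get_scheduler(CELERY).backend.get(result.id)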
| ZeitOnline/celery_longterm_scheduler | src/celery_longterm_scheduler/tests/test_task.py | Python | bsd-3-clause | 3,059 |
# proxy module
from pyface.ui.wx.system_metrics import *
| enthought/etsproxy | enthought/pyface/ui/wx/system_metrics.py | Python | bsd-3-clause | 57 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-12 08:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import tagulous.models.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tag',
name='label',
field=models.CharField(default='', help_text='The name of the tag, without ancestors', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='tag',
name='level',
field=models.IntegerField(default=1, help_text='The level of the tag in the tree'),
),
migrations.AddField(
model_name='tag',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='core.Tag'),
),
migrations.AddField(
model_name='tag',
name='path',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AlterField(
model_name='analysis',
name='tags',
field=tagulous.models.fields.TagField(_set_tag_meta=True, force_lowercase=True, help_text='Enter a comma-separated tag string', to='core.Tag', tree=True),
),
migrations.AlterField(
model_name='experiment',
name='tags',
field=tagulous.models.fields.TagField(_set_tag_meta=True, force_lowercase=True, help_text='Enter a comma-separated tag string', to='core.Tag', tree=True),
),
migrations.AlterUniqueTogether(
name='tag',
unique_together=set([('slug', 'parent')]),
),
]
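# Editor's note -- a hedged sketch (not part of the migration): one way to
# apply it programmatically; the "0002" prefix resolves to the migration above.
# It assumes DJANGO_SETTINGS_MODULE is already configured for the project.
def _apply_core_0002():
    from django.core.management import call_command

    call_command('migrate', 'core', '0002')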
| Candihub/pixel | apps/core/migrations/0002_auto_20171012_0855.py | Python | bsd-3-clause | 1,878 |
#!/usr/bin/env python3
#==============================================================================
# author : Pavel Polishchuk
# date : 14-08-2019
# version :
# python_version :
# copyright : Pavel Polishchuk 2019
# license :
#==============================================================================
__version__ = "0.2.9"
| DrrDom/crem | crem/__init__.py | Python | bsd-3-clause | 379 |
# Copyright (c) 2015, National Documentation Centre (EKT, www.ekt.gr)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# Neither the name of the National Documentation Centre nor the
# names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'kutsurak'
class RundeckException(Exception):
def __init__(self, *args, **kwargs):
super(RundeckException, self).__init__(*args, **kwargs)
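# Editor's note -- a minimal, hedged usage sketch (not part of the package):
# the exception behaves like any other; positional arguments are preserved.
def _example_raise_rundeck_error():
    try:
        raise RundeckException('Rundeck API returned an unexpected status')
    except RundeckException as exc:
        return exc.args[0]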
| EKT/pyrundeck | pyrundeck/exceptions.py | Python | bsd-3-clause | 1,781 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyPulses Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Sequence context used for testing.
"""
from atom.api import Float, set_default
from exopy_pulses.pulses.contexts.base_context import BaseContext
class DummyContext(BaseContext):
"""Context limited to testing purposes.
"""
logical_channels = set_default(('Ch1_L', 'Ch2_L'))
analogical_channels = set_default(('Ch1_A', 'Ch2_A'))
sampling = Float(1.0)
def compile_and_transfer_sequence(self, sequence, driver=None):
"""Simply evaluate and simplify the underlying sequence.
"""
items, errors = self.preprocess_sequence(sequence)
if not items:
return False, {}, errors
return True, {'test': True}, {}
def list_sequence_infos(self):
return {'test': False}
def _get_sampling_time(self):
return self.sampling
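# Editor's note -- a hedged sketch (not part of the package): instantiating the
# dummy context and inspecting its channels and sequence infos. The ``sampling``
# value is an illustrative assumption; no sequence is compiled here.
def _example_dummy_context():
    ctx = DummyContext(sampling=0.5)
    return (ctx.logical_channels, ctx.analogical_channels,
            ctx.list_sequence_infos())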
| Ecpy/ecpy_pulses | exopy_pulses/testing/context.py | Python | bsd-3-clause | 1,203 |
__author__ = 'Thomas Rueckstiess, [email protected]'
from agent import Agent
from pybrain.datasets import ReinforcementDataSet
class HistoryAgent(Agent):
""" This agent stores actions, states, and rewards encountered during interaction with an environment
in a ReinforcementDataSet (which is a variation of SequentialDataSet). The stored history can
be used for learning and is erased by resetting the agent. It also makes sure that integrateObservation,
getAction and giveReward are called in exactly that order. """
def __init__(self, indim, outdim):
# store input and output dimension
self.indim = indim
self.outdim = outdim
# create history dataset
self.remember = True
self.history = ReinforcementDataSet(indim, outdim)
# initialize temporary variables
self.lastobs = None
self.lastaction = None
def integrateObservation(self, obs):
""" 1. stores the observation received in a temporary variable until action is called and
reward is given. """
assert self.lastobs == None
assert self.lastaction == None
self.lastobs = obs
def getAction(self):
""" 2. stores the action in a temporary variable until reward is given. """
assert self.lastobs != None
assert self.lastaction == None
# implement getAction in subclass and set self.lastaction
def enableHistory(self):
self.remember = True
def disableHistory(self):
self.remember = False
def giveReward(self, r):
""" 3. stores observation, action and reward in the history dataset. """
# step 3: assume that state and action have been set
assert self.lastobs != None
assert self.lastaction != None
# store state, action and reward in dataset
if self.remember:
self.history.addSample(self.lastobs, self.lastaction, r)
self.lastobs = None
self.lastaction = None
def reset(self):
""" clears the history of the agent. """
self.history.clear()
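# Editor's note -- a hedged illustration (not part of PyBrain): a trivial
# subclass returning a constant action, showing the required
# integrateObservation -> getAction -> giveReward ordering and how each
# completed step is appended to the history dataset.
class _ConstantActionAgent(HistoryAgent):
    def getAction(self):
        HistoryAgent.getAction(self)           # enforces the ordering asserts
        self.lastaction = [0.0] * self.outdim  # any fixed action of size outdim
        return self.lastaction

def _example_interaction():
    agent = _ConstantActionAgent(2, 1)
    agent.integrateObservation([0.5, -0.5])    # 1. observe (indim values)
    action = agent.getAction()                 # 2. act
    agent.giveReward(1.0)                      # 3. reward -> stored in history
    return action, agent.history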
| daanwierstra/pybrain | pybrain/rl/agents/history.py | Python | bsd-3-clause | 2,202 |
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.sqlite',
}
}
INSTALLED_APPS = [
'nocaptcha_recaptcha',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
]
NORECAPTCHA_SECRET_KEY = 'privkey'
NORECAPTCHA_SITE_KEY = 'pubkey'
| ImaginaryLandscape/django-nocaptcha-recaptcha | test_settings.py | Python | bsd-3-clause | 636 |
# -*- coding: utf-8 -*-
"""Test forms."""
from personal_website.article.forms import ArticleForm
from personal_website.public.forms import LoginForm
class TestArticleForm:
"""Article Form."""
def test_title_required(self, article):
"""Publish article."""
form = ArticleForm(body=article.body, published=article.published)
assert form.validate() is False
assert 'title - This field is required.' in form.title.errors
def test_validate_title_exists(self, article):
"""Title already exists."""
article.published = True
article.save()
form = ArticleForm(title=article.title, body=article.body, published=True)
assert form.validate() is False
assert 'Title already Used' in form.title.errors
def test_validate_slug_exists(self, article):
"""Title already exists."""
article.published = True
article.save()
form = ArticleForm(title=article.title, body=article.body, published=True)
assert form.validate() is False
assert 'Error producing url. Try a different title.' in form.title.errors
class TestLoginForm:
"""Login form."""
def test_validate_success(self, user):
"""Login successful."""
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='example')
assert form.validate() is True
assert form.user == user
def test_validate_unknown_username(self, db):
"""Unknown username."""
form = LoginForm(username='unknown', password='example')
assert form.validate() is False
assert 'Unknown username' in form.username.errors
assert form.user is None
def test_validate_invalid_password(self, user):
"""Invalid password."""
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='wrongpassword')
assert form.validate() is False
assert 'Invalid password' in form.password.errors
def test_validate_inactive_user(self, user):
"""Inactive user."""
user.active = False
user.set_password('example')
user.save()
# Correct username and password, but user is not activated
form = LoginForm(username=user.username, password='example')
assert form.validate() is False
assert 'User not activated' in form.username.errors
| arewellborn/Personal-Website | tests/test_forms.py | Python | bsd-3-clause | 2,446 |
from django.contrib import admin
import models
admin.site.register(models.Song)
admin.site.register(models.Station)
admin.site.register(models.Vote)
admin.site.register(models.StationPoll)
admin.site.register(models.StationVote) | f4nt/djpandora | djpandora/admin.py | Python | bsd-3-clause | 230 |
# Copyright (c) 2013,Vienna University of Technology, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Jul 31, 2013
@author: Christoph Paulik [email protected]
'''
import os
import unittest
from pytesmo.io.ismn import readers
import pandas as pd
from datetime import datetime
import numpy as np
class TestReaders(unittest.TestCase):
def setUp(self):
self.filename_format_header_values = os.path.join(os.path.dirname(__file__), '..', 'test-data', 'ismn', 'format_header_values', 'SMOSMANIA',
'SMOSMANIA_SMOSMANIA_Narbonne_sm_0.050000_0.050000_ThetaProbe-ML2X_20070101_20070131.stm')
self.filename_format_ceop_sep = os.path.join(os.path.dirname(__file__), '..', 'test-data', 'ismn', 'format_ceop_sep', 'SMOSMANIA',
'SMOSMANIA_SMOSMANIA_Narbonne_sm_0.050000_0.050000_ThetaProbe-ML2X_20070101_20070131.stm')
self.filename_format_ceop = os.path.join(os.path.dirname(__file__), '..', 'test-data', 'ismn', 'format_ceop', 'SMOSMANIA',
'SMOSMANIA_SMOSMANIA_NBN_20100304_20130801.stm')
self.filename_malformed = os.path.join(os.path.dirname(__file__), '..', 'test-data', 'ismn', 'malformed',
'mal_formed_file.txt')
self.metadata_ref = {'network': 'SMOSMANIA',
'station': 'Narbonne',
'latitude': 43.15,
'longitude': 2.9567,
'elevation': 112.0,
'depth_from': [0.05],
'depth_to': [0.05],
'variable': ['soil moisture'],
'sensor': 'ThetaProbe-ML2X'}
self.metadata_ref_ceop = dict(self.metadata_ref)
self.metadata_ref_ceop['depth_from'] = ['multiple']
self.metadata_ref_ceop['depth_to'] = ['multiple']
self.metadata_ref_ceop['variable'] = ['ts', 'sm']
self.metadata_ref_ceop['sensor'] = 'n.s'
def test_get_info_from_file(self):
header_elements, filename_elements = readers.get_info_from_file(
self.filename_format_ceop_sep)
assert sorted(header_elements) == sorted(['2007/01/01', '01:00', '2007/01/01',
'01:00', 'SMOSMANIA', 'SMOSMANIA',
'Narbonne', '43.15000', '2.95670',
'112.00', '0.05', '0.05', '0.2140', 'U', 'M'])
assert sorted(filename_elements) == sorted(['SMOSMANIA', 'SMOSMANIA', 'Narbonne', 'sm',
'0.050000', '0.050000', 'ThetaProbe-ML2X',
'20070101', '20070131.stm'])
def test_get_metadata_header_values(self):
metadata = readers.get_metadata_header_values(
self.filename_format_header_values)
for key in metadata:
assert metadata[key] == self.metadata_ref[key]
def test_reader_format_header_values(self):
dataset = readers.read_format_header_values(
self.filename_format_header_values)
assert dataset.network == 'SMOSMANIA'
assert dataset.station == 'Narbonne'
assert dataset.latitude == 43.15
assert dataset.longitude == 2.9567
assert dataset.elevation == 112.0
assert dataset.variable == ['soil moisture']
assert dataset.depth_from == [0.05]
assert dataset.depth_to == [0.05]
assert dataset.sensor == 'ThetaProbe-ML2X'
assert type(dataset.data) == pd.DataFrame
assert dataset.data.index[7] == datetime(2007, 1, 1, 8, 0, 0)
assert sorted(dataset.data.columns) == sorted(
['soil moisture', 'soil moisture_flag', 'soil moisture_orig_flag'])
assert dataset.data['soil moisture'].values[8] == 0.2135
assert dataset.data['soil moisture_flag'].values[8] == 'U'
assert dataset.data['soil moisture_orig_flag'].values[8] == 'M'
def test_get_metadata_ceop_sep(self):
metadata = readers.get_metadata_ceop_sep(self.filename_format_ceop_sep)
for key in metadata:
assert metadata[key] == self.metadata_ref[key]
def test_reader_format_ceop_sep(self):
dataset = readers.read_format_ceop_sep(self.filename_format_ceop_sep)
assert dataset.network == 'SMOSMANIA'
assert dataset.station == 'Narbonne'
assert dataset.latitude == 43.15
assert dataset.longitude == 2.9567
assert dataset.elevation == 112.0
assert dataset.variable == ['soil moisture']
assert dataset.depth_from == [0.05]
assert dataset.depth_to == [0.05]
assert dataset.sensor == 'ThetaProbe-ML2X'
assert type(dataset.data) == pd.DataFrame
assert dataset.data.index[7] == datetime(2007, 1, 1, 8, 0, 0)
assert sorted(dataset.data.columns) == sorted(
['soil moisture', 'soil moisture_flag', 'soil moisture_orig_flag'])
assert dataset.data['soil moisture'].values[8] == 0.2135
assert dataset.data['soil moisture_flag'].values[8] == 'U'
assert dataset.data['soil moisture_orig_flag'].values[347] == 'M'
def test_get_metadata_ceop(self):
metadata = readers.get_metadata_ceop(self.filename_format_ceop)
assert metadata == self.metadata_ref_ceop
def test_reader_format_ceop(self):
dataset = readers.read_format_ceop(self.filename_format_ceop)
assert dataset.network == 'SMOSMANIA'
assert dataset.station == 'Narbonne'
assert dataset.latitude == 43.15
assert dataset.longitude == 2.9567
assert dataset.elevation == 112.0
assert sorted(dataset.variable) == sorted(['sm', 'ts'])
assert sorted(dataset.depth_from) == sorted([0.05, 0.1, 0.2, 0.3])
assert sorted(dataset.depth_to) == sorted([0.05, 0.1, 0.2, 0.3])
assert dataset.sensor == 'n.s'
assert type(dataset.data) == pd.DataFrame
assert dataset.data.index[7] == (
0.05, 0.05, datetime(2010, 10, 21, 9, 0, 0))
assert sorted(dataset.data.columns) == sorted(
['sm', 'sm_flag', 'ts', 'ts_flag'])
assert dataset.data['sm'].values[8] == 0.2227
assert dataset.data['sm_flag'].values[8] == 'U'
assert np.isnan(dataset.data.ix[0.3, 0.3]['ts'].values[6])
assert dataset.data.ix[0.3, 0.3]['ts_flag'].values[6] == 'M'
def test_reader_get_format(self):
fileformat = readers.get_format(self.filename_format_header_values)
assert fileformat == 'header_values'
fileformat = readers.get_format(self.filename_format_ceop_sep)
assert fileformat == 'ceop_sep'
fileformat = readers.get_format(self.filename_format_ceop)
assert fileformat == 'ceop'
with self.assertRaises(readers.ReaderException):
fileformat = readers.get_format(self.filename_malformed)
if __name__ == '__main__':
unittest.main()
| christophreimer/pytesmo | tests/test_ismn/test_readers.py | Python | bsd-3-clause | 8,660 |
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import transaction
from django.forms.models import inlineformset_factory, modelform_factory
from django.forms.widgets import HiddenInput
from django.shortcuts import get_object_or_404
from vanilla import FormView
from django.utils.translation import ugettext_lazy as _
from declry.forms import BootstrapModelForm
from django import http
from django.utils.decorators import classonlymethod
from django.template import (RequestContext,
loader, TemplateDoesNotExist)
import autocomplete_light
from django.views.decorators.csrf import requires_csrf_token
import logging
import smtplib
from email.mime.text import MIMEText
__author__ = 'jorgeramos'
class ModelFormView(FormView):
model = None
related_model = None
success_url_path = '{}_{}_view'
success_url_args = ''
template_name = "edit.html"
title = _('Edit')
form = BootstrapModelForm
fields = None
hidden_fields = []
exclude = []
formfield_callback = None
instance = None
@classonlymethod
def as_view(cls, **initkwargs):
view = super(ModelFormView, cls).as_view(**initkwargs)
if hasattr(cls, 'title'):
view.label = cls.title
return view
def get_form_class(self):
"""
Returns the form class to use in this view.
"""
if self.form_class:
return self.form_class
elif self.related_model:
widgets = autocomplete_light.get_widgets_dict(self.related_model)
return modelform_factory(self.related_model, widgets = widgets, form = self.form, fields=self.fields, exclude =self.exclude, formfield_callback=self.get_formfield_callback())
elif self.model:
widgets = autocomplete_light.get_widgets_dict(self.model)
return modelform_factory(self.model, widgets = widgets, form = self.form, fields=self.fields, exclude =self.exclude, formfield_callback=self.get_formfield_callback())
msg = "'%s' must either define 'form_class' or define 'model' or override 'get_form_class()'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
def get_form(self, data=None, files=None, **kwargs):
"""
        Given `data` and `files` QueryDicts, and optionally other named
        arguments, returns a form.
"""
cls = self.get_form_class()
return cls(instance = self.get_modelform_instance(), data=data, files=files, **kwargs)
def get(self, request, object_id=None, *args, **kwargs):
self.request = request
self.object_id = object_id
form = self.get_form()
context = self.get_context_data(form=form)
return self.render_to_response(context)
def post(self, request, object_id=None):
self.request = request
self.object_id = object_id
if 'go_back' in request.POST:
return self.form_valid(None)
form = self.get_form(data=request.POST, files=request.FILES)
if form.is_valid():
inst= form.save()
if not self.object_id:
self.object_id = inst.pk
self.success_url = self.get_success_url()
return self.form_valid(form)
return self.form_invalid(form)
def get_success_url(self):
return reverse(self.success_url_path.format(self.model._meta.app_label, self.model._meta.module_name),
args=(self.object_id,)) + self.success_url_args
def form_invalid(self, form, **kwargs):
context = self.get_context_data(form=form, **kwargs)
return self.render_to_response(context)
def get_context_data(self, **kwargs):
kwargs['title'] = self.title
kwargs['instance'] = self.get_instance()
kwargs['view'] = self
return kwargs
def get_formfield_callback(self):
def formfield_callback(f, **kwargs):
field = f.formfield(**kwargs)
if f.name in self.hidden_fields:
field.widget = HiddenInput()
return field
return formfield_callback
def get_instance(self):
if self.instance:
return self.instance
if self.request and self.object_id:
return get_object_or_404(self.model, pk=self.object_id)
def get_modelform_instance(self):
return self.get_instance()
class InlineFormView(FormView):
model = None
related_model = None
form = BootstrapModelForm
prefix = "inlineform"
template_name = "inline_edit.html"
title = None
instance = None
success_path = '{}_{}_view'
success_path_args = ''
fields = None
extra = 1
exclude = []
formfield_callback = None
@classonlymethod
def as_view(cls, **initkwargs):
view = super(InlineFormView,cls).as_view(**initkwargs)
if hasattr(cls, 'title'):
view.label = cls.title
return view
def get_form_class(self):
"""
Returns the form class to use in this view.
"""
if self.form_class:
return self.form_class
if self.model and self.related_model:
return inlineformset_factory(self.model, self.related_model, form=self.form, extra=self.extra, exclude = self.exclude,
fields = self.fields, formfield_callback= self.get_formfield_callback())
msg = "'%s' must either define 'form_class' or define 'model' and 'related_model' or override 'get_form_class()'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
def get_form(self, data=None, files=None, **kwargs):
"""
        Given `data` and `files` QueryDicts, and optionally other named
        arguments, returns a form.
"""
cls = self.get_form_class()
return cls(prefix = self.prefix, instance=self.get_instance(), data=data, files=files, **kwargs)
def get_context_data(self, **kwargs):
"""
Takes a set of keyword arguments to use as the base context, and
returns a context dictionary to use for the view, additionally adding
in 'view'.
"""
kwargs.update({
'prefix' : self.prefix,
'title': self.title,
'instance' : self.get_instance()
})
kwargs['view'] = self
return kwargs
def get(self, request, object_id, *args, **kwargs):
self.request = request
self.object_id = object_id
form = self.get_form()
context = self.get_context_data(form=form)
return self.render_to_response(context)
def post(self, request, object_id):
self.request = request
self.object_id = object_id
if 'add_{}'.format(self.prefix) in request.POST:
return self.process_add_element()
elif 'go_back' in request.POST:
return self.form_valid(None)
else:
form = self.get_form(data = request.POST, files= request.FILES)
if form.is_valid():
with transaction.commit_on_success():
#try:
form.save()
#except:
# transaction.rollback()
return self.form_valid(form)
return self.form_invalid(form)
def process_add_element(self):
post_copy = self.request.POST.copy()
post_copy['{}-TOTAL_FORMS'.format(self.prefix)] = int(post_copy['{}-TOTAL_FORMS'.format(self.prefix)]) + 1
form = self.get_form(data=post_copy, files=self.request.FILES)
context = self.get_context_data(form=form)
return self.render_to_response(context)
def get_success_url(self):
if self.success_path is None:
msg = "'%s' must define 'success_url' or override 'form_valid()'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
else:
return reverse(self.success_path.format(self.model._meta.app_label, self.model._meta.module_name),
args=(self.object_id,)) + self.success_path_args
def get_instance(self):
if self.instance:
return self.instance
if self.request and self.object_id:
return get_object_or_404(self.model, pk=self.object_id)
def get_formfield_callback(self):
return self.formfield_callback
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
session_logger = logging.getLogger('session')
@requires_csrf_token
def permission_denied(request, template_name='403.html'):
"""
Permission denied (403) handler.
Templates: :template:`403.html`
Context: None
If the template does not exist, an Http403 response containing the text
"403 Forbidden" (as per RFC 2616) will be returned.
"""
print "YAAAA"
email_txt="""
Erro 403
Path: {}
Cookies: {}
User: {}
Roles: {}
Bom trabalho
A Equipa do Scholr
"""
user = request.user if hasattr(request, 'user') else '?'
roles = request.user.roles if hasattr(request, 'user') and hasattr(request.user,'roles') else '---'
session_logger.error(u'{} with cookies {}, user: {}, roles: {}'.format(request.path, request.COOKIES, user, roles))
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
        return http.HttpResponseForbidden('<h1>403 Forbidden</h1>')
return http.HttpResponseForbidden(template.render(RequestContext(request)))
| jAlpedrinha/DeclRY | declry/views.py | Python | bsd-3-clause | 9,652 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FanUser',
fields=[
('user_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('slug', models.SlugField(unique=True)),
('is_contributor', models.BooleanField(default=False)),
('desc', models.TextField(blank=True)),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=('users.user',),
),
]
| vivyly/fancastic_17 | fancastic_17/fan/migrations/0001_initial.py | Python | bsd-3-clause | 909 |
"""
=====================================================
Analysis for project 29
=====================================================
:Author: Nick Ilott
:Release: $Id$
:Date: |today|
:Tags: Python
"""
# load modules
from ruffus import *
import CGAT.Experiment as E
import logging as L
import CGAT.Database as Database
import CGAT.CSV as CSV
import sys
import os
import re
import shutil
import itertools
import math
import glob
import time
import gzip
import collections
import random
import numpy
import sqlite3
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGAT.IndexedFasta as IndexedFasta
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.vectors as rovectors
from rpy2.rinterface import RRuntimeError
#from pandas import *
import PipelineProj029
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
P.getParameters(
["pipeline.ini"])
PARAMS = P.PARAMS
###################################################################
# connecting to database
###################################################################
def connect():
'''connect to database.
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database"])
return dbh
###################################################################
###################################################################
###################################################################
# This first section deals with collating the information from
# pipeline_metagenomeassembly.py. We produce plots of relative
# abundance correlations etc. between different samples
###################################################################
###################################################################
###################################################################
@follows(mkdir("metaphlan.dir"))
@split(os.path.join(PARAMS.get("communities_dir"), PARAMS.get("communities_db")),
"metaphlan.dir/relab.*.matrix")
def buildRelativeAbundanceMatricesMetaphlan(infile, outfiles):
'''
build a matrix combining the relative abundance estimations
for all samples
'''
# filenames to derive tablenames
dirname = PARAMS.get("communities_dir")
exp = "metaphlan.dir/*.relab"
files = glob.glob(os.path.join(dirname, exp))
tablenames = [
os.path.basename(x).replace(".", "_").replace("-", "_") \
for x in files]
tablenames.sort()
for level in ["phylum", "class", "order", "family", "genus", "species"]:
outfile = os.path.join("communities.dir", "relab." + level + ".matrix")
PipelineProj029.buildRelativeAbundanceMatrix(infile,
tablenames,
outfile,
level = level)
###################################################################
###################################################################
###################################################################
@follows(mkdir("kraken.dir"))
@split(os.path.join(PARAMS.get("communities_dir"), PARAMS.get("communities_db")),
"kraken.dir/*norm*.matrix")
def buildAbundanceMatricesKraken(infile, outfiles):
'''
build a matrix combining the rpm estimations
for all samples
'''
# filenames to derive tablenames
dirname = PARAMS.get("communities_dir")
exp = "kraken.dir/*.counts.norm.tsv.gz"
files = glob.glob(os.path.join(dirname, exp))
tablenames = [
"kraken_" + os.path.basename(P.snip(x, ".tsv.gz")).replace(".", "_").replace("-", "_") \
for x in files]
tablenames.sort()
for level in ["phylum", "class", "order", "family", "genus", "species"]:
outfile = os.path.join("kraken.dir", "counts.norm." + level + ".matrix")
PipelineProj029.buildRelativeAbundanceMatrix(infile,
tablenames,
outfile,
level = level)
###################################################################
###################################################################
###################################################################
@follows(mkdir("diamond.dir"))
@split(os.path.join(PARAMS.get("communities_dir"), PARAMS.get("communities_db")),
"diamond.dir/*norm*.matrix")
def buildAbundanceMatricesDiamond(infile, outfiles):
'''
build a matrix combining the rpm estimations
for all samples
'''
# filenames to derive tablenames
dirname = PARAMS.get("communities_dir")
exp = "diamond.dir/*.taxa.count"
files = glob.glob(os.path.join(dirname, exp))
tablenames = [
os.path.basename(x).replace(".", "_").replace("-", "_") \
for x in files]
tablenames.sort()
for level in ["phylum", "class", "order", "family", "genus", "species"]:
outfile = os.path.join("diamond.dir", "counts.norm." + level + ".matrix")
PipelineProj029.buildRelativeAbundanceMatrix(infile,
tablenames,
outfile,
level = level)
###################################################################
###################################################################
###################################################################
COMMUNITIES_TARGETS = []
communities_targets = {"kraken": buildAbundanceMatricesKraken,
"metaphlan": buildRelativeAbundanceMatricesMetaphlan,
"diamond": buildAbundanceMatricesDiamond}
for x in P.asList(PARAMS.get("classifiers")):
COMMUNITIES_TARGETS.append(communities_targets[x])
@transform(COMMUNITIES_TARGETS,
suffix(".matrix"), ".barplot.pdf")
def barplotAbundances(infile, outfile):
'''
barplot the species relative abundances
'''
threshold = PARAMS.get("communities_threshold")
PipelineProj029.barplotAbundances(infile,
outfile,
threshold)
###################################################################
###################################################################
###################################################################
@transform(COMMUNITIES_TARGETS, suffix(".matrix"), ".ratio.tsv")
def calculateFirmicutesBacteroidetesRatio(infile, outfile):
'''
    calculate the Firmicutes to Bacteroidetes ratio for each sample
'''
threshold = PARAMS.get("communities_threshold")
PipelineProj029.calculateFirmicutesBacteroidetesRatio(infile,
outfile,
threshold)
###################################################################
###################################################################
###################################################################
@transform(calculateFirmicutesBacteroidetesRatio, suffix(".tsv"), ".pdf")
def plotFirmicutesBacteroidetesRatio(infile, outfile):
'''
produce boxplot of firmicutes/bacteroidetes ratio
'''
PipelineProj029.plotFirmicutesBacteroidetesRatio(infile,
outfile)
###################################################################
###################################################################
###################################################################
@transform(calculateFirmicutesBacteroidetesRatio, suffix(".tsv"), ".signif")
def calculateSignificanceOfFirmicutesBacteroidetesRatio(infile, outfile):
'''
    use TukeyHSD to calculate significance between groups with
    multiple testing correction
'''
PipelineProj029.calculateSignificanceOfFirmicutesBacteroidetesRatio(infile,
outfile)
###################################################################
###################################################################
###################################################################
@jobs_limit(1, "R")
@transform(COMMUNITIES_TARGETS, suffix(".matrix"), ".barplot.numbers.pdf")
def plotHowManySpecies(infile, outfile):
'''
how many samples have how many species?
'''
PipelineProj029.plotHowManySpecies(infile, outfile)
###################################################################
###################################################################
###################################################################
@transform(COMMUNITIES_TARGETS, suffix(".matrix"), ".heatmap.pdf")
def heatmapAbundances(infile, outfile):
'''
heatmap the species relative abundances
'''
threshold = PARAMS.get("communities_threshold")
PipelineProj029.heatmapAbundances(infile,
outfile,
threshold,
"covariates.tsv")
###################################################################
###################################################################
###################################################################
@transform(COMMUNITIES_TARGETS, suffix(".matrix"), ".signif")
def testSignificanceOfAbundances(infile, outfile):
'''
    use an ANOVA to test significance. This is not ideal, but serves
    as a quick look
'''
PipelineProj029.anovaTest(infile,
"covariates.tsv",
outfile,
threshold = PARAMS.get("communities_threshold"),
over = PARAMS.get("communities_over"))
###################################################################
###################################################################
###################################################################
@transform(testSignificanceOfAbundances,
suffix(".signif"),
add_inputs(COMMUNITIES_TARGETS),
".signif.pdf")
def plotSignificantResults(infiles, outfile):
'''
barplot those taxa that are different across groups
'''
inf = infiles[0]
track = P.snip(inf, ".signif")
abundance_file = [m for m in infiles[1] if m.find(track) != -1][0]
threshold = PARAMS.get("communities_threshold")
PipelineProj029.plotSignificantResults(inf, abundance_file, outfile, threshold)
###################################################################
###################################################################
###################################################################
# KEGG analysis
###################################################################
###################################################################
###################################################################
@follows(mkdir("kegg.dir"))
@merge(glob.glob(os.path.join(PARAMS.get("communities_dir"), "kegg.dir/*.kegg.counts")), "kegg.dir/kegg.pathways.matrix")
def combineKeggTables(infiles, outfile):
'''
merge counts for kegg pathways
'''
headers = ",".join(
[re.match(".*.dir/(.*).kegg.counts", x).groups()[0]
for x in infiles])
directory = os.path.dirname(infiles[0])
statement = '''python %(scriptsdir)s/combine_tables.py
--glob=%(directory)s/*.kegg.counts
--headers=%(headers)s
--columns=1
--log=%(outfile)s.log
> %(outfile)s'''
P.run()
#########################################
#########################################
#########################################
@follows(barplotAbundances,
plotFirmicutesBacteroidetesRatio,
calculateSignificanceOfFirmicutesBacteroidetesRatio,
plotHowManySpecies,
heatmapAbundances,
plotSignificantResults)
def full():
pass
#########################################
#########################################
#########################################
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
| CGATOxford/proj029 | Proj029Pipelines/pipeline_proj029.py | Python | bsd-3-clause | 12,528 |
import os
from time import sleep
from cement.utils.test import TestApp
from cement.utils.misc import init_defaults
if 'REDIS_HOST' in os.environ.keys():
redis_host = os.environ['REDIS_HOST']
else:
redis_host = 'localhost'
defaults = init_defaults('cache.redis')
defaults['cache.redis']['host'] = redis_host
defaults['cache.redis']['port'] = 6379
defaults['cache.redis']['db'] = 0
class RedisApp(TestApp):
class Meta:
extensions = ['redis']
cache_handler = 'redis'
config_defaults = defaults
def test_redis_set(key):
with RedisApp() as app:
app.cache.set(key, 1001)
assert int(app.cache.get(key)) == 1001
def test_redis_get(key):
with RedisApp() as app:
# get empty value
app.cache.delete(key)
assert app.cache.get(key) is None
# get empty value with fallback
assert app.cache.get(key, 1234) == 1234
def test_redis_delete(key):
with RedisApp() as app:
app.cache.set(key, 1001)
assert int(app.cache.get(key)) == 1001
app.cache.delete(key)
assert app.cache.get(key) is None
def test_redis_purge(key):
with RedisApp() as app:
app.cache.set(key, 1002)
app.cache.purge()
assert app.cache.get(key) is None
def test_redis_expire(key):
with RedisApp() as app:
app.cache.set(key, 1003, time=2)
sleep(3)
assert app.cache.get(key) is None
| datafolklabs/cement | tests/ext/test_ext_redis.py | Python | bsd-3-clause | 1,437 |
#!/usr/bin/env python
import sys
from hiclib import mapping, fragmentHiC
from mirnylib import h5dict, genome
import h5py
basedir = sys.argv[1]
genome_db = genome.Genome('%s/Data/Genome/mm9_fasta' % basedir, readChrms=['1'], chrmFileTemplate="%s.fa")
temp = h5py.File('%s/Data/Timing/hiclib_data_norm.hdf5' % basedir, 'r')
weights = temp['weights'][...]
temp.close()
fragments = fragmentHiC.HiCdataset(
filename='temp',
genome=genome_db,
maximumMoleculeLength=500,
mode='a',
enzymeName="NcoI",
inMemory=True)
fragments.load('%s/Data/Timing/hiclib_data_norm.hdf5' % basedir)
fragments.weights = weights
fragments.fragmentWeights = weights
fragments.vectors['weights'] = 'float32'
fragments.saveHeatmap('%s/Data/Timing/hiclib_heatmap.hdf5' % basedir, resolution=10000, useWeights=True) | bxlab/HiFive_Paper | Scripts/Timing/hiclib_heatmap.py | Python | bsd-3-clause | 813 |
#from django.conf import settings
#settings.INSTALLED_APPS += ("mptt", "hvad", "galleries",) | marcopompili/django-market | django_market/__init__.py | Python | bsd-3-clause | 94 |
from rdr_service.model.genomics import GenomicGCValidationMetrics, GenomicSetMember
from rdr_service.tools.tool_libs.tool_base import cli_run, ToolBase
tool_cmd = 'backfill-gvcf'
tool_desc = 'Backfill the gVCF paths in genomic_gc_validation_metrics'
class GVcfBackfillTool(ToolBase):
def run(self):
super(GVcfBackfillTool, self).run()
# Get list of paths
path_list = self.get_paths_from_file()
for path in path_list:
sample_id = self.get_sample_id_from_gvcf_path(path)
metric = self.get_metric_from_sample_id(sample_id)
self.update_metric_gvcf_path(metric, path)
def get_paths_from_file(self):
path_set = set()
with open(self.args.input_file, encoding='utf-8-sig') as f:
lines = f.readlines()
for line in lines:
path_set.add(line.strip())
return list(path_set)
@staticmethod
def get_sample_id_from_gvcf_path(path):
# Based on naming convention:
# gs://prod-genomics-data-northwest/Wgs_sample_raw_data/
# SS_VCF_research/UW_A100329930_21055000718_702252_v1.hard-filtered.gvcf.gz
return path.split("_")[7]
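    # Worked illustration (comment only): for the example path quoted above, splitting on "_"
    # puts the sample id at index 7 ("21055000718" in that case), which is the value matched
    # against GenomicSetMember.sampleId in get_metric_from_sample_id below. The convention is
    # assumed to hold for every incoming path.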
def get_metric_from_sample_id(self, sample_id):
with self.get_session() as session:
return session.query(GenomicGCValidationMetrics).join(
GenomicSetMember,
GenomicSetMember.id == GenomicGCValidationMetrics.genomicSetMemberId
).filter(
GenomicSetMember.sampleId == sample_id
).one_or_none()
def update_metric_gvcf_path(self, metric, path):
if self.args.md5:
metric.gvcfMd5Received = 1
metric.gvcfMd5Path = path
else:
metric.gvcfReceived = 1
metric.gvcfPath = path
with self.get_session() as session:
session.merge(metric)
def add_additional_arguments(parser):
parser.add_argument('--input-file', required=True, help='path of text file with list of gVCF paths')
parser.add_argument('--md5', required=False, action="store_true", help='backfilling md5 files')
def run():
return cli_run(tool_cmd, tool_desc, GVcfBackfillTool, add_additional_arguments)
| all-of-us/raw-data-repository | rdr_service/tools/tool_libs/backfill_gvcf_paths.py | Python | bsd-3-clause | 2,248 |
#!/usr/bin/python2
"""Syncs to a given Cobalt build id.
Syncs current gclient instance to a given build id, as
generated by "build_id.py" and stored on carbon-airlock-95823.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import shutil
import subprocess
import sys
import requests
_BUILD_ID_QUERY_URL = (
"https://carbon-airlock-95823.appspot.com/build_version/search")
_BUILD_ID_QUERY_PARAMETER_NAME = "build_number"
class SubprocessFailedException(Exception):
"""Exception for non-zero subprocess exits."""
def __init__(self, command):
super(SubprocessFailedException, self).__init__() # pylint: disable=super-with-arguments
self.command = command
def __str__(self):
return "Subprocess failed '{0}'".format(self.command)
def _RunGitCommand(gitargs, **kwargs):
"""Runs a git command with "gitargs", returning the output splitlines().
Args:
gitargs: Commandline args that follow 'git'.
**kwargs: Keyword args for Popen.
Returns:
All of stdout, as an array of lines.
Raises:
SubprocessFailedException: if the exit code is nonzero.
"""
result_tuple = _RunGitCommandReturnExitCode(gitargs, **kwargs)
if result_tuple[0] != 0:
raise SubprocessFailedException(" ".join(["git"] + gitargs))
return result_tuple[1]
def _RunGitCommandReturnExitCode(gitargs, **kwargs):
"""Runs a git command with "gitargs", returning the exit code and output.
Args:
gitargs: Commandline args that follow 'git'.
**kwargs: Keyword args for Popen.
Returns:
Tuple of (exit code, all of stdout as an array of lines).
"""
popen_args = ["git"] + gitargs
with subprocess.Popen(popen_args, stdout=subprocess.PIPE, **kwargs) as p:
output = p.stdout.read().splitlines()
return p.wait(), output
def main():
dev_null = open(os.devnull, "w") # pylint: disable=consider-using-with
arg_parser = argparse.ArgumentParser(
description="Syncs to a given Cobalt build id")
arg_parser.add_argument("buildid", nargs=1)
arg_parser.add_argument(
"--force",
default=False,
action="store_true",
help="Deletes directories that don't match the requested format.")
args = arg_parser.parse_args()
r = requests.get(
_BUILD_ID_QUERY_URL,
params={_BUILD_ID_QUERY_PARAMETER_NAME: args.buildid[0]})
if not r.ok:
print(
"HTTP request failed\n{0} {1}\n{2}".format(r.status_code, r.reason,
r.text),
file=sys.stderr)
return 1
# The response starts with a security-related close expression line
outer_json = json.loads(r.text.splitlines()[1])
hashes = json.loads(outer_json["deps"])
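  # Illustrative note (assumption about the payload shape, not taken from the service's docs):
  # after the guard line, the JSON's "deps" value is itself a JSON-encoded mapping of
  # {relative path: "<repo_url>@<git hash>"}, which is why a second json.loads is needed here
  # and why each value is split on "@" in the loops below.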
git_root = os.getcwd()
for relpath, rep_hash in hashes.items():
path = os.path.normpath(os.path.join(git_root, relpath))
if not os.path.exists(path):
# No warning in this case, we will attempt to clone the repository in
# the next pass through the repos.
continue
is_dirty = (
bool(
_RunGitCommandReturnExitCode(["diff", "--no-ext-diff", "--quiet"],
cwd=path,
stderr=dev_null)[0]) or
bool(
_RunGitCommandReturnExitCode(
["diff", "--no-ext-diff", "--quiet", "--cached"],
cwd=path,
stderr=dev_null)[0]))
if is_dirty:
print("{0} is dirty, please resolve".format(relpath))
return 1
(requested_repo, _) = rep_hash.split("@")
remote_url = _RunGitCommand(["config", "--get", "remote.origin.url"],
cwd=path)[0].strip().decode("utf-8")
if requested_repo.endswith(".git"):
if remote_url + ".git" == requested_repo:
print(("WARNING: You are syncing to {0} instead of {1}. While these "
"point to the same repo, the differing extension will cause "
"different build ids to be generated. If you need the same "
"id, you'll need to specifically clone {0} (note the .git "
"extension).").format(requested_repo, remote_url))
remote_url += ".git"
if remote_url != requested_repo:
if args.force and path != git_root:
shutil.rmtree(path)
else:
print(("{0} exists but does not point to the requested repo for that "
"path, {1}. Either replace that directory manually or run this "
"script with --force. --force will not try to remove the top "
"level repository.").format(path, requested_repo))
return 1
for relpath, rep_hash in hashes.items():
path = os.path.normpath(os.path.join(git_root, relpath))
# repo_hash has a repo path prefix like this:
# 'https://chromium.googlesource.com/chromium/llvm-project/libcxx.git
# @48198f9110397fff47fe7c37cbfa296be7d44d3d'
(requested_repo, requested_hash) = rep_hash.split("@")
if not os.path.exists(path):
print("Missing path {0}, cloning from {1}.".format(path, requested_repo))
try:
# The clone command will create all missing directories leading to the
# path. If the clone is successful, we continue on as usual and let
# the subsequent logic here checkout the appropriate git hash.
_RunGitCommand(["clone", "-q", requested_repo, path])
except SubprocessFailedException:
print("There was an error cloning the repository.")
continue
current_hash = _RunGitCommand(["rev-parse", "HEAD"], cwd=path)[0]
if requested_hash == current_hash:
continue
symbolic_ref = None
try:
symbolic_ref = _RunGitCommand(["symbolic-ref", "--short", "-q", "HEAD"],
cwd=path,
stderr=dev_null)[0]
except SubprocessFailedException:
pass
user_visible_commit = symbolic_ref if symbolic_ref else current_hash[0:7]
print("{0} was at {1} now {2}".format(path, user_visible_commit,
requested_hash[0:7]))
_RunGitCommand(["checkout", "-q", "--detach", requested_hash], cwd=path)
return 0
if __name__ == "__main__":
try:
sys.exit(main())
except SubprocessFailedException as ex:
print(str(ex), file=sys.stderr)
sys.exit(1)
| youtube/cobalt | cobalt/build/sync_to_build_id.py | Python | bsd-3-clause | 6,399 |
from models import *
from django.db import connection
import collections
import time
import calendar
def GetCreditCardList(contactid):
cards_list = []
orders = Orders.objects.all().filter(ocustomerid = contactid)
cards_hash = {}
for order in orders:
if order.ocardno:
if order.ocardno not in cards_hash:
cards_hash[order.ocardno] = "Card Ending in %s" %order.ocardno[-4:]
# Preparing Cards List
cards_list = []
for key, value in cards_hash.items():
cards_list.append((key, value))
return cards_list
def GenerateShippingCalander():
month = [['', '', '','','', '', ''],
['', '', '','','', '', ''],
['', '', '','','', '', ''],
['', '', '','','', '', ''],
['', '', '','','', '', ''],
['', '', '','','', '', ''],
]
today = time.localtime().tm_mday
start_time = time.strptime("2013/06/01", "%Y/%m/01")
day = start_time.tm_mday
wday = start_time.tm_wday
last_day = calendar.monthrange(start_time.tm_year, start_time.tm_mon)[1]
row_no = 0
while day <= last_day:
cur_time = time.strptime(time.strftime("%Y/%m/" + str(day), start_time), "%Y/%m/%d")
day = cur_time.tm_mday
wday = cur_time.tm_wday
script = ''
bgcolor = "#FFFFFF"
if day < today:
bgcolor = "#999999"
elif day == today:
bgcolor = "#CC9966"
elif day == today + 1:
bgcolor = "#99CC00"
script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)
elif day == today + 2:
bgcolor = "#663366"
script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)
elif day > today + 2:
bgcolor = "#00CCCC"
script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)
if day >= today:
if wday == 6:
bgcolor = "#DB9E9B"
script = ''
elif wday == 5:
script = time.strftime("%m/" + str(day).zfill(2) + "/%Y", start_time)
bgcolor = "#FFCC33"
day_hash = {'wday': wday, 'day': day, 'bgcolor':bgcolor, 'script':script}
month[row_no][wday] = day_hash
if wday == 6:
row_no += 1
day += 1
return month
def GetPriorityShippingCharge(category_id):
shipping_cat_obj = ShippingCategory.objects.filter(id = category_id, status='ACTIVE')[0]
return shipping_cat_obj.priority_shipping
def GetSaturdayShippingCharge(category_id):
shipping_cat_obj = ShippingCategory.objects.filter(id = category_id, status='ACTIVE')[0]
return shipping_cat_obj.saturday_delivery
def GetAlaskaShippingCharge(category_id):
shipping_cat_obj = ShippingCategory.objects.filter(id = category_id, status='ACTIVE')[0]
return shipping_cat_obj.alaska_delivery
class Error(object):
def __init__(self):
self.IsError = False
self._error_number = 0
self._error_message = ''
def RaiseError(self, p_message):
self.IsError = True
self._error_message = p_message
def Error(self):
self.IsError = False;
msg = self._error_message
self._error_message = ""
return msg
class StoreCredit(object):
def __init__(self):
self.id = 0
self.credit_value = 0.0
class MyShippingCategory(object):
def __init__(self):
self.id = -1
self.shipping_charge = 0.0
self.fuel_charge = 0.0
self.tax = 0.0
self.tax_value = 0.0
self.promotions = 0.0
self.shipping_value = 0.0
self.supplies_total = 0.0
self.freeshipping_diff = 0.0
self.shipping_items = []
class CartInfo(Error):
'''Holds final order summary of the Cart'''
def __init__(self):
super(CartInfo, self).__init__()
self.subtotal = 0.0
self.shipping_total = 0.0
self.fuelcharge_total = 0.0
self.tax_total = 0.0
self.promotions_total = 0
self.store_credit = 0.0
self.order_total = 0.0
self.is_storecredit_applied = False
self.store_credit_id = 0
self.store_credit = 0.0
self.cc_approval_code = ''
def ApplyStoreCredit(self, obj):
self.store_credit_id = obj.id
credit_value = obj.credit_value
self.store_credit = credit_value
if self.order_total >= self.store_credit:
self.order_total -= self.store_credit
self.store_credit = 0
elif self.order_total < self.store_credit and self.order_total > 0:
self.store_credit -= self.order_total
self.order_total = 0
def GetShippingCategoryID(self, catalog_id):
#pc_object = ProductCategory.objects.get(catalogid=catalog_id)
#psc_object = ProductShippingCategories.objects.get(product_category_id = pc_object.categoryid)
cursor = connection.cursor()
cursor.execute("SELECT psc.shipping_category_id, sc.category_name FROM product_shipping_categories psc "
"inner join product_category pc on (psc.product_category_id = pc.categoryid) "
"inner join shipping_category sc on (psc.shipping_category_id = sc.id)"
"where product_category_id in (SELECT categoryid FROM product_category WHERE catalogid = %d) " %catalog_id)
row = cursor.fetchone()
cursor.close()
shipping_category_id = row[0]
shipping_category_name = row[1]
return shipping_category_id, shipping_category_name
def Add(self, cart_dict, catalog_id):
items_dict = cart_dict # key is ItemID and value is CartItem object
if catalog_id in items_dict.keys():
cart_item = items_dict[catalog_id]
            # Checking whether one more item is allowed given the existing quantity on hand.
if (cart_item.quantity + 1) > cart_item.qoh:
self.RaiseError("Quantity out of order. You can not add more items.")
return items_dict
cart_item.quantity += 1
items_dict[catalog_id] = cart_item
else:
cart_item = CartItem(catalog_id)
if cart_item.qoh <= 0:
self.RaiseError("Quantity is out of order")
return cart_dict
cart_item.shipping_category, cart_item.shipping_category_name = self.GetShippingCategoryID(catalog_id)
cart_item.quantity = 1
items_dict[catalog_id] = cart_item
return items_dict
def Delete(self, cart_dict, catalog_id):
del cart_dict[catalog_id]
return cart_dict
def Update(self, cart_dict, catalog_id, quantity):
cart_item = cart_dict[catalog_id]
if quantity <= 0:
self.RaiseError("Quantity should be greater than 0 or remove from cart")
return cart_dict
if quantity <= cart_item.qoh:
cart_item.quantity = quantity
else:
self.RaiseError("Quantity is out of order")
return cart_dict
return cart_dict
def GetOrderValue(self, cart_dict):
order_value = 0
for key, value in cart_dict.items():
value.CalculateTotals()
order_value += value.subtotal
return order_value
def GetShippingCharge(self, category_id, shipping_value, state, excluded_zip_codes):
shipping_charge = -1
fuel_charge = 0
free_shipping_diff = 0
shipping_cat_obj = ShippingCategory.objects.filter(id = category_id, status='ACTIVE')[0]
fuel_charge = shipping_cat_obj.fuel_charge
if shipping_cat_obj.is_free_shipping == 1:
# Return 0 as shipping charge and also get fuel charge as Tuple
shipping_charge = 0
return (shipping_charge, fuel_charge, free_shipping_diff)
if shipping_cat_obj.flatrate_shipping_charge > 0.0:
shipping_charge = shipping_cat_obj.flatrate_shipping_charge
return (shipping_charge, fuel_charge, free_shipping_diff)
# Fetching Rules.
shipping_charge_objs = ShippingCharges.objects.filter(shipping_category_id = category_id,
order_total_min__lte = shipping_value,
order_total_max__gte = shipping_value,
shipping_state = state)
# Calculating shipping charge as per the rules.
# If no rules, applying flat_rate shipping charge
if shipping_charge_objs:
shp_charge_obj = shipping_charge_objs[0]
shipping_charge = shp_charge_obj.shipping_charge
else:
            shipping_charge = shipping_cat_obj.flatrate_shipping_charge
# Calculating free shipping suggestion.
if shipping_charge > 0:
shipping_charge_objs = ShippingCharges.objects.filter(shipping_category_id = category_id,
shipping_charge = 0,
shipping_state = state)
if shipping_charge_objs:
shp_charge_obj = shipping_charge_objs[0]
free_shipping_diff = (shp_charge_obj.order_total_min - shipping_value)
else:
free_shipping_diff = 0
return (shipping_charge, fuel_charge, free_shipping_diff)
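    # Worked example (illustrative only; the rule rows are hypothetical, loosely modelled on the
    # legacy tiers commented out at the bottom of this file): suppose a category has ShippingCharges
    # rows for the given state of
    #   order_total_min=0.01,   order_total_max=98.99,  shipping_charge=34.99
    #   order_total_min=99.00,  order_total_max=198.99, shipping_charge=24.99
    #   order_total_min=199.00, order_total_max=9999.0, shipping_charge=0.00
    # A shipping_value of 150.00 matches the middle row (charge 24.99), and the free-shipping
    # suggestion is 199.00 - 150.00 = 49.00, i.e. how much more must be ordered to reach the
    # zero-charge tier.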
def GetItemsByShippingCategory(self, cart_dict):
items_dict = cart_dict
state = 'FL'
excluded_zips = []
tax_list = Tax.objects.filter(tax_country = 'US', tax_state = state)
if tax_list:
tax = tax_list[0].tax_value1
else:
tax = 7.0
# Dictionary contains shipping category id as a key and a list of items as values.
shipping_categories_dict = {}
shipping_cat_names_hash = {}
# Collecting Category wise Items
for key, item in items_dict.items():
item.CalculateTotals()
shipping_category_id = item.shipping_category
if item.shipping_category in shipping_categories_dict:
shipping_categories_dict[item.shipping_category].append(item)
else:
shipping_categories_dict[item.shipping_category] = [item]
shipping_cat_names_hash[item.shipping_category] = item.shipping_category_name
# Calculating Shipping Charge, Fuel Charge and Tax for each category
my_shipping_obj_list = []
for key, value in shipping_categories_dict.items():
shipping_category = MyShippingCategory()
shipping_category.id = key
shipping_category.name = shipping_cat_names_hash[key]
shipping_category.shipping_items = value
# Calculating Shipping Value
for item in shipping_category.shipping_items:
shipping_category.shipping_value += float(item.subtotal)
(shipping_category.shipping_charge, shipping_category.fuel_charge,
shipping_category.freeshipping_diff) = self.GetShippingCharge(shipping_category.id,
shipping_category.shipping_value,
state, excluded_zips)
shipping_category.tax = tax
shipping_category.tax_value = (shipping_category.shipping_value * shipping_category.tax)/100
shipping_category.supplies_total = (shipping_category.shipping_value +
shipping_category.shipping_charge +
shipping_category.fuel_charge +
shipping_category.tax_value -
shipping_category.promotions)
self.subtotal += shipping_category.shipping_value
self.shipping_total += shipping_category.shipping_charge
self.fuelcharge_total += shipping_category.fuel_charge
self.tax_total += shipping_category.tax_value
self.promotions_total += shipping_category.promotions
my_shipping_obj_list.append(shipping_category)
self.order_total = self.subtotal + self.shipping_total + self.fuelcharge_total + self.tax_total - self.promotions_total
# Applying Store Credit
#if self.is_storecredit_applied:
#od = collections.OrderedDict(sorted(shipping_categories_dict.items()))
return my_shipping_obj_list
def GetNumberOfItems(self, p_dict):
cart_dict = p_dict;
item_count = 0
for key, value in cart_dict.items():
item_count += value
return item_count
class CartItem(Error):
def __init__(self, item_id=None):
super(CartItem, self).__init__()
self.catalog_id = -1
self.item_name = ''
self.price = 0.0
self.saleprice = 0.0
self.quantity = 0
self.qoh = 0 # (Quantity on Hand)
self.shipping_category = 0
self.shipping_category_name = ''
self.shipping_charge = 0
self.tax_percent = 0.0
self.tax_value = 0.0
self.fuel_charge = 0.0
self.promotions = 0.0
self.is_reward_enabled = False
self.reward_points = 0
self.thumbnail = ''
self.image1 = ''
self.image2 = ''
self.image3 = ''
self.extra_fied_3 = ''
self.subtotal = 0.0
self.shipping_total = 0.0
self.fuel_charge_total = 0.0
self.promotions_total = 0.0
self.tax_total = 0.0
self.supplies_total = 0.0
if item_id:
self.FillItem(item_id)
return
def CalculateTotals(self):
if self.saleprice > 0:
self.subtotal = self.saleprice * self.quantity
else:
self.subtotal = self.price * self.quantity
self.shipping_total = 0.0
self.fuel_charge_total = 0.0
self.promotions_total = 0.0
self.tax_total = 0.0
self.supplies_total = 0.0
def FillItem(self, p_catalog_id):
'''Fills the current class object with the data fetched from the DB.
Returns: False if product not found.
'''
# Fetching product from the DB.
product_list = Products.objects.filter(catalogid=p_catalog_id)
if not product_list:
self.RaiseError("Item not found")
return False
product = product_list[0]
#product = Products()
self.catalog_id = product.catalogid
self.item_name = product.name
self.price = product.price
self.saleprice = product.saleprice
self.qoh = product.stock # (Quantity on Hand)
# No need to fill the values. Will be calculated for every category.
self.shipping_category = 0
self.shipping_charge = 0
self.tax_percent = 0.0
self.tax_value = 0.0
self.fuel_charge = 0.0
        # Update this value when the user applies a coupon.
self.promotions = 0.0
if product.reward_disable == 1:
self.is_reward_enabled = False
else:
self.is_reward_enabled = True
self.reward_points = product.reward_points
self.thumbnail = product.thumbnail
self.image1 = product.image1
self.image2 = product.image2
self.image3 = product.image3
self.extra_fied_3 = product.extra_field_3
#self.subtotal = 0.0
#self.shipping_total = 0.0
#self.fuel_charge_total = 0.0
#self.promotions_total = 0.0
#self.tax_total = 0.0
#self.supplies_total = 0.0
def Set(self, p_catalog_id, p_item_name, p_price, p_saleprice, p_quantity,
p_qoh, p_shipping_category, p_shipping_charge, p_tax_percent,
p_fuel_charge, p_promotions, p_is_rewards_enabled,
p_reward_points, p_thumbnail, p_image1, p_image2, p_image3,
p_extra_field_3=""):
self.catalog_id = p_catalog_id
self.item_name = p_item_name
self.price = p_price
self.saleprice = p_saleprice
self.quantity = p_quantity
self.qoh = p_qoh # (Quantity on Hand)
self.shipping_category = p_shipping_category
self.shipping_charge = p_shipping_charge
self.tax_percent = p_tax_percent
self.fuel_charge = p_fuel_charge
self.promotions = p_promotions
self.is_reward_enabled = p_is_rewards_enabled
self.reward_points = p_reward_points
self.thumbnail = p_thumbnail
self.image1 = p_image1
self.image2 = p_image2
self.image3 = p_image3
self.extra_fied_3 = p_extra_fied_3
#
# self.id = id
# self.name = name
# self.quantity = quantity
# self.price = price
# self.saleprice = saleprice
# #self.fuelcharge = fuelcharge
# self.fuelcharge = 2.99 * quantity
# self.promotions = promotions
# if saleprice <= 0 :
# self.subtotal = price * quantity
# else:
# self.subtotal = saleprice * quantity
#
# self.shipping = shipping
# self.tax = tax
# self.taxvalue = float(self.subtotal) * float(tax)/float(100)
# self.total = float(self.subtotal) + float(shipping) + self.taxvalue + self.fuelcharge - self.promotions
# self.thumbnail = thumbnail
# self.image1 = image1
# self.image2 = image2
# self.image3 = image3
# self.extra_field_3 = extra_field_3
#
# if reward_disable == 0:
# self.reward_points = reward_points
# else:
# self.reward_points = 0
#
# product_category_list = ProductCategory.objects.filter(catalogid = id)
#
# logging.info(product_category_list[0].id)
# if product_category_list:
# category_id, category_name, parent_id = self.GetParentCategory(product_category_list[0].categoryid)
#
# (self.shipping, free_shipping_min_value) = self.GetShippingCharge(category_name, self.subtotal)
# self.free_shipping_suggestion_val = free_shipping_min_value - self.subtotal
#
#
# self.category_id = 0
# def GetParentCategory(self, category_id):
# #SELECT category_name, category_parent from category where id = 4
# cursor = connection.cursor()
# parent_id = 99999
# levels = 0
# while (parent_id > 0 and levels < 100):
# cursor.execute("SELECT id, category_name, category_parent from category where id = %d" %category_id)
# row = cursor.fetchone()
# category_id = row[0]
# category_name = row[1]
# parent_id = row[2]
# category_id = parent_id
# levels += 1
#
# return (category_id, category_name, parent_id)
#
# def GetShippingCharge(self, category_name, sub_total):
# shipping_charge = 0.0
# free_shipping_min_value = -1
# if category_name.__contains__('Marine Life'):
# free_shipping_min_value = 199
# if sub_total >= 00.01 and sub_total <= 98.99:
# shipping_charge = 34.99
# elif sub_total >= 99.00 and sub_total <= 198.99:
# shipping_charge = 24.99
# else:
# shipping_charge = 0
#
# elif category_name.__contains__('Live Goods'):
# free_shipping_min_value = 199
# if sub_total >= 00.01 and sub_total <= 98.99:
# shipping_charge = 34.99
# elif sub_total >= 99.00 and sub_total <= 198.99:
# shipping_charge = 24.99
# else:
# shipping_charge = 0
#
# elif category_name.__contains__('Live Rock & Sand'):
# free_shipping_min_value = 0
# if sub_total >= 00.01 and sub_total <= 98.99:
# shipping_charge = 4.99
# elif sub_total >= 99.00 and sub_total <= 198.99:
# shipping_charge = 4.99
# else:
# shipping_charge = 4.99
#
# elif category_name.__contains__('FastTrack Supplies'):
# free_shipping_min_value = 0
# if sub_total >= 00.01 and sub_total <= 98.99:
# shipping_charge = 4.99
# elif sub_total >= 99.00 and sub_total <= 198.99:
# shipping_charge = 4.99
# else:
# shipping_charge = 4.99
#
# elif category_name.__contains__('Aquarium Supplies On Sale'):
# free_shipping_min_value = 0
# if sub_total >= 00.01 and sub_total <= 98.99:
# shipping_charge = 4.99
# elif sub_total >= 99.00 and sub_total <= 198.99:
# shipping_charge = 4.99
# else:
# shipping_charge = 4.99
#
# return (shipping_charge, free_shipping_min_value)
| hughsons/saltwaterfish | classes_bkp_0621.py | Python | bsd-3-clause | 19,445 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def buildImage(opt):
dpath = os.path.join(opt['datapath'], 'COCO-IMG')
if not build_data.built(dpath):
print('[building image data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# download the image data.
fname1 = 'train2014.zip'
fname2 = 'val2014.zip'
fname3 = 'test2014.zip'
url = 'http://msvocds.blob.core.windows.net/coco2014/'
build_data.download(dpath, url + fname1)
build_data.download(dpath, url + fname2)
build_data.download(dpath, url + fname3)
build_data.untar(dpath, fname1, False)
build_data.untar(dpath, fname2, False)
build_data.untar(dpath, fname3, False)
# Mark the data as built.
build_data.mark_done(dpath)
def build(opt):
dpath = os.path.join(opt['datapath'], 'VQA-v2')
if not build_data.built(dpath):
print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
fname1 = 'v2_Questions_Train_mscoco.zip'
fname2 = 'v2_Questions_Val_mscoco.zip'
fname3 = 'v2_Questions_Test_mscoco.zip'
fname4 = 'v2_Annotations_Val_mscoco.zip'
fname5 = 'v2_Annotations_Train_mscoco.zip'
url = 'http://visualqa.org/data/mscoco/vqa/'
build_data.download(dpath, url + fname1)
build_data.download(dpath, url + fname2)
build_data.download(dpath, url + fname3)
build_data.download(dpath, url + fname4)
build_data.download(dpath, url + fname5)
build_data.untar(dpath, fname1)
build_data.untar(dpath, fname2)
build_data.untar(dpath, fname3)
build_data.untar(dpath, fname4)
build_data.untar(dpath, fname5)
# Mark the data as built.
build_data.mark_done(dpath)
| calee88/ParlAI | parlai/tasks/vqa_v2/build.py | Python | bsd-3-clause | 2,271 |
from django.core.exceptions import ImproperlyConfigured
from django.db.models import F, fields
from django.db.models.functions import Cast, Coalesce
from django.utils.translation import gettext_lazy as _
from .conf import get_default_language, get_fallback_chain, get_modeltrans_setting
from .utils import (
FallbackTransform,
build_localized_fieldname,
get_instance_field_value,
get_language,
)
try:
# django==3.1 moved JSONField into django.db.models
from django.db.models import JSONField
from django.db.models.fields.json import KeyTextTransform
except ImportError:
from django.contrib.postgres.fields import JSONField
from django.contrib.postgres.fields.jsonb import KeyTextTransform
SUPPORTED_FIELDS = (fields.CharField, fields.TextField)
DEFAULT_LANGUAGE = get_default_language()
def translated_field_factory(original_field, language=None, *args, **kwargs):
if not isinstance(original_field, SUPPORTED_FIELDS):
raise ImproperlyConfigured(
"{} is not supported by django-modeltrans.".format(original_field.__class__.__name__)
)
class Specific(TranslatedVirtualField, original_field.__class__):
pass
Specific.__name__ = "Translated{}".format(original_field.__class__.__name__)
return Specific(original_field, language, *args, **kwargs)
class TranslatedVirtualField:
"""
A field representing a single field translated to a specific language.
Arguments:
original_field: The original field to be translated
language: The language to translate to, or `None` to track the current active Django language.
"""
# Implementation inspired by HStoreVirtualMixin from:
# https://github.com/djangonauts/django-hstore/blob/master/django_hstore/virtual.py
def __init__(self, original_field, language=None, *args, **kwargs):
# TODO: this feels like a big hack.
self.__dict__.update(original_field.__dict__)
self.original_field = original_field
self.language = language
self.blank = kwargs["blank"]
self.null = kwargs["null"]
self.concrete = False
self._help_text = kwargs.pop("help_text", None)
@property
def original_name(self):
return self.original_field.name
@property
def help_text(self):
if self._help_text is not None:
return self._help_text
if get_modeltrans_setting("MODELTRANS_ADD_FIELD_HELP_TEXT") and self.language is None:
return _("current language: {}").format(get_language())
def contribute_to_class(self, cls, name):
self.model = cls
self.attname = name
self.name = name
self.column = None
# Use a translated verbose name:
translated_field_name = _(self.original_field.verbose_name)
if self.language is not None:
translated_field_name += " ({})".format(self.language.upper())
self.verbose_name = translated_field_name
setattr(cls, name, self)
cls._meta.add_field(self, private=True)
def db_type(self, connection):
return None
def get_instance_fallback_chain(self, instance, language):
"""
Return the fallback chain for the instance.
        Most of the time, it is just the configured fallback chain, but if the per-record-fallback
        feature is used, the value of that field is prepended (if not None).
"""
default = get_fallback_chain(language)
i18n_field = instance._meta.get_field("i18n")
if i18n_field.fallback_language_field:
record_fallback_language = get_instance_field_value(
instance, i18n_field.fallback_language_field
)
if record_fallback_language:
return (record_fallback_language, *default)
return default
def __get__(self, instance, instance_type=None):
        # This method is apparently called with instance=None from Django.
        # django-hstore raises AttributeError here, but that doesn't solve our problem.
if instance is None:
return
if "i18n" in instance.get_deferred_fields():
raise ValueError(
"Getting translated values on a model fetched with defer('i18n') is not supported."
)
language = self.get_language()
original_value = getattr(instance, self.original_name)
if language == DEFAULT_LANGUAGE and original_value:
return original_value
# Make sure we test for containment in a dict, not in None
if instance.i18n is None:
instance.i18n = {}
field_name = build_localized_fieldname(self.original_name, language)
# Just return the value if this is an explicit field (<name>_<lang>)
if self.language is not None:
return instance.i18n.get(field_name)
# This is the _i18n version of the field, and the current language is not available,
# so we walk the fallback chain:
for fallback_language in (language,) + self.get_instance_fallback_chain(instance, language):
if fallback_language == DEFAULT_LANGUAGE:
if original_value:
return original_value
else:
continue
field_name = build_localized_fieldname(self.original_name, fallback_language)
if field_name in instance.i18n and instance.i18n[field_name]:
return instance.i18n.get(field_name)
# finally, return the original field if all else fails.
return getattr(instance, self.original_name)
def __set__(self, instance, value):
if instance.i18n is None:
instance.i18n = {}
language = self.get_language()
if language == DEFAULT_LANGUAGE:
setattr(instance, self.original_name, value)
else:
field_name = build_localized_fieldname(self.original_name, language)
# if value is None, remove field from `i18n`.
if value is None:
instance.i18n.pop(field_name, None)
else:
instance.i18n[field_name] = value
def get_field_name(self):
"""
Returns the field name for the current virtual field.
The field name is ``<original_field_name>_<language>`` in case of a specific
translation or ``<original_field_name>_i18n`` for the currently active language.
"""
if self.language is None:
lang = "i18n"
else:
lang = self.get_language()
return build_localized_fieldname(self.original_name, lang)
def get_language(self):
"""
Returns the language for this field.
In case of an explicit language (title_en), it returns "en", in case of
`title_i18n`, it returns the currently active Django language.
"""
return self.language if self.language is not None else get_language()
def output_field(self):
"""
The type of field used to Cast/Coalesce to.
Mainly because a max_length argument is required for CharField
until this PR is merged: https://github.com/django/django/pull/8758
"""
Field = self.original_field.__class__
if isinstance(self.original_field, fields.CharField):
return Field(max_length=self.original_field.max_length)
return Field()
def _localized_lookup(self, language, bare_lookup):
if language == DEFAULT_LANGUAGE:
return bare_lookup.replace(self.name, self.original_name)
        # When accessing a table directly, the i18n lookup is just "i18n"; when
        # following relations, the related lookup path comes first.
i18n_lookup = bare_lookup.replace(self.name, "i18n")
# To support per-row fallback languages, an F-expression is passed as language parameter.
if isinstance(language, F):
# abuse build_localized_fieldname without language to get "<field>_"
field_prefix = build_localized_fieldname(self.original_name, "")
return FallbackTransform(field_prefix, language, i18n_lookup)
else:
return KeyTextTransform(
build_localized_fieldname(self.original_name, language), i18n_lookup
)
def as_expression(self, bare_lookup, fallback=True):
"""
Compose an expression to get the value for this virtual field in a query.
"""
language = self.get_language()
if language == DEFAULT_LANGUAGE:
return F(self._localized_lookup(language, bare_lookup))
if not fallback:
i18n_lookup = self._localized_lookup(language, bare_lookup)
return Cast(i18n_lookup, self.output_field())
fallback_chain = get_fallback_chain(language)
# First, add the current language to the list of lookups
lookups = [self._localized_lookup(language, bare_lookup)]
        # Optionally add the lookup for the per-row fallback language
i18n_field = self.model._meta.get_field("i18n")
if i18n_field.fallback_language_field:
lookups.append(
self._localized_lookup(F(i18n_field.fallback_language_field), bare_lookup)
)
# and now, add the list of fallback languages to the lookup list
for fallback_language in fallback_chain:
lookups.append(self._localized_lookup(fallback_language, bare_lookup))
return Coalesce(*lookups, output_field=self.output_field())
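# Usage sketch (hypothetical model ``Blog`` with a translated ``title`` field):
# as_expression() composes the Coalesce over the active language and its
# fallback chain, which can then be used for annotation or ordering.
#
#   field = Blog._meta.get_field("title_i18n")
#   expr = field.as_expression("title_i18n")
#   Blog.objects.annotate(title_display=expr).order_by("title_display")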
class TranslationField(JSONField):
"""
This model field is used to store the translations in the translated model.
Arguments:
fields (iterable): List of model field names to make translatable.
required_languages (iterable or dict): List of languages required for the model.
If a dict is supplied, the keys must be translated field names with the value
containing a list of required languages for that specific field.
virtual_fields (bool): If `False`, do not add virtual fields to access
translated values with.
            Set to `False` during a migration from django-modeltranslation to prevent
            collisions with its database fields while keeping the `i18n` field available.
fallback_language_field: If not None, this should be the name of the field containing a
language code to use as the first language in any fallback chain.
For example: if you have a model instance with 'nl' as language_code, and set
fallback_language_field='language_code', 'nl' will always be tried after the current
language before any other language.
"""
description = "Translation storage for a model"
def __init__(
self,
fields=None,
required_languages=None,
virtual_fields=True,
fallback_language_field=None,
*args,
**kwargs,
):
self.fields = fields or ()
self.required_languages = required_languages or ()
self.virtual_fields = virtual_fields
self.fallback_language_field = fallback_language_field
kwargs["editable"] = False
kwargs["null"] = True
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["editable"]
del kwargs["null"]
kwargs["fields"] = self.fields
kwargs["required_languages"] = self.required_languages
kwargs["virtual_fields"] = self.virtual_fields
return name, path, args, kwargs
def get_translated_fields(self):
"""Return a generator for all translated fields."""
for field in self.model._meta.get_fields():
if isinstance(field, TranslatedVirtualField):
yield field
def contribute_to_class(self, cls, name):
if name != "i18n":
raise ImproperlyConfigured('{} must have name "i18n"'.format(self.__class__.__name__))
super().contribute_to_class(cls, name)
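# Usage sketch: a hypothetical model wiring TranslationField up. The field must
# be named "i18n" (enforced in contribute_to_class above); ``Blog`` and its
# fields are illustrative only.
#
#   from django.db import models
#   from modeltrans.fields import TranslationField
#
#   class Blog(models.Model):
#       title = models.CharField(max_length=255)
#       body = models.TextField()
#       i18n = TranslationField(fields=("title", "body"))
#
# ``blog.title_i18n`` then follows the active language and the fallback chain,
# while ``blog.title_de`` reads the explicit value stored under i18n["title_de"].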
| zostera/django-modeltrans | modeltrans/fields.py | Python | bsd-3-clause | 12,105 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
from contextlib import contextmanager
import six
from cryptography import utils
from cryptography.exceptions import (
InternalError, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import (
CMACBackend, CipherBackend, DERSerializationBackend, DSABackend,
EllipticCurveBackend, HMACBackend, HashBackend, PBKDF2HMACBackend,
PEMSerializationBackend, RSABackend, X509Backend
)
from cryptography.hazmat.backends.openssl.ciphers import (
_AESCTRCipherContext, _CipherContext
)
from cryptography.hazmat.backends.openssl.cmac import _CMACContext
from cryptography.hazmat.backends.openssl.dsa import (
_DSAParameters, _DSAPrivateKey, _DSAPublicKey
)
from cryptography.hazmat.backends.openssl.ec import (
_EllipticCurvePrivateKey, _EllipticCurvePublicKey
)
from cryptography.hazmat.backends.openssl.hashes import _HashContext
from cryptography.hazmat.backends.openssl.hmac import _HMACContext
from cryptography.hazmat.backends.openssl.rsa import (
_RSAPrivateKey, _RSAPublicKey
)
from cryptography.hazmat.backends.openssl.x509 import (
_Certificate, _CertificateSigningRequest
)
from cryptography.hazmat.bindings.openssl.binding import Binding
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cryptography.hazmat.primitives.asymmetric.padding import (
MGF1, OAEP, PKCS1v15, PSS
)
from cryptography.hazmat.primitives.ciphers.algorithms import (
AES, ARC4, Blowfish, CAST5, Camellia, IDEA, SEED, TripleDES
)
from cryptography.hazmat.primitives.ciphers.modes import (
CBC, CFB, CFB8, CTR, ECB, GCM, OFB
)
_MemoryBIO = collections.namedtuple("_MemoryBIO", ["bio", "char_ptr"])
_OpenSSLError = collections.namedtuple("_OpenSSLError",
["code", "lib", "func", "reason"])
@utils.register_interface(CipherBackend)
@utils.register_interface(CMACBackend)
@utils.register_interface(DERSerializationBackend)
@utils.register_interface(DSABackend)
@utils.register_interface(EllipticCurveBackend)
@utils.register_interface(HashBackend)
@utils.register_interface(HMACBackend)
@utils.register_interface(PBKDF2HMACBackend)
@utils.register_interface(RSABackend)
@utils.register_interface(PEMSerializationBackend)
@utils.register_interface(X509Backend)
class Backend(object):
"""
OpenSSL API binding interfaces.
"""
name = "openssl"
def __init__(self):
self._binding = Binding()
self._ffi = self._binding.ffi
self._lib = self._binding.lib
self._binding.init_static_locks()
# adds all ciphers/digests for EVP
self._lib.OpenSSL_add_all_algorithms()
# registers available SSL/TLS ciphers and digests
self._lib.SSL_library_init()
# loads error strings for libcrypto and libssl functions
self._lib.SSL_load_error_strings()
self._cipher_registry = {}
self._register_default_ciphers()
self.activate_osrandom_engine()
def activate_builtin_random(self):
# Obtain a new structural reference.
e = self._lib.ENGINE_get_default_RAND()
if e != self._ffi.NULL:
self._lib.ENGINE_unregister_RAND(e)
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
# decrement the structural reference from get_default_RAND
res = self._lib.ENGINE_finish(e)
assert res == 1
def activate_osrandom_engine(self):
# Unregister and free the current engine.
self.activate_builtin_random()
# Fetches an engine by id and returns it. This creates a structural
# reference.
e = self._lib.ENGINE_by_id(self._lib.Cryptography_osrandom_engine_id)
assert e != self._ffi.NULL
# Initialize the engine for use. This adds a functional reference.
res = self._lib.ENGINE_init(e)
assert res == 1
# Set the engine as the default RAND provider.
res = self._lib.ENGINE_set_default_RAND(e)
assert res == 1
# Decrement the structural ref incremented by ENGINE_by_id.
res = self._lib.ENGINE_free(e)
assert res == 1
# Decrement the functional ref incremented by ENGINE_init.
res = self._lib.ENGINE_finish(e)
assert res == 1
# Reset the RNG to use the new engine.
self._lib.RAND_cleanup()
def openssl_version_text(self):
"""
Friendly string name of the loaded OpenSSL library. This is not
necessarily the same version as it was compiled against.
Example: OpenSSL 1.0.1e 11 Feb 2013
"""
return self._ffi.string(
self._lib.SSLeay_version(self._lib.SSLEAY_VERSION)
).decode("ascii")
def create_hmac_ctx(self, key, algorithm):
return _HMACContext(self, key, algorithm)
def hash_supported(self, algorithm):
digest = self._lib.EVP_get_digestbyname(algorithm.name.encode("ascii"))
return digest != self._ffi.NULL
def hmac_supported(self, algorithm):
return self.hash_supported(algorithm)
def create_hash_ctx(self, algorithm):
return _HashContext(self, algorithm)
def cipher_supported(self, cipher, mode):
if self._evp_cipher_supported(cipher, mode):
return True
elif isinstance(mode, CTR) and isinstance(cipher, AES):
return True
else:
return False
def _evp_cipher_supported(self, cipher, mode):
try:
adapter = self._cipher_registry[type(cipher), type(mode)]
except KeyError:
return False
evp_cipher = adapter(self, cipher, mode)
return self._ffi.NULL != evp_cipher
def register_cipher_adapter(self, cipher_cls, mode_cls, adapter):
if (cipher_cls, mode_cls) in self._cipher_registry:
raise ValueError("Duplicate registration for: {0} {1}.".format(
cipher_cls, mode_cls)
)
self._cipher_registry[cipher_cls, mode_cls] = adapter
def _register_default_ciphers(self):
for mode_cls in [CBC, CTR, ECB, OFB, CFB, CFB8]:
self.register_cipher_adapter(
AES,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CTR, ECB, OFB, CFB]:
self.register_cipher_adapter(
Camellia,
mode_cls,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
for mode_cls in [CBC, CFB, CFB8, OFB]:
self.register_cipher_adapter(
TripleDES,
mode_cls,
GetCipherByName("des-ede3-{mode.name}")
)
self.register_cipher_adapter(
TripleDES,
ECB,
GetCipherByName("des-ede3")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
Blowfish,
mode_cls,
GetCipherByName("bf-{mode.name}")
)
for mode_cls in [CBC, CFB, OFB, ECB]:
self.register_cipher_adapter(
SEED,
mode_cls,
GetCipherByName("seed-{mode.name}")
)
for cipher_cls, mode_cls in itertools.product(
[CAST5, IDEA],
[CBC, OFB, CFB, ECB],
):
self.register_cipher_adapter(
cipher_cls,
mode_cls,
GetCipherByName("{cipher.name}-{mode.name}")
)
self.register_cipher_adapter(
ARC4,
type(None),
GetCipherByName("rc4")
)
self.register_cipher_adapter(
AES,
GCM,
GetCipherByName("{cipher.name}-{cipher.key_size}-{mode.name}")
)
def create_symmetric_encryption_ctx(self, cipher, mode):
if (isinstance(mode, CTR) and isinstance(cipher, AES) and
not self._evp_cipher_supported(cipher, mode)):
# This is needed to provide support for AES CTR mode in OpenSSL
# 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5
# extended life ends 2020).
return _AESCTRCipherContext(self, cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._ENCRYPT)
def create_symmetric_decryption_ctx(self, cipher, mode):
if (isinstance(mode, CTR) and isinstance(cipher, AES) and
not self._evp_cipher_supported(cipher, mode)):
# This is needed to provide support for AES CTR mode in OpenSSL
# 0.9.8. It can be removed when we drop 0.9.8 support (RHEL 5
# extended life ends 2020).
return _AESCTRCipherContext(self, cipher, mode)
else:
return _CipherContext(self, cipher, mode, _CipherContext._DECRYPT)
def pbkdf2_hmac_supported(self, algorithm):
if self._lib.Cryptography_HAS_PBKDF2_HMAC:
return self.hmac_supported(algorithm)
else:
# OpenSSL < 1.0.0 has an explicit PBKDF2-HMAC-SHA1 function,
# so if the PBKDF2_HMAC function is missing we only support
# SHA1 via PBKDF2_HMAC_SHA1.
return isinstance(algorithm, hashes.SHA1)
def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
key_material):
buf = self._ffi.new("char[]", length)
if self._lib.Cryptography_HAS_PBKDF2_HMAC:
evp_md = self._lib.EVP_get_digestbyname(
algorithm.name.encode("ascii"))
assert evp_md != self._ffi.NULL
res = self._lib.PKCS5_PBKDF2_HMAC(
key_material,
len(key_material),
salt,
len(salt),
iterations,
evp_md,
length,
buf
)
assert res == 1
else:
if not isinstance(algorithm, hashes.SHA1):
raise UnsupportedAlgorithm(
"This version of OpenSSL only supports PBKDF2HMAC with "
"SHA1.",
_Reasons.UNSUPPORTED_HASH
)
res = self._lib.PKCS5_PBKDF2_HMAC_SHA1(
key_material,
len(key_material),
salt,
len(salt),
iterations,
length,
buf
)
assert res == 1
return self._ffi.buffer(buf)[:]
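    # Usage sketch: derive_pbkdf2_hmac() is normally reached through the
    # high-level PBKDF2HMAC primitive rather than called directly; the salt,
    # iteration count and length below are illustrative values only.
    #
    #   from cryptography.hazmat.primitives import hashes
    #   from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
    #
    #   kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32,
    #                    salt=b"salt", iterations=100000, backend=backend)
    #   key = kdf.derive(b"my secret passphrase")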
def _err_string(self, code):
err_buf = self._ffi.new("char[]", 256)
self._lib.ERR_error_string_n(code, err_buf, 256)
return self._ffi.string(err_buf, 256)[:]
def _consume_errors(self):
errors = []
while True:
code = self._lib.ERR_get_error()
if code == 0:
break
lib = self._lib.ERR_GET_LIB(code)
func = self._lib.ERR_GET_FUNC(code)
reason = self._lib.ERR_GET_REASON(code)
errors.append(_OpenSSLError(code, lib, func, reason))
return errors
def _unknown_error(self, error):
return InternalError(
"Unknown error code {0} from OpenSSL, "
"you should probably file a bug. {1}.".format(
error.code, self._err_string(error.code)
)
)
def _bn_to_int(self, bn):
assert bn != self._ffi.NULL
if six.PY3:
# Python 3 has constant time from_bytes, so use that.
bn_num_bytes = (self._lib.BN_num_bits(bn) + 7) // 8
bin_ptr = self._ffi.new("unsigned char[]", bn_num_bytes)
bin_len = self._lib.BN_bn2bin(bn, bin_ptr)
# A zero length means the BN has value 0
assert bin_len >= 0
assert bin_ptr != self._ffi.NULL
return int.from_bytes(self._ffi.buffer(bin_ptr)[:bin_len], "big")
else:
# Under Python 2 the best we can do is hex()
hex_cdata = self._lib.BN_bn2hex(bn)
assert hex_cdata != self._ffi.NULL
hex_str = self._ffi.string(hex_cdata)
self._lib.OPENSSL_free(hex_cdata)
return int(hex_str, 16)
def _int_to_bn(self, num, bn=None):
"""
Converts a python integer to a BIGNUM. The returned BIGNUM will not
be garbage collected (to support adding them to structs that take
ownership of the object). Be sure to register it for GC if it will
be discarded after use.
"""
assert bn is None or bn != self._ffi.NULL
if bn is None:
bn = self._ffi.NULL
if six.PY3:
# Python 3 has constant time to_bytes, so use that.
binary = num.to_bytes(int(num.bit_length() / 8.0 + 1), "big")
bn_ptr = self._lib.BN_bin2bn(binary, len(binary), bn)
assert bn_ptr != self._ffi.NULL
return bn_ptr
else:
# Under Python 2 the best we can do is hex()
hex_num = hex(num).rstrip("L").lstrip("0x").encode("ascii") or b"0"
bn_ptr = self._ffi.new("BIGNUM **")
bn_ptr[0] = bn
res = self._lib.BN_hex2bn(bn_ptr, hex_num)
assert res != 0
assert bn_ptr[0] != self._ffi.NULL
return bn_ptr[0]
def generate_rsa_private_key(self, public_exponent, key_size):
rsa._verify_rsa_parameters(public_exponent, key_size)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
bn = self._int_to_bn(public_exponent)
bn = self._ffi.gc(bn, self._lib.BN_free)
res = self._lib.RSA_generate_key_ex(
rsa_cdata, key_size, bn, self._ffi.NULL
)
assert res == 1
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPrivateKey(self, rsa_cdata, evp_pkey)
def generate_rsa_parameters_supported(self, public_exponent, key_size):
return (public_exponent >= 3 and public_exponent & 1 != 0 and
key_size >= 512)
def load_rsa_private_numbers(self, numbers):
rsa._check_private_key_components(
numbers.p,
numbers.q,
numbers.d,
numbers.dmp1,
numbers.dmq1,
numbers.iqmp,
numbers.public_numbers.e,
numbers.public_numbers.n
)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
rsa_cdata.p = self._int_to_bn(numbers.p)
rsa_cdata.q = self._int_to_bn(numbers.q)
rsa_cdata.d = self._int_to_bn(numbers.d)
rsa_cdata.dmp1 = self._int_to_bn(numbers.dmp1)
rsa_cdata.dmq1 = self._int_to_bn(numbers.dmq1)
rsa_cdata.iqmp = self._int_to_bn(numbers.iqmp)
rsa_cdata.e = self._int_to_bn(numbers.public_numbers.e)
rsa_cdata.n = self._int_to_bn(numbers.public_numbers.n)
res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)
assert res == 1
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPrivateKey(self, rsa_cdata, evp_pkey)
def load_rsa_public_numbers(self, numbers):
rsa._check_public_key_components(numbers.e, numbers.n)
rsa_cdata = self._lib.RSA_new()
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
rsa_cdata.e = self._int_to_bn(numbers.e)
rsa_cdata.n = self._int_to_bn(numbers.n)
res = self._lib.RSA_blinding_on(rsa_cdata, self._ffi.NULL)
assert res == 1
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
def _rsa_cdata_to_evp_pkey(self, rsa_cdata):
evp_pkey = self._lib.EVP_PKEY_new()
assert evp_pkey != self._ffi.NULL
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
res = self._lib.EVP_PKEY_set1_RSA(evp_pkey, rsa_cdata)
assert res == 1
return evp_pkey
def _bytes_to_bio(self, data):
"""
Return a _MemoryBIO namedtuple of (BIO, char*).
The char* is the storage for the BIO and it must stay alive until the
BIO is finished with.
"""
data_char_p = self._ffi.new("char[]", data)
bio = self._lib.BIO_new_mem_buf(
data_char_p, len(data)
)
assert bio != self._ffi.NULL
return _MemoryBIO(self._ffi.gc(bio, self._lib.BIO_free), data_char_p)
def _create_mem_bio(self):
"""
Creates an empty memory BIO.
"""
bio_method = self._lib.BIO_s_mem()
assert bio_method != self._ffi.NULL
bio = self._lib.BIO_new(bio_method)
assert bio != self._ffi.NULL
bio = self._ffi.gc(bio, self._lib.BIO_free)
return bio
def _read_mem_bio(self, bio):
"""
Reads a memory BIO. This only works on memory BIOs.
"""
buf = self._ffi.new("char **")
buf_len = self._lib.BIO_get_mem_data(bio, buf)
assert buf_len > 0
assert buf[0] != self._ffi.NULL
bio_data = self._ffi.buffer(buf[0], buf_len)[:]
return bio_data
def _evp_pkey_to_private_key(self, evp_pkey):
"""
Return the appropriate type of PrivateKey given an evp_pkey cdata
pointer.
"""
key_type = evp_pkey.type
if key_type == self._lib.EVP_PKEY_RSA:
rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
return _RSAPrivateKey(self, rsa_cdata, evp_pkey)
elif key_type == self._lib.EVP_PKEY_DSA:
dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
return _DSAPrivateKey(self, dsa_cdata, evp_pkey)
elif (self._lib.Cryptography_HAS_EC == 1 and
key_type == self._lib.EVP_PKEY_EC):
ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey)
else:
raise UnsupportedAlgorithm("Unsupported key type.")
def _evp_pkey_to_public_key(self, evp_pkey):
"""
Return the appropriate type of PublicKey given an evp_pkey cdata
pointer.
"""
key_type = evp_pkey.type
if key_type == self._lib.EVP_PKEY_RSA:
rsa_cdata = self._lib.EVP_PKEY_get1_RSA(evp_pkey)
assert rsa_cdata != self._ffi.NULL
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
elif key_type == self._lib.EVP_PKEY_DSA:
dsa_cdata = self._lib.EVP_PKEY_get1_DSA(evp_pkey)
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
return _DSAPublicKey(self, dsa_cdata, evp_pkey)
elif (self._lib.Cryptography_HAS_EC == 1 and
key_type == self._lib.EVP_PKEY_EC):
ec_cdata = self._lib.EVP_PKEY_get1_EC_KEY(evp_pkey)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey)
else:
raise UnsupportedAlgorithm("Unsupported key type.")
def _pem_password_cb(self, password):
"""
        Generate a pem_password_cb function pointer that copies the password to
        OpenSSL as required and returns the number of bytes copied.
typedef int pem_password_cb(char *buf, int size,
int rwflag, void *userdata);
Useful for decrypting PKCS8 files and so on.
Returns a tuple of (cdata function pointer, callback function).
"""
def pem_password_cb(buf, size, writing, userdata):
pem_password_cb.called += 1
if not password:
pem_password_cb.exception = TypeError(
"Password was not given but private key is encrypted."
)
return 0
elif len(password) < size:
pw_buf = self._ffi.buffer(buf, size)
pw_buf[:len(password)] = password
return len(password)
else:
pem_password_cb.exception = ValueError(
"Passwords longer than {0} bytes are not supported "
"by this backend.".format(size - 1)
)
return 0
pem_password_cb.called = 0
pem_password_cb.exception = None
return (
self._ffi.callback("int (char *, int, int, void *)",
pem_password_cb),
pem_password_cb
)
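    # Usage sketch: _load_key() below pairs the returned cdata callback with the
    # Python-side state object so that an exception raised inside OpenSSL's
    # callback can be re-raised afterwards (the password value is illustrative).
    #
    #   password_callback, password_func = self._pem_password_cb(b"hunter2")
    #   evp_pkey = self._lib.PEM_read_bio_PrivateKey(
    #       mem_bio.bio, self._ffi.NULL, password_callback, self._ffi.NULL
    #   )
    #   if password_func.exception is not None:
    #       raise password_func.exception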
def _mgf1_hash_supported(self, algorithm):
if self._lib.Cryptography_HAS_MGF1_MD:
return self.hash_supported(algorithm)
else:
return isinstance(algorithm, hashes.SHA1)
def rsa_padding_supported(self, padding):
if isinstance(padding, PKCS1v15):
return True
elif isinstance(padding, PSS) and isinstance(padding._mgf, MGF1):
return self._mgf1_hash_supported(padding._mgf._algorithm)
elif isinstance(padding, OAEP) and isinstance(padding._mgf, MGF1):
return isinstance(padding._mgf._algorithm, hashes.SHA1)
else:
return False
def generate_dsa_parameters(self, key_size):
if key_size not in (1024, 2048, 3072):
raise ValueError(
"Key size must be 1024 or 2048 or 3072 bits.")
if (self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f and
key_size > 1024):
raise ValueError(
"Key size must be 1024 because OpenSSL < 1.0.0 doesn't "
"support larger key sizes.")
ctx = self._lib.DSA_new()
assert ctx != self._ffi.NULL
ctx = self._ffi.gc(ctx, self._lib.DSA_free)
res = self._lib.DSA_generate_parameters_ex(
ctx, key_size, self._ffi.NULL, 0,
self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
assert res == 1
return _DSAParameters(self, ctx)
def generate_dsa_private_key(self, parameters):
ctx = self._lib.DSA_new()
assert ctx != self._ffi.NULL
ctx = self._ffi.gc(ctx, self._lib.DSA_free)
ctx.p = self._lib.BN_dup(parameters._dsa_cdata.p)
ctx.q = self._lib.BN_dup(parameters._dsa_cdata.q)
ctx.g = self._lib.BN_dup(parameters._dsa_cdata.g)
self._lib.DSA_generate_key(ctx)
evp_pkey = self._dsa_cdata_to_evp_pkey(ctx)
return _DSAPrivateKey(self, ctx, evp_pkey)
def generate_dsa_private_key_and_parameters(self, key_size):
parameters = self.generate_dsa_parameters(key_size)
return self.generate_dsa_private_key(parameters)
def load_dsa_private_numbers(self, numbers):
dsa._check_dsa_private_numbers(numbers)
parameter_numbers = numbers.public_numbers.parameter_numbers
dsa_cdata = self._lib.DSA_new()
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
dsa_cdata.p = self._int_to_bn(parameter_numbers.p)
dsa_cdata.q = self._int_to_bn(parameter_numbers.q)
dsa_cdata.g = self._int_to_bn(parameter_numbers.g)
dsa_cdata.pub_key = self._int_to_bn(numbers.public_numbers.y)
dsa_cdata.priv_key = self._int_to_bn(numbers.x)
evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata)
return _DSAPrivateKey(self, dsa_cdata, evp_pkey)
def load_dsa_public_numbers(self, numbers):
dsa._check_dsa_parameters(numbers.parameter_numbers)
dsa_cdata = self._lib.DSA_new()
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
dsa_cdata.p = self._int_to_bn(numbers.parameter_numbers.p)
dsa_cdata.q = self._int_to_bn(numbers.parameter_numbers.q)
dsa_cdata.g = self._int_to_bn(numbers.parameter_numbers.g)
dsa_cdata.pub_key = self._int_to_bn(numbers.y)
evp_pkey = self._dsa_cdata_to_evp_pkey(dsa_cdata)
return _DSAPublicKey(self, dsa_cdata, evp_pkey)
def load_dsa_parameter_numbers(self, numbers):
dsa._check_dsa_parameters(numbers)
dsa_cdata = self._lib.DSA_new()
assert dsa_cdata != self._ffi.NULL
dsa_cdata = self._ffi.gc(dsa_cdata, self._lib.DSA_free)
dsa_cdata.p = self._int_to_bn(numbers.p)
dsa_cdata.q = self._int_to_bn(numbers.q)
dsa_cdata.g = self._int_to_bn(numbers.g)
return _DSAParameters(self, dsa_cdata)
def _dsa_cdata_to_evp_pkey(self, dsa_cdata):
evp_pkey = self._lib.EVP_PKEY_new()
assert evp_pkey != self._ffi.NULL
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
res = self._lib.EVP_PKEY_set1_DSA(evp_pkey, dsa_cdata)
assert res == 1
return evp_pkey
def dsa_hash_supported(self, algorithm):
if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:
return isinstance(algorithm, hashes.SHA1)
else:
return self.hash_supported(algorithm)
def dsa_parameters_supported(self, p, q, g):
if self._lib.OPENSSL_VERSION_NUMBER < 0x1000000f:
return utils.bit_length(p) <= 1024 and utils.bit_length(q) <= 160
else:
return True
def cmac_algorithm_supported(self, algorithm):
return (
self._lib.Cryptography_HAS_CMAC == 1 and
self.cipher_supported(
algorithm, CBC(b"\x00" * algorithm.block_size)
)
)
def create_cmac_ctx(self, algorithm):
return _CMACContext(self, algorithm)
def load_pem_private_key(self, data, password):
return self._load_key(
self._lib.PEM_read_bio_PrivateKey,
self._evp_pkey_to_private_key,
data,
password,
)
def load_pem_public_key(self, data):
mem_bio = self._bytes_to_bio(data)
evp_pkey = self._lib.PEM_read_bio_PUBKEY(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if evp_pkey != self._ffi.NULL:
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
return self._evp_pkey_to_public_key(evp_pkey)
else:
# It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still
# need to check to see if it is a pure PKCS1 RSA public key (not
# embedded in a subjectPublicKeyInfo)
self._consume_errors()
res = self._lib.BIO_reset(mem_bio.bio)
assert res == 1
rsa_cdata = self._lib.PEM_read_bio_RSAPublicKey(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if rsa_cdata != self._ffi.NULL:
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
else:
self._handle_key_loading_error()
def load_der_private_key(self, data, password):
# OpenSSL has a function called d2i_AutoPrivateKey that can simplify
# this. Unfortunately it doesn't properly support PKCS8 on OpenSSL
# 0.9.8 so we can't use it. Instead we sequentially try to load it 3
# different ways. First we'll try to load it as a traditional key
bio_data = self._bytes_to_bio(data)
key = self._evp_pkey_from_der_traditional_key(bio_data, password)
if not key:
# Okay so it's not a traditional key. Let's try
# PKCS8 unencrypted. OpenSSL 0.9.8 can't load unencrypted
# PKCS8 keys using d2i_PKCS8PrivateKey_bio so we do this instead.
# Reset the memory BIO so we can read the data again.
res = self._lib.BIO_reset(bio_data.bio)
assert res == 1
key = self._evp_pkey_from_der_unencrypted_pkcs8(bio_data, password)
if key:
return self._evp_pkey_to_private_key(key)
else:
# Finally we try to load it with the method that handles encrypted
# PKCS8 properly.
return self._load_key(
self._lib.d2i_PKCS8PrivateKey_bio,
self._evp_pkey_to_private_key,
data,
password,
)
def _evp_pkey_from_der_traditional_key(self, bio_data, password):
key = self._lib.d2i_PrivateKey_bio(bio_data.bio, self._ffi.NULL)
if key != self._ffi.NULL:
key = self._ffi.gc(key, self._lib.EVP_PKEY_free)
if password is not None:
raise TypeError(
"Password was given but private key is not encrypted."
)
return key
else:
self._consume_errors()
return None
def _evp_pkey_from_der_unencrypted_pkcs8(self, bio_data, password):
info = self._lib.d2i_PKCS8_PRIV_KEY_INFO_bio(
bio_data.bio, self._ffi.NULL
)
info = self._ffi.gc(info, self._lib.PKCS8_PRIV_KEY_INFO_free)
if info != self._ffi.NULL:
key = self._lib.EVP_PKCS82PKEY(info)
assert key != self._ffi.NULL
key = self._ffi.gc(key, self._lib.EVP_PKEY_free)
if password is not None:
raise TypeError(
"Password was given but private key is not encrypted."
)
return key
else:
self._consume_errors()
return None
def load_der_public_key(self, data):
mem_bio = self._bytes_to_bio(data)
evp_pkey = self._lib.d2i_PUBKEY_bio(mem_bio.bio, self._ffi.NULL)
if evp_pkey != self._ffi.NULL:
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
return self._evp_pkey_to_public_key(evp_pkey)
else:
# It's not a (RSA/DSA/ECDSA) subjectPublicKeyInfo, but we still
# need to check to see if it is a pure PKCS1 RSA public key (not
# embedded in a subjectPublicKeyInfo)
self._consume_errors()
res = self._lib.BIO_reset(mem_bio.bio)
assert res == 1
rsa_cdata = self._lib.d2i_RSAPublicKey_bio(
mem_bio.bio, self._ffi.NULL
)
if rsa_cdata != self._ffi.NULL:
rsa_cdata = self._ffi.gc(rsa_cdata, self._lib.RSA_free)
evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
return _RSAPublicKey(self, rsa_cdata, evp_pkey)
else:
self._handle_key_loading_error()
def load_pem_x509_certificate(self, data):
mem_bio = self._bytes_to_bio(data)
x509 = self._lib.PEM_read_bio_X509(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if x509 == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load certificate")
x509 = self._ffi.gc(x509, self._lib.X509_free)
return _Certificate(self, x509)
def load_der_x509_certificate(self, data):
mem_bio = self._bytes_to_bio(data)
x509 = self._lib.d2i_X509_bio(mem_bio.bio, self._ffi.NULL)
if x509 == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load certificate")
x509 = self._ffi.gc(x509, self._lib.X509_free)
return _Certificate(self, x509)
def load_pem_x509_csr(self, data):
mem_bio = self._bytes_to_bio(data)
x509_req = self._lib.PEM_read_bio_X509_REQ(
mem_bio.bio, self._ffi.NULL, self._ffi.NULL, self._ffi.NULL
)
if x509_req == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load request")
x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free)
return _CertificateSigningRequest(self, x509_req)
def load_der_x509_csr(self, data):
mem_bio = self._bytes_to_bio(data)
x509_req = self._lib.d2i_X509_REQ_bio(mem_bio.bio, self._ffi.NULL)
if x509_req == self._ffi.NULL:
self._consume_errors()
raise ValueError("Unable to load request")
x509_req = self._ffi.gc(x509_req, self._lib.X509_REQ_free)
return _CertificateSigningRequest(self, x509_req)
def _load_key(self, openssl_read_func, convert_func, data, password):
mem_bio = self._bytes_to_bio(data)
password_callback, password_func = self._pem_password_cb(password)
evp_pkey = openssl_read_func(
mem_bio.bio,
self._ffi.NULL,
password_callback,
self._ffi.NULL
)
if evp_pkey == self._ffi.NULL:
if password_func.exception is not None:
errors = self._consume_errors()
assert errors
raise password_func.exception
else:
self._handle_key_loading_error()
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
if password is not None and password_func.called == 0:
raise TypeError(
"Password was given but private key is not encrypted.")
assert (
(password is not None and password_func.called == 1) or
password is None
)
return convert_func(evp_pkey)
def _handle_key_loading_error(self):
errors = self._consume_errors()
if not errors:
raise ValueError("Could not unserialize key data.")
elif errors[0][1:] in (
(
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_DECRYPTFINAL_EX,
self._lib.EVP_R_BAD_DECRYPT
),
(
self._lib.ERR_LIB_PKCS12,
self._lib.PKCS12_F_PKCS12_PBE_CRYPT,
self._lib.PKCS12_R_PKCS12_CIPHERFINAL_ERROR,
)
):
raise ValueError("Bad decrypt. Incorrect password?")
elif errors[0][1:] in (
(
self._lib.ERR_LIB_PEM,
self._lib.PEM_F_PEM_GET_EVP_CIPHER_INFO,
self._lib.PEM_R_UNSUPPORTED_ENCRYPTION
),
(
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_PBE_CIPHERINIT,
self._lib.EVP_R_UNKNOWN_PBE_ALGORITHM
)
):
raise UnsupportedAlgorithm(
"PEM data is encrypted with an unsupported cipher",
_Reasons.UNSUPPORTED_CIPHER
)
elif any(
error[1:] == (
self._lib.ERR_LIB_EVP,
self._lib.EVP_F_EVP_PKCS82PKEY,
self._lib.EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM
)
for error in errors
):
raise UnsupportedAlgorithm(
"Unsupported public key algorithm.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
)
else:
assert errors[0][1] in (
self._lib.ERR_LIB_EVP,
self._lib.ERR_LIB_PEM,
self._lib.ERR_LIB_ASN1,
)
raise ValueError("Could not unserialize key data.")
def elliptic_curve_supported(self, curve):
if self._lib.Cryptography_HAS_EC != 1:
return False
try:
curve_nid = self._elliptic_curve_to_nid(curve)
except UnsupportedAlgorithm:
curve_nid = self._lib.NID_undef
ctx = self._lib.EC_GROUP_new_by_curve_name(curve_nid)
if ctx == self._ffi.NULL:
errors = self._consume_errors()
assert (
curve_nid == self._lib.NID_undef or
errors[0][1:] == (
self._lib.ERR_LIB_EC,
self._lib.EC_F_EC_GROUP_NEW_BY_CURVE_NAME,
self._lib.EC_R_UNKNOWN_GROUP
)
)
return False
else:
assert curve_nid != self._lib.NID_undef
self._lib.EC_GROUP_free(ctx)
return True
def elliptic_curve_signature_algorithm_supported(
self, signature_algorithm, curve
):
if self._lib.Cryptography_HAS_EC != 1:
return False
# We only support ECDSA right now.
if not isinstance(signature_algorithm, ec.ECDSA):
return False
# Before 0.9.8m OpenSSL can't cope with digests longer than the curve.
if (
self._lib.OPENSSL_VERSION_NUMBER < 0x009080df and
curve.key_size < signature_algorithm.algorithm.digest_size * 8
):
return False
return self.elliptic_curve_supported(curve)
def generate_elliptic_curve_private_key(self, curve):
"""
Generate a new private key on the named curve.
"""
if self.elliptic_curve_supported(curve):
curve_nid = self._elliptic_curve_to_nid(curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
res = self._lib.EC_KEY_generate_key(ec_cdata)
assert res == 1
res = self._lib.EC_KEY_check_key(ec_cdata)
assert res == 1
evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata)
return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey)
else:
raise UnsupportedAlgorithm(
"Backend object does not support {0}.".format(curve.name),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
def load_elliptic_curve_private_numbers(self, numbers):
public = numbers.public_numbers
curve_nid = self._elliptic_curve_to_nid(public.curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
ec_cdata = self._ec_key_set_public_key_affine_coordinates(
ec_cdata, public.x, public.y)
res = self._lib.EC_KEY_set_private_key(
ec_cdata, self._int_to_bn(numbers.private_value))
assert res == 1
evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata)
return _EllipticCurvePrivateKey(self, ec_cdata, evp_pkey)
def load_elliptic_curve_public_numbers(self, numbers):
curve_nid = self._elliptic_curve_to_nid(numbers.curve)
ec_cdata = self._lib.EC_KEY_new_by_curve_name(curve_nid)
assert ec_cdata != self._ffi.NULL
ec_cdata = self._ffi.gc(ec_cdata, self._lib.EC_KEY_free)
ec_cdata = self._ec_key_set_public_key_affine_coordinates(
ec_cdata, numbers.x, numbers.y)
evp_pkey = self._ec_cdata_to_evp_pkey(ec_cdata)
return _EllipticCurvePublicKey(self, ec_cdata, evp_pkey)
def _ec_cdata_to_evp_pkey(self, ec_cdata):
evp_pkey = self._lib.EVP_PKEY_new()
assert evp_pkey != self._ffi.NULL
evp_pkey = self._ffi.gc(evp_pkey, self._lib.EVP_PKEY_free)
res = self._lib.EVP_PKEY_set1_EC_KEY(evp_pkey, ec_cdata)
        assert res == 1
        return evp_pkey
def _elliptic_curve_to_nid(self, curve):
"""
Get the NID for a curve name.
"""
curve_aliases = {
"secp192r1": "prime192v1",
"secp256r1": "prime256v1"
}
curve_name = curve_aliases.get(curve.name, curve.name)
curve_nid = self._lib.OBJ_sn2nid(curve_name.encode())
if curve_nid == self._lib.NID_undef:
raise UnsupportedAlgorithm(
"{0} is not a supported elliptic curve".format(curve.name),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
return curve_nid
@contextmanager
def _tmp_bn_ctx(self):
bn_ctx = self._lib.BN_CTX_new()
assert bn_ctx != self._ffi.NULL
bn_ctx = self._ffi.gc(bn_ctx, self._lib.BN_CTX_free)
self._lib.BN_CTX_start(bn_ctx)
try:
yield bn_ctx
finally:
self._lib.BN_CTX_end(bn_ctx)
def _ec_key_determine_group_get_set_funcs(self, ctx):
"""
Given an EC_KEY determine the group and what methods are required to
get/set point coordinates.
"""
assert ctx != self._ffi.NULL
nid_two_field = self._lib.OBJ_sn2nid(b"characteristic-two-field")
assert nid_two_field != self._lib.NID_undef
group = self._lib.EC_KEY_get0_group(ctx)
assert group != self._ffi.NULL
method = self._lib.EC_GROUP_method_of(group)
assert method != self._ffi.NULL
nid = self._lib.EC_METHOD_get_field_type(method)
assert nid != self._lib.NID_undef
if nid == nid_two_field and self._lib.Cryptography_HAS_EC2M:
set_func = self._lib.EC_POINT_set_affine_coordinates_GF2m
get_func = self._lib.EC_POINT_get_affine_coordinates_GF2m
else:
set_func = self._lib.EC_POINT_set_affine_coordinates_GFp
get_func = self._lib.EC_POINT_get_affine_coordinates_GFp
assert set_func and get_func
return set_func, get_func, group
def _ec_key_set_public_key_affine_coordinates(self, ctx, x, y):
"""
This is a port of EC_KEY_set_public_key_affine_coordinates that was
added in 1.0.1.
Sets the public key point in the EC_KEY context to the affine x and y
values.
"""
if x < 0 or y < 0:
raise ValueError(
"Invalid EC key. Both x and y must be non-negative."
)
set_func, get_func, group = (
self._ec_key_determine_group_get_set_funcs(ctx)
)
point = self._lib.EC_POINT_new(group)
assert point != self._ffi.NULL
point = self._ffi.gc(point, self._lib.EC_POINT_free)
bn_x = self._int_to_bn(x)
bn_y = self._int_to_bn(y)
with self._tmp_bn_ctx() as bn_ctx:
check_x = self._lib.BN_CTX_get(bn_ctx)
check_y = self._lib.BN_CTX_get(bn_ctx)
res = set_func(group, point, bn_x, bn_y, bn_ctx)
assert res == 1
res = get_func(group, point, check_x, check_y, bn_ctx)
assert res == 1
res = self._lib.BN_cmp(bn_x, check_x)
assert res == 0
res = self._lib.BN_cmp(bn_y, check_y)
assert res == 0
res = self._lib.EC_KEY_set_public_key(ctx, point)
assert res == 1
res = self._lib.EC_KEY_check_key(ctx)
if res != 1:
self._consume_errors()
raise ValueError("Invalid EC key.")
return ctx
def _private_key_bytes(self, encoding, format, encryption_algorithm,
evp_pkey, cdata):
if not isinstance(encoding, serialization.Encoding):
raise TypeError("encoding must be an item from the Encoding enum")
if not isinstance(format, serialization.PrivateFormat):
raise TypeError(
"format must be an item from the PrivateFormat enum"
)
if not isinstance(encryption_algorithm,
serialization.KeySerializationEncryption):
raise TypeError(
"Encryption algorithm must be a KeySerializationEncryption "
"instance"
)
if isinstance(encryption_algorithm, serialization.NoEncryption):
password = b""
passlen = 0
evp_cipher = self._ffi.NULL
elif isinstance(encryption_algorithm,
serialization.BestAvailableEncryption):
# This is a curated value that we will update over time.
evp_cipher = self._lib.EVP_get_cipherbyname(
b"aes-256-cbc"
)
password = encryption_algorithm.password
passlen = len(password)
if passlen > 1023:
raise ValueError(
"Passwords longer than 1023 bytes are not supported by "
"this backend"
)
else:
raise ValueError("Unsupported encryption type")
if encoding is serialization.Encoding.PEM:
if format is serialization.PrivateFormat.PKCS8:
write_bio = self._lib.PEM_write_bio_PKCS8PrivateKey
key = evp_pkey
elif format is serialization.PrivateFormat.TraditionalOpenSSL:
if evp_pkey.type == self._lib.EVP_PKEY_RSA:
write_bio = self._lib.PEM_write_bio_RSAPrivateKey
elif evp_pkey.type == self._lib.EVP_PKEY_DSA:
write_bio = self._lib.PEM_write_bio_DSAPrivateKey
elif (self._lib.Cryptography_HAS_EC == 1 and
evp_pkey.type == self._lib.EVP_PKEY_EC):
write_bio = self._lib.PEM_write_bio_ECPrivateKey
key = cdata
elif encoding is serialization.Encoding.DER:
if format is serialization.PrivateFormat.TraditionalOpenSSL:
if not isinstance(
encryption_algorithm, serialization.NoEncryption
):
raise ValueError(
"Encryption is not supported for DER encoded "
"traditional OpenSSL keys"
)
return self._private_key_bytes_traditional_der(
evp_pkey.type, cdata
)
elif format is serialization.PrivateFormat.PKCS8:
write_bio = self._lib.i2d_PKCS8PrivateKey_bio
key = evp_pkey
bio = self._create_mem_bio()
res = write_bio(
bio,
key,
evp_cipher,
password,
passlen,
self._ffi.NULL,
self._ffi.NULL
)
assert res == 1
return self._read_mem_bio(bio)
def _private_key_bytes_traditional_der(self, key_type, cdata):
if key_type == self._lib.EVP_PKEY_RSA:
write_bio = self._lib.i2d_RSAPrivateKey_bio
elif (self._lib.Cryptography_HAS_EC == 1 and
key_type == self._lib.EVP_PKEY_EC):
write_bio = self._lib.i2d_ECPrivateKey_bio
elif key_type == self._lib.EVP_PKEY_DSA:
write_bio = self._lib.i2d_DSAPrivateKey_bio
bio = self._create_mem_bio()
res = write_bio(bio, cdata)
assert res == 1
return self._read_mem_bio(bio)
def _public_key_bytes(self, encoding, format, evp_pkey, cdata):
if not isinstance(encoding, serialization.Encoding):
raise TypeError("encoding must be an item from the Encoding enum")
if not isinstance(format, serialization.PublicFormat):
raise TypeError(
"format must be an item from the PublicFormat enum"
)
if format is serialization.PublicFormat.SubjectPublicKeyInfo:
if encoding is serialization.Encoding.PEM:
write_bio = self._lib.PEM_write_bio_PUBKEY
elif encoding is serialization.Encoding.DER:
write_bio = self._lib.i2d_PUBKEY_bio
key = evp_pkey
elif format is serialization.PublicFormat.PKCS1:
# Only RSA is supported here.
assert evp_pkey.type == self._lib.EVP_PKEY_RSA
if encoding is serialization.Encoding.PEM:
write_bio = self._lib.PEM_write_bio_RSAPublicKey
elif encoding is serialization.Encoding.DER:
write_bio = self._lib.i2d_RSAPublicKey_bio
key = cdata
bio = self._create_mem_bio()
res = write_bio(bio, key)
assert res == 1
return self._read_mem_bio(bio)
class GetCipherByName(object):
def __init__(self, fmt):
self._fmt = fmt
def __call__(self, backend, cipher, mode):
cipher_name = self._fmt.format(cipher=cipher, mode=mode).lower()
return backend._lib.EVP_get_cipherbyname(cipher_name.encode("ascii"))
backend = Backend()
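# Usage sketch: this module-level ``backend`` instance is what the high-level
# primitives consume; the digest below is an illustrative example, assuming
# SHA-256 is available in the linked OpenSSL.
#
#   from cryptography.hazmat.primitives import hashes
#
#   digest = hashes.Hash(hashes.SHA256(), backend=backend)
#   digest.update(b"some data")
#   fingerprint = digest.finalize()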
| sholsapp/cryptography | src/cryptography/hazmat/backends/openssl/backend.py | Python | bsd-3-clause | 49,028 |
"""Workflow for uploading many many many log files at once."""
from __future__ import absolute_import
import os
from os.path import isfile, getsize
import logging
import re
import luigi
import psycopg2
import pandas as pd
# import sqlalchemy
try:
from .pylog_parse import LogFile
except (ImportError, ValueError):  # relative import fails when run as a script
from pylog_parse import LogFile
logger = logging.getLogger(__name__)
class LogTask(luigi.Task):
"""Base task for log workflow."""
path = luigi.Parameter()
logtype = luigi.Parameter()
_conf = luigi.configuration.get_config()
_conf.reload()
_password = _conf.get('postgres', 'password')
_host = _conf.get('postgres', 'host')
_port = _conf.get('postgres', 'port')
_database = _conf.get('pylog', 'database')
_user = _conf.get('postgres', 'user')
_url = """postgresql://{u}:{p}@{h}:{port}/{db}""".format(u=_user,
p=_password,
h=_host,
port=_port,
db=_database)
@property
def url(self):
"""Postgresql url property."""
return self._url
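# The class-level _conf lookups above expect credentials in the luigi
# configuration file (typically luigi.cfg / client.cfg); the values below are
# placeholders, only the section and option names come from this module.
#
#   [postgres]
#   host = localhost
#   port = 5432
#   user = postgres
#   password = secret
#
#   [pylog]
#   database = logs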
def folder_size(path):
    """Return the total size in bytes of the files directly inside ``path``."""
    return sum(getsize(os.path.join(path, f))
               for f in os.listdir(path) if isfile(os.path.join(path, f)))
def list_directory_files(path, folders=False):
"""Yield all filenames in a path."""
for f in os.listdir(path):
if f[0] == '.':
continue
current_path = os.path.join(path, f)
if folders is False:
if os.path.isfile(current_path):
if os.path.getsize(current_path) != 0:
yield current_path
else:
file_ext = os.path.splitext(f)[1]
if file_ext == '' or file_ext == '/':
yield current_path
def get_subfolders(path):
for f in list_directory_files(path, folders=True):
file_ext = os.path.splitext(f)[1]
        if file_ext == '' or file_ext == '/':
yield f
def get_sublogs(path):
for f in list_directory_files(path):
file_ext = os.path.splitext(f)[1]
if file_ext == '.log':
yield f
class CheckLogPath(luigi.ExternalTask):
path = luigi.Parameter()
def output(self):
return luigi.LocalTarget(path=self.path)
class UploadLogs(LogTask):
def requires(self):
return CheckLogPath(path=self.path)
def output(self):
groups = re.search(r'2015.(\d{2}).(\d{2})', self.path).groups()
csv = '/home/ubuntu/elb2/2015-{m}-{d}.csv'.format(m=groups[0],
d=groups[1])
return luigi.LocalTarget(path=csv)
def run(self):
conn = psycopg2.connect(self.url)
log = LogFile(path=self.path, log_type=self.logtype)
cursor = conn.cursor()
cursor.close()
log.to_csv(self.output().path, con=conn, copy=True)
if os.path.exists(self.output().path):
df = pd.DataFrame({'length': log.length}, index=[0])
os.remove(self.output().path)
df.to_csv(self.output().path, index=False) # Only keep head of csv
class LogPaths(LogTask):
def requires(self):
log_files = [f for f in get_sublogs(self.path)]
subfolders = [f for f in get_subfolders(self.path)]
logger.debug('Path: {p}'.format(p=self.path))
logger.debug('Subfolders: {s}'.format(s=subfolders))
logger.debug('Subfiles: {f}'.format(f=log_files))
for fold in subfolders:
sub = [f for f in get_subfolders(fold)]
files = [f for f in get_sublogs(fold)]
if len(sub) > 0:
logger.info('Subfolders of {f}: {s}'.format(f=fold, s=sub))
yield LogPaths(path=fold, logtype=self.logtype)
elif len(files) > 0:
yield UploadLogs(path=fold, logtype=self.logtype)
for f in log_files:
yield UploadLogs(path=f, logtype=self.logtype)
def run(self):
total_length = 0
for f in self.input():
total_length = total_length + pd.read_csv(f.path()).iloc[0][0]
logger.info('AllFilesLength: {l}'.format(l=total_length))
if __name__ == '__main__':
luigi.run()
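# Invocation sketch: luigi.run() parses the task name and parameters from the
# command line; the path and log type below are illustrative only.
#
#   python workflow.py LogPaths --path /data/elb/2015.06.01 --logtype elb \
#       --local-scheduler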
| sethmenghi/pylog_parse | pylog_parse/workflow.py | Python | bsd-3-clause | 4,335 |
import os
from setuptools import setup, find_packages
setup(
name='django-scrape',
version='0.1',
author='Luke Hodkinson',
author_email='[email protected]',
maintainer='Luke Hodkinson',
maintainer_email='[email protected]',
url='https://github.com/furious-luke/django-scrape',
description='A django application for easier web scraping.',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
classifiers = [
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
license='BSD',
packages=find_packages(),
include_package_data=True,
package_data={'': ['*.txt', '*.js', '*.html', '*.*']},
install_requires=['setuptools'],
zip_safe=False,
)
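# Installation sketch: from a checkout of this repository, either of the usual
# commands works (the editable form is handy during development).
#
#   pip install -e .
#   python setup.py install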
| furious-luke/django-scrape | setup.py | Python | bsd-3-clause | 990 |
from __future__ import absolute_import
import base64
import json
import unittest
import urllib
import urllib2
import urlparse
from celery.exceptions import RetryTaskError
from mock import MagicMock as Mock
import mock
from . import tasks
from .conf import settings as mp_settings
class TestCase(unittest.TestCase):
def setUp(self):
super(TestCase, self).setUp()
patcher = mock.patch('urllib2.urlopen')
self.addCleanup(patcher.stop)
self.mock_urlopen = patcher.start()
self.mock_urlopen.return_value.read.return_value = '1'
# Setup token for mixpanel
mp_settings.MIXPANEL_API_TOKEN = 'testmixpanel'
@staticmethod
def assertDictEqual(a, b):
assert a == b, "Dicts are not equal.\nExpected: %s\nActual: %s" % (
json.dumps(b, indent=3, sort_keys=True),
json.dumps(a, indent=3, sort_keys=True))
def _test_any(self, task, *args, **kwargs):
result = kwargs.pop('result', True)
server = kwargs.pop('server', mp_settings.MIXPANEL_API_SERVER)
endpoint = kwargs.pop('endpoint', mp_settings.MIXPANEL_TRACKING_ENDPOINT)
data = kwargs.pop('data', {})
actual = task(*args, **kwargs)
self.assertTrue(self.mock_urlopen.called)
self.assertEqual(actual, result)
url = self.mock_urlopen.call_args[0][0]
scheme, netloc, path, params, querystr, frag = urlparse.urlparse(url)
query = urlparse.parse_qs(querystr, keep_blank_values=True, strict_parsing=True)
self.assertEqual(netloc, server)
self.assertEqual(path, endpoint)
self.assertEqual(query.keys(), ['data'])
datastr = base64.b64decode(query['data'][0])
actual = json.loads(datastr)
self.assertDictEqual(actual, data)
class EventTrackerTest(TestCase):
def _test_event(self, *args, **kwargs):
return self._test_any(tasks.event_tracker, *args, **kwargs)
def test_event(self):
self._test_event('clicked button',
data={
"event": "clicked button",
"properties": { "token": "testmixpanel" },
},
)
def test_event_props(self):
self._test_event('User logged in',
properties={
"distinct_id": "c9533b5b-d69e-479a-ae5f-42dd7a9752a0",
"partner": True,
"userid": 456,
"code": "double oh 7",
},
data={
"event": "User logged in",
"properties": {
"distinct_id": "c9533b5b-d69e-479a-ae5f-42dd7a9752a0",
"partner": True,
"userid": 456,
"code": "double oh 7",
"token": "testmixpanel",
},
},
)
def test_event_token(self):
self._test_event('Override token',
token="footoken",
data={
"event": "Override token",
"properties": { "token": "footoken" },
},
)
class PeopleTrackerTest(TestCase):
def _test_people(self, *args, **kwargs):
kwargs.setdefault('endpoint', mp_settings.MIXPANEL_PEOPLE_TRACKING_ENDPOINT)
return self._test_any(tasks.people_tracker, *args, **kwargs)
def test_validation(self):
self.assertRaises(tasks.InvalidPeopleProperties,
tasks.people_tracker, 'foo')
self.assertRaises(tasks.InvalidPeopleProperties,
tasks.people_tracker, 'foo', set={1:2}, add={3:4})
result = tasks.people_tracker('foo', set={1:2})
self.assertEqual(result, True)
result = tasks.people_tracker('foo', add={3:4})
self.assertEqual(result, True)
def test_people_set(self):
self._test_people('c9533b5b-d69e-479a-ae5f-42dd7a9752a0',
set={
"$first_name": "Aron",
},
data={
"$distinct_id": "c9533b5b-d69e-479a-ae5f-42dd7a9752a0",
"$token": "testmixpanel",
"$set": {
"$first_name": "Aron",
},
})
def test_people_add(self):
self._test_people('c9533b5b-d69e-479a-ae5f-42dd7a9752a0',
add={
"visits": 1,
},
data={
"$distinct_id": "c9533b5b-d69e-479a-ae5f-42dd7a9752a0",
"$token": "testmixpanel",
"$add": {
"visits": 1,
},
})
def test_people_token(self):
self._test_people('c9533b5b-d69e-479a-ae5f-42dd7a9752a0',
token="footoken",
set={
"$first_name": "Aron",
},
data={
"$distinct_id": "c9533b5b-d69e-479a-ae5f-42dd7a9752a0",
"$token": "footoken",
"$set": {
"$first_name": "Aron",
},
})
def test_people_extra(self):
self._test_people('c9533b5b-d69e-479a-ae5f-42dd7a9752a0',
set={
"$first_name": "Aron",
},
extra={
"$ignore_time": True,
},
data={
"$distinct_id": "c9533b5b-d69e-479a-ae5f-42dd7a9752a0",
"$token": "testmixpanel",
"$ignore_time": True,
"$set": {
"$first_name": "Aron",
},
})
class FunnelTrackerTest(TestCase):
def _test_funnel(self, *args, **kwargs):
return self._test_any(tasks.funnel_event_tracker, *args, **kwargs)
def test_validation(self):
funnel = 'test_funnel'
step = 'test_step'
goal = 'test_goal'
# Missing distinct_id
properties = {}
self.assertRaises(tasks.InvalidFunnelProperties,
tasks.funnel_event_tracker,
funnel, step, goal, properties)
# With distinct_id
properties = {
'distinct_id': 'c9533b5b-d69e-479a-ae5f-42dd7a9752a0',
}
result = tasks.funnel_event_tracker(funnel, step, goal, properties)
self.assertEqual(result, True)
def test_funnel(self):
funnel = 'test_funnel'
step = 'test_step'
goal = 'test_goal'
self._test_funnel(funnel, step, goal,
properties={
'distinct_id': 'c9533b5b-d69e-479a-ae5f-42dd7a9752a0',
},
data={
"event": "mp_funnel",
"properties": {
"distinct_id": "c9533b5b-d69e-479a-ae5f-42dd7a9752a0",
"funnel": "test_funnel",
"goal": "test_goal",
"step": "test_step",
"token": "testmixpanel"
},
},
)
class FailuresTestCase(TestCase):
def test_failed_request(self):
self.mock_urlopen.side_effect = urllib2.URLError("You're doing it wrong")
# This wants to test RetryTaskError, but that isn't available with
# CELERY_ALWAYS_EAGER
self.assertRaises(tasks.FailedEventRequest, # RetryTaskError
tasks.event_tracker,
'event_foo')
def test_failed_response(self):
self.mock_urlopen.return_value.read.return_value = '0'
result = tasks.event_tracker('event_foo')
self.assertEqual(result, False)
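# Usage sketch: outside of tests the same tasks are queued through Celery,
# assuming the package is importable as ``mixpanel``; the event name,
# ``user_uuid`` and properties below are illustrative only.
#
#   from mixpanel.tasks import event_tracker, people_tracker
#
#   event_tracker.delay("User logged in",
#                       properties={"distinct_id": user_uuid, "partner": True})
#   people_tracker.delay(user_uuid, set={"$first_name": "Aron"})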
| bss/mixpanel-celery | mixpanel/tests.py | Python | bsd-3-clause | 7,560 |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/rl_config.py
__version__=''' $Id$ '''
__doc__='''Configuration file. You may edit this if you wish.'''
allowTableBoundsErrors = 1 # set to 0 to die on too large elements in tables in debug (recommend 1 for production use)
shapeChecking = 1
defaultEncoding = 'WinAnsiEncoding' # 'WinAnsiEncoding' or 'MacRomanEncoding'
defaultGraphicsFontName= 'Times-Roman' #initializer for STATE_DEFAULTS in shapes.py
pageCompression = 1 # default page compression mode
defaultPageSize = 'A4' #default page size
defaultImageCaching = 0 #set to zero to remove those annoying cached images
ZLIB_WARNINGS = 1
warnOnMissingFontGlyphs = 0 #if 1, warns of each missing glyph
verbose = 0
showBoundary = 0 # turns on and off boundary behaviour in Drawing
emptyTableAction= 'error' # one of 'error', 'indicate', 'ignore'
invariant= 0 #produces repeatable,identical PDFs with same timestamp info (for regression testing)
eps_preview_transparent= None #set to white etc
eps_preview= 1 #set to False to disable
eps_ttf_embed= 1 #set to False to disable
eps_ttf_embed_uid= 0 #set to 1 to enable
overlapAttachedSpace= 1 #if set non false then adjacent flowable space after
#and space before are merged (max space is used).
longTableOptimize = 0 #default don't use Henning von Bargen's long table optimizations
autoConvertEncoding = 0 #convert internally as needed (experimental)
_FUZZ= 1e-6 #fuzz for layout arithmetic
wrapA85= 0 #set to 1 to get old wrapped line behaviour
fsEncodings=('utf8','cp1252','cp430') #encodings to attempt utf8 conversion with
odbc_driver= 'odbc' #default odbc driver
platypus_link_underline= 0 #paragraph links etc underlined if true
canvas_basefontname= 'Helvetica' #this is used to initialize the canvas; if you override to make
#something else you are responsible for ensuring the font is registered etc etc
allowShortTableRows=1 #allows some rows in a table to be short
imageReaderFlags=0 #attempt to convert images into internal memory files to reduce
#the number of open files (see lib.utils.ImageReader)
#if imageReaderFlags&2 then attempt autoclosing of those files
#if imageReaderFlags&4 then cache data
#if imageReaderFlags==-1 then use Ralf Schmitt's re-opening approach
# places to look for T1Font information
T1SearchPath = (
'c:/Program Files/Adobe/Acrobat 9.0/Resource/Font',
'c:/Program Files/Adobe/Acrobat 8.0/Resource/Font',
'c:/Program Files/Adobe/Acrobat 7.0/Resource/Font',
'c:/Program Files/Adobe/Acrobat 6.0/Resource/Font', #Win32, Acrobat 6
'c:/Program Files/Adobe/Acrobat 5.0/Resource/Font', #Win32, Acrobat 5
'c:/Program Files/Adobe/Acrobat 4.0/Resource/Font', #Win32, Acrobat 4
'%(disk)s/Applications/Python %(sys_version)s/reportlab/fonts', #Mac?
'/usr/lib/Acrobat9/Resource/Font', #Linux, Acrobat 5?
'/usr/lib/Acrobat8/Resource/Font', #Linux, Acrobat 5?
'/usr/lib/Acrobat7/Resource/Font', #Linux, Acrobat 5?
'/usr/lib/Acrobat6/Resource/Font', #Linux, Acrobat 5?
'/usr/lib/Acrobat5/Resource/Font', #Linux, Acrobat 5?
'/usr/lib/Acrobat4/Resource/Font', #Linux, Acrobat 4
'/usr/local/Acrobat9/Resource/Font', #Linux, Acrobat 5?
'/usr/local/Acrobat8/Resource/Font', #Linux, Acrobat 5?
'/usr/local/Acrobat7/Resource/Font', #Linux, Acrobat 5?
'/usr/local/Acrobat6/Resource/Font', #Linux, Acrobat 5?
'/usr/local/Acrobat5/Resource/Font', #Linux, Acrobat 5?
'/usr/local/Acrobat4/Resource/Font', #Linux, Acrobat 4
'%(REPORTLAB_DIR)s/fonts', #special
'%(REPORTLAB_DIR)s/../fonts', #special
'%(REPORTLAB_DIR)s/../../fonts', #special
'%(HOME)s/fonts', #special
)
# places to look for TT Font information
TTFSearchPath = (
'c:/winnt/fonts',
'c:/windows/fonts',
'/usr/lib/X11/fonts/TrueType/',
'%(REPORTLAB_DIR)s/fonts', #special
'%(REPORTLAB_DIR)s/../fonts', #special
'%(REPORTLAB_DIR)s/../../fonts',#special
'%(HOME)s/fonts', #special
#mac os X - from
#http://developer.apple.com/technotes/tn/tn2024.html
'~/Library/Fonts',
'/Library/Fonts',
'/Network/Library/Fonts',
'/System/Library/Fonts',
)
# places to look for CMap files - should ideally merge with above
CMapSearchPath = (
'/usr/lib/Acrobat9/Resource/CMap',
'/usr/lib/Acrobat8/Resource/CMap',
'/usr/lib/Acrobat7/Resource/CMap',
'/usr/lib/Acrobat6/Resource/CMap',
'/usr/lib/Acrobat5/Resource/CMap',
'/usr/lib/Acrobat4/Resource/CMap',
'/usr/local/Acrobat9/Resource/CMap',
'/usr/local/Acrobat8/Resource/CMap',
'/usr/local/Acrobat7/Resource/CMap',
'/usr/local/Acrobat6/Resource/CMap',
'/usr/local/Acrobat5/Resource/CMap',
'/usr/local/Acrobat4/Resource/CMap',
'C:\\Program Files\\Adobe\\Acrobat\\Resource\\CMap',
'C:\\Program Files\\Adobe\\Acrobat 9.0\\Resource\\CMap',
'C:\\Program Files\\Adobe\\Acrobat 8.0\\Resource\\CMap',
'C:\\Program Files\\Adobe\\Acrobat 7.0\\Resource\\CMap',
'C:\\Program Files\\Adobe\\Acrobat 6.0\\Resource\\CMap',
'C:\\Program Files\\Adobe\\Acrobat 5.0\\Resource\\CMap',
'C:\\Program Files\\Adobe\\Acrobat 4.0\\Resource\\CMap',
'%(REPORTLAB_DIR)s/fonts/CMap', #special
'%(REPORTLAB_DIR)s/../fonts/CMap', #special
'%(REPORTLAB_DIR)s/../../fonts/CMap', #special
'%(HOME)s/fonts/CMap', #special
)
#### Normally don't need to edit below here ####
try:
from local_rl_config import *
except ImportError:
pass
_SAVED = {}
sys_version=None
def _setOpt(name, value, conv=None):
'''set a module level value from environ/default'''
from os import environ
ename = 'RL_'+name
    if ename in environ:
value = environ[ename]
if conv: value = conv(value)
globals()[name] = value
def _startUp():
'''This function allows easy resetting to the global defaults
If the environment contains 'RL_xxx' then we use the value
else we use the given default'''
V='''T1SearchPath
CMapSearchPath
TTFSearchPath
allowTableBoundsErrors
shapeChecking
defaultEncoding
defaultGraphicsFontName
pageCompression
defaultPageSize
defaultImageCaching
ZLIB_WARNINGS
warnOnMissingFontGlyphs
verbose
showBoundary
emptyTableAction
invariant
eps_preview_transparent
eps_preview
eps_ttf_embed
eps_ttf_embed_uid
overlapAttachedSpace
longTableOptimize
autoConvertEncoding
_FUZZ
wrapA85
fsEncodings
odbc_driver
platypus_link_underline
canvas_basefontname
allowShortTableRows
imageReaderFlags'''.split()
import os, sys
global sys_version, _unset_
sys_version = sys.version.split()[0] #strip off the other garbage
from reportlab.lib import pagesizes
from reportlab.lib.utils import rl_isdir
if _SAVED=={}:
_unset_ = getattr(sys,'_rl_config__unset_',None)
if _unset_ is None:
class _unset_: pass
sys._rl_config__unset_ = _unset_ = _unset_()
for k in V:
_SAVED[k] = globals()[k]
#places to search for Type 1 Font files
import reportlab
D = {'REPORTLAB_DIR': os.path.abspath(os.path.dirname(reportlab.__file__)),
'HOME': os.environ.get('HOME',os.getcwd()),
'disk': os.getcwd().split(':')[0],
'sys_version': sys_version,
}
for name in ('T1SearchPath','TTFSearchPath','CMapSearchPath'):
P=[]
for p in _SAVED[name]:
d = (p % D).replace('/',os.sep)
if rl_isdir(d): P.append(d)
_setOpt(name,P)
for k in V[3:]:
v = _SAVED[k]
if isinstance(v,(int,float)): conv = type(v)
elif k=='defaultPageSize': conv = lambda v,M=pagesizes: getattr(M,v)
else: conv = None
_setOpt(k,v,conv)
_registered_resets=[]
def register_reset(func):
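    '''register a function (kept as a weak reference) to be called by _reset'''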
_registered_resets[:] = [x for x in _registered_resets if x()]
L = [x for x in _registered_resets if x() is func]
if L: return
from weakref import ref
_registered_resets.append(ref(func))
def _reset():
#attempt to reset reportlab and friends
_startUp() #our reset
for f in _registered_resets[:]:
c = f()
if c:
c()
else:
_registered_resets.remove(f)
_startUp()
| makinacorpus/reportlab-ecomobile | src/reportlab/rl_config.py | Python | bsd-3-clause | 10,215 |
import os
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from twimonial.models import Twimonial, User
from twimonial.ui import render_write
import config
class HomePage(webapp.RequestHandler):
def get(self):
if config.CACHE:
# Check cache first
cached_page = memcache.get('homepage')
if cached_page:
self.response.out.write(cached_page)
return
# Get latest five testimonials
latest_twimonials = [t.dictize() for t in Twimonial.all().order('-created_at').fetch(5)]
tmpl_values = {
'latest_twimonials': latest_twimonials,
'pop_users_twimonials': User.get_popular_users_testimonials(),
}
# Send out and cache it
rendered_page = render_write(tmpl_values, 'home.html', self.request,
self.response)
if config.CACHE:
memcache.set('homepage', rendered_page, config.CACHE_TIME_HOMEPAGE)
def head(self):
pass
class NotFoundPage(webapp.RequestHandler):
def get(self):
self.error(404)
tmpl_values = {
}
render_write(tmpl_values, '404.html', self.request, self.response)
def head(self):
self.error(404)
class StaticPage(webapp.RequestHandler):
def get(self, pagename):
render_write({}, pagename + '.html', self.request, self.response)
def head(self):
pass
class ListPage(webapp.RequestHandler):
def get(self, screen_names_string):
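    # Show testimonials for up to 10 dash-separated screen names,
    # caching the rendered page in the 'listpage' memcache namespace.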
limit = 10
screen_names = [name for name in screen_names_string.split('-') if name][:limit]
screen_names.sort()
screen_names_string = '-'.join(screen_names)
# Check cache first
    cached_page = memcache.get(screen_names_string, namespace='listpage')
if cached_page:
self.response.out.write(cached_page)
return
twimonials = [t.dictize() for t in Twimonial.get_tos(screen_names)]
missings = []
t_screen_names = [t['to_user']['screen_name'].lower() for t in twimonials]
for name in screen_names:
if name.lower() not in t_screen_names:
missings.append(name)
tmpl_values = {
'twimonials': twimonials,
'missings': ', '.join(missings),
}
# Send out and cache it
rendered_page = render_write(tmpl_values, 'list.html', self.request, self.response)
memcache.set(screen_names_string, rendered_page,
config.CACHE_TIME_LISTPAGE, namespace='listpage')
application = webapp.WSGIApplication([
('/', HomePage),
('/(about|terms|faq)', StaticPage),
('/list/([-_a-zA-Z0-9]+)', ListPage),
('/.*', NotFoundPage),
],
debug=config.DEBUG)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| livibetter-backup/twimonial | src/index.py | Python | bsd-3-clause | 2,779 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rfMHC documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import rfMHC
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rfMHC'
copyright = u'2014, David Olivieri'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = rfMHC.__version__
# The full version, including alpha/beta/rc tags.
release = rfMHC.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rfMHCdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'rfMHC.tex',
u'rfMHC Documentation',
u'David Olivieri', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rfMHC',
u'rfMHC Documentation',
[u'David Olivieri'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rfMHC',
u'rfMHC Documentation',
u'David Olivieri',
'rfMHC',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| dnolivieri/rfMHC | docs/conf.py | Python | bsd-3-clause | 8,369 |
def extractFullybookedtranslationsWordpressCom(item):
'''
Parser for 'fullybookedtranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractFullybookedtranslationsWordpressCom.py | Python | bsd-3-clause | 586 |
"""
Set of utility programs for IRIS.
"""
import os
import re
import io
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from glob import glob
# pylint: disable=F0401,E0611,E1103
from urllib.request import urlopen
from urllib.parse import urljoin, urlparse
from urllib.error import HTTPError, URLError
def iris_timeline_parse(timeline_file):
"""
    Parses an IRIS timeline file (SCI format) into a pandas DataFrame.
Parameters
----------
timeline_file - string
Filename with timeline file, or URL to the file.
Returns
-------
result - pandas.DataFrame
DataFrame with timeline.
"""
from sunpy.time import parse_time
data = []
slews = []
curr_slew = np.array([np.nan, np.nan])
line_pat = re.compile('.+OBSID=.+rpt.+endtime', re.IGNORECASE)
slew_pat = re.compile('.+I_EVENT_MESSAGE.+MSG="SLEW*', re.IGNORECASE)
if urlparse(timeline_file).netloc == '': # local file
file_obj = open(timeline_file, 'r')
else: # network location
try:
            # urlopen().read() returns bytes; decode so io.StringIO gets text
            tmp = urlopen(timeline_file).read().decode('utf-8')
            file_obj = io.StringIO(tmp)
except (HTTPError, URLError):
raise EOFError(('iris_timeline_parse: could not open the '
'following file:\n' + timeline_file))
for line in file_obj:
if slew_pat.match(line):
tmp = line.split('=')[1].replace('"', '').strip('SLEW_').split('_')
curr_slew = np.array(tmp).astype('f')
if line_pat.match(line):
data.append(line.replace('//', '').replace(' x ', ', ').strip())
slews.append(curr_slew) # include most up to date slew
file_obj.close()
if len(data) == 0:
raise EOFError(('iris_timeline_parse: could not find any'
' observations in:\n' + str(timeline_file)))
arr_type = [('date_obs', 'datetime64[us]'), ('date_end', 'datetime64[us]'),
('obsid', 'i8'), ('repeats', 'i4'), ('duration', 'f'),
('size', 'f'), ('description', '|S200'), ('xpos', 'f'),
('ypos', 'f'), ('timeline_name', '|S200')]
result = np.zeros(len(data), dtype=arr_type)
result['timeline_name'] = timeline_file
for i, line in enumerate(data):
date_tmp = line.split()[0]
if date_tmp[-2:] == '60': # deal with non-compliant second formats
date_tmp = date_tmp[:-2] + '59.999999'
result[i]['date_obs'] = parse_time(date_tmp)
tmp = line.replace(' Mbits, end', ', end') # Remove new Mbits size str
tmp = tmp.split('desc=')
result[i]['description'] = tmp[1]
tmp = tmp[0]
tmp = [k.split('=')[-1] for k in ' '.join(tmp.split()[1:]).split(',')]
result[i]['obsid'] = int(tmp[0])
result[i]['repeats'] = int(tmp[1])
result[i]['duration'] = float(tmp[2][:-1])
result[i]['size'] = float(tmp[3])
tmp = tmp[4].split()
result[i]['date_end'] = parse_time(date_tmp[:9] + tmp[-1]) + \
timedelta(days=int(tmp[0].strip('+')))
result[i]['xpos'] = slews[i][0]
result[i]['ypos'] = slews[i][1]
return pd.DataFrame(result) # order by date_obs
def get_iris_timeline(date_start, date_end, path=None, fmt='%Y/%m/%d',
pattern='.*IRIS_science_timeline.+txt'):
"""
Gets IRIS timelines for a given time period.
"""
if path is None:
path = ('http://iris.lmsal.com/health-safety/timeline/'
'iris_tim_archive/')
print('Locating files...')
file_obj = FileCrawler(date_start, date_end, path, pattern, fmt)
result = pd.DataFrame()
for tfile in file_obj.files:
try:
print('Parsing:\n' + tfile)
timeline = iris_timeline_parse(tfile)
result = result.append(timeline)
except EOFError:
print('get_iris_timeline: could not read timeline data from:\n' +
tfile)
return result
def get_iris_files(date_start, date_end, pattern='iris.*.fits', base='level1',
path='/Users/tiago/data/IRIS/data/'):
"""
Gets list of IRIS observations for a given time period.
Parameters
----------
date_start : str or datetime object
Starting date to search
date_end : str or datetime object
Ending date to search
path : str
Base path to look into
pattern : str
Regular expression used to match file names.
Returns
-------
files : list
List of strings with matching file names.
"""
file_path = os.path.join(path, base)
file_obj = FileCrawler(date_start, date_end, file_path, pattern,
fmt='%Y/%m/%d/H%H%M')
return file_obj.files
class FileCrawler(object):
"""
Crawls through file names in a local or remote (http) path.
Parameters
----------
date_start : str or datetime object
Starting date to search
date_end : str or datetime object
Ending date to search
path : str
Base path to look into
pattern : str
Regular expression used to match file names.
recursive: bool
If True, will recursively search subdirectories of dates.
Attributes
----------
date_start : str or datetime object
Starting date given as input
date_end : str or datetime object
Ending date given as input
paths : list
List of file paths given the supplied dates
files : list
List of file names given the supplied path, dates, and pattern
Methods
-------
get_remote_paths(date_start, date_end, path, fmt='%Y%m%d')
Finds existing remote paths within specified dates in path, given fmt.
get_remote_files(path, pattern)
Finds existing remote files within specified path matching pattern.
"""
def __init__(self, date_start, date_end, path, pattern, fmt='%Y%m%d',
verbose=False):
self.date_start = date_start
self.date_end = date_end
self.paths = self.get_paths(date_start, date_end, path, fmt)
if verbose:
print('Found the following paths:')
for item in self.paths:
print(item)
self.files = []
for item in self.paths:
self.files += self.get_files(item, pattern)
if verbose:
print('Found the following files:')
for item in self.files:
print(item)
@classmethod
def get_paths(cls, date_start, date_end, path, fmt='%Y%m%d'):
"""
Gets paths within specified date range.
Parameters
----------
date_start : str or datetime object
Starting date to search
date_end : str or datetime object
Ending date to search
path : str
Base path where to look for locations (if starts with http,
remote search will be done)
format : str
datetime format string for date in directories.
Returns
-------
dates - list
List with path locations (local directories or remote paths)
"""
from sunpy.time import parse_time
dates = []
date_start = parse_time(date_start)
date_end = parse_time(date_end)
curr = date_start
if '%H' in fmt:
incr = [0, 1] # increment only hours
else:
incr = [1, 0] # increment only days
if urlparse(path).netloc == '': # local file
while curr <= date_end:
curr_path = os.path.join(path, datetime.strftime(curr, fmt))
curr += timedelta(days=incr[0], hours=incr[1])
if os.path.isdir(curr_path):
dates.append(curr_path)
else: # network location
while curr <= date_end:
curr_path = urljoin(path, datetime.strftime(curr, fmt) + '/')
curr += timedelta(days=incr[0], hours=incr[1])
try:
urlopen(curr_path)
dates.append(curr_path)
except (HTTPError, URLError):
continue
return dates
@classmethod
def get_files(cls, path, pattern):
"""
Obtains local or remote files patching a pattern.
Parameters
----------
path : str
Local directory or remote URL (e.g. 'http://www.google.com/test/')
pattern : str
Regular expression to be matched in href link names.
Returns
-------
files : list
List of strings. Each string has the path for the files matching
the pattern (and are made sure exist).
.. todo:: add recursive option, add option for FTP
"""
from bs4 import BeautifulSoup
files = []
pat_re = re.compile(pattern, re.IGNORECASE)
if urlparse(path).scheme == '': # local file
all_files = glob(path + '/*')
for item in all_files:
if pat_re.match(item) and os.path.isfile(item):
files.append(item)
elif urlparse(path).scheme == 'http':
            soup = BeautifulSoup(urlopen(path).read(), 'html.parser')
for link in soup.find_all('a'):
if pat_re.match(link.get('href')):
file_url = urljoin(path, link.get('href'))
try: # Add only links that exist
urlopen(file_url)
files.append(file_url)
except (HTTPError, URLError):
pass
elif urlparse(path).scheme == 'ftp':
raise NotImplementedError('ftp not yet supported...')
return files
| ITA-Solar/helita | helita/obs/iris_util.py | Python | bsd-3-clause | 9,925 |
from __future__ import absolute_import, division, print_function
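# Registry of trigger types: each entry keeps the parser plus its
# start/stop/join handlers and the threads started for that trigger.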
_TRIGGERS = {}
def register(tpe, start, stop, join):
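    """Return a decorator that registers the wrapped function as the parser
    for trigger type `tpe`, along with its start/stop/join handlers."""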
def decorator(f):
_TRIGGERS[tpe] = {
"parser": f,
"start": start,
"stop": stop,
"join": join,
"threads": []
}
return f
return decorator
def lookup(tpe):
assert tpe in _TRIGGERS
return _TRIGGERS[tpe]["parser"]
def start():
for trigger in _TRIGGERS.values():
trigger["threads"].append(trigger["start"]())
def stop():
for trigger in _TRIGGERS.values():
for thread in trigger["threads"]:
if thread is not None:
trigger["stop"](thread)
else:
trigger["stop"]()
def join():
for trigger in _TRIGGERS.values():
for thread in trigger["threads"]:
if thread is not None:
trigger["join"](thread)
else:
trigger["join"]()
| stcorp/legato | legato/registry.py | Python | bsd-3-clause | 987 |
import sublime, sublime_plugin, os
class ExpandTabsOnSave(sublime_plugin.EventListener):
def on_pre_save(self, view):
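    # Convert tabs to spaces just before the file is written, when the
    # view's 'expand_tabs_on_save' setting is enabled.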
if view.settings().get('expand_tabs_on_save') == 1:
view.window().run_command('expand_tabs')
| edonet/package | Edoner/expand_tabs_on_save.py | Python | isc | 236 |
import os
import load_data
import numpy as np
from keras.backend import theano_backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.utils.generic_utils import Progbar
from keras.callbacks import Callback
import generative_models as gm
from common import CsvHistory
from common import merge_result_batches
import adverse_models as am
from collections import Counter
from scipy.stats import entropy
def train(train, dev, model, model_dir, batch_size, glove, beam_size,
samples_per_epoch, val_samples, cmodel, epochs):
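    """Train the generative model with fit_generator, saving the weights with
    the lowest hypo_loss to model_dir, logging history to CSV and validating
    each epoch through the ValidateGen callback. Returns the training history."""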
if not os.path.exists(model_dir):
os.makedirs(model_dir)
hypo_len = model.get_layer('hypo_input').input_shape[1] -1
ne = model.get_layer('noise_embeddings')
vae = model.get_layer('vae_output')
g_train = train_generator(train, batch_size, hypo_len,
'class_input' in model.input_names, ne, vae)
saver = ModelCheckpoint(model_dir + '/weights.hdf5', monitor = 'hypo_loss', mode = 'min', save_best_only = True)
#saver = ModelCheckpoint(model_dir + '/weights{epoch:02d}.hdf5')
#es = EarlyStopping(patience = 4, monitor = 'hypo_loss', mode = 'min')
csv = CsvHistory(model_dir + '/history.csv')
gtest = gm.gen_test(model, glove, batch_size)
noise_size = ne.output_shape[-1] if ne else model.get_layer('expansion').input_shape[-1]
cb = ValidateGen(dev, gtest, beam_size, hypo_len, val_samples, noise_size, glove, cmodel, True, True)
hist = model.fit_generator(g_train, samples_per_epoch = samples_per_epoch, nb_epoch = epochs,
callbacks = [cb, saver, csv])
return hist
def train_generator(train, batch_size, hypo_len, cinput, ninput, vae):
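    """Yield (inputs, outputs) minibatches indefinitely for fit_generator,
    reshuffling the training set each pass and skipping incomplete batches."""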
while True:
mb = load_data.get_minibatches_idx(len(train[0]), batch_size, shuffle=True)
for i, train_index in mb:
if len(train_index) != batch_size:
continue
padded_p = train[0][train_index]
padded_h = train[1][train_index]
label = train[2][train_index]
hypo_input = np.concatenate([np.zeros((batch_size, 1)), padded_h], axis = 1)
train_input = np.concatenate([padded_h, np.zeros((batch_size, 1))], axis = 1)
inputs = [padded_p, hypo_input] + ([train_index[:, None]] if ninput else []) + [train_input]
if cinput:
inputs.append(label)
outputs = [np.ones((batch_size, hypo_len + 1, 1))]
if vae:
outputs += [np.zeros(batch_size)]
yield (inputs, outputs)
def generative_predict_beam(test_model, premises, noise_batch, class_indices, return_best, hypo_len):
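    """Beam-search decode hypotheses for a batch of premises, using
    batch_size / len(premises) beams per premise. The stateful decoder is run
    one word at a time and its states are reshuffled to follow the surviving
    beams. Returns (words, scores) for the best beam per premise when
    return_best is True, otherwise (words, scores, debug_probs) for all beams."""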
core_model, premise_func, noise_func = test_model
version = int(core_model.name[-1])
batch_size = core_model.input_layers[0].input_shape[0]
beam_size = batch_size / len(premises)
dup_premises = np.repeat(premises, beam_size, axis = 0)
premise = premise_func(dup_premises) if version != 9 else None
class_input = np.repeat(class_indices, beam_size, axis = 0)
embed_vec = np.repeat(noise_batch, beam_size, axis = 0)
if version == 8:
noise = noise_func(embed_vec, class_input)
elif version == 6 or version == 7:
noise = noise_func(embed_vec[:,-1,:], class_input)
elif version == 9:
noise = noise_func(embed_vec, class_input, dup_premises)
elif version == 5:
noise = noise_func(embed_vec)
core_model.reset_states()
core_model.get_layer('attention').set_state(noise)
word_input = np.zeros((batch_size, 1))
result_probs = np.zeros(batch_size)
debug_probs = np.zeros((hypo_len, batch_size))
lengths = np.zeros(batch_size)
words = None
probs = None
for i in range(hypo_len):
data = [premise, word_input, noise, np.zeros((batch_size,1))]
if version == 9:
data = data[1:]
preds = core_model.predict_on_batch(data)
preds = np.log(preds)
split_preds = np.array(np.split(preds, len(premises)))
if probs is None:
if beam_size == 1:
word_input = np.argmax(split_preds[:, 0, 0], axis = 1)[:,None]
else:
word_input = np.argpartition(-split_preds[:, 0, 0], beam_size)[:,:beam_size]
probs = split_preds[:,0,0][np.arange(len(premises))[:, np.newaxis],[word_input]].ravel()
word_input= word_input.ravel()[:,None]
words = np.array(word_input)
debug_probs[0] = probs
else:
split_cprobs = (preds[:,-1,:] + probs[:, None]).reshape((len(premises), -1))
if beam_size == 1:
max_indices = np.argmax(split_cprobs, axis = 1)[:,None]
else:
max_indices = np.argpartition(-split_cprobs, beam_size)[:,:beam_size]
probs = split_cprobs[np.arange(len(premises))[:, np.newaxis],[max_indices]].ravel()
word_input = (max_indices % preds.shape[-1]).ravel()[:,None]
state_indices = (max_indices / preds.shape[-1]) + np.arange(0, batch_size, beam_size)[:, None]
state_indices = state_indices.ravel()
shuffle_states(core_model, state_indices)
words = np.concatenate([words[state_indices], word_input], axis = -1)
debug_probs = debug_probs[:, state_indices]
debug_probs[i] = probs - np.sum(debug_probs, axis = 0)
lengths += 1 * (word_input[:,0] > 0).astype('int')
if (np.sum(word_input) == 0):
words = np.concatenate([words, np.zeros((batch_size, hypo_len - words.shape[1]))],
axis = -1)
break
result_probs = probs / -lengths
if return_best:
best_ind = np.argmin(np.array(np.split(result_probs, len(premises))), axis =1) + np.arange(0, batch_size, beam_size)
return words[best_ind], result_probs[best_ind]
else:
return words, result_probs, debug_probs
def shuffle_states(graph_model, indices):
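    """Reorder the states of every stateful layer in graph_model according to
    indices, so the decoder states track the beams kept at this step."""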
for l in graph_model.layers:
if getattr(l, 'stateful', False):
for s in l.states:
K.set_value(s, s.get_value()[indices])
def val_generator(dev, gen_test, beam_size, hypo_len, noise_size):
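    """Yield (premises, generated words, losses, noise, class labels) for
    successive slices of the dev set, decoding with beam search."""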
batch_size = gen_test[0].input_layers[0].input_shape[0]
per_batch = batch_size / beam_size
while True:
mb = load_data.get_minibatches_idx(len(dev[0]), per_batch, shuffle=False)
for i, train_index in mb:
if len(train_index) != per_batch:
continue
premises = dev[0][train_index]
noise_input = np.random.normal(scale=0.11, size=(per_batch, 1, noise_size))
class_indices = dev[2][train_index]
words, loss = generative_predict_beam(gen_test, premises, noise_input,
class_indices, True, hypo_len)
yield premises, words, loss, noise_input, class_indices
def single_generate(premise, label, gen_test, beam_size, hypo_len, noise_size, noise_input = None):
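    """Generate a batch of hypotheses for one premise and label, drawing
    random noise unless noise_input is supplied."""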
batch_size = gen_test[0].input_layers[0].input_shape[0]
per_batch = batch_size / beam_size
premises = [premise] * per_batch
if noise_input is None:
noise_input = np.random.normal(scale=0.11, size=(per_batch, 1, noise_size))
class_indices = np.ones(per_batch) * label
class_indices = load_data.convert_to_one_hot(class_indices, 3)
words, loss = generative_predict_beam(gen_test, premises, noise_input,
class_indices, True, hypo_len)
return words
def validate(dev, gen_test, beam_size, hypo_len, samples, noise_size, glove, cmodel = None, adverse = False,
diverse = False):
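    """Evaluate generation on dev data: hypothesis loss and perplexity, plus
    optional classifier loss/accuracy/entropy, adversarial loss and diversity.
    Returns a dict with the collected metrics."""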
vgen = val_generator(dev, gen_test, beam_size, hypo_len, noise_size)
p = Progbar(samples)
batchez = []
while p.seen_so_far < samples:
batch = next(vgen)
preplexity = np.mean(np.power(2, batch[2]))
loss = np.mean(batch[2])
losses = [('hypo_loss',loss),('perplexity', preplexity)]
if cmodel is not None:
ceval = cmodel.evaluate([batch[0], batch[1]], batch[4], verbose = 0)
losses += [('class_loss', ceval[0]), ('class_acc', ceval[1])]
probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
losses += [('class_entropy', np.mean(-np.sum(probs * np.log(probs), axis=1)))]
p.add(len(batch[0]), losses)
batchez.append(batch)
batchez = merge_result_batches(batchez)
res = {}
if adverse:
val_loss = adverse_validation(dev, batchez, glove)
print 'adverse_loss:', val_loss
res['adverse_loss'] = val_loss
if diverse:
div, _, _, _ = diversity(dev, gen_test, beam_size, hypo_len, noise_size, 64, 32)
res['diversity'] = div
print
for val in p.unique_values:
arr = p.sum_values[val]
res[val] = arr[0] / arr[1]
return res
def adverse_validation(dev, batchez, glove):
samples = len(batchez[1])
discriminator = am.discriminator(glove, 50)
ad_model = am.adverse_model(discriminator)
res = ad_model.fit([dev[1][:samples], batchez[1]], np.zeros(samples), validation_split=0.1,
verbose = 0, nb_epoch = 20, callbacks = [EarlyStopping(patience=2)])
return np.min(res.history['val_loss'])
def diversity(dev, gen_test, beam_size, hypo_len, noise_size, per_premise, samples):
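    """Sample premises from dev and measure hypothesis diversity: entropy of
    the generated hypotheses and of their words, plus average Jaccard
    distances between hypotheses and from hypotheses to the premise."""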
step = len(dev[0]) / samples
sind = [i * step for i in range(samples)]
p = Progbar(per_premise * samples)
for i in sind:
hypos = []
unique_words = []
hypo_list = []
premise = dev[0][i]
prem_list = set(cut_zeros(list(premise)))
while len(hypos) < per_premise:
label = np.argmax(dev[2][i])
words = single_generate(premise, label, gen_test, beam_size, hypo_len, noise_size)
hypos += [str(ex) for ex in words]
unique_words += [int(w) for ex in words for w in ex if w > 0]
hypo_list += [set(cut_zeros(list(ex))) for ex in words]
jacks = []
prem_jacks = []
for u in range(len(hypo_list)):
sim_prem = len(hypo_list[u] & prem_list)/float(len(hypo_list[u] | prem_list))
prem_jacks.append(sim_prem)
for v in range(u+1, len(hypo_list)):
sim = len(hypo_list[u] & hypo_list[v])/float(len(hypo_list[u] | hypo_list[v]))
jacks.append(sim)
avg_dist_hypo = 1 - np.mean(jacks)
avg_dist_prem = 1 - np.mean(prem_jacks)
d = entropy(Counter(hypos).values())
w = entropy(Counter(unique_words).values())
p.add(len(hypos), [('diversity', d),('word_entropy', w),('avg_dist_hypo', avg_dist_hypo), ('avg_dist_prem', avg_dist_prem)])
arrd = p.sum_values['diversity']
arrw = p.sum_values['word_entropy']
arrj = p.sum_values['avg_dist_hypo']
arrp = p.sum_values['avg_dist_prem']
return arrd[0] / arrd[1], arrw[0] / arrw[1], arrj[0] / arrj[1], arrp[0] / arrp[1]
def cut_zeros(list):
return [a for a in list if a > 0]
class ValidateGen(Callback):
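    """Keras callback that copies the freshly trained weights into the
    generation test model and runs validate() after every epoch, merging the
    results into the training logs."""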
def __init__(self, dev, gen_test, beam_size, hypo_len, samples, noise_size,
glove, cmodel, adverse, diverse):
self.dev = dev
self.gen_test=gen_test
self.beam_size = beam_size
self.hypo_len = hypo_len
self.samples = samples
self.noise_size = noise_size
self.cmodel= cmodel
self.glove = glove
self.adverse = adverse
self.diverse = diverse
def on_epoch_end(self, epoch, logs={}):
gm.update_gen_weights(self.gen_test[0], self.model)
val_log = validate(self.dev, self.gen_test, self.beam_size, self.hypo_len, self.samples,
self.noise_size, self.glove, self.cmodel, self.adverse, self.diverse)
logs.update(val_log)
| jstarc/deep_reasoning | generative_alg.py | Python | mit | 12,160 |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet resends transactions periodically."""
from collections import defaultdict
import time
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import ToHex
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import assert_equal, wait_until
class P2PStoreTxInvs(P2PInterface):
def __init__(self):
super().__init__()
self.tx_invs_received = defaultdict(int)
def on_inv(self, message):
# Store how many times invs have been received for each tx.
for i in message.inv:
if i.type == 1:
# save txid
self.tx_invs_received[i.hash] += 1
class ResendWalletTransactionsTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0] # alias
node.add_p2p_connection(P2PStoreTxInvs())
self.log.info("Create a new transaction and wait until it's broadcast")
txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)
# Can take a few seconds due to transaction trickling
wait_until(lambda: node.p2p.tx_invs_received[txid] >= 1, lock=mininode_lock)
# Add a second peer since txs aren't rebroadcast to the same peer (see filterInventoryKnown)
node.add_p2p_connection(P2PStoreTxInvs())
self.log.info("Create a block")
# Create and submit a block without the transaction.
# Transactions are only rebroadcast if there has been a block at least five minutes
# after the last time we tried to broadcast. Use mocktime and give an extra minute to be sure.
block_time = int(time.time()) + 6 * 60
node.setmocktime(block_time)
block = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockchaininfo()['blocks']), block_time)
block.nVersion = 3
block.rehash()
block.solve()
node.submitblock(ToHex(block))
# Transaction should not be rebroadcast
node.p2ps[1].sync_with_ping()
assert_equal(node.p2ps[1].tx_invs_received[txid], 0)
self.log.info("Transaction should be rebroadcast after 30 minutes")
# Use mocktime and give an extra 5 minutes to be sure.
rebroadcast_time = int(time.time()) + 41 * 60
node.setmocktime(rebroadcast_time)
wait_until(lambda: node.p2ps[1].tx_invs_received[txid] >= 1, lock=mininode_lock)
if __name__ == '__main__':
ResendWalletTransactionsTest().main()
| nlgcoin/guldencoin-official | test/functional/wallet_resendwallettransactions.py | Python | mit | 2,912 |
#import SYS
import ShareYourSystem as SYS
#Definition
MyBrianer=SYS.BrianerClass(
).collect(
"Neurongroupers",
'P',
SYS.NeurongrouperClass(
#Here are defined the brian classic shared arguments for each pop
**{
'NeurongroupingKwargVariablesDict':
{
'N':2,
'model':
'''
Jr : 1
dr/dt = (-r+Jr)/(20*ms) : 1
'''
},
'ConnectingGraspClueVariablesList':
[
SYS.GraspDictClass(
{
'HintVariable':'/NodePointDeriveNoder/<Neurongroupers>PNeurongrouper',
'SynapsingKwargVariablesDict':
{
'model':
'''
J : 1
Jr_post=J*r_pre : 1 (summed)
'''
},
'SynapsingWeigthSymbolStr':'J',
'SynapsingWeigthFloatsArray':SYS.array(
[
[0.,-2.],
[4.,0.]
]
),
"SynapsingDelayDict":{'r':1.*SYS.brian2.ms}
}
)
]
}
).collect(
"StateMoniters",
'Rate',
SYS.MoniterClass(
**{
'MoniteringVariableStr':'r',
'MoniteringRecordTimeIndexIntsArray':[0,1]
}
)
)
).network(
**{
'RecruitingConcludeConditionVariable':[
(
'MroClassesList',
SYS.contains,
SYS.NeurongrouperClass
)
]
}
).brian()
#init variables
map(
lambda __BrianedNeuronGroup:
__BrianedNeuronGroup.__setattr__(
'r',
1.+SYS.array(map(float,xrange(__BrianedNeuronGroup.N)))
),
MyBrianer.BrianedNeuronGroupsList
)
#run
MyBrianer.run(100)
#plot
M=MyBrianer['<Neurongroupers>PNeurongrouper']['<StateMoniters>RateMoniter'].StateMonitor
SYS.plot(M.t, M.r.T)
SYS.show()
| Ledoux/ShareYourSystem | Pythonlogy/draft/Simulaters/Brianer/draft/07_ExampleDoc.py | Python | mit | 1,597 |
# -*- coding: utf-8 -*-
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class RegisterToken(models.Model):
user = models.ForeignKey(User)
token = models.CharField(_(u'token'), max_length=32)
created = models.DateTimeField(_(u'created'), editable=False, auto_now_add=True)
@property
def is_valid(self):
valid_period = datetime.timedelta(days=1)
now = datetime.datetime.now()
return now < self.created + valid_period
class Meta:
verbose_name = _(u'register token')
verbose_name_plural = _(u'register tokens')
| dotKom/studlan | apps/authentication/models.py | Python | mit | 674 |
from py_word_suggest.utils import *
import pytest
raw_json = """
{"lang:nl:0:ben":[["ik", 22.0], ["er", 8.0], ["een", 7.0], ["je", 5.0]],"lang:nl:0:Ik":[["heb", 66.0], ["ben", 52.0], ["denk", 15.0], [
"wil", 13.0], ["acht", 1.0]],"lang:eng:0:I":[["am", 100], ["want", 246], ["love", 999]],"lang:eng:0:am":[["the",100], ["Alice", 50],["Bob", 45]]}
"""
invalid_json = """
test
"""
@pytest.fixture(scope="session")
def invalid_json_file(tmpdir_factory):
fn = tmpdir_factory.mktemp("data_tmp").join("test_invalid.json")
fn.write(invalid_json)
invalid_json_fn = fn
return fn
@pytest.fixture(scope="session")
def raw_json_file(tmpdir_factory):
f = tmpdir_factory.mktemp("data_tmp").join("test.json")
f.write(raw_json)
return f
def setUp(invalid_json_file):
invalid_json_file
@pytest.mark.parametrize("testInput, expectedOutput, state",
[
(b'{"lang:nl:0:Ik":[["heb", 66.0], ["ben", 52.0], ["denk", 15.0], ["wil", 13.0], ["acht", 1.0]]}', {
'lang:nl:0:Ik': [['heb', 66.0], ['ben', 52.0], ['denk', 15.0], ['wil', 13.0], ['acht', 1.0]]}, 'normalState'),
('{"lang:nl:0:Ik":[["heb", 66.0], ["ben", 52.0], ["denk", 15.0], ["wil", 13.0], ["acht", 1.0]]}', {
'lang:nl:0:Ik': [['heb', 66.0], ['ben', 52.0], ['denk', 15.0], ['wil', 13.0], ['acht', 1.0]]}, 'normalState'),
('"lang:nl"', "Error load_json_string, jsonString, '\"lang:nl\"' needs to be a string represetation of a json object, jsonString needs to be set between braces. A str item needs to be set between double quotes.", 'errorState'),
(b'"lang:nl"', "Error load_json_string, jsonString, 'b'\"lang:nl\"'' needs to be a string represetation of a json object, jsonString needs to be set between braces. A str item needs to be set between double quotes.", 'errorState'),
(b'\'lang\':0"', "Error load_json_string, jsonString, 'b'\\'lang\\':0\"'' needs to be a string represetation of a json object, jsonString needs to be set between braces. A str item needs to be set between double quotes.", 'errorState'),
(0, "Error load_json_string, jsonString, '0' needs to be a string represetation of a json object, jsonString needs to be set between braces. A str item needs to be set between double quotes.", 'errorState'),
]
)
def test_load_json_from_string(testInput, expectedOutput, state):
"""utils, Json from string"""
# Test normal behavior
if state == 'normalState':
assert load_json_string(testInput) == expectedOutput
# Test expect error
if state == 'errorState':
with pytest.raises(utilsError) as e:
load_json_string(testInput)
assert str(e.value) == expectedOutput
@pytest.mark.parametrize("testInput, expectedOutput, state",
[
('\"lang:nl:0:Ik\"', {"lang:nl:0:Ik": [["heb", 66.0], ["ben", 52.0], [
"denk", 15.0], ["wil", 13.0], ["acht", 1.0]]}, 'normalState'),
('\"lang:nl:0:Ik', "Error, grep_jsonstring_from_system: '\"lang:nl:0:Ik' needs to be a str type and need to be between double quotes.", 'errorState'),
('lang:nl:0:Ik\"', "Error, grep_jsonstring_from_system: 'lang:nl:0:Ik\"' needs to be a str type and need to be between double quotes.", 'errorState'),
('lang:nl:0:Ik', "Error, grep_jsonstring_from_system: 'lang:nl:0:Ik' needs to be a str type and need to be between double quotes.", 'errorState'),
(0, "Error, grep_jsonstring_from_system: '0' needs to be a str type and need to be between double quotes.", 'errorState'),
('\"NoKeyFound\"', False, 'normalState'),
('\"NO-MATCH\"', False, 'normalState'),
('\"NOEXISTINGFILE\"', "Error, grep_jsonstring_from_system: File NOEXISTINGFILE not exists or is busy.", 'fileError'),
# ('lang:nl:0:Ik' ,b'"lang:nl:0:Ik":[["heb", 66.0], ["ben", 52.0], ["denk", 15.0], ["wil", 13.0], ["acht", 1.0]]','":.*]]','defaultArguments'),
]
)
def test_grep_jsonstring_from_system(raw_json_file, testInput, expectedOutput, state):
"""utils, Grep bigram from file with system jq util"""
# Test default argument
if state == 'fileError':
raw_json_file = 'NOEXISTINGFILE'
with pytest.raises(utilsError) as e:
grep_jsonstring_from_system(testInput, raw_json_file)
assert str(e.value) == expectedOutput
# Test normal behavior
if state == 'normalState':
assert grep_jsonstring_from_system(
testInput, raw_json_file) == expectedOutput
# Test expect error
if state == 'errorState':
# pudb.set_trace()
with pytest.raises(utilsError) as e:
grep_jsonstring_from_system(testInput, raw_json_file)
# pudb.set_trace()
assert str(e.value) == expectedOutput
@pytest.mark.parametrize("testInput,expected_output",
[
('', True),
(None, True),
('NonWwhiteSpaces', False),
('String with white-space', True),
(10, False)
]
)
def test_is_empty(testInput, expected_output):
"""utils, is_empty: Check if an object is empty or contains spaces"""
assert is_empty(testInput) == expected_output
@pytest.mark.parametrize("testInput,expectedOutput",
[
("String", True),
(['lol,lol2'], True),
(('lol', 'lol2'), True),
({'lol', 'lol2'}, True),
(10, False),
(None, False)
]
)
def test_is_iterable(testInput, expectedOutput):
"""utils, is_iterable Check if an object is iterable"""
assert is_iterable(testInput) == expectedOutput
@pytest.mark.parametrize("testInput, collection, expectedOutput, errorState",
[
('Love', ['I', 'Love', 'python'], True, False),
('love', ['I', 'Love', 'python'], False, False),
('', ['I', 'Love', 'python'], False, False),
(None, ['I', 'Love', 'python'], False, False),
(None, "String",
"Error: collection is not iterable or is a string", True),
('Love', 8, "Error: collection is not iterable or is a string", True), (
'Love', None, "Error: collection is not iterable or is a string", True),
]
)
def test_containing(testInput, collection, expectedOutput, errorState):
"""utils: Check if collection contains an item"""
if errorState is False:
assert containing(collection, testInput) == expectedOutput
else:
with pytest.raises(utilsError) as e:
containing(collection, testInput)
assert str(e.value) == expectedOutput
@pytest.mark.parametrize("testInput, expectedOutput, state",
[
(None, {"lang:nl:0:ben": [["ik", 22.0], ["er", 8.0], ["een", 7.0], ["je", 5.0]], "lang:nl:0:Ik":[["heb", 66.0], ["ben", 52.0], ["denk", 15.0], [
"wil", 13.0], ["acht", 1.0]], "lang:eng:0:I":[["am", 100], ["want", 246], ["love", 999]], "lang:eng:0:am":[["the", 100], ["Alice", 50], ["Bob", 45]]}, 'normalState'),
(None, "Error, load_data_from_json: \'NOEXISTINGFILE\' does not exists.",
'noFileExistState'),
(None, "Error, load_data_from_json: '{}' needs to be a json object.",
'invalidJsonState'),
(None, "Error, load_data_from_json: Function recuires a filename (str).",
'ValueErrorState'),
(13458, "Error, load_data_from_json: Function recuires a filename (str).",
'ValueErrorState'),
(True, "Error, load_data_from_json: Function recuires a filename (str).",
'ValueErrorState'),
(False, "Error, load_data_from_json: Function recuires a filename (str).",
'ValueErrorState'),
]
)
def test_load_json_from_file(raw_json_file, invalid_json_file, testInput, expectedOutput, state):
"""utils, load json data from file"""
# Test default argument
# Test normal behavior
if state == 'normalState':
assert load_data_from_json(str(raw_json_file)) == expectedOutput
# Test noFileExistState
if state == 'noFileExistState':
raw_json_file = 'NOEXISTINGFILE'
with pytest.raises(FileNotFoundError) as e:
load_data_from_json(raw_json_file)
assert str(e.value) == expectedOutput
# Test invalid_json_file error
if state == 'invalidJsonState':
with pytest.raises(utilsError) as e:
load_data_from_json(str(invalid_json_file))
assert str(e.value) == expectedOutput.format(str(invalid_json_file))
# Test noFileExistState
if state == 'ValueErrorState':
with pytest.raises(ValueError) as e:
load_data_from_json(testInput)
assert str(e.value) == expectedOutput
| eronde/vim_suggest | tests/test_utils.py | Python | mit | 9,992 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class BuildConfig(AppConfig):
name = 'build'
| inventree/InvenTree | InvenTree/build/apps.py | Python | mit | 150 |
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import TestHarness
if __name__ == '__main__':
harness = TestHarness('statepoint.5.*', True)
harness.main()
| kellyrowland/openmc | tests/test_many_scores/test_many_scores.py | Python | mit | 212 |
class CouldNotSendError(Exception): pass
class AlertIDAlreadyInUse(Exception): pass
class AlertBackendIDAlreadyInUse(Exception): pass
class InvalidApplicableUsers(Exception): pass | jiaaro/django-alert | alert/exceptions.py | Python | mit | 180 |
"""
Combination of multiple media players into one for a universal controller.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.universal/
"""
import logging
# pylint: disable=import-error
from copy import copy
from homeassistant.components.media_player import (
ATTR_APP_ID, ATTR_APP_NAME, ATTR_MEDIA_ALBUM_ARTIST, ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST, ATTR_MEDIA_CHANNEL, ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_DURATION, ATTR_MEDIA_EPISODE,
ATTR_MEDIA_PLAYLIST, ATTR_MEDIA_SEASON, ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE, ATTR_MEDIA_TITLE, ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL, ATTR_MEDIA_VOLUME_MUTED,
ATTR_SUPPORTED_MEDIA_COMMANDS, DOMAIN, SERVICE_PLAY_MEDIA,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP, MediaPlayerDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_ENTITY_PICTURE, CONF_NAME, SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_SEEK, SERVICE_TURN_OFF,
SERVICE_TURN_ON, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET, SERVICE_VOLUME_UP, STATE_IDLE, STATE_OFF, STATE_ON)
from homeassistant.helpers.event import track_state_change
from homeassistant.helpers.service import call_from_config
ATTR_ACTIVE_CHILD = 'active_child'
CONF_ATTRS = 'attributes'
CONF_CHILDREN = 'children'
CONF_COMMANDS = 'commands'
CONF_PLATFORM = 'platform'
CONF_SERVICE = 'service'
CONF_SERVICE_DATA = 'service_data'
CONF_STATE = 'state'
OFF_STATES = [STATE_IDLE, STATE_OFF]
REQUIREMENTS = []
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the universal media players."""
if not validate_config(config):
return
player = UniversalMediaPlayer(hass,
config[CONF_NAME],
config[CONF_CHILDREN],
config[CONF_COMMANDS],
config[CONF_ATTRS])
add_devices([player])
def validate_config(config):
"""Validate universal media player configuration."""
del config[CONF_PLATFORM]
# Validate name
if CONF_NAME not in config:
_LOGGER.error('Universal Media Player configuration requires name')
return False
validate_children(config)
validate_commands(config)
validate_attributes(config)
del_keys = []
for key in config:
if key not in [CONF_NAME, CONF_CHILDREN, CONF_COMMANDS, CONF_ATTRS]:
_LOGGER.warning(
'Universal Media Player (%s) unrecognized parameter %s',
config[CONF_NAME], key)
del_keys.append(key)
for key in del_keys:
del config[key]
return True
def validate_children(config):
"""Validate children."""
if CONF_CHILDREN not in config:
_LOGGER.info(
'No children under Universal Media Player (%s)', config[CONF_NAME])
config[CONF_CHILDREN] = []
elif not isinstance(config[CONF_CHILDREN], list):
_LOGGER.warning(
'Universal Media Player (%s) children not list in config. '
'They will be ignored.',
config[CONF_NAME])
config[CONF_CHILDREN] = []
def validate_commands(config):
"""Validate commands."""
if CONF_COMMANDS not in config:
config[CONF_COMMANDS] = {}
elif not isinstance(config[CONF_COMMANDS], dict):
_LOGGER.warning(
'Universal Media Player (%s) specified commands not dict in '
'config. They will be ignored.',
config[CONF_NAME])
config[CONF_COMMANDS] = {}
def validate_attributes(config):
"""Validate attributes."""
if CONF_ATTRS not in config:
config[CONF_ATTRS] = {}
elif not isinstance(config[CONF_ATTRS], dict):
_LOGGER.warning(
'Universal Media Player (%s) specified attributes '
'not dict in config. They will be ignored.',
config[CONF_NAME])
config[CONF_ATTRS] = {}
for key, val in config[CONF_ATTRS].items():
attr = val.split('|', 1)
if len(attr) == 1:
attr.append(None)
config[CONF_ATTRS][key] = attr
class UniversalMediaPlayer(MediaPlayerDevice):
"""Representation of an universal media player."""
# pylint: disable=too-many-public-methods
def __init__(self, hass, name, children, commands, attributes):
"""Initialize the Universal media device."""
# pylint: disable=too-many-arguments
self.hass = hass
self._name = name
self._children = children
self._cmds = commands
self._attrs = attributes
self._child_state = None
def on_dependency_update(*_):
"""Update ha state when dependencies update."""
self.update_ha_state(True)
depend = copy(children)
for entity in attributes.values():
depend.append(entity[0])
track_state_change(hass, depend, on_dependency_update)
def _entity_lkp(self, entity_id, state_attr=None):
"""Look up an entity state."""
state_obj = self.hass.states.get(entity_id)
if state_obj is None:
return
if state_attr:
return state_obj.attributes.get(state_attr)
return state_obj.state
def _override_or_child_attr(self, attr_name):
"""Return either the override or the active child for attr_name."""
if attr_name in self._attrs:
return self._entity_lkp(self._attrs[attr_name][0],
self._attrs[attr_name][1])
return self._child_attr(attr_name)
def _child_attr(self, attr_name):
"""Return the active child's attributes."""
active_child = self._child_state
return active_child.attributes.get(attr_name) if active_child else None
def _call_service(self, service_name, service_data=None,
allow_override=False):
"""Call either a specified or active child's service."""
if allow_override and service_name in self._cmds:
call_from_config(
self.hass, self._cmds[service_name], blocking=True)
return
if service_data is None:
service_data = {}
active_child = self._child_state
service_data[ATTR_ENTITY_ID] = active_child.entity_id
self.hass.services.call(DOMAIN, service_name, service_data,
blocking=True)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def master_state(self):
"""Return the master state for entity or None."""
if CONF_STATE in self._attrs:
master_state = self._entity_lkp(self._attrs[CONF_STATE][0],
self._attrs[CONF_STATE][1])
return master_state if master_state else STATE_OFF
else:
return None
@property
def name(self):
"""Return the name of universal player."""
return self._name
@property
def state(self):
"""Current state of media player.
Off if master state is off
else Status of first active child
else master state or off
"""
master_state = self.master_state # avoid multiple lookups
if master_state == STATE_OFF:
return STATE_OFF
active_child = self._child_state
if active_child:
return active_child.state
return master_state if master_state else STATE_OFF
@property
def volume_level(self):
"""Volume level of entity specified in attributes or active child."""
return self._child_attr(ATTR_MEDIA_VOLUME_LEVEL)
@property
def is_volume_muted(self):
"""Boolean if volume is muted."""
return self._override_or_child_attr(ATTR_MEDIA_VOLUME_MUTED) \
in [True, STATE_ON]
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._child_attr(ATTR_MEDIA_CONTENT_ID)
@property
def media_content_type(self):
"""Content type of current playing media."""
return self._child_attr(ATTR_MEDIA_CONTENT_TYPE)
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._child_attr(ATTR_MEDIA_DURATION)
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._child_attr(ATTR_ENTITY_PICTURE)
@property
def media_title(self):
"""Title of current playing media."""
return self._child_attr(ATTR_MEDIA_TITLE)
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_ARTIST)
@property
def media_album_name(self):
"""Album name of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_ALBUM_NAME)
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_ALBUM_ARTIST)
@property
def media_track(self):
"""Track number of current playing media (Music track only)."""
return self._child_attr(ATTR_MEDIA_TRACK)
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
return self._child_attr(ATTR_MEDIA_SERIES_TITLE)
@property
def media_season(self):
"""Season of current playing media (TV Show only)."""
return self._child_attr(ATTR_MEDIA_SEASON)
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
return self._child_attr(ATTR_MEDIA_EPISODE)
@property
def media_channel(self):
"""Channel currently playing."""
return self._child_attr(ATTR_MEDIA_CHANNEL)
@property
def media_playlist(self):
"""Title of Playlist currently playing."""
return self._child_attr(ATTR_MEDIA_PLAYLIST)
@property
def app_id(self):
"""ID of the current running app."""
return self._child_attr(ATTR_APP_ID)
@property
def app_name(self):
"""Name of the current running app."""
return self._child_attr(ATTR_APP_NAME)
@property
def supported_media_commands(self):
"""Flag media commands that are supported."""
flags = self._child_attr(ATTR_SUPPORTED_MEDIA_COMMANDS) or 0
if SERVICE_TURN_ON in self._cmds:
flags |= SUPPORT_TURN_ON
if SERVICE_TURN_OFF in self._cmds:
flags |= SUPPORT_TURN_OFF
if any([cmd in self._cmds for cmd in [SERVICE_VOLUME_UP,
SERVICE_VOLUME_DOWN]]):
flags |= SUPPORT_VOLUME_STEP
flags &= ~SUPPORT_VOLUME_SET
if SERVICE_VOLUME_MUTE in self._cmds and \
ATTR_MEDIA_VOLUME_MUTED in self._attrs:
flags |= SUPPORT_VOLUME_MUTE
return flags
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
active_child = self._child_state
return {ATTR_ACTIVE_CHILD: active_child.entity_id} \
if active_child else {}
def turn_on(self):
"""Turn the media player on."""
self._call_service(SERVICE_TURN_ON, allow_override=True)
def turn_off(self):
"""Turn the media player off."""
self._call_service(SERVICE_TURN_OFF, allow_override=True)
def mute_volume(self, is_volume_muted):
"""Mute the volume."""
data = {ATTR_MEDIA_VOLUME_MUTED: is_volume_muted}
self._call_service(SERVICE_VOLUME_MUTE, data, allow_override=True)
def set_volume_level(self, volume_level):
"""Set volume level, range 0..1."""
data = {ATTR_MEDIA_VOLUME_LEVEL: volume_level}
self._call_service(SERVICE_VOLUME_SET, data)
def media_play(self):
"""Send play commmand."""
self._call_service(SERVICE_MEDIA_PLAY)
def media_pause(self):
"""Send pause command."""
self._call_service(SERVICE_MEDIA_PAUSE)
def media_previous_track(self):
"""Send previous track command."""
self._call_service(SERVICE_MEDIA_PREVIOUS_TRACK)
def media_next_track(self):
"""Send next track command."""
self._call_service(SERVICE_MEDIA_NEXT_TRACK)
def media_seek(self, position):
"""Send seek command."""
data = {ATTR_MEDIA_SEEK_POSITION: position}
self._call_service(SERVICE_MEDIA_SEEK, data)
def play_media(self, media_type, media_id):
"""Play a piece of media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type,
ATTR_MEDIA_CONTENT_ID: media_id}
self._call_service(SERVICE_PLAY_MEDIA, data)
def volume_up(self):
"""Turn volume up for media player."""
self._call_service(SERVICE_VOLUME_UP, allow_override=True)
def volume_down(self):
"""Turn volume down for media player."""
self._call_service(SERVICE_VOLUME_DOWN, allow_override=True)
def media_play_pause(self):
"""Play or pause the media player."""
self._call_service(SERVICE_MEDIA_PLAY_PAUSE)
def update(self):
"""Update state in HA."""
for child_name in self._children:
child_state = self.hass.states.get(child_name)
if child_state and child_state.state not in OFF_STATES:
self._child_state = child_state
return
self._child_state = None
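# Illustrative configuration sketch, not part of this file: roughly what a
# universal media player entry looks like in configuration.yaml. All entity
# ids and names below are placeholders, and the exact keys should be checked
# against the component documentation for the installed version.
#
#     media_player:
#       platform: universal
#       name: living room
#       children:
#         - media_player.chromecast_living_room
#         - media_player.kodi_living_room
#       commands:
#         turn_on:
#           service: script.living_room_tv_on
#         turn_off:
#           service: script.living_room_tv_off
#       attributes:
#         state: switch.living_room_tv_power
#         is_volume_muted: switch.living_room_tv_mute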
| instantchow/home-assistant | homeassistant/components/media_player/universal.py | Python | mit | 13,920 |
import gc
import os.path
from mock import Mock
from pythoscope.code_trees_manager import CodeTreeNotFound, \
FilesystemCodeTreesManager
from pythoscope.store import CodeTree, Module
from assertions import *
from helper import TempDirectory
class TestFilesystemCodeTreesManager(TempDirectory):
def setUp(self):
super(TestFilesystemCodeTreesManager, self).setUp()
self.manager = FilesystemCodeTreesManager(self.tmpdir)
def assert_empty_cache(self):
assert_equal(None, self.manager._cached_code_tree)
def assert_cache(self, module_subpath):
assert_equal(module_subpath, self.manager._cached_code_tree[0])
def assert_recalled_tree(self, module_subpath, code):
assert_equal(code, self.manager.recall_code_tree(module_subpath).code)
def assert_code_tree_saved(self, module_subpath, saved=True):
path = self.manager._code_tree_path(module_subpath)
assert_equal(saved, os.path.exists(path))
def assert_code_tree_not_saved(self, module_subpath):
self.assert_code_tree_saved(module_subpath, saved=False)
def assert_calls_once(self, mock, callback):
"""Assert that given callback calls given Mock object exactly once.
"""
before_count = mock.call_count
callback()
assert_equal(before_count + 1, mock.call_count)
def test_remembered_code_trees_can_be_recalled(self):
code_tree = CodeTree(None)
self.manager.remember_code_tree(code_tree, "module.py")
assert_equal(code_tree, self.manager.recall_code_tree("module.py"))
def test_remembered_and_forgotten_code_trees_cannot_be_recalled(self):
code_tree = CodeTree(None)
self.manager.remember_code_tree(code_tree, "module.py")
self.manager.forget_code_tree("module.py")
assert_raises(CodeTreeNotFound, lambda: self.manager.recall_code_tree("module.py"))
def test_cache_is_empty_right_after_initialization(self):
self.assert_empty_cache()
def test_cache_is_empty_after_clearing(self):
code_tree = CodeTree(None)
self.manager.remember_code_tree(code_tree, "module.py")
self.manager.clear_cache()
self.assert_empty_cache()
def test_cache_contains_the_last_recalled_or_remembered_code_tree(self):
# We use numbers to identify CodeTrees. We cannot use their id, because
# pickling doesn't preserve those.
cts = map(CodeTree, [0, 1, 2])
for i, ct in enumerate(cts):
self.manager.remember_code_tree(ct, "module%d.py" % i)
# Checking all combinations of recall/remember calls.
self.assert_recalled_tree("module0.py", 0)
self.assert_cache("module0.py")
self.assert_recalled_tree("module1.py", 1)
self.assert_cache("module1.py")
self.manager.remember_code_tree(CodeTree(3), "module3.py")
self.assert_cache("module3.py")
self.manager.remember_code_tree(CodeTree(4), "module4.py")
self.assert_cache("module4.py")
self.assert_recalled_tree("module2.py", 2)
self.assert_cache("module2.py")
def test_remembering_code_tree_saves_it_to_the_filesystem(self):
code_tree = CodeTree(None)
self.manager.remember_code_tree(code_tree, "module.py")
self.assert_code_tree_saved("module.py")
def test_forgetting_code_tree_removes_its_file_from_the_filesystem(self):
code_tree = CodeTree(None)
self.manager.remember_code_tree(code_tree, "module.py")
self.manager.forget_code_tree("module.py")
self.assert_code_tree_not_saved("module.py")
def test_when_clearing_cache_code_tree_currently_in_cache_is_saved_to_the_filesystem(self):
code_tree = CodeTree(None)
code_tree.save = Mock()
self.manager.remember_code_tree(code_tree, "module.py")
self.assert_cache("module.py")
self.assert_calls_once(code_tree.save, self.manager.clear_cache)
def test_code_tree_not_in_cache_can_be_garbage_collected(self):
code_tree = CodeTree(None)
self.manager.remember_code_tree(code_tree, "module.py")
# Referred from the test and from the CodeTreesManager.
assert_length(gc.get_referrers(code_tree), 2)
self.manager.clear_cache()
# No longer referred from the CodeTreesManager.
assert_length(gc.get_referrers(code_tree), 1)
| mkwiatkowski/pythoscope | test/test_code_trees_manager.py | Python | mit | 4,388 |
# -*- coding: utf-8 -
#
# This file is part of socketpool.
# See the NOTICE for more information.
import errno
import os
import platform
import select
import socket
import sys
try:
from importlib import import_module
except ImportError:
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def load_backend(backend_name):
""" load pool backend. If this is an external module it should be
passed as "somelib.backend_mod", for socketpool backend you can just
pass the name.
Supported backend are :
- thread: connection are maintained in a threadsafe queue.
- gevent: support gevent
- eventlet: support eventlet
"""
try:
if len(backend_name.split(".")) > 1:
mod = import_module(backend_name)
else:
mod = import_module("socketpool.backend_%s" % backend_name)
return mod
except ImportError:
error_msg = "%s isn't a socketpool backend" % backend_name
raise ImportError(error_msg)
def can_use_kqueue():
# See Issue #15. kqueue doesn't work on OS X 10.6 and below.
if not hasattr(select, "kqueue"):
return False
if platform.system() == 'Darwin' and platform.mac_ver()[0] < '10.7':
return False
return True
def is_connected(skt):
try:
fno = skt.fileno()
except socket.error as e:
if e[0] == errno.EBADF:
return False
raise
try:
if hasattr(select, "epoll"):
ep = select.epoll()
ep.register(fno, select.EPOLLOUT | select.EPOLLIN)
events = ep.poll(0)
for fd, ev in events:
if fno == fd and \
(ev & select.EPOLLOUT or ev & select.EPOLLIN):
ep.unregister(fno)
return True
ep.unregister(fno)
elif hasattr(select, "poll"):
p = select.poll()
p.register(fno, select.POLLOUT | select.POLLIN)
events = p.poll(0)
for fd, ev in events:
if fno == fd and \
(ev & select.POLLOUT or ev & select.POLLIN):
p.unregister(fno)
return True
p.unregister(fno)
elif can_use_kqueue():
kq = select.kqueue()
events = [
select.kevent(fno, select.KQ_FILTER_READ, select.KQ_EV_ADD),
select.kevent(fno, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
]
kq.control(events, 0)
kevents = kq.control(None, 4, 0)
for ev in kevents:
if ev.ident == fno:
if ev.flags & select.KQ_EV_ERROR:
return False
else:
return True
# delete
events = [
select.kevent(fno, select.KQ_FILTER_READ, select.KQ_EV_DELETE),
select.kevent(fno, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
]
kq.control(events, 0)
kq.close()
return True
else:
r, _, _ = select.select([fno], [], [], 0)
if not r:
return True
except IOError:
pass
except (ValueError, select.error,) as e:
pass
return False
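# Illustrative usage sketch, not part of the original module: it loads the
# threaded backend and exercises is_connected() on a short-lived local socket
# pair. It assumes the socketpool package is importable; otherwise treat the
# block as documentation only.
if __name__ == "__main__":
    backend = load_backend("thread")
    print("loaded backend: %r" % (backend,))
    # Create a listening socket on an ephemeral port and connect a client to it.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(("127.0.0.1", 0))
    server.listen(1)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(server.getsockname())
    print("client connected: %s" % is_connected(client))
    client.close()
    server.close()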
| benoitc/socketpool | socketpool/util.py | Python | mit | 4,541 |
import sys
from pycsp import * | IanField90/Coursework | Part 3/SE3AC11/eticket/eticket_uint.py | Python | mit | 30 |
#!/usr/bin/python3
import os
import re
import requests
def download_file(url):
out_file = os.path.join("SOURCES", url.rsplit("/")[-1])
r = requests.get(url, stream=True)
print("Downloading {} to {}".format(url, out_file))
with open(out_file, "wb") as out:
for chunk in r.iter_content(chunk_size=None):
if chunk:
out.write(chunk)
spec = "{}.spec".format(os.environ['CIRCLE_PROJECT_REPONAME'])
with open(spec, 'r') as f:
for line in f.readlines():
if line.startswith("Version:"):
NGINX_VERSION = re.search("([0-9.])+", line).group()
if line.startswith("%define nps_version"):
NPS_VERSION = re.search("([0-9.])+", line).group()
ngx_files = [
"https://nginx.org/download/nginx-{NGINX_VERSION}.tar.gz",
"https://nginx.org/download/nginx-{NGINX_VERSION}.tar.gz.asc"
]
for f in ngx_files:
download_file(f.format(NGINX_VERSION=NGINX_VERSION))
nps_files = [
"https://github.com/pagespeed/ngx_pagespeed/archive/v{NPS_VERSION}-beta.zip",
"https://dl.google.com/dl/page-speed/psol/{NPS_VERSION}-x64.tar.gz",
"https://dl.google.com/dl/page-speed/psol/{NPS_VERSION}-ia32.tar.gz"
]
for f in nps_files:
download_file(f.format(NPS_VERSION=NPS_VERSION))
| kyl191/nginx-pagespeed | download_sources.py | Python | mit | 1,266 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.video.v1.room.room_participant.room_participant_published_track import PublishedTrackList
from twilio.rest.video.v1.room.room_participant.room_participant_subscribe_rule import SubscribeRulesList
from twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track import SubscribedTrackList
class ParticipantList(ListResource):
def __init__(self, version, room_sid):
"""
Initialize the ParticipantList
:param Version version: Version that contains the resource
:param room_sid: The SID of the participant's room
:returns: twilio.rest.video.v1.room.room_participant.ParticipantList
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantList
"""
super(ParticipantList, self).__init__(version)
# Path Solution
self._solution = {'room_sid': room_sid, }
self._uri = '/Rooms/{room_sid}/Participants'.format(**self._solution)
def stream(self, status=values.unset, identity=values.unset,
date_created_after=values.unset, date_created_before=values.unset,
limit=None, page_size=None):
"""
Streams ParticipantInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param ParticipantInstance.Status status: Read only the participants with this status
:param unicode identity: Read only the Participants with this user identity value
:param datetime date_created_after: Read only Participants that started after this date in UTC ISO 8601 format
:param datetime date_created_before: Read only Participants that started before this date in ISO 8601 format
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.ParticipantInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
status=status,
identity=identity,
date_created_after=date_created_after,
date_created_before=date_created_before,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, status=values.unset, identity=values.unset,
date_created_after=values.unset, date_created_before=values.unset,
limit=None, page_size=None):
"""
Lists ParticipantInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param ParticipantInstance.Status status: Read only the participants with this status
:param unicode identity: Read only the Participants with this user identity value
:param datetime date_created_after: Read only Participants that started after this date in UTC ISO 8601 format
:param datetime date_created_before: Read only Participants that started before this date in ISO 8601 format
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.video.v1.room.room_participant.ParticipantInstance]
"""
return list(self.stream(
status=status,
identity=identity,
date_created_after=date_created_after,
date_created_before=date_created_before,
limit=limit,
page_size=page_size,
))
def page(self, status=values.unset, identity=values.unset,
date_created_after=values.unset, date_created_before=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ParticipantInstance records from the API.
Request is executed immediately
:param ParticipantInstance.Status status: Read only the participants with this status
:param unicode identity: Read only the Participants with this user identity value
:param datetime date_created_after: Read only Participants that started after this date in UTC ISO 8601 format
:param datetime date_created_before: Read only Participants that started before this date in ISO 8601 format
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantPage
"""
data = values.of({
'Status': status,
'Identity': identity,
'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
'DateCreatedBefore': serialize.iso8601_datetime(date_created_before),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return ParticipantPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ParticipantInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ParticipantPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ParticipantContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.ParticipantContext
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantContext
"""
return ParticipantContext(self._version, room_sid=self._solution['room_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a ParticipantContext
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.ParticipantContext
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantContext
"""
return ParticipantContext(self._version, room_sid=self._solution['room_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Video.V1.ParticipantList>'
class ParticipantPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ParticipantPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param room_sid: The SID of the participant's room
:returns: twilio.rest.video.v1.room.room_participant.ParticipantPage
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantPage
"""
super(ParticipantPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ParticipantInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.video.v1.room.room_participant.ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantInstance
"""
return ParticipantInstance(self._version, payload, room_sid=self._solution['room_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Video.V1.ParticipantPage>'
class ParticipantContext(InstanceContext):
def __init__(self, version, room_sid, sid):
"""
Initialize the ParticipantContext
:param Version version: Version that contains the resource
:param room_sid: The SID of the room with the Participant resource to fetch
:param sid: The SID that identifies the resource to fetch
:returns: twilio.rest.video.v1.room.room_participant.ParticipantContext
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantContext
"""
super(ParticipantContext, self).__init__(version)
# Path Solution
self._solution = {'room_sid': room_sid, 'sid': sid, }
self._uri = '/Rooms/{room_sid}/Participants/{sid}'.format(**self._solution)
# Dependents
self._published_tracks = None
self._subscribed_tracks = None
self._subscribe_rules = None
def fetch(self):
"""
Fetch the ParticipantInstance
:returns: The fetched ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return ParticipantInstance(
self._version,
payload,
room_sid=self._solution['room_sid'],
sid=self._solution['sid'],
)
def update(self, status=values.unset):
"""
Update the ParticipantInstance
:param ParticipantInstance.Status status: The new status of the resource
:returns: The updated ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantInstance
"""
data = values.of({'Status': status, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return ParticipantInstance(
self._version,
payload,
room_sid=self._solution['room_sid'],
sid=self._solution['sid'],
)
@property
def published_tracks(self):
"""
Access the published_tracks
:returns: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackList
"""
if self._published_tracks is None:
self._published_tracks = PublishedTrackList(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['sid'],
)
return self._published_tracks
@property
def subscribed_tracks(self):
"""
Access the subscribed_tracks
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
"""
if self._subscribed_tracks is None:
self._subscribed_tracks = SubscribedTrackList(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['sid'],
)
return self._subscribed_tracks
@property
def subscribe_rules(self):
"""
Access the subscribe_rules
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribe_rule.SubscribeRulesList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribe_rule.SubscribeRulesList
"""
if self._subscribe_rules is None:
self._subscribe_rules = SubscribeRulesList(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['sid'],
)
return self._subscribe_rules
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Video.V1.ParticipantContext {}>'.format(context)
class ParticipantInstance(InstanceResource):
class Status(object):
CONNECTED = "connected"
DISCONNECTED = "disconnected"
def __init__(self, version, payload, room_sid, sid=None):
"""
Initialize the ParticipantInstance
:returns: twilio.rest.video.v1.room.room_participant.ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantInstance
"""
super(ParticipantInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'room_sid': payload.get('room_sid'),
'account_sid': payload.get('account_sid'),
'status': payload.get('status'),
'identity': payload.get('identity'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'start_time': deserialize.iso8601_datetime(payload.get('start_time')),
'end_time': deserialize.iso8601_datetime(payload.get('end_time')),
'duration': deserialize.integer(payload.get('duration')),
'url': payload.get('url'),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'room_sid': room_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantContext
"""
if self._context is None:
self._context = ParticipantContext(
self._version,
room_sid=self._solution['room_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def room_sid(self):
"""
:returns: The SID of the participant's room
:rtype: unicode
"""
return self._properties['room_sid']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def status(self):
"""
:returns: The status of the Participant
:rtype: ParticipantInstance.Status
"""
return self._properties['status']
@property
def identity(self):
"""
:returns: The string that identifies the resource's User
:rtype: unicode
"""
return self._properties['identity']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def start_time(self):
"""
        :returns: The time when the participant connected to the room in ISO 8601 format
:rtype: datetime
"""
return self._properties['start_time']
@property
def end_time(self):
"""
:returns: The time when the participant disconnected from the room in ISO 8601 format
:rtype: datetime
"""
return self._properties['end_time']
@property
def duration(self):
"""
:returns: Duration of time in seconds the participant was connected
:rtype: unicode
"""
return self._properties['duration']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The URLs of related resources
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch the ParticipantInstance
:returns: The fetched ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantInstance
"""
return self._proxy.fetch()
def update(self, status=values.unset):
"""
Update the ParticipantInstance
:param ParticipantInstance.Status status: The new status of the resource
:returns: The updated ParticipantInstance
:rtype: twilio.rest.video.v1.room.room_participant.ParticipantInstance
"""
return self._proxy.update(status=status, )
@property
def published_tracks(self):
"""
Access the published_tracks
:returns: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackList
"""
return self._proxy.published_tracks
@property
def subscribed_tracks(self):
"""
Access the subscribed_tracks
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackList
"""
return self._proxy.subscribed_tracks
@property
def subscribe_rules(self):
"""
Access the subscribe_rules
:returns: twilio.rest.video.v1.room.room_participant.room_participant_subscribe_rule.SubscribeRulesList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribe_rule.SubscribeRulesList
"""
return self._proxy.subscribe_rules
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Video.V1.ParticipantInstance {}>'.format(context)
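# Illustrative usage sketch, not part of the generated file. The account SID,
# auth token and resource SIDs below are placeholders; this resource is
# normally reached through client.video.rooms(<room_sid>).participants.
#
#     from twilio.rest import Client
#
#     client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
#     room = client.video.rooms("RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
#     for participant in room.participants.list(status="connected"):
#         print(participant.identity, participant.status)
#     # Disconnect one participant by updating its status:
#     room.participants("PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(status="disconnected")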
| twilio/twilio-python | twilio/rest/video/v1/room/room_participant/__init__.py | Python | mit | 20,355 |
from nltk.corpus import wordnet
import json
import codecs
import sys
import time
major_weight = 1.0
minor_weight = 0.8
similarity_threshold = 0.65
def fn(ss1, ss2, weight):
similarity = ss1.wup_similarity(ss2)
return (ss1, ss2, weight * similarity if similarity else 0 )
def isSynsetForm(s):
return '.n.' in s or '.s.' in s or '.v.' in s
if len(sys.argv) != 3:
print 'usage <seed synset> <kadist.json>'
sys.exit(-1)
else:
candidates = []
source = wordnet.synset(sys.argv[1])
tagged_works = 0
with codecs.open(sys.argv[2], 'rb', 'utf-8') as f:
kadist = json.loads(f.read())
for m in kadist:
if m['major_tags']:
tagged_works += 1
candidates += [(wordnet.synset(tag), major_weight) for tag in m['major_tags'] if isSynsetForm(tag)]
if m['minor_tags']:
candidates += [(wordnet.synset(tag), minor_weight) for tag in m['minor_tags'] if isSynsetForm(tag) ]
start = time.time()
print 'starting similarity calculations on', tagged_works, 'tagged works'
similarities = (fn(source, candidate[0], candidate[1]) for candidate in set([c for c in candidates if c[0] != source]))
for result in (sorted( (sim for sim in similarities if sim[2] >= similarity_threshold), key=lambda x: x[2])):
print result[0].name(), result[1].name(), result[2]
print source.name(), 'occurs as a tag', len([c for c in candidates if c[0] == source]),'times'
print 'number lookups', len(candidates), 'duration', time.time()-start
| darenr/MOMA-Art | kadist/wup.py | Python | mit | 1,480 |
'''@file alignment_decoder.py
contains the AlignmentDecoder'''
import os
import struct
import numpy as np
import tensorflow as tf
import decoder
class AlignmentDecoder(decoder.Decoder):
    '''decoder that converts the model's HMM state posteriors into pseudo log-likelihoods'''
def __call__(self, inputs, input_seq_length):
'''decode a batch of data
Args:
inputs: the inputs as a dictionary of [batch_size x time x ...]
tensors
input_seq_length: the input sequence lengths as a dictionary of
[batch_size] vectors
Returns:
- the decoded sequences as a dictionary of outputs
'''
with tf.name_scope('alignment_decoder'):
#create the decoding graph
logits, logits_seq_length = self.model(
inputs, input_seq_length, targets=[],
target_seq_length=[], is_training=False)
#compute the log probabilities
logprobs = tf.log(tf.nn.softmax(logits.values()[0]))
#read the prior if it exists, otherwise use uniform prior
if os.path.exists(self.conf['prior']):
prior = np.load(self.conf['prior'])
else:
print(
                    'WARNING: could not find prior in file %s, using uniform'
' prior' % self.conf['prior'])
output_dim = self.model.output_dims.values()[0]
prior = np.ones([output_dim])/output_dim
#compute posterior to pseudo likelihood
loglikes = logprobs - np.log(prior)
outputs = {o:(loglikes, logits_seq_length[o]) for o in logits}
return outputs
def write(self, outputs, directory, names):
'''write the output of the decoder to disk
args:
outputs: the outputs of the decoder
directory: the directory where the results should be written
names: the names of the utterances in outputs
'''
for o in outputs:
if not os.path.isdir(os.path.join(directory, o)):
os.makedirs(os.path.join(directory, o))
batch_size = outputs[o][0].shape[0]
scp_file = os.path.join(directory, o, 'feats.scp')
ark_file = os.path.join(directory, o, 'loglikes.ark')
for i in range(batch_size):
output = outputs[o][0][i, :outputs[o][1][i]]
arkwrite(scp_file, ark_file, names[i], output)
def update_evaluation_loss(self, loss, outputs, references,
reference_seq_length):
'''update the evaluation loss
args:
loss: the current evaluation loss
outputs: the outputs of the decoder as a dictionary
references: the references as a dictionary
reference_seq_length: the sequence lengths of the references
Returns:
            an op to update the evaluation loss
'''
raise Exception('AlignmentDecoder can not be used to validate')
def arkwrite(scp_file, ark_file, name, array):
    '''write the array to the Kaldi ark file and append a matching entry to the scp file'''
scp_fid = open(scp_file, 'a')
ark_fid = open(ark_file, 'ab')
rows, cols = array.shape
ark_fid.write(struct.pack('<%ds'%(len(name)), name))
pos = ark_fid.tell()
ark_fid.write(struct.pack('<xcccc', 'B', 'F', 'M', ' '))
ark_fid.write(struct.pack('<bi', 4, rows))
ark_fid.write(struct.pack('<bi', 4, cols))
ark_fid.write(array)
scp_fid.write('%s %s:%s\n' % (name, ark_file, pos))
scp_fid.close()
ark_fid.close()
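# Illustrative usage sketch, not part of the original file: writing a dummy
# log-likelihood matrix for a single utterance. The paths and utterance name
# are placeholders; the matrix should be float32 so that the raw bytes written
# after the 'BFM ' header match Kaldi's binary float matrix layout.
#
#     dummy_loglikes = np.zeros([100, 40], dtype=np.float32)
#     arkwrite('decode/feats.scp', 'decode/loglikes.ark', 'utt-0001',
#              dummy_loglikes)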
| vrenkens/Nabu-asr | nabu/neuralnetworks/decoders/alignment_decoder.py | Python | mit | 3,558 |
from unittest import TestCase
from unittest.mock import patch
from genes.apt.get import APTGet
class APTGetInstallTestCase(TestCase):
def test_apt_get_install_no_items_fails(self):
with patch('genes.process.process.Popen') as mock_popen:
apt_get = APTGet()
with self.assertRaises(ValueError):
apt_get.install()
mock_popen.assert_not_called()
def test_apt_get_install_single_item_calls_popen(self):
with patch('genes.process.process.Popen') as mock_popen:
apt_get = APTGet()
apt_get.install('test')
mock_popen.assert_called_with(('apt-get', '-y', 'install', 'test'))
def test_apt_get_install_multiple_items_calls_popen(self):
with patch('genes.process.process.Popen') as mock_popen:
apt_get = APTGet()
apt_get.install('test1', 'test2')
mock_popen.assert_called_once_with(('apt-get', '-y', 'install', 'test1', 'test2'))
class TestAPTGetUpdate(TestCase):
def test_apt_get_update_calls_popen(self):
with patch('genes.process.process.Popen') as mock_popen:
apt_get = APTGet()
apt_get.update()
mock_popen.assert_called_once_with(('apt-get', '-y', 'update'))
class TestAPTGetUpgrade(TestCase):
def test_apt_get_upgrade_with_no_items_calls_popen(self):
with patch('genes.process.process.Popen') as mock_popen:
apt_get = APTGet()
apt_get.upgrade()
mock_popen.assert_called_once_with(('apt-get', '-y', 'upgrade'))
| hatchery/Genepool2 | genes/apt/test_get.py | Python | mit | 1,570 |
import src.dot.dotentity
class DotHeart(src.dot.dotentity.DotEntity):
def __init__(self):
res = [
"assets/img/red-brick.png",
"assets/img/black-brick.png"
]
grid = [
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
]
src.dot.dotentity.DotEntity.__init__(self, grid, res)
def setSmall(self):
self.setDotScale(0.5)
def setMedium(self):
self.setDotScale(0.75)
def setLarge(self):
self.setDotScale(1)
| m4nolo/steering-all | src/dot/entities/dotheart.py | Python | mit | 817 |
import bootstrap # noqa
import pytest
from modviz.cli import parse_arguments, validate_path, validate_fold_paths
def test_argument_parsing():
with pytest.raises(SystemExit):
parse_arguments([])
namespace = parse_arguments(["foo"])
assert namespace.path == "foo"
assert namespace.target is None
assert namespace.fold_paths is None
assert namespace.exclude_paths is None
namespace = parse_arguments(["foo", "-o", "test.html"])
assert namespace.path == "foo"
assert namespace.target is "test.html"
assert namespace.fold_paths is None
assert namespace.exclude_paths is None
namespace = parse_arguments(["foo", "-o", "test.html", "-e", "foo", "bar"])
assert namespace.path == "foo"
assert namespace.target is "test.html"
assert namespace.fold_paths is None
assert namespace.exclude_paths == ["foo", "bar"]
namespace = parse_arguments(["foo", "-o", "test.html", "-f", "foo", "bar"])
assert namespace.path == "foo"
assert namespace.target is "test.html"
assert namespace.fold_paths == ["foo", "bar"]
assert namespace.exclude_paths is None
def test_validate_path():
assert validate_path("/")
assert not validate_path("/imprettysureidontexist")
def test_validate_fold_paths():
root = "/"
assert validate_fold_paths(root, [])
assert validate_fold_paths(root, ["/a", "/b"])
with pytest.raises(ValueError):
validate_fold_paths(root, ["foo"])
| Bogdanp/modviz | tests/test_cli.py | Python | mit | 1,467 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_commoner_tatooine_rodian_male_04.iff"
result.attribute_template_id = 9
result.stfName("npc_name","rodian_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/mobile/shared_dressed_commoner_tatooine_rodian_male_04.py | Python | mit | 466 |
import functools
from ...drivers.spi_interfaces import SPI_INTERFACES
USAGE = """
A spi_interface is represented by a string.
Possible values are """ + ', '.join(sorted(SPI_INTERFACES.__members__))
@functools.singledispatch
def make(c):
raise ValueError("Don't understand type %s" % type(c), USAGE)
@make.register(SPI_INTERFACES)
def _(c):
return c
@make.register(str)
def _(c):
return SPI_INTERFACES[c]
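# Illustrative usage sketch, not part of the original module. Because of the
# relative import above this file is meant to be imported, not run directly.
# The member name 'SPI0' is a placeholder; real names come from
# SPI_INTERFACES.__members__ and are listed in USAGE.
#
#     print(USAGE)                 # shows the accepted interface names
#     iface = make('SPI0')         # a string is looked up in the enum
#     assert make(iface) is iface  # passing a member through is a no-op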
| ManiacalLabs/BiblioPixel | bibliopixel/project/types/spi_interface.py | Python | mit | 424 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Some initial, primary places.
PLACES = [
('Work', 'work'),
('Home', 'home'),
('School', 'school'),
]
def create_primary_place(apps, schema_editor=None):
Place = apps.get_model("userprofile", "Place")
for name, slug in PLACES:
Place.objects.create(name=name, slug=slug, primary=True)
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0010_auto_20150908_2010'),
]
operations = [
migrations.RunPython(create_primary_place),
]
| izzyalonso/tndata_backend | tndata_backend/userprofile/migrations/0011_create_places.py | Python | mit | 622 |
from __future__ import unicode_literals, division, absolute_import
import argparse
import logging
import re
import time
from copy import copy
from datetime import datetime, timedelta
from sqlalchemy import (Column, Integer, String, Unicode, DateTime, Boolean,
desc, select, update, delete, ForeignKey, Index, func, and_, not_, UniqueConstraint)
from sqlalchemy.orm import relation, backref
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from sqlalchemy.exc import OperationalError, IntegrityError
from flexget import db_schema, options, plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.manager import Session
from flexget.utils import qualities
from flexget.utils.log import log_once
from flexget.plugins.parsers import ParseWarning, SERIES_ID_TYPES
from flexget.plugin import get_plugin_by_name
from flexget.utils.sqlalchemy_utils import (table_columns, table_exists, drop_tables, table_schema, table_add_column,
create_index)
from flexget.utils.tools import merge_dict_from_to, parse_timedelta
from flexget.utils.database import quality_property
SCHEMA_VER = 12
log = logging.getLogger('series')
Base = db_schema.versioned_base('series', SCHEMA_VER)
@db_schema.upgrade('series')
def upgrade(ver, session):
if ver is None:
if table_exists('episode_qualities', session):
log.info('Series database format is too old to upgrade, dropping and recreating tables.')
# Drop the deprecated data
drop_tables(['series', 'series_episodes', 'episode_qualities'], session)
# Create new tables from the current models
Base.metadata.create_all(bind=session.bind)
# Upgrade episode_releases table to have a proper count and seed it with appropriate numbers
columns = table_columns('episode_releases', session)
if not 'proper_count' in columns:
log.info('Upgrading episode_releases table to have proper_count column')
table_add_column('episode_releases', 'proper_count', Integer, session)
release_table = table_schema('episode_releases', session)
for row in session.execute(select([release_table.c.id, release_table.c.title])):
# Recalculate the proper_count from title for old episodes
proper_count = get_plugin_by_name('parsing').parse_series(row['title']).proper_count
session.execute(update(release_table, release_table.c.id == row['id'], {'proper_count': proper_count}))
ver = 0
if ver == 0:
log.info('Migrating first_seen column from series_episodes to episode_releases table.')
# Create the column in episode_releases
table_add_column('episode_releases', 'first_seen', DateTime, session)
# Seed the first_seen value for all the past releases with the first_seen of their episode.
episode_table = table_schema('series_episodes', session)
release_table = table_schema('episode_releases', session)
for row in session.execute(select([episode_table.c.id, episode_table.c.first_seen])):
session.execute(update(release_table, release_table.c.episode_id == row['id'],
{'first_seen': row['first_seen']}))
ver = 1
if ver == 1:
log.info('Adding `identified_by` column to series table.')
table_add_column('series', 'identified_by', String, session)
ver = 2
if ver == 2:
log.info('Creating index on episode_releases table.')
create_index('episode_releases', session, 'episode_id')
ver = 3
if ver == 3:
# Remove index on Series.name
try:
Index('ix_series_name').drop(bind=session.bind)
except OperationalError:
log.debug('There was no ix_series_name index to remove.')
# Add Series.name_lower column
log.info('Adding `name_lower` column to series table.')
table_add_column('series', 'name_lower', Unicode, session)
series_table = table_schema('series', session)
create_index('series', session, 'name_lower')
# Fill in lower case name column
session.execute(update(series_table, values={'name_lower': func.lower(series_table.c.name)}))
ver = 4
if ver == 4:
log.info('Adding `identified_by` column to episodes table.')
table_add_column('series_episodes', 'identified_by', String, session)
series_table = table_schema('series', session)
# Clear out identified_by id series so that they can be auto detected again
session.execute(update(series_table, series_table.c.identified_by != 'ep', {'identified_by': None}))
# Warn users about a possible config change needed.
log.warning('If you are using `identified_by: id` option for the series plugin for date, '
                    'or absolute numbered series, you will need to update your config. Two new identified_by modes have '
                    'been added, `date` and `sequence`. In addition, if you are using auto identified_by, it will '
'be relearned based on upcoming episodes.')
ver = 5
if ver == 5:
# Episode advancement now relies on identified_by being filled for the episodes.
# This action retroactively marks 'ep' mode for all episodes where the series is already in 'ep' mode.
series_table = table_schema('series', session)
ep_table = table_schema('series_episodes', session)
ep_mode_series = select([series_table.c.id], series_table.c.identified_by == 'ep')
where_clause = and_(ep_table.c.series_id.in_(ep_mode_series),
ep_table.c.season != None, ep_table.c.number != None, ep_table.c.identified_by == None)
session.execute(update(ep_table, where_clause, {'identified_by': 'ep'}))
ver = 6
if ver == 6:
# Translate old qualities into new quality requirements
release_table = table_schema('episode_releases', session)
for row in session.execute(select([release_table.c.id, release_table.c.quality])):
# Webdl quality no longer has dash
new_qual = row['quality'].replace('web-dl', 'webdl')
if row['quality'] != new_qual:
session.execute(update(release_table, release_table.c.id == row['id'],
{'quality': new_qual}))
ver = 7
# Normalization rules changed for 7 and 8, but only run this once
if ver in [7, 8]:
# Merge series that qualify as duplicates with new normalization scheme
series_table = table_schema('series', session)
ep_table = table_schema('series_episodes', session)
all_series = session.execute(select([series_table.c.name, series_table.c.id]))
unique_series = {}
for row in all_series:
unique_series.setdefault(normalize_series_name(row['name']), []).append(row['id'])
for series, ids in unique_series.iteritems():
session.execute(update(ep_table, ep_table.c.series_id.in_(ids), {'series_id': ids[0]}))
if len(ids) > 1:
session.execute(delete(series_table, series_table.c.id.in_(ids[1:])))
session.execute(update(series_table, series_table.c.id == ids[0], {'name_lower': series}))
ver = 9
if ver == 9:
table_add_column('series', 'begin_episode_id', Integer, session)
ver = 10
if ver == 10:
# Due to bad db cleanups there may be invalid entries in series_tasks table
series_tasks = table_schema('series_tasks', session)
series_table = table_schema('series', session)
log.verbose('Repairing series_tasks table data')
session.execute(delete(series_tasks, ~series_tasks.c.series_id.in_(select([series_table.c.id]))))
ver = 11
if ver == 11:
# SeriesTasks was cleared out due to a bug, make sure they get recalculated next run #2772
from flexget.task import config_changed
config_changed()
ver = 12
return ver
@event('manager.db_cleanup')
def db_cleanup(session):
# Clean up old undownloaded releases
result = session.query(Release).\
filter(Release.downloaded == False).\
filter(Release.first_seen < datetime.now() - timedelta(days=120)).delete(False)
if result:
log.verbose('Removed %d undownloaded episode releases.', result)
# Clean up episodes without releases
result = session.query(Episode).filter(~Episode.releases.any()).filter(~Episode.begins_series.any()).delete(False)
if result:
log.verbose('Removed %d episodes without releases.', result)
# Clean up series without episodes that aren't in any tasks
result = session.query(Series).filter(~Series.episodes.any()).filter(~Series.in_tasks.any()).delete(False)
if result:
log.verbose('Removed %d series without episodes.', result)
@event('manager.lock_acquired')
def repair(manager):
# Perform database repairing and upgrading at startup.
if not manager.persist.get('series_repaired', False):
session = Session()
try:
# For some reason at least I have some releases in database which don't belong to any episode.
for release in session.query(Release).filter(Release.episode == None).all():
log.info('Purging orphan release %s from database', release.title)
session.delete(release)
session.commit()
finally:
session.close()
manager.persist['series_repaired'] = True
# Run clean_series the first time we get a database lock, since we won't have had one the first time the config
# got loaded.
clean_series(manager)
@event('manager.config_updated')
def clean_series(manager):
# Unmark series from tasks which have been deleted.
if not manager.has_lock:
return
with Session() as session:
removed_tasks = session.query(SeriesTask)
if manager.tasks:
removed_tasks = removed_tasks.filter(not_(SeriesTask.name.in_(manager.tasks)))
deleted = removed_tasks.delete(synchronize_session=False)
if deleted:
session.commit()
TRANSLATE_MAP = {ord(u'&'): u' and '}
for char in u'\'\\':
TRANSLATE_MAP[ord(char)] = u''
for char in u'_./-,[]():':
TRANSLATE_MAP[ord(char)] = u' '
def normalize_series_name(name):
"""Returns a normalized version of the series name."""
name = name.lower()
name = name.replace('&', ' and ')
name = name.translate(TRANSLATE_MAP) # Replaced some symbols with spaces
name = u' '.join(name.split())
return name
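# Illustrative examples, not part of the original plugin, of how the
# normalization above folds case, punctuation and whitespace; this is what
# makes differently formatted titles collapse onto the same series row:
#
#     normalize_series_name(u'The.Office_(US)')  ->  u'the office us'
#     normalize_series_name(u'Law & Order')      ->  u'law and order'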
class NormalizedComparator(Comparator):
def operate(self, op, other):
return op(self.__clause_element__(), normalize_series_name(other))
class AlternateNames(Base):
""" Similar to Series. Name is handled case insensitively transparently.
"""
__tablename__ = 'series_alternate_names'
id = Column(Integer, primary_key=True)
_alt_name = Column('alt_name', Unicode)
_alt_name_normalized = Column('alt_name_normalized', Unicode, index=True, unique=True)
series_id = Column(Integer, ForeignKey('series.id'), nullable=False)
def name_setter(self, value):
self._alt_name = value
self._alt_name_normalized = normalize_series_name(value)
def name_getter(self):
return self._alt_name
def name_comparator(self):
return NormalizedComparator(self._alt_name_normalized)
alt_name = hybrid_property(name_getter, name_setter)
alt_name.comparator(name_comparator)
def __init__(self, name):
self.alt_name = name
def __unicode__(self):
return '<SeriesAlternateName(series_id=%s, alt_name=%s)>' % (self.series_id, self.alt_name)
def __repr__(self):
return unicode(self).encode('ascii', 'replace')
#Index('alternatenames_series_name', AlternateNames.alt_name, unique=True)
class Series(Base):
""" Name is handled case insensitively transparently
"""
__tablename__ = 'series'
id = Column(Integer, primary_key=True)
_name = Column('name', Unicode)
_name_normalized = Column('name_lower', Unicode, index=True, unique=True)
identified_by = Column(String)
begin_episode_id = Column(Integer, ForeignKey('series_episodes.id', name='begin_episode_id', use_alter=True))
begin = relation('Episode', uselist=False, primaryjoin="Series.begin_episode_id == Episode.id",
foreign_keys=[begin_episode_id], post_update=True, backref='begins_series')
episodes = relation('Episode', backref='series', cascade='all, delete, delete-orphan',
primaryjoin='Series.id == Episode.series_id')
in_tasks = relation('SeriesTask', backref=backref('series', uselist=False), cascade='all, delete, delete-orphan')
alternate_names = relation('AlternateNames', backref='series', primaryjoin='Series.id == AlternateNames.series_id',
cascade='all, delete, delete-orphan')
# Make a special property that does indexed case insensitive lookups on name, but stores/returns specified case
def name_getter(self):
return self._name
def name_setter(self, value):
self._name = value
self._name_normalized = normalize_series_name(value)
def name_comparator(self):
return NormalizedComparator(self._name_normalized)
name = hybrid_property(name_getter, name_setter)
name.comparator(name_comparator)
def __unicode__(self):
return '<Series(id=%s,name=%s)>' % (self.id, self.name)
def __repr__(self):
return unicode(self).encode('ascii', 'replace')
class Episode(Base):
__tablename__ = 'series_episodes'
id = Column(Integer, primary_key=True)
identifier = Column(String)
season = Column(Integer)
number = Column(Integer)
identified_by = Column(String)
series_id = Column(Integer, ForeignKey('series.id'), nullable=False)
releases = relation('Release', backref='episode', cascade='all, delete, delete-orphan')
@hybrid_property
def first_seen(self):
if not self.releases:
return None
return min(release.first_seen for release in self.releases)
@first_seen.expression
def first_seen(cls):
return select([func.min(Release.first_seen)]).where(Release.episode_id == cls.id).\
correlate(Episode.__table__).label('first_seen')
@property
def age(self):
"""
:return: Pretty string representing age of episode. eg "23d 12h" or "No releases seen"
"""
if not self.first_seen:
return 'No releases seen'
diff = datetime.now() - self.first_seen
age_days = diff.days
age_hours = diff.seconds // 60 // 60
age = ''
if age_days:
age += '%sd ' % age_days
age += '%sh' % age_hours
return age
@property
def is_premiere(self):
if self.season == 1 and self.number in (0, 1):
return 'Series Premiere'
elif self.number in (0, 1):
return 'Season Premiere'
return False
@property
def downloaded_releases(self):
return [release for release in self.releases if release.downloaded]
def __unicode__(self):
return '<Episode(id=%s,identifier=%s,season=%s,number=%s)>' % \
(self.id, self.identifier, self.season, self.number)
def __repr__(self):
return unicode(self).encode('ascii', 'replace')
def __eq__(self, other):
if not isinstance(other, Episode):
return NotImplemented
if self.identified_by != other.identified_by:
return NotImplemented
return self.identifier == other.identifier
def __lt__(self, other):
if not isinstance(other, Episode):
return NotImplemented
if self.identified_by != other.identified_by:
return NotImplemented
if self.identified_by in ['ep', 'sequence']:
return self.season < other.season or (self.season == other.season and self.number < other.number)
if self.identified_by == 'date':
return self.identifier < other.identifier
# Can't compare id type identifiers
return NotImplemented
Index('episode_series_identifier', Episode.series_id, Episode.identifier)
class Release(Base):
__tablename__ = 'episode_releases'
id = Column(Integer, primary_key=True)
episode_id = Column(Integer, ForeignKey('series_episodes.id'), nullable=False, index=True)
_quality = Column('quality', String)
quality = quality_property('_quality')
downloaded = Column(Boolean, default=False)
proper_count = Column(Integer, default=0)
title = Column(Unicode)
first_seen = Column(DateTime)
def __init__(self):
self.first_seen = datetime.now()
@property
def proper(self):
# TODO: TEMP
import warnings
warnings.warn("accessing deprecated release.proper, use release.proper_count instead")
return self.proper_count > 0
def __unicode__(self):
return '<Release(id=%s,quality=%s,downloaded=%s,proper_count=%s,title=%s)>' % \
(self.id, self.quality, self.downloaded, self.proper_count, self.title)
def __repr__(self):
return unicode(self).encode('ascii', 'replace')
class SeriesTask(Base):
__tablename__ = 'series_tasks'
id = Column(Integer, primary_key=True)
series_id = Column(Integer, ForeignKey('series.id'), nullable=False)
name = Column(Unicode, index=True)
def __init__(self, name):
self.name = name
def get_latest_episode(series):
"""Return latest known identifier in dict (season, episode, name) for series name"""
session = Session.object_session(series)
episode = session.query(Episode).join(Episode.series).\
filter(Series.id == series.id).\
filter(Episode.season != None).\
order_by(desc(Episode.season)).\
order_by(desc(Episode.number)).first()
if not episode:
# log.trace('get_latest_info: no info available for %s', name)
return False
# log.trace('get_latest_info, series: %s season: %s episode: %s' % \
# (name, episode.season, episode.number))
return episode
def auto_identified_by(series):
"""
    Determine how the given series should be identified: by episode, sequence, date or id format.
    Returns 'ep', 'sequence', 'date' or 'id' if enough history is present to identify the series' id type.
    Returns 'auto' if there is not enough history to determine the format yet.
"""
session = Session.object_session(series)
type_totals = dict(session.query(Episode.identified_by, func.count(Episode.identified_by)).join(Episode.series).
filter(Series.id == series.id).group_by(Episode.identified_by).all())
# Remove None and specials from the dict,
# we are only considering episodes that we know the type of (parsed with new parser)
type_totals.pop(None, None)
type_totals.pop('special', None)
if not type_totals:
return 'auto'
log.debug('%s episode type totals: %r', series.name, type_totals)
# Find total number of parsed episodes
total = sum(type_totals.itervalues())
# See which type has the most
best = max(type_totals, key=lambda x: type_totals[x])
# Ep mode locks in faster than the rest. At 2 seen episodes.
if type_totals.get('ep', 0) >= 2 and type_totals['ep'] > total / 3:
log.info('identified_by has locked in to type `ep` for %s', series.name)
return 'ep'
# If we have over 3 episodes all of the same type, lock in
if len(type_totals) == 1 and total >= 3:
return best
# Otherwise wait until 5 episodes to lock in
if total >= 5:
log.info('identified_by has locked in to type `%s` for %s', best, series.name)
return best
log.verbose('identified by is currently on `auto` for %s. '
'Multiple id types may be accepted until it locks in on the appropriate type.', series.name)
return 'auto'
def get_latest_release(series, downloaded=True, season=None):
"""
    :param Series series: series to find the latest release for
    :param bool downloaded: find only downloaded releases
    :param int season: season to find the newest release for
:return: Instance of Episode or None if not found.
"""
session = Session.object_session(series)
releases = session.query(Episode).join(Episode.releases, Episode.series).filter(Series.id == series.id)
if downloaded:
releases = releases.filter(Release.downloaded == True)
if season is not None:
releases = releases.filter(Episode.season == season)
if series.identified_by and series.identified_by != 'auto':
releases = releases.filter(Episode.identified_by == series.identified_by)
if series.identified_by in ['ep', 'sequence']:
latest_release = releases.order_by(desc(Episode.season), desc(Episode.number)).first()
elif series.identified_by == 'date':
latest_release = releases.order_by(desc(Episode.identifier)).first()
else:
latest_release = releases.order_by(desc(Episode.first_seen)).first()
if not latest_release:
log.debug('get_latest_release returning None, no downloaded episodes found for: %s', series.name)
return
return latest_release
def new_eps_after(since_ep):
"""
:param since_ep: Episode instance
:return: Number of episodes since then
"""
session = Session.object_session(since_ep)
series = since_ep.series
series_eps = session.query(Episode).join(Episode.series).\
filter(Series.id == series.id)
if series.identified_by == 'ep':
if since_ep.season is None or since_ep.number is None:
log.debug('new_eps_after for %s falling back to timestamp because latest dl in non-ep format' %
series.name)
return series_eps.filter(Episode.first_seen > since_ep.first_seen).count()
return series_eps.filter((Episode.identified_by == 'ep') &
(((Episode.season == since_ep.season) & (Episode.number > since_ep.number)) |
(Episode.season > since_ep.season))).count()
    elif series.identified_by == 'sequence':
return series_eps.filter(Episode.number > since_ep.number).count()
elif series.identified_by == 'id':
return series_eps.filter(Episode.first_seen > since_ep.first_seen).count()
else:
log.debug('unsupported identified_by %s', series.identified_by)
return 0
def store_parser(session, parser, series=None):
"""
Push series information into database. Returns added/existing release.
:param session: Database session to use
:param parser: parser for release that should be added to database
:param series: Series in database to add release to. Will be looked up if not provided.
:return: List of Releases
"""
if not series:
# if series does not exist in database, add new
series = session.query(Series).\
filter(Series.name == parser.name).\
filter(Series.id != None).first()
if not series:
log.debug('adding series %s into db', parser.name)
series = Series()
series.name = parser.name
session.add(series)
log.debug('-> added %s' % series)
releases = []
for ix, identifier in enumerate(parser.identifiers):
# if episode does not exist in series, add new
episode = session.query(Episode).filter(Episode.series_id == series.id).\
filter(Episode.identifier == identifier).\
filter(Episode.series_id != None).first()
if not episode:
log.debug('adding episode %s into series %s', identifier, parser.name)
episode = Episode()
episode.identifier = identifier
episode.identified_by = parser.id_type
# if episodic format
if parser.id_type == 'ep':
episode.season = parser.season
episode.number = parser.episode + ix
elif parser.id_type == 'sequence':
episode.season = 0
episode.number = parser.id + ix
series.episodes.append(episode) # pylint:disable=E1103
log.debug('-> added %s' % episode)
        # if release does not exist in episode, add new
#
# NOTE:
#
# filter(Release.episode_id != None) fixes weird bug where release had/has been added
# to database but doesn't have episode_id, this causes all kinds of havoc with the plugin.
# perhaps a bug in sqlalchemy?
release = session.query(Release).filter(Release.episode_id == episode.id).\
filter(Release.title == parser.data).\
filter(Release.quality == parser.quality).\
filter(Release.proper_count == parser.proper_count).\
filter(Release.episode_id != None).first()
if not release:
log.debug('adding release %s into episode', parser)
release = Release()
release.quality = parser.quality
release.proper_count = parser.proper_count
release.title = parser.data
episode.releases.append(release) # pylint:disable=E1103
log.debug('-> added %s' % release)
releases.append(release)
session.flush() # Make sure autonumber ids are populated
return releases
def set_series_begin(series, ep_id):
"""
Set beginning for series
:param Series series: Series instance
:param ep_id: Integer for sequence mode, SxxEyy for episodic and yyyy-mm-dd for date.
:raises ValueError: If malformed ep_id or series in different mode
"""
# If identified_by is not explicitly specified, auto-detect it based on begin identifier
# TODO: use some method of series parser to do the identifier parsing
session = Session.object_session(series)
if isinstance(ep_id, int):
identified_by = 'sequence'
elif re.match(r'(?i)^S\d{1,4}E\d{1,2}$', ep_id):
identified_by = 'ep'
ep_id = ep_id.upper()
elif re.match(r'\d{4}-\d{2}-\d{2}', ep_id):
identified_by = 'date'
else:
# Check if a sequence identifier was passed as a string
try:
ep_id = int(ep_id)
identified_by = 'sequence'
except ValueError:
raise ValueError('`%s` is not a valid episode identifier' % ep_id)
if series.identified_by not in ['auto', '', None]:
if identified_by != series.identified_by:
raise ValueError('`begin` value `%s` does not match identifier type for identified_by `%s`' %
(ep_id, series.identified_by))
series.identified_by = identified_by
episode = (session.query(Episode).filter(Episode.series_id == series.id).
filter(Episode.identified_by == series.identified_by).
filter(Episode.identifier == str(ep_id)).first())
if not episode:
# TODO: Don't duplicate code from self.store method
episode = Episode()
episode.identifier = ep_id
episode.identified_by = identified_by
if identified_by == 'ep':
match = re.match(r'S(\d+)E(\d+)', ep_id)
episode.season = int(match.group(1))
episode.number = int(match.group(2))
elif identified_by == 'sequence':
episode.season = 0
episode.number = ep_id
series.episodes.append(episode)
# Need to flush to get an id on new Episode before assigning it as series begin
session.flush()
series.begin = episode
def forget_series(name):
"""Remove a whole series `name` from database."""
session = Session()
try:
series = session.query(Series).filter(Series.name == name).all()
if series:
for s in series:
session.delete(s)
session.commit()
log.debug('Removed series %s from database.', name)
else:
raise ValueError('Unknown series %s' % name)
finally:
session.close()
def forget_series_episode(name, identifier):
"""Remove all episodes by `identifier` from series `name` from database."""
session = Session()
try:
series = session.query(Series).filter(Series.name == name).first()
if series:
episode = session.query(Episode).filter(Episode.identifier == identifier).\
filter(Episode.series_id == series.id).first()
if episode:
series.identified_by = '' # reset identified_by flag so that it will be recalculated
session.delete(episode)
session.commit()
log.debug('Episode %s from series %s removed from database.', identifier, name)
else:
raise ValueError('Unknown identifier %s for series %s' % (identifier, name.capitalize()))
else:
raise ValueError('Unknown series %s' % name)
finally:
session.close()
def populate_entry_fields(entry, parser):
entry['series_parser'] = copy(parser)
# add series, season and episode to entry
entry['series_name'] = parser.name
if 'quality' in entry and entry['quality'] != parser.quality:
log.verbose('Found different quality for %s. Was %s, overriding with %s.' %
(entry['title'], entry['quality'], parser.quality))
entry['quality'] = parser.quality
entry['proper'] = parser.proper
entry['proper_count'] = parser.proper_count
if parser.id_type == 'ep':
entry['series_season'] = parser.season
entry['series_episode'] = parser.episode
elif parser.id_type == 'date':
entry['series_date'] = parser.id
entry['series_season'] = parser.id.year
else:
entry['series_season'] = time.gmtime().tm_year
entry['series_episodes'] = parser.episodes
entry['series_id'] = parser.pack_identifier
entry['series_id_type'] = parser.id_type
class FilterSeriesBase(object):
"""
Class that contains helper methods for both filter.series as well as plugins that configure it,
such as all_series, series_premiere and configure_series.
"""
@property
def settings_schema(self):
return {
'title': 'series options',
'type': 'object',
'properties': {
'path': {'type': 'string'},
'set': {'type': 'object'},
'alternate_name': one_or_more({'type': 'string'}),
# Custom regexp options
'name_regexp': one_or_more({'type': 'string', 'format': 'regex'}),
'ep_regexp': one_or_more({'type': 'string', 'format': 'regex'}),
'date_regexp': one_or_more({'type': 'string', 'format': 'regex'}),
'sequence_regexp': one_or_more({'type': 'string', 'format': 'regex'}),
'id_regexp': one_or_more({'type': 'string', 'format': 'regex'}),
# Date parsing options
'date_yearfirst': {'type': 'boolean'},
'date_dayfirst': {'type': 'boolean'},
# Quality options
'quality': {'type': 'string', 'format': 'quality_requirements'},
'qualities': {'type': 'array', 'items': {'type': 'string', 'format': 'quality_requirements'}},
'timeframe': {'type': 'string', 'format': 'interval'},
'upgrade': {'type': 'boolean'},
'target': {'type': 'string', 'format': 'quality_requirements'},
# Specials
'specials': {'type': 'boolean'},
# Propers (can be boolean, or an interval string)
'propers': {'type': ['boolean', 'string'], 'format': 'interval'},
# Identified by
'identified_by': {
'type': 'string', 'enum': ['ep', 'date', 'sequence', 'id', 'auto']
},
# Strict naming
'exact': {'type': 'boolean'},
# Begin takes an ep, sequence or date identifier
'begin': {
'oneOf': [
{'name': 'ep identifier', 'type': 'string', 'pattern': r'(?i)^S\d{2,4}E\d{2,3}$',
'error_pattern': 'episode identifiers should be in the form `SxxEyy`'},
{'name': 'date identifier', 'type': 'string', 'pattern': r'^\d{4}-\d{2}-\d{2}$',
'error_pattern': 'date identifiers must be in the form `YYYY-MM-DD`'},
{'name': 'sequence identifier', 'type': 'integer', 'minimum': 0}
]
},
'from_group': one_or_more({'type': 'string'}),
'parse_only': {'type': 'boolean'},
'special_ids': one_or_more({'type': 'string'}),
'prefer_specials': {'type': 'boolean'},
'assume_special': {'type': 'boolean'},
'tracking': {'type': ['boolean', 'string'], 'enum': [True, False, 'backfill']}
},
'additionalProperties': False
}
def make_grouped_config(self, config):
"""Turns a simple series list into grouped format with a empty settings dict"""
if not isinstance(config, dict):
# convert simplest configuration internally grouped format
config = {'simple': config, 'settings': {}}
else:
# already in grouped format, just make sure there's settings
config.setdefault('settings', {})
return config
def apply_group_options(self, config):
"""Applies group settings to each item in series group and removes settings dict."""
# Make sure config is in grouped format first
config = self.make_grouped_config(config)
for group_name in config:
if group_name == 'settings':
continue
group_series = []
if isinstance(group_name, basestring):
# if group name is known quality, convenience create settings with that quality
try:
qualities.Requirements(group_name)
config['settings'].setdefault(group_name, {}).setdefault('target', group_name)
except ValueError:
# If group name is not a valid quality requirement string, do nothing.
pass
for series in config[group_name]:
# convert into dict-form if necessary
series_settings = {}
group_settings = config['settings'].get(group_name, {})
if isinstance(series, dict):
series, series_settings = series.items()[0]
if series_settings is None:
raise Exception('Series %s has unexpected \':\'' % series)
# Make sure this isn't a series with no name
if not series:
log.warning('Series config contains a series with no name!')
continue
# make sure series name is a string to accommodate for "24"
if not isinstance(series, basestring):
series = unicode(series)
# if series have given path instead of dict, convert it into a dict
if isinstance(series_settings, basestring):
series_settings = {'path': series_settings}
# merge group settings into this series settings
merge_dict_from_to(group_settings, series_settings)
# Convert to dict if watched is in SXXEXX format
if isinstance(series_settings.get('watched'), basestring):
season, episode = series_settings['watched'].upper().split('E')
season = season.lstrip('S')
series_settings['watched'] = {'season': int(season), 'episode': int(episode)}
# Convert enough to target for backwards compatibility
if 'enough' in series_settings:
                    log.warning('Series setting `enough` has been renamed to `target`, please update your config.')
series_settings.setdefault('target', series_settings['enough'])
# Add quality: 720p if timeframe is specified with no target
if 'timeframe' in series_settings and 'qualities' not in series_settings:
series_settings.setdefault('target', '720p hdtv+')
group_series.append({series: series_settings})
config[group_name] = group_series
del config['settings']
return config
def prepare_config(self, config):
"""Generate a list of unique series from configuration.
This way we don't need to handle two different configuration formats in the logic.
Applies group settings with advanced form."""
config = self.apply_group_options(config)
return self.combine_series_lists(*config.values())
def combine_series_lists(self, *series_lists, **kwargs):
"""Combines the series from multiple lists, making sure there are no doubles.
        If the keyword argument log_once is set to True, duplicates are reported only once via
        log_once; otherwise a warning is logged each time a duplicate is found."""
unique_series = {}
for series_list in series_lists:
for series in series_list:
series, series_settings = series.items()[0]
if series not in unique_series:
unique_series[series] = series_settings
else:
if kwargs.get('log_once'):
log_once('Series %s is already configured in series plugin' % series, log)
else:
log.warning('Series %s is configured multiple times in series plugin.', series)
# Combine the config dicts for both instances of the show
unique_series[series].update(series_settings)
# Turn our all_series dict back into a list
# sort by reverse alpha, so that in the event of 2 series with common prefix, more specific is parsed first
return [{series: unique_series[series]} for series in sorted(unique_series, reverse=True)]
def merge_config(self, task, config):
"""Merges another series config dict in with the current one."""
# Make sure we start with both configs as a list of complex series
native_series = self.prepare_config(task.config.get('series', {}))
merging_series = self.prepare_config(config)
task.config['series'] = self.combine_series_lists(merging_series, native_series, log_once=True)
return task.config['series']
class FilterSeries(FilterSeriesBase):
"""
Intelligent filter for tv-series.
http://flexget.com/wiki/Plugins/series
"""
@property
def schema(self):
return {
'type': ['array', 'object'],
# simple format:
# - series
# - another series
'items': {
'type': ['string', 'number', 'object'],
'additionalProperties': self.settings_schema
},
# advanced format:
# settings:
# group: {...}
# group:
# {...}
'properties': {
'settings': {
'type': 'object',
'additionalProperties': self.settings_schema
}
},
'additionalProperties': {
'type': 'array',
'items': {
'type': ['string', 'number', 'object'],
'additionalProperties': self.settings_schema
}
}
}
def __init__(self):
try:
self.backlog = plugin.get_plugin_by_name('backlog')
except plugin.DependencyError:
            log.warning('Unable to utilize backlog plugin, episodes may slip through the timeframe')
def auto_exact(self, config):
"""Automatically enable exact naming option for series that look like a problem"""
# generate list of all series in one dict
all_series = {}
for series_item in config:
series_name, series_config = series_item.items()[0]
all_series[series_name] = series_config
# scan for problematic names, enable exact mode for them
for series_name, series_config in all_series.iteritems():
for name in all_series.keys():
if (name.lower().startswith(series_name.lower())) and \
(name.lower() != series_name.lower()):
                    if 'exact' not in series_config:
log.verbose('Auto enabling exact matching for series %s (reason %s)', series_name, name)
series_config['exact'] = True
# Run after metainfo_quality and before metainfo_series
@plugin.priority(125)
def on_task_metainfo(self, task, config):
config = self.prepare_config(config)
self.auto_exact(config)
for series_item in config:
series_name, series_config = series_item.items()[0]
log.trace('series_name: %s series_config: %s', series_name, series_config)
start_time = time.clock()
self.parse_series(task.entries, series_name, series_config)
took = time.clock() - start_time
log.trace('parsing %s took %s', series_name, took)
def on_task_filter(self, task, config):
"""Filter series"""
# Parsing was done in metainfo phase, create the dicts to pass to process_series from the task entries
# key: series episode identifier ie. S01E02
# value: seriesparser
config = self.prepare_config(config)
found_series = {}
for entry in task.entries:
if entry.get('series_name') and entry.get('series_id') is not None and entry.get('series_parser'):
found_series.setdefault(entry['series_name'], []).append(entry)
for series_item in config:
with Session() as session:
series_name, series_config = series_item.items()[0]
if series_config.get('parse_only'):
log.debug('Skipping filtering of series %s because of parse_only', series_name)
continue
# Make sure number shows (e.g. 24) are turned into strings
series_name = unicode(series_name)
db_series = session.query(Series).filter(Series.name == series_name).first()
if not db_series:
log.debug('adding series %s into db', series_name)
db_series = Series()
db_series.name = series_name
db_series.identified_by = series_config.get('identified_by', 'auto')
session.add(db_series)
log.debug('-> added %s' % db_series)
session.flush() # Flush to get an id on series before adding alternate names.
alts = series_config.get('alternate_name', [])
if not isinstance(alts, list):
alts = [alts]
for alt in alts:
_add_alt_name(alt, db_series, series_name, session)
                if series_name not in found_series:
continue
series_entries = {}
for entry in found_series[series_name]:
# store found episodes into database and save reference for later use
releases = store_parser(session, entry['series_parser'], series=db_series)
entry['series_releases'] = [r.id for r in releases]
series_entries.setdefault(releases[0].episode, []).append(entry)
                    # TODO: Unfortunately we are setting these again, even though they were set in metainfo. This is
                    # for the benefit of all_series and series_premiere. Figure out a better way.
# set custom download path
if 'path' in series_config:
log.debug('setting %s custom path to %s', entry['title'], series_config.get('path'))
# Just add this to the 'set' dictionary, so that string replacement is done cleanly
series_config.setdefault('set', {}).update(path=series_config['path'])
# accept info from set: and place into the entry
if 'set' in series_config:
set = plugin.get_plugin_by_name('set')
# TODO: Could cause lazy lookups. We don't really want to have a transaction open during this.
set.instance.modify(entry, series_config.get('set'))
# If we didn't find any episodes for this series, continue
if not series_entries:
log.trace('No entries found for %s this run.', series_name)
continue
# configuration always overrides everything
if series_config.get('identified_by', 'auto') != 'auto':
db_series.identified_by = series_config['identified_by']
# if series doesn't have identified_by flag already set, calculate one now that new eps are added to db
if not db_series.identified_by or db_series.identified_by == 'auto':
db_series.identified_by = auto_identified_by(db_series)
log.debug('identified_by set to \'%s\' based on series history', db_series.identified_by)
log.trace('series_name: %s series_config: %s', series_name, series_config)
start_time = time.clock()
self.process_series(task, series_entries, series_config)
took = time.clock() - start_time
log.trace('processing %s took %s', series_name, took)
def parse_series(self, entries, series_name, config):
"""
Search for `series_name` and populate all `series_*` fields in entries when successfully parsed
:param entries: List of entries to process
:param series_name: Series name which is being processed
:param config: Series config being processed
"""
def get_as_array(config, key):
"""Return configuration key as array, even if given as a single string"""
v = config.get(key, [])
if isinstance(v, basestring):
return [v]
return v
# set parser flags flags based on config / database
identified_by = config.get('identified_by', 'auto')
if identified_by == 'auto':
with Session() as session:
series = session.query(Series).filter(Series.name == series_name).first()
if series:
# set flag from database
identified_by = series.identified_by or 'auto'
params = dict(identified_by=identified_by,
alternate_names=get_as_array(config, 'alternate_name'),
name_regexps=get_as_array(config, 'name_regexp'),
strict_name=config.get('exact', False),
allow_groups=get_as_array(config, 'from_group'),
date_yearfirst=config.get('date_yearfirst'),
date_dayfirst=config.get('date_dayfirst'),
special_ids=get_as_array(config, 'special_ids'),
prefer_specials=config.get('prefer_specials'),
assume_special=config.get('assume_special'))
for id_type in SERIES_ID_TYPES:
params[id_type + '_regexps'] = get_as_array(config, id_type + '_regexp')
for entry in entries:
# skip processed entries
if (entry.get('series_parser') and entry['series_parser'].valid and
entry['series_parser'].name.lower() != series_name.lower()):
continue
# Quality field may have been manipulated by e.g. assume_quality. Use quality field from entry if available.
parsed = get_plugin_by_name('parsing').instance.parse_series(entry['title'], name=series_name, **params)
if not parsed.valid:
continue
parsed.field = 'title'
log.debug('%s detected as %s, field: %s', entry['title'], parsed, parsed.field)
populate_entry_fields(entry, parsed)
# set custom download path
if 'path' in config:
log.debug('setting %s custom path to %s', entry['title'], config.get('path'))
# Just add this to the 'set' dictionary, so that string replacement is done cleanly
config.setdefault('set', {}).update(path=config['path'])
# accept info from set: and place into the entry
if 'set' in config:
set = plugin.get_plugin_by_name('set')
set.instance.modify(entry, config.get('set'))
def process_series(self, task, series_entries, config):
"""
Accept or Reject episode from available releases, or postpone choosing.
:param task: Current Task
:param series_entries: dict mapping Episodes to entries for that episode
:param config: Series configuration
"""
for ep, entries in series_entries.iteritems():
if not entries:
continue
reason = None
# sort episodes in order of quality
entries.sort(key=lambda e: e['series_parser'], reverse=True)
log.debug('start with episodes: %s', [e['title'] for e in entries])
# reject episodes that have been marked as watched in config file
if ep.series.begin:
if ep < ep.series.begin:
for entry in entries:
entry.reject('Episode `%s` is before begin value of `%s`' %
(ep.identifier, ep.series.begin.identifier))
continue
# skip special episodes if special handling has been turned off
if not config.get('specials', True) and ep.identified_by == 'special':
log.debug('Skipping special episode as support is turned off.')
continue
log.debug('current episodes: %s', [e['title'] for e in entries])
# quality filtering
if 'quality' in config:
entries = self.process_quality(config, entries)
if not entries:
continue
reason = 'matches quality'
# Many of the following functions need to know this info. Only look it up once.
downloaded = ep.downloaded_releases
downloaded_qualities = [rls.quality for rls in downloaded]
# proper handling
log.debug('-' * 20 + ' process_propers -->')
entries = self.process_propers(config, ep, entries)
if not entries:
continue
# Remove any eps we already have from the list
for entry in reversed(entries): # Iterate in reverse so we can safely remove from the list while iterating
if entry['series_parser'].quality in downloaded_qualities:
entry.reject('quality already downloaded')
entries.remove(entry)
if not entries:
continue
# Figure out if we need an additional quality for this ep
if downloaded:
if config.get('upgrade'):
# Remove all the qualities lower than what we have
for entry in reversed(entries):
if entry['series_parser'].quality < max(downloaded_qualities):
entry.reject('worse quality than already downloaded.')
entries.remove(entry)
if not entries:
continue
if 'target' in config and config.get('upgrade'):
# If we haven't grabbed the target yet, allow upgrade to it
self.process_timeframe_target(config, entries, downloaded)
continue
if 'qualities' in config:
# Grab any additional wanted qualities
log.debug('-' * 20 + ' process_qualities -->')
self.process_qualities(config, entries, downloaded)
continue
elif config.get('upgrade'):
entries[0].accept('is an upgrade to existing quality')
continue
# Reject eps because we have them
for entry in entries:
entry.reject('episode has already been downloaded')
continue
best = entries[0]
log.debug('continuing w. episodes: %s', [e['title'] for e in entries])
log.debug('best episode is: %s', best['title'])
# episode tracking. used only with season and sequence based series
if ep.identified_by in ['ep', 'sequence']:
if task.options.disable_tracking or not config.get('tracking', True):
log.debug('episode tracking disabled')
else:
log.debug('-' * 20 + ' episode tracking -->')
# Grace is number of distinct eps in the task for this series + 2
backfill = config.get('tracking') == 'backfill'
if self.process_episode_tracking(ep, entries, grace=len(series_entries)+2, backfill=backfill):
continue
# quality
if 'target' in config or 'qualities' in config:
if 'target' in config:
if self.process_timeframe_target(config, entries, downloaded):
continue
elif 'qualities' in config:
if self.process_qualities(config, entries, downloaded):
continue
# We didn't make a quality target match, check timeframe to see
# if we should get something anyway
if 'timeframe' in config:
if self.process_timeframe(task, config, ep, entries):
continue
reason = 'Timeframe expired, choosing best available'
else:
# If target or qualities is configured without timeframe, don't accept anything now
continue
# Just pick the best ep if we get here
reason = reason or 'choosing best available quality'
best.accept(reason)
def process_propers(self, config, episode, entries):
"""
        Accepts needed propers. Rejects releases for which a better proper of the same quality is available.
:returns: A list of episodes to continue processing.
"""
pass_filter = []
best_propers = []
        # Since entries are sorted by quality and then proper_count, we always see the highest proper for a quality first.
(last_qual, best_proper) = (None, 0)
for entry in entries:
if entry['series_parser'].quality != last_qual:
last_qual, best_proper = entry['series_parser'].quality, entry['series_parser'].proper_count
best_propers.append(entry)
if entry['series_parser'].proper_count < best_proper:
                # reject releases of a quality for which a better proper is available
entry.reject('nuked')
else:
pass_filter.append(entry)
# If propers support is turned off, or proper timeframe has expired just return the filtered eps list
if isinstance(config.get('propers', True), bool):
if not config.get('propers', True):
return pass_filter
else:
# propers with timeframe
log.debug('proper timeframe: %s', config['propers'])
timeframe = parse_timedelta(config['propers'])
first_seen = episode.first_seen
expires = first_seen + timeframe
log.debug('propers timeframe: %s', timeframe)
log.debug('first_seen: %s', first_seen)
log.debug('propers ignore after: %s', expires)
if datetime.now() > expires:
log.debug('propers timeframe expired')
return pass_filter
downloaded_qualities = dict((d.quality, d.proper_count) for d in episode.downloaded_releases)
log.debug('propers - downloaded qualities: %s' % downloaded_qualities)
# Accept propers we actually need, and remove them from the list of entries to continue processing
for entry in best_propers:
if (entry['series_parser'].quality in downloaded_qualities and
entry['series_parser'].proper_count > downloaded_qualities[entry['series_parser'].quality]):
entry.accept('proper')
pass_filter.remove(entry)
return pass_filter
def process_timeframe_target(self, config, entries, downloaded=None):
"""
Accepts first episode matching the quality configured for the series.
:return: True if accepted something
"""
req = qualities.Requirements(config['target'])
if downloaded:
if any(req.allows(release.quality) for release in downloaded):
log.debug('Target quality already achieved.')
return True
# scan for quality
for entry in entries:
if req.allows(entry['series_parser'].quality):
log.debug('Series accepting. %s meets quality %s', entry['title'], req)
entry.accept('target quality')
return True
def process_quality(self, config, entries):
"""
        Filters out episodes that do not fall within our defined quality standards.
:returns: A list of eps that are in the acceptable range
"""
reqs = qualities.Requirements(config['quality'])
log.debug('quality req: %s', reqs)
result = []
# see if any of the eps match accepted qualities
for entry in entries:
if reqs.allows(entry['quality']):
result.append(entry)
else:
log.verbose('Ignored `%s`. Does not meet quality requirement `%s`.', entry['title'], reqs)
if not result:
log.debug('no quality meets requirements')
return result
def process_episode_tracking(self, episode, entries, grace, backfill=False):
"""
        Rejects all episodes that are too old or too new, and returns True when this happens.
:param episode: Episode model
:param list entries: List of entries for given episode.
:param int grace: Number of episodes before or after latest download that are allowed.
:param bool backfill: If this is True, previous episodes will be allowed,
but forward advancement will still be restricted.
"""
latest = get_latest_release(episode.series)
if episode.series.begin and episode.series.begin > latest:
latest = episode.series.begin
log.debug('latest download: %s' % latest)
log.debug('current: %s' % episode)
if latest and latest.identified_by == episode.identified_by:
# Allow any previous episodes this season, or previous episodes within grace if sequence mode
if (not backfill and (episode.season < latest.season or
(episode.identified_by == 'sequence' and episode.number < (latest.number - grace)))):
log.debug('too old! rejecting all occurrences')
for entry in entries:
entry.reject('Too much in the past from latest downloaded episode %s' % latest.identifier)
return True
# Allow future episodes within grace, or first episode of next season
if (episode.season > latest.season + 1 or (episode.season > latest.season and episode.number > 1) or
(episode.season == latest.season and episode.number > (latest.number + grace))):
log.debug('too new! rejecting all occurrences')
for entry in entries:
entry.reject('Too much in the future from latest downloaded episode %s. '
'See `--disable-tracking` if this should be downloaded.' % latest.identifier)
return True
def process_timeframe(self, task, config, episode, entries):
"""
Runs the timeframe logic to determine if we should wait for a better quality.
Saves current best to backlog if timeframe has not expired.
:returns: True - if we should keep the quality (or qualities) restriction
False - if the quality restriction should be released, due to timeframe expiring
"""
if 'timeframe' not in config:
return True
best = entries[0]
# parse options
log.debug('timeframe: %s', config['timeframe'])
timeframe = parse_timedelta(config['timeframe'])
if config.get('quality'):
req = qualities.Requirements(config['quality'])
seen_times = [rls.first_seen for rls in episode.releases if req.allows(rls.quality)]
else:
seen_times = [rls.first_seen for rls in episode.releases]
# Somehow we can get here without having qualifying releases (#2779) make sure min doesn't crash
first_seen = min(seen_times) if seen_times else datetime.now()
expires = first_seen + timeframe
log.debug('timeframe: %s, first_seen: %s, expires: %s', timeframe, first_seen, expires)
stop = normalize_series_name(task.options.stop_waiting) == episode.series._name_normalized
if expires <= datetime.now() or stop:
# Expire timeframe, accept anything
log.info('Timeframe expired, releasing quality restriction.')
return False
else:
# verbose waiting, add to backlog
diff = expires - datetime.now()
hours, remainder = divmod(diff.seconds, 3600)
hours += diff.days * 24
minutes, seconds = divmod(remainder, 60)
log.info('Timeframe waiting %s for %sh:%smin, currently best is %s' %
(episode.series.name, hours, minutes, best['title']))
# add best entry to backlog (backlog is able to handle duplicate adds)
if self.backlog:
self.backlog.instance.add_backlog(task, best)
return True
def process_qualities(self, config, entries, downloaded):
"""
Handles all modes that can accept more than one quality per episode. (qualities, upgrade)
:returns: True - if at least one wanted quality has been downloaded or accepted.
False - if no wanted qualities have been accepted
"""
# Get list of already downloaded qualities
downloaded_qualities = [r.quality for r in downloaded]
log.debug('downloaded_qualities: %s', downloaded_qualities)
# If qualities key is configured, we only want qualities defined in it.
wanted_qualities = set([qualities.Requirements(name) for name in config.get('qualities', [])])
# Compute the requirements from our set that have not yet been fulfilled
still_needed = [req for req in wanted_qualities if not any(req.allows(qual) for qual in downloaded_qualities)]
log.debug('Wanted qualities: %s', wanted_qualities)
def wanted(quality):
"""Returns True if we want this quality based on the config options."""
wanted = not wanted_qualities or any(req.allows(quality) for req in wanted_qualities)
if config.get('upgrade'):
wanted = wanted and quality > max(downloaded_qualities or [qualities.Quality()])
return wanted
for entry in entries:
quality = entry['series_parser'].quality
log.debug('ep: %s quality: %s', entry['title'], quality)
if not wanted(quality):
log.debug('%s is unwanted quality', quality)
continue
if any(req.allows(quality) for req in still_needed):
# Don't get worse qualities in upgrade mode
if config.get('upgrade'):
if downloaded_qualities and quality < max(downloaded_qualities):
continue
entry.accept('quality wanted')
downloaded_qualities.append(quality)
downloaded.append(entry)
# Re-calculate what is still needed
still_needed = [req for req in still_needed if not req.allows(quality)]
return bool(downloaded_qualities)
def on_task_learn(self, task, config):
"""Learn succeeded episodes"""
log.debug('on_task_learn')
for entry in task.accepted:
if 'series_releases' in entry:
with Session() as session:
num = (session.query(Release).filter(Release.id.in_(entry['series_releases'])).
update({'downloaded': True}, synchronize_session=False))
log.debug('marking %s releases as downloaded for %s', num, entry)
else:
log.debug('%s is not a series', entry['title'])
class SeriesDBManager(FilterSeriesBase):
"""Update in the database with series info from the config"""
@plugin.priority(0)
def on_task_start(self, task, config):
if not task.config_modified:
return
# Clear all series from this task
with Session() as session:
session.query(SeriesTask).filter(SeriesTask.name == task.name).delete()
if not task.config.get('series'):
return
config = self.prepare_config(task.config['series'])
for series_item in config:
series_name, series_config = series_item.items()[0]
# Make sure number shows (e.g. 24) are turned into strings
series_name = unicode(series_name)
db_series = session.query(Series).filter(Series.name == series_name).first()
if db_series:
# Update database with capitalization from config
db_series.name = series_name
alts = series_config.get('alternate_name', [])
if not isinstance(alts, list):
alts = [alts]
# Remove the alternate names not present in current config
db_series.alternate_names = [alt for alt in db_series.alternate_names if alt.alt_name in alts]
# Add/update the possibly new alternate names
for alt in alts:
_add_alt_name(alt, db_series, series_name, session)
else:
log.debug('adding series %s into db', series_name)
db_series = Series()
db_series.name = series_name
session.add(db_series)
session.flush() # flush to get id on series before creating alternate names
log.debug('-> added %s' % db_series)
alts = series_config.get('alternate_name', [])
if not isinstance(alts, list):
alts = [alts]
for alt in alts:
_add_alt_name(alt, db_series, series_name, session)
db_series.in_tasks.append(SeriesTask(task.name))
if series_config.get('identified_by', 'auto') != 'auto':
db_series.identified_by = series_config['identified_by']
# Set the begin episode
if series_config.get('begin'):
try:
set_series_begin(db_series, series_config['begin'])
except ValueError as e:
raise plugin.PluginError(e)
def _add_alt_name(alt, db_series, series_name, session):
alt = unicode(alt)
db_series_alt = session.query(AlternateNames).join(Series).filter(AlternateNames.alt_name == alt).first()
if db_series_alt and db_series_alt.series_id == db_series.id:
# Already exists, no need to create it then
# TODO is checking the list for duplicates faster/better than querying the DB?
db_series_alt.alt_name = alt
elif db_series_alt:
# Alternate name already exists for another series. Not good.
raise plugin.PluginError('Error adding alternate name for %s. %s is already associated with %s. '
'Check your config.' % (series_name, alt, db_series_alt.series.name) )
else:
log.debug('adding alternate name %s for %s into db' % (alt, series_name))
db_series_alt = AlternateNames(alt)
db_series.alternate_names.append(db_series_alt)
log.debug('-> added %s' % db_series_alt)
@event('plugin.register')
def register_plugin():
plugin.register(FilterSeries, 'series', api_ver=2)
# This is a builtin so that it can update the database for tasks that may have had series plugin removed
plugin.register(SeriesDBManager, 'series_db', builtin=True, api_ver=2)
@event('options.register')
def register_parser_arguments():
exec_parser = options.get_parser('execute')
exec_parser.add_argument('--stop-waiting', action='store', dest='stop_waiting', default='',
metavar='NAME', help='stop timeframe for a given series')
exec_parser.add_argument('--disable-tracking', action='store_true', default=False,
help='disable episode advancement for this run')
# Backwards compatibility
exec_parser.add_argument('--disable-advancement', action='store_true', dest='disable_tracking',
help=argparse.SUPPRESS)
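

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the plugin itself): shows how the simple
# and advanced configuration formats are normalized by
# FilterSeriesBase.prepare_config(). The group name, series names and path
# below are invented, and running this requires the same FlexGet (Python 2)
# environment that the rest of this module assumes.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _example_config = {
        'settings': {'my shows': {'quality': '720p hdtv+'}},
        'my shows': ['Example Series', {'Another Series': {'path': '/downloads/tv'}}],
    }
    # Group settings are merged into every series of the group and the result
    # is a flat list of {series_name: series_settings} dicts, the same
    # structure that the simple list format is converted into.
    print(FilterSeriesBase().prepare_config(_example_config))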
| tvcsantos/Flexget | flexget/plugins/filter/series.py | Python | mit | 70,663 |
from django.dispatch import dispatcher
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
try:
from notification import models as notification
def create_notice_types(app, created_models, verbosity, **kwargs):
notification.create_notice_type("swaps_proposal", _("New Swap Proposal"), _("someone has proposed a swap for one of your offers"), default=2)
notification.create_notice_type("swaps_acceptance", _("Swap Acceptance"), _("someone has accepted a swap that you proposed"), default=2)
notification.create_notice_type("swaps_rejection", _("Swap Rejection"), _("someone has rejected a swap that you proposed"), default=2)
notification.create_notice_type("swaps_cancellation", _("Swap Cancellation"), _("someone has canceled a proposed swap for one of your offers"), default=2)
notification.create_notice_type("swaps_proposing_offer_changed", _("Swap Proposing Offer Changed"), _("someone has changed their proposing offer in a swap for one of your offers"), default=2)
notification.create_notice_type("swaps_responding_offer_changed", _("Swap Responding Offer Changed"), _("someone has changed their responding offer in a swap that you proposed"), default=2)
notification.create_notice_type("swaps_comment", _("Swap Comment"), _("someone has commented on a swap in which your offer is involved"), default=2)
notification.create_notice_type("swaps_conflict", _("Swap Conflict"), _("your swap has lost a conflict to another swap"), default=2)
signals.post_syncdb.connect(create_notice_types, sender=notification)
except ImportError:
print "Skipping creation of NoticeTypes as notification app not found" | indro/t2c | apps/external_apps/swaps/management.py | Python | mit | 1,734 |
#!/usr/bin/env python
# coding:utf8
import ctypes
from utils import Loader
from utils import convert_data
import numpy as np
import api
mv_lib = Loader.get_lib()
class TableHandler(object):
'''`TableHandler` is an interface to sync different kinds of values.
If you are not writing python code based on theano or lasagne, you are
supposed to sync models (for initialization) and gradients (during
training) so as to let multiverso help you manage the models in distributed
environments.
Otherwise, you'd better use the classes in `multiverso.theano_ext` or
`multiverso.theano_ext.lasagne_ext`
'''
def __init__(self, size, init_value=None):
raise NotImplementedError("You must implement the __init__ method.")
def get(self, size):
raise NotImplementedError("You must implement the get method.")
def add(self, data, sync=False):
raise NotImplementedError("You must implement the add method.")
# types
C_FLOAT_P = ctypes.POINTER(ctypes.c_float)
class ArrayTableHandler(TableHandler):
'''`ArrayTableHandler` is used to sync array-like (one-dimensional) value.'''
def __init__(self, size, init_value=None):
'''Constructor for syncing array-like (one-dimensional) value.
The `size` should be a int equal to the size of value we want to sync.
If init_value is None, zeros will be used to initialize the table,
otherwise the table will be initialized as the init_value.
*Notice*: Only the init_value from the master will be used!
'''
self._handler = ctypes.c_void_p()
self._size = size
mv_lib.MV_NewArrayTable(size, ctypes.byref(self._handler))
if init_value is not None:
init_value = convert_data(init_value)
# sync add is used because we want to make sure that the initial
# value has taken effect when the call returns. No matter whether
# it is master worker, we should call add to make sure it works in
# sync mode
self.add(init_value if api.is_master_worker() else np.zeros(init_value.shape), sync=True)
def get(self):
'''get the latest value from multiverso ArrayTable
Data type of return value is numpy.ndarray with one-dimensional
'''
data = np.zeros((self._size, ), dtype=np.dtype("float32"))
mv_lib.MV_GetArrayTable(self._handler, data.ctypes.data_as(C_FLOAT_P), self._size)
return data
def add(self, data, sync=False):
'''add the data to the multiverso ArrayTable
Data type of `data` is numpy.ndarray with one-dimensional
If sync is True, this call will blocked by IO until the call finish.
Otherwise it will return immediately
'''
data = convert_data(data)
assert(data.size == self._size)
if sync:
mv_lib.MV_AddArrayTable(self._handler, data.ctypes.data_as(C_FLOAT_P), self._size)
else:
mv_lib.MV_AddAsyncArrayTable(self._handler, data.ctypes.data_as(C_FLOAT_P), self._size)
class MatrixTableHandler(TableHandler):
def __init__(self, num_row, num_col, init_value=None):
'''Constructor for syncing matrix-like (two-dimensional) value.
The `num_row` should be the number of rows and the `num_col` should be
the number of columns.
If init_value is None, zeros will be used to initialize the table,
otherwise the table will be initialized as the init_value.
*Notice*: Only the init_value from the master will be used!
'''
self._handler = ctypes.c_void_p()
self._num_row = num_row
self._num_col = num_col
self._size = num_col * num_row
mv_lib.MV_NewMatrixTable(num_row, num_col, ctypes.byref(self._handler))
if init_value is not None:
init_value = convert_data(init_value)
# sync add is used because we want to make sure that the initial
# value has taken effect when the call returns. No matter whether
# it is master worker, we should call add to make sure it works in
# sync mode
self.add(init_value if api.is_master_worker() else np.zeros(init_value.shape), sync=True)
def get(self, row_ids=None):
'''get the latest value from multiverso MatrixTable
If row_ids is None, we will return all rows as numpy.narray , e.g.
array([[1, 3], [3, 4]]).
Otherwise we will return the data according to the row_ids(e.g. you can
pass [1] to row_ids to get only the first row, it will return a
two-dimensional numpy.ndarray with one row)
Data type of return value is numpy.ndarray with two-dimensional
'''
if row_ids is None:
data = np.zeros((self._num_row, self._num_col), dtype=np.dtype("float32"))
mv_lib.MV_GetMatrixTableAll(self._handler, data.ctypes.data_as(C_FLOAT_P), self._size)
return data
else:
row_ids_n = len(row_ids)
int_array_type = ctypes.c_int * row_ids_n
data = np.zeros((row_ids_n, self._num_col), dtype=np.dtype("float32"))
mv_lib.MV_GetMatrixTableByRows(self._handler, data.ctypes.data_as(C_FLOAT_P),
row_ids_n * self._num_col,
int_array_type(*row_ids), row_ids_n)
return data
def add(self, data=None, row_ids=None, sync=False):
'''add the data to the multiverso MatrixTable
If row_ids is None, we will add all data, and the data
should be a list, e.g. [1, 2, 3, ...]
Otherwise we will add the data according to the row_ids
Data type of `data` is numpy.ndarray with two-dimensional
If sync is True, this call will blocked by IO until the call finish.
Otherwise it will return immediately
'''
assert(data is not None)
data = convert_data(data)
if row_ids is None:
assert(data.size == self._size)
if sync:
mv_lib.MV_AddMatrixTableAll(self._handler, data.ctypes.data_as(C_FLOAT_P), self._size)
else:
mv_lib.MV_AddAsyncMatrixTableAll(self._handler, data.ctypes.data_as(C_FLOAT_P), self._size)
else:
row_ids_n = len(row_ids)
assert(data.size == row_ids_n * self._num_col)
int_array_type = ctypes.c_int * row_ids_n
if sync:
mv_lib.MV_AddMatrixTableByRows(self._handler, data.ctypes.data_as(C_FLOAT_P),
row_ids_n * self._num_col,
int_array_type(*row_ids), row_ids_n)
else:
mv_lib.MV_AddAsyncMatrixTableByRows(self._handler, data.ctypes.data_as(C_FLOAT_P),
row_ids_n * self._num_col,
int_array_type(*row_ids), row_ids_n)
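

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original binding. It assumes the
# multiverso runtime is started and stopped through this package's api module
# (api.init() / api.shutdown() are assumed entry points here); adapt the
# bootstrap calls to whatever your deployment actually provides.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    api.init()
    # Array table: push a delta synchronously, then read the merged values back.
    arr = ArrayTableHandler(4, init_value=np.zeros(4, dtype="float32"))
    arr.add(np.arange(4, dtype="float32"), sync=True)
    print(arr.get())
    # Matrix table: update a single row by its row id and fetch it again.
    mat = MatrixTableHandler(3, 2)
    mat.add(np.ones((1, 2), dtype="float32"), row_ids=[1], sync=True)
    print(mat.get(row_ids=[1]))
    api.shutdown()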
| you-n-g/multiverso | binding/python/multiverso/tables.py | Python | mit | 7,029 |
import pytest
from hackathon.constants import VE_PROVIDER, TEMPLATE_STATUS
from hackathon.hmongo.models import User, Template, UserHackathon
from hackathon.hmongo.database import add_super_user
@pytest.fixture(scope="class")
def user1():
# return new user named one
one = User(
name="test_one",
nickname="test_one",
avatar_url="/static/pic/monkey-32-32px.png",
is_super=False)
one.set_password("test_password")
one.save()
return one
@pytest.fixture(scope="class")
def user2():
# return new user named two
two = User(
name="test_two",
nickname="test_two",
avatar_url="/static/pic/monkey-32-32px.png",
is_super=False)
two.set_password("test_password")
two.save()
return two
@pytest.fixture(scope="class")
def admin1():
# return new admin named one
admin_one = User(
name="admin_one",
nickname="admin_one",
avatar_url="/static/pic/monkey-32-32px.png",
is_super=True)
admin_one.set_password("test_password")
admin_one.save()
return admin_one
@pytest.fixture(scope="class")
def default_template(user1):
tmpl = Template(
name="test_default_template",
provider=VE_PROVIDER.DOCKER,
status=TEMPLATE_STATUS.UNCHECKED,
description="old_desc",
content="",
template_args={},
docker_image="ubuntu",
network_configs=[],
virtual_environment_count=0,
creator=user1,
)
tmpl.save()
return tmpl | juniwang/open-hackathon | open-hackathon-server/src/tests/conftest.py | Python | mit | 1,534 |
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.shortcuts import render, reverse
from interlecture.local_settings import HOSTNAME,EMAIL_FROM
import datetime
import hashlib
import random
import dateutil.tz
from interauth.forms import UserForm
from interauth.models import UserActivation
def login_view(request):
context = {'app_name': 'login'}
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('select-course'))
elif request.method == 'POST':
user = authenticate(username=request.POST['uname'], password=request.POST['passwd'])
if user is not None:
login(request, user)
if request.user.is_authenticated():
messages.info(request, "True")
return HttpResponseRedirect(reverse('select-course'))
else:
context['args'] = '{failedLogin:true,}'
return render(request, 'base.html', context=context)
else:
context['args'] = '{failedLogin:false,}'
return render(request, 'base.html', context=context)
@login_required
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse('login'))
def register(request):
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('select-course'))
context = {}
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
new_user = User.objects.create_user(
username=request.POST['username'],
email=request.POST['email'],
password=request.POST['password'],
first_name=request.POST['first_name'],
last_name=request.POST['last_name'],
is_active=False
)
# TODO: Email activation of user
init_activation(new_user)
context['args'] = '{}'
context['app_name'] = 'login'
return render(request, 'base.html', context=context)
else:
context['args'] = '{' + form.d2r_friendly_errors() + form.safe_data() + '}'
context['app_name'] = 'register'
return render(request, 'base.html', context=context)
else:
context['app_name'] = 'register'
context['args'] = '{}'
return render(request, 'base.html', context=context)
def activate(request, key):
try:
activation = UserActivation.objects.get(activation_key=key)
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse('register'))
if not activation.user.is_active:
if datetime.datetime.now(tz=dateutil.tz.tzlocal()) > activation.key_expires:
return HttpResponseRedirect(reverse('resend-activation'))
else:
activation.user.is_active = True
activation.user.save()
return HttpResponseRedirect(reverse('select-course'))
def resend_activation_link(request):
    if request.user.is_authenticated():
return HttpResponseRedirect(reverse('select-course'))
if request.method == 'POST':
try:
user = User.objects.get(email=request.POST['email'])
if user.is_active:
return HttpResponseRedirect(reverse('login'))
activation = UserActivation.objects.get(user=user)
            activation.key_expires = datetime.datetime.now(dateutil.tz.tzlocal()) + datetime.timedelta(days=2)
            activation.save()  # persist the extended expiry before re-sending the link
            send_activation_mail(activation)
            return HttpResponseRedirect(reverse('login'))
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse('resend-activation'))
context = {
'app_name': 'resend_activation',
'args': '{}'
}
return render(request, 'base.html', context=context)
def init_activation(user):
salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()[:8]
usernamesalt = user.username
activation = UserActivation()
activation.user = user
activation.activation_key = hashlib.sha3_512(str(salt + usernamesalt).encode('utf8')).hexdigest()
activation.key_expires = datetime.datetime.now(dateutil.tz.tzlocal()) + datetime.timedelta(days=2)
activation.save()
send_activation_mail(activation)
def send_activation_mail(activation):
mail_body = render_to_string('activation_mail.html', context={'activation': activation,'HOSTNAME':HOSTNAME})
_ = send_mail(
subject='Interlecture Account Activation',
message='',
from_email=EMAIL_FROM,
recipient_list=[activation.user.email],
html_message=mail_body
)
| afriestad/interlecture | interlecture/interauth/views.py | Python | mit | 4,933 |
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^$', 'topics.views.topics', name="topic_list"),
url(r'^(?P<topic_id>\d+)/edit/$', 'topics.views.topic', kwargs={"edit": True}, name="topic_edit"),
url(r'^(?P<topic_id>\d+)/delete/$', 'topics.views.topic_delete', name="topic_delete"),
url(r'^(?P<topic_id>\d+)/$', 'topics.views.topic', name="topic_detail"),
)
| ericholscher/pinax | pinax/apps/topics/urls.py | Python | mit | 399 |
import logging
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from wagtail.images.models import Image
from wagtail.images.edit_handlers import ImageChooserPanel
class PhotoGalleryIndexPage(Page):
intro = RichTextField(blank=True)
feed_image = models.ForeignKey(
Image,
help_text="An optional image to represent the page",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
    indexed_fields = ('intro',)
@property
def galleries(self):
galleries = GalleryIndex.objects.live().descendant_of(self)
galleries = galleries.order_by('-first_published_at')
return galleries
def get_context(self, request):
galleries = self.galleries
page = request.GET.get('page')
paginator = Paginator(galleries, 16)
try:
galleries = paginator.page(page)
except PageNotAnInteger:
galleries = paginator.page(1)
except EmptyPage:
galleries = paginator.page(paginator.num_pages)
context = super(PhotoGalleryIndexPage, self).get_context(request)
context['galleries'] = galleries
return context
class Meta:
verbose_name = _('Photo Gallery Index')
PhotoGalleryIndexPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
]
PhotoGalleryIndexPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
IMAGE_ORDER_TYPES = (
(1, 'Image title'),
(2, 'Newest image first'),
)
class GalleryIndex(Page):
intro = RichTextField(
blank=True,
verbose_name=_('Intro text'),
help_text=_('Optional text to go with the intro text.')
)
collection = models.ForeignKey(
'wagtailcore.Collection',
verbose_name=_('Collection'),
null=True,
blank=False,
on_delete=models.SET_NULL,
related_name='+',
help_text=_('Show images in this collection in the gallery view.')
)
images_per_page = models.IntegerField(
default=20,
verbose_name=_('Images per page'),
help_text=_('How many images there should be on one page.')
)
order_images_by = models.IntegerField(choices=IMAGE_ORDER_TYPES, default=1)
feed_image = models.ForeignKey(
Image,
help_text="An optional image to represent the page",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
content_panels = Page.content_panels + [
FieldPanel('intro', classname='full title'),
FieldPanel('collection'),
FieldPanel('images_per_page', classname='full title'),
FieldPanel('order_images_by'),
]
promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
@property
def images(self):
return get_gallery_images(self.collection.name, self)
def get_context(self, request):
images = self.images
page = request.GET.get('page')
paginator = Paginator(images, self.images_per_page)
try:
images = paginator.page(page)
except PageNotAnInteger:
images = paginator.page(1)
except EmptyPage:
images = paginator.page(paginator.num_pages)
context = super(GalleryIndex, self).get_context(request)
context['gallery_images'] = images
return context
class Meta:
verbose_name = _('Photo Gallery')
verbose_name_plural = _('Photo Galleries')
template = getattr(settings, 'GALLERY_TEMPLATE', 'gallery/gallery_index.html')
def get_gallery_images(collection, page=None, tags=None):
images = None
try:
images = Image.objects.filter(collection__name=collection)
if page:
            # values correspond to IMAGE_ORDER_TYPES: 1 = image title, 2 = newest first
            if page.order_images_by == 1:
                images = images.order_by('title')
            elif page.order_images_by == 2:
                images = images.order_by('-created_at')
except Exception as e:
logging.exception(e)
if images and tags:
images = images.filter(tags__name__in=tags).distinct()
return images
| ilendl2/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/gallery/models.py | Python | mit | 4,587 |
"""Splits the time dimension into an reftime and a leadtime
so that multiple files can be concatenated more easily"""
import sys
from netCDF4 import Dataset, num2date, date2num
for f in sys.argv[1:]:
dataset = Dataset(f, 'a')
# rename record dimension to reftime
dataset.renameDimension('record', 'reftime')
# rename time dimension to leadtime
dataset.renameDimension('time', 'leadtime')
dataset.renameVariable('time', 'leadtime')
time = dataset.variables['leadtime']
reftime = dataset.createVariable('reftime', 'f8', ('reftime',))
reftime.units = time.units
reftime.calendar = time.calendar
reftime[0] = time[0]
reftime.standard_name = "forecast_reference_time"
reftime.long_name = "Time of model initialization"
dt = num2date(time[:], units=time.units, calendar=time.calendar)
lt = date2num(dt, units="hours since %s" % dt[0], calendar=time.calendar)
# use the existing time variable to hold lead time information
time.units = "hours"
time.standard_name = "forecast_period"
time.long_name = "hours since forecast_reference_time"
time[:] = lt
del(time.calendar)
dataset.renameVariable('location', 'old_location')
dataset.renameVariable('lat', 'old_lat')
dataset.renameVariable('lon', 'old_lon')
dataset.renameVariable('height', 'old_height')
loc = dataset.createVariable('location', 'S1', ('location','loc_str_length'))
lat = dataset.createVariable('lat', 'f8', ('location',))
lon = dataset.createVariable('lon', 'f8', ('location',))
hgt = dataset.createVariable('height','i4',('height',))
loc[:] = dataset.variables['old_location'][0]
lat[:] = dataset.variables['old_lat'][0]
lon[:] = dataset.variables['old_lon'][0]
hgt[:] = dataset.variables['old_height'][0]
dataset.close()
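
# Example invocation (illustrative; the WRF output file names are hypothetical):
#
#     python split_time_dimension.py wrfout_d01_2016-01-01.nc wrfout_d01_2016-01-02.nc
#
# Each file is modified in place: the record/time dimensions become
# reftime/leadtime and a forecast_reference_time variable is added, so that
# successive forecast files can later be concatenated along reftime.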
| samwisehawkins/wrftools | util/split_time_dimension.py | Python | mit | 1,878 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TargetCostProperties(Model):
"""Properties of a cost target.
:param status: Target cost status. Possible values include: 'Enabled',
'Disabled'
:type status: str or :class:`TargetCostStatus
<azure.mgmt.devtestlabs.models.TargetCostStatus>`
:param target: Lab target cost
:type target: int
:param cost_thresholds: Cost thresholds.
:type cost_thresholds: list of :class:`CostThresholdProperties
<azure.mgmt.devtestlabs.models.CostThresholdProperties>`
:param cycle_start_date_time: Reporting cycle start date.
:type cycle_start_date_time: datetime
:param cycle_end_date_time: Reporting cycle end date.
:type cycle_end_date_time: datetime
:param cycle_type: Reporting cycle type. Possible values include:
'CalendarMonth', 'Custom'
:type cycle_type: str or :class:`ReportingCycleType
<azure.mgmt.devtestlabs.models.ReportingCycleType>`
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'target': {'key': 'target', 'type': 'int'},
'cost_thresholds': {'key': 'costThresholds', 'type': '[CostThresholdProperties]'},
'cycle_start_date_time': {'key': 'cycleStartDateTime', 'type': 'iso-8601'},
'cycle_end_date_time': {'key': 'cycleEndDateTime', 'type': 'iso-8601'},
'cycle_type': {'key': 'cycleType', 'type': 'str'},
}
def __init__(self, status=None, target=None, cost_thresholds=None, cycle_start_date_time=None, cycle_end_date_time=None, cycle_type=None):
self.status = status
self.target = target
self.cost_thresholds = cost_thresholds
self.cycle_start_date_time = cycle_start_date_time
self.cycle_end_date_time = cycle_end_date_time
self.cycle_type = cycle_type
| v-iam/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/target_cost_properties.py | Python | mit | 2,286 |
import unittest
import base64
from hashlib import md5 as basic_md5
from flask import Flask
from flask_httpauth import HTTPBasicAuth
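# These tests use HTTPBasicAuth with get_password and hash_password callbacks,
# so submitted passwords are MD5-hashed before being compared to the stored digests.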
def md5(s):
if isinstance(s, str):
s = s.encode('utf-8')
return basic_md5(s)
class HTTPAuthTestCase(unittest.TestCase):
def setUp(self):
app = Flask(__name__)
app.config['SECRET_KEY'] = 'my secret'
basic_custom_auth = HTTPBasicAuth()
@basic_custom_auth.get_password
def get_basic_custom_auth_get_password(username):
if username == 'john':
return md5('hello').hexdigest()
elif username == 'susan':
return md5('bye').hexdigest()
else:
return None
@basic_custom_auth.hash_password
def basic_custom_auth_hash_password(password):
return md5(password).hexdigest()
@app.route('/')
def index():
return 'index'
@app.route('/basic-custom')
@basic_custom_auth.login_required
def basic_custom_auth_route():
return 'basic_custom_auth:' + basic_custom_auth.username()
self.app = app
self.basic_custom_auth = basic_custom_auth
self.client = app.test_client()
def test_basic_auth_login_valid_with_hash1(self):
creds = base64.b64encode(b'john:hello').decode('utf-8')
response = self.client.get(
'/basic-custom', headers={'Authorization': 'Basic ' + creds})
self.assertEqual(response.data.decode('utf-8'),
'basic_custom_auth:john')
def test_basic_custom_auth_login_valid(self):
creds = base64.b64encode(b'john:hello').decode('utf-8')
response = self.client.get(
'/basic-custom', headers={'Authorization': 'Basic ' + creds})
self.assertEqual(response.data, b'basic_custom_auth:john')
def test_basic_custom_auth_login_invalid(self):
creds = base64.b64encode(b'john:bye').decode('utf-8')
response = self.client.get(
'/basic-custom', headers={"Authorization": "Basic " + creds})
self.assertEqual(response.status_code, 401)
self.assertTrue("WWW-Authenticate" in response.headers)
| miguelgrinberg/Flask-HTTPAuth | tests/test_basic_hashed_password.py | Python | mit | 2,215 |
"""
Module. Includes classes for all time dependent lattice.
"""
import sys
import os
import math
from orbit.teapot import TEAPOT_Lattice
from orbit.parsers.mad_parser import MAD_Parser, MAD_LattLine
from orbit.lattice import AccNode, AccActionsContainer
from orbit.time_dep import waveform
class TIME_DEP_Lattice(TEAPOT_Lattice):
"""
    Subclass of TEAPOT_Lattice.
    TIME_DEP_Lattice can assign time-dependent parameters to the lattice.
    Multi-turn tracking is also available.
"""
def __init__(self, name = "no name"):
TEAPOT_Lattice.__init__(self,name)
self.__latticeDict = {}
self.__TDNodeDict = {}
self.__turns = 1
def setLatticeOrder(self):
"""
        Assigns a unique time-dependent parameter name (TPName) to each lattice node.
"""
accNodes = self.getNodes()
elemInLine = {}
for i in range(len(accNodes)):
elem = accNodes[i]
elemname = elem.getName()
if(elemInLine.has_key(elemname)):
elemInLine[elemname] += 1
else: elemInLine[elemname] = 1
node = self.getNodes()[i]
node.setParam("TPName",node.getName()+"_"+str(elemInLine[elemname]))
#node.setParam("sequence",i+1)
#print "debug node",node.getName(),node.getParamsDict()
def setTimeDepNode(self, TPName, waveform):
"""
        Sets the waveform function for the named TP node before tracking.
"""
flag = 0
for node in self.getNodes():
if (TPName == node.getParam("TPName")):
flag = 1
node.setParam("waveform",waveform)
self.__TDNodeDict[TPName] = node
if not flag:
print "The",TPName,"is not found."
sys.exit(1)
def setTimeDepStrength(self, time):
"""
        Updates the strength of the time-dependent nodes for the given time while tracking.
"""
NodeDict = self.__TDNodeDict
for i in NodeDict.keys():
node = NodeDict[i]
waveform = node.getParam("waveform")
waveform.calc(time)
waveformType = waveform.getType()
if waveformType == "kicker waveform":
if node.getType() == "kick teapot":
self.setParam(node,"kx",waveform.getKx())
self.setParam(node,"ky",waveform.getKy())
else: print "No kicker waveform added. Please check node type."
elif waveformType == "magnet waveform":
strength = waveform.getStrength()
if node.getType() == "multipole teapot":
self.setParam(node,"kls",strength)
elif node.getType() == "quad teapot":
self.setParam(node,"kls",strength)
self.setParam(node,"kq",strength)
elif node.getType() == "solenoid teapot":
self.setParam(node,"B",strength)
else: print "No magnet waveform added. Please check node type."
def setParam(self, node, Kparam, strength):
if node.hasParam(Kparam):
paramval = node.getParam(Kparam)
if Kparam == "kls":
newparamval = []
for i in range(len(paramval)):
newparamval.append(paramval[i]*strength)
paramval = newparamval
else:paramval = paramval*strength
node.setParam(Kparam,paramval)
def trackBunchTurns(self, bunch):
"""
        Tracks the bunch through the lattice over multiple turns.
"""
turns = self.__turns
#start
for i in range(turns-1):
self.trackBunch(bunch)
syncPart = bunch.getSyncParticle()
time = syncPart.time()
self.setTimeDepStrength(time)
print "debug trackBunchTurns time",time,"in",i,"turn"
#getsublattice
#sublattice.trackBunch(bunch)
def setTurns(self, turns, startPosition = 0, endPosition = -1):
"""
        Sets the number of turns and the start/end positions before tracking.
"""
startNode = StartNode("start node")
endNode = EndNode("end node")
self.addNode(startNode, startPosition)
self.addNode(endNode, endPosition)
self.__turns = turns
#print self.getNodes()
class StartNode(AccNode):
def __init__(self, name = "no name"):
AccNode.__init__(self,name)
self.setType("start node")
def track(self, paramsDict):
bunch = paramsDict["bunch"]
#bunch.getSyncParticle().time(0.)
class EndNode(AccNode):
def __init__(self, name = "no name"):
AccNode.__init__(self,name)
self.setType("end node")
def track(self, paramsDict):
pass
| PyORBIT-Collaboration/py-orbit | py/orbit/time_dep/time_dep.py | Python | mit | 4,174 |
from .cpu import Cpu
| Hexadorsimal/pynes | nes/processors/cpu/__init__.py | Python | mit | 21 |
#! /usr/bin/python2
#
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
import argparse
import collections
import re
import subprocess
import sys
__DESCRIPTION = """
Processes a perf.data sample file and reports the hottest Ignition bytecodes,
or write an input file for flamegraph.pl.
"""
__HELP_EPILOGUE = """
examples:
# Get a flamegraph for Ignition bytecode handlers on Octane benchmark,
# without considering the time spent compiling JS code, entry trampoline
# samples and other non-Ignition samples.
#
$ tools/run-perf.sh out/x64.release/d8 \\
--ignition --noturbo --noopt run.js
$ tools/ignition/linux_perf_report.py --flamegraph -o out.collapsed
$ flamegraph.pl --colors js out.collapsed > out.svg
# Same as above, but show all samples, including time spent compiling JS code,
# entry trampoline samples and other samples.
$ # ...
$ tools/ignition/linux_perf_report.py \\
--flamegraph --show-all -o out.collapsed
$ # ...
# Same as above, but show full function signatures in the flamegraph.
$ # ...
$ tools/ignition/linux_perf_report.py \\
--flamegraph --show-full-signatures -o out.collapsed
$ # ...
# See the hottest bytecodes on Octane benchmark, by number of samples.
#
$ tools/run-perf.sh out/x64.release/d8 \\
--ignition --noturbo --noopt octane/run.js
$ tools/ignition/linux_perf_report.py
"""
COMPILER_SYMBOLS_RE = re.compile(
r"v8::internal::(?:\(anonymous namespace\)::)?Compile|v8::internal::Parser")
JIT_CODE_SYMBOLS_RE = re.compile(
r"(LazyCompile|Compile|Eval|Script):(\*|~)")
GC_SYMBOLS_RE = re.compile(
r"v8::internal::Heap::CollectGarbage")
def strip_function_parameters(symbol):
if symbol[-1] != ')': return symbol
pos = 1
parenthesis_count = 0
for c in reversed(symbol):
if c == ')':
parenthesis_count += 1
elif c == '(':
parenthesis_count -= 1
if parenthesis_count == 0:
break
else:
pos += 1
return symbol[:-pos]
def collapsed_callchains_generator(perf_stream, hide_other=False,
hide_compiler=False, hide_jit=False,
hide_gc=False, show_full_signatures=False):
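  # Reads `perf script` output line by line and yields one collapsed call chain
  # per sample, attributing it to a bytecode handler, [jit], [gc], [compiler],
  # [entry trampoline], [misattributed] or [other].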
current_chain = []
skip_until_end_of_chain = False
compiler_symbol_in_chain = False
for line in perf_stream:
# Lines starting with a "#" are comments, skip them.
if line[0] == "#":
continue
line = line.strip()
# Empty line signals the end of the callchain.
if not line:
if (not skip_until_end_of_chain and current_chain
and not hide_other):
current_chain.append("[other]")
yield current_chain
# Reset parser status.
current_chain = []
skip_until_end_of_chain = False
compiler_symbol_in_chain = False
continue
if skip_until_end_of_chain:
continue
# Trim the leading address and the trailing +offset, if present.
symbol = line.split(" ", 1)[1].split("+", 1)[0]
if not show_full_signatures:
symbol = strip_function_parameters(symbol)
# Avoid chains of [unknown]
if (symbol == "[unknown]" and current_chain and
current_chain[-1] == "[unknown]"):
continue
current_chain.append(symbol)
if symbol.startswith("BytecodeHandler:"):
current_chain.append("[interpreter]")
yield current_chain
skip_until_end_of_chain = True
elif JIT_CODE_SYMBOLS_RE.match(symbol):
if not hide_jit:
current_chain.append("[jit]")
yield current_chain
skip_until_end_of_chain = True
elif GC_SYMBOLS_RE.match(symbol):
if not hide_gc:
current_chain.append("[gc]")
yield current_chain
skip_until_end_of_chain = True
elif symbol == "Stub:CEntryStub" and compiler_symbol_in_chain:
if not hide_compiler:
current_chain.append("[compiler]")
yield current_chain
skip_until_end_of_chain = True
elif COMPILER_SYMBOLS_RE.match(symbol):
compiler_symbol_in_chain = True
elif symbol == "Builtin:InterpreterEntryTrampoline":
if len(current_chain) == 1:
yield ["[entry trampoline]"]
else:
# If we see an InterpreterEntryTrampoline which is not at the top of the
# chain and doesn't have a BytecodeHandler above it, then we have
# skipped the top BytecodeHandler due to the top-level stub not building
# a frame. File the chain in the [misattributed] bucket.
current_chain[-1] = "[misattributed]"
yield current_chain
skip_until_end_of_chain = True
def calculate_samples_count_per_callchain(callchains):
chain_counters = collections.defaultdict(int)
for callchain in callchains:
key = ";".join(reversed(callchain))
chain_counters[key] += 1
return chain_counters.items()
def calculate_samples_count_per_handler(callchains):
def strip_handler_prefix_if_any(handler):
return handler if handler[0] == "[" else handler.split(":", 1)[1]
handler_counters = collections.defaultdict(int)
for callchain in callchains:
handler = strip_handler_prefix_if_any(callchain[-1])
handler_counters[handler] += 1
return handler_counters.items()
def write_flamegraph_input_file(output_stream, callchains):
for callchain, count in calculate_samples_count_per_callchain(callchains):
output_stream.write("{}; {}\n".format(callchain, count))
def write_handlers_report(output_stream, callchains):
handler_counters = calculate_samples_count_per_handler(callchains)
samples_num = sum(counter for _, counter in handler_counters)
# Sort by decreasing number of samples
handler_counters.sort(key=lambda entry: entry[1], reverse=True)
for bytecode_name, count in handler_counters:
output_stream.write(
"{}\t{}\t{:.3f}%\n".format(bytecode_name, count,
100. * count / samples_num))
def parse_command_line():
command_line_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__DESCRIPTION,
epilog=__HELP_EPILOGUE)
command_line_parser.add_argument(
"perf_filename",
help="perf sample file to process (default: perf.data)",
nargs="?",
default="perf.data",
metavar="<perf filename>"
)
command_line_parser.add_argument(
"--flamegraph", "-f",
help="output an input file for flamegraph.pl, not a report",
action="store_true",
dest="output_flamegraph"
)
command_line_parser.add_argument(
"--hide-other",
help="Hide other samples",
action="store_true"
)
command_line_parser.add_argument(
"--hide-compiler",
help="Hide samples during compilation",
action="store_true"
)
command_line_parser.add_argument(
"--hide-jit",
help="Hide samples from JIT code execution",
action="store_true"
)
command_line_parser.add_argument(
"--hide-gc",
help="Hide samples from garbage collection",
action="store_true"
)
command_line_parser.add_argument(
"--show-full-signatures", "-s",
help="show full signatures instead of function names",
action="store_true"
)
command_line_parser.add_argument(
"--output", "-o",
help="output file name (stdout if omitted)",
type=argparse.FileType('wt'),
default=sys.stdout,
metavar="<output filename>",
dest="output_stream"
)
return command_line_parser.parse_args()
def main():
program_options = parse_command_line()
perf = subprocess.Popen(["perf", "script", "--fields", "ip,sym",
"-i", program_options.perf_filename],
stdout=subprocess.PIPE)
callchains = collapsed_callchains_generator(
perf.stdout, program_options.hide_other, program_options.hide_compiler,
program_options.hide_jit, program_options.hide_gc,
program_options.show_full_signatures)
if program_options.output_flamegraph:
write_flamegraph_input_file(program_options.output_stream, callchains)
else:
write_handlers_report(program_options.output_stream, callchains)
if __name__ == "__main__":
main()
| hoho/dosido | nodejs/deps/v8/tools/ignition/linux_perf_report.py | Python | mit | 8,176 |
import argparse
import os
import sys
import numpy as np
from PIL import Image
import chainer
from chainer import cuda
import chainer.functions as F
from chainer.functions import caffe
from chainer import Variable, optimizers
import pickle
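# The Caffe ImageNet models used here expect BGR images with the per-channel
# dataset mean (roughly 104, 117, 123) subtracted; the helpers below apply and
# undo that shift.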
def subtract_mean(x0):
x = x0.copy()
x[0,0,:,:] -= 104
x[0,1,:,:] -= 117
x[0,2,:,:] -= 123
return x
def add_mean(x0):
x = x0.copy()
x[0,0,:,:] += 104
x[0,1,:,:] += 117
x[0,2,:,:] += 123
return x
def image_resize(img_file, width):
gogh = Image.open(img_file)
orig_w, orig_h = gogh.size[0], gogh.size[1]
if orig_w>orig_h:
new_w = width
new_h = width*orig_h/orig_w
gogh = np.asarray(gogh.resize((new_w,new_h)))[:,:,:3].transpose(2, 0, 1)[::-1].astype(np.float32)
gogh = gogh.reshape((1,3,new_h,new_w))
print("image resized to: ", gogh.shape)
hoge= np.zeros((1,3,width,width), dtype=np.float32)
hoge[0,:,width-new_h:,:] = gogh[0,:,:,:]
gogh = subtract_mean(hoge)
else:
new_w = width*orig_w/orig_h
new_h = width
gogh = np.asarray(gogh.resize((new_w,new_h)))[:,:,:3].transpose(2, 0, 1)[::-1].astype(np.float32)
gogh = gogh.reshape((1,3,new_h,new_w))
print("image resized to: ", gogh.shape)
hoge= np.zeros((1,3,width,width), dtype=np.float32)
hoge[0,:,:,width-new_w:] = gogh[0,:,:,:]
gogh = subtract_mean(hoge)
return xp.asarray(gogh), new_w, new_h
def save_image(img, width, new_w, new_h, it):
def to_img(x):
im = np.zeros((new_h,new_w,3))
im[:,:,0] = x[2,:,:]
im[:,:,1] = x[1,:,:]
im[:,:,2] = x[0,:,:]
def clip(a):
return 0 if a<0 else (255 if a>255 else a)
im = np.vectorize(clip)(im).astype(np.uint8)
Image.fromarray(im).save(args.out_dir+"/im_%05d.png"%it)
if args.gpu>=0:
img_cpu = add_mean(img.get())
else:
img_cpu = add_mean(img)
if width==new_w:
to_img(img_cpu[0,:,width-new_h:,:])
else:
to_img(img_cpu[0,:,:,width-new_w:])
def nin_forward(x):
y0 = F.relu(model.conv1(x))
y1 = model.cccp2(F.relu(model.cccp1(y0)))
x1 = F.relu(model.conv2(F.average_pooling_2d(F.relu(y1), 3, stride=2)))
y2 = model.cccp4(F.relu(model.cccp3(x1)))
x2 = F.relu(model.conv3(F.average_pooling_2d(F.relu(y2), 3, stride=2)))
y3 = model.cccp6(F.relu(model.cccp5(x2)))
x3 = F.relu(getattr(model,"conv4-1024")(F.dropout(F.average_pooling_2d(F.relu(y3), 3, stride=2), train=False)))
return [y0,x1,x2,x3]
def vgg_forward(x):
y1 = model.conv1_2(F.relu(model.conv1_1(x)))
x1 = F.average_pooling_2d(F.relu(y1), 2, stride=2)
y2 = model.conv2_2(F.relu(model.conv2_1(x1)))
x2 = F.average_pooling_2d(F.relu(y2), 2, stride=2)
y3 = model.conv3_3(F.relu(model.conv3_2(F.relu(model.conv3_1(x2)))))
x3 = F.average_pooling_2d(F.relu(y3), 2, stride=2)
y4 = model.conv4_3(F.relu(model.conv4_2(F.relu(model.conv4_1(x3)))))
# x4 = F.average_pooling_2d(F.relu(y4), 2, stride=2)
# y5 = model.conv5_3(F.relu(model.conv5_2(F.relu(model.conv5_1(x4)))))
return [y1,y2,y3,y4]
def get_matrix(y):
ch = y.data.shape[1]
wd = y.data.shape[2]
gogh_y = F.reshape(y, (ch,wd**2))
gogh_matrix = F.matmul(gogh_y, gogh_y, transb=True)/np.float32(ch*wd**2)
return gogh_matrix
class Clip(chainer.Function):
def forward(self, x):
x = x[0]
ret = cuda.elementwise(
'T x','T ret',
'''
ret = x<-100?-100:(x>100?100:x);
''','clip')(x)
return ret
def generate_image(img_orig, img_style, width, nw, nh, max_iter, lr, alpha, beta, img_gen=None):
mid_orig = nin_forward(Variable(img_orig))
style_mats = [get_matrix(y) for y in nin_forward(Variable(img_style))]
if img_gen is None:
if args.gpu >= 0:
img_gen = xp.random.uniform(-20,20,(1,3,width,width),dtype=np.float32)
else:
img_gen = np.random.uniform(-20,20,(1,3,width,width)).astype(np.float32)
x = Variable(img_gen)
xg = xp.zeros_like(x.data)
optimizer = optimizers.Adam(alpha=lr)
optimizer.setup((img_gen,xg))
for i in range(max_iter):
x = Variable(img_gen)
y = nin_forward(x)
optimizer.zero_grads()
L = Variable(xp.zeros((), dtype=np.float32))
for l in range(4):
ch = y[l].data.shape[1]
wd = y[l].data.shape[2]
gogh_y = F.reshape(y[l], (ch,wd**2))
gogh_matrix = F.matmul(gogh_y, gogh_y, transb=True)/np.float32(ch*wd**2)
L1 = np.float32(alpha[l])*F.mean_squared_error(y[l], Variable(mid_orig[l].data))
L2 = np.float32(beta[l])*F.mean_squared_error(gogh_matrix, Variable(style_mats[l].data))/np.float32(4)
L += L1+L2
if i%100==0:
print i,l,L1.data,L2.data
L.backward()
xg += x.grad
optimizer.update()
tmp_shape = img_gen.shape
if args.gpu >= 0:
img_gen += Clip().forward(img_gen).reshape(tmp_shape) - img_gen
else:
def clip(x):
return -100 if x<-100 else (100 if x>100 else x)
img_gen += np.vectorize(clip)(img_gen).reshape(tmp_shape) - img_gen
if i%50==0:
save_image(img_gen, W, nw, nh, i)
parser = argparse.ArgumentParser(
description='A Neural Algorithm of Artistic Style')
parser.add_argument('--model', '-m', default='nin_imagenet.caffemodel',
help='model file')
parser.add_argument('--orig_img', '-i', default='orig.png',
help='Original image')
parser.add_argument('--style_img', '-s', default='style.png',
help='Style image')
parser.add_argument('--out_dir', '-o', default='output',
help='Output directory')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--iter', default=5000, type=int,
help='number of iteration')
parser.add_argument('--lr', default=4.0, type=float,
help='learning rate')
parser.add_argument('--lam', default=0.005, type=float,
help='original image weight / style weight ratio')
parser.add_argument('--width', '-w', default=435, type=int,
help='image width, height')
args = parser.parse_args()
try:
os.mkdir(args.out_dir)
except:
pass
if args.gpu >= 0:
cuda.check_cuda_available()
cuda.get_device(args.gpu).use()
xp = cuda.cupy
else:
xp = np
chainer.Function.type_check_enable = False
print "load model... %s"%args.model
func = caffe.CaffeFunction(args.model)
model = func.fs
if args.gpu>=0:
model.to_gpu()
W = args.width
img_gogh,_,_ = image_resize(args.style_img, W)
img_hongo,nw,nh = image_resize(args.orig_img, W)
generate_image(img_hongo, img_gogh, W, nw, nh, img_gen=None, max_iter=args.iter, lr=args.lr, alpha=[args.lam * x for x in [0,0,1,1]], beta=[1,1,1,1])
| wf9a5m75/chainer-gogh | chainer-gogh.py | Python | mit | 7,054 |
## Copyright (c) 2001-2010, Scott D. Peckham
## January 2009 (converted from IDL)
## November 2009 (collected into cfg_files.py
## May 2010 (added read_key_value_pair())
## July 2010 (added read_list()
import numpy
#---------------------------------------------------------------------
#
# unit_test()
#
# skip_header()
# get_yes_words()
# read_words()
# read_list() # (7/27/10)
#
# read_key_value_pair() # (5/7/10)
# read_line_after_key()
# read_words_after_key()
# read_list_after_key()
# read_value()
#
# var_type_code()
# read_input_option()
# read_output_option() # (boolean and string)
#
#---------------------------------------------------------------------
def unit_test():
import d8_base
comp = d8_base.d8_component()
#------------------------------
comp.CCA = False
comp.directory = '/Applications/Erode/Data/Test1/'
comp.data_prefix = 'Test1'
comp.case_prefix = 'Test1'
comp.read_config_file()
print 'comp.method =', comp.method
print 'comp.method_name =', comp.method_name
print 'comp.dt =', comp.dt
print 'comp.dt.dtype =', comp.dt.dtype
print 'comp.LINK_FLATS =', comp.LINK_FLATS
print 'comp.LR_PERIODIC =', comp.LR_PERIODIC
print 'comp.TB_PERIODIC =', comp.TB_PERIODIC
print 'comp.SAVE_DW_PIXELS =', comp.SAVE_DW_PIXELS
print 'Finished with cfg_files.unit_test().'
print ' '
# unit_test()
#---------------------------------------------------------------------
def skip_header(file_unit, n_lines=4):
#-------------------------
# Skip over header lines
#-------------------------
for k in xrange(n_lines):
line = file_unit.readline()
# skip_header()
#---------------------------------------------------------------------
def get_yes_words():
yes_words = ['1', 'true', 'on', 'yes', 'ok']
return yes_words
# get_yes_words()
#---------------------------------------------------------------------
def read_words(file_unit, word_delim=None):
#----------------------------------------------------
# Note: If (word_delim is None), then the "split()"
# method for strings will use any whitespace
# string as a separator.
#----------------------------------------------------
line = file_unit.readline()
words = line.split( word_delim )
return words
# read_words()
#---------------------------------------------------------------------
def read_list(file_unit, dtype_list=None, dtype='string',
word_delim=None):
#-------------------------------------------------------------
# Notes: Example (read boolean and string/filename):
# vlist = read_list_after_key(file_unit,
# ['boolean', 'string'])
#-------------------------------------------------------------
words = read_words(file_unit, word_delim=word_delim)
#----------------------------------------------
# If "dtype_list" is None, then assume that
# every element in the list has type "dtype".
#----------------------------------------------------
# NB! "dtype_list" cannot default to "[]", because
# then values set from a previous call to this
# function are remembered and used.
#----------------------------------------------------
## if (dtype_list == []):
## if (len(dtype_list) == 0):
if (dtype_list is None):
dtype_list = []
for k in xrange(len(words)):
dtype_list.append( dtype.lower() )
elif (len(dtype_list) > len(words)):
        print 'ERROR in cfg_files.read_list().'
print ' Not enough values in the line.'
return
k = 0
yes_words = get_yes_words()
var_list = []
for type_str in dtype_list:
vtype = type_str.lower()
word = words[k].strip()
if (vtype == 'string'):
var = word
elif (vtype == 'boolean'):
var = (word in yes_words)
else:
value = eval( word )
exec('var = numpy.' + vtype + '( value )')
var_list.append( var )
k += 1
return var_list
# read_list()
#---------------------------------------------------------------------
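# Example: the line "dt: 60.0" yields the pair ('dt', '60.0'); everything after
# the first delimiter is returned with surrounding whitespace stripped.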
def read_key_value_pair(file_unit, key_delim=':', SILENT=True):
line = file_unit.readline()
#--------------------------------------
# Extract the variable name or label,
# which may contain blank spaces
#--------------------------------------
p = line.find( key_delim )
if (p == -1):
if not(SILENT):
            print 'ERROR in cfg_files.read_key_value_pair():'
print ' Key-value delimiter not found.'
return ('', '')
key = line[:p]
value = line[p + 1:]
value = value.strip() # (strip leading & trailing whitespace)
return (key, value)
# read_key_value_pair()
#---------------------------------------------------------------------
def read_line_after_key(file_unit, key_delim=':'):
line = file_unit.readline()
#--------------------------------------
# Extract the variable name or label,
# which may contain blank spaces
#--------------------------------------
p = line.find( key_delim )
if (p == -1):
print 'ERROR in cfg_files.read_line_after_key():'
print ' Key-value delimiter not found.'
return ''
label = line[:p]
line = line[p + 1:]
return line.strip() # (strip leading and trailing whitespace)
# read_line_after_key()
#---------------------------------------------------------------------
def read_words_after_key(file_unit, key_delim=':',
word_delim=None, n_words=None):
#----------------------------------------------------
# Note: If (word_delim is None), then the "split()"
# method for strings will use any whitespace
# string as a separator.
#----------------------------------------------------
line = read_line_after_key( file_unit, key_delim=key_delim)
#-------------------------------
# Extract variables as strings
#-------------------------------
words = line.split( word_delim )
#-----------------------------------
# Option to check for enough words
#-----------------------------------
if (n_words is None):
return words
if (len(words) < n_words):
print 'ERROR in read_words_after_key():'
print ' Not enough words found.'
return words
# read_words_after_key()
#---------------------------------------------------------------------
def read_list_after_key(file_unit, dtype_list=None, dtype='string',
key_delim=':', word_delim=None):
## print 'before: dtype =', dtype
## print 'before: dtype_list =', dtype_list
#-------------------------------------------------------------
# Notes: Example (read boolean and string/filename):
# vlist = read_list_after_key(file_unit,
# ['boolean', 'string'])
#-------------------------------------------------------------
words = read_words_after_key(file_unit, key_delim=key_delim,
word_delim=word_delim)
#----------------------------------------------
# If "dtype_list" is None, then assume that
# every element in the list has type "dtype".
#----------------------------------------------------
# NB! "dtype_list" cannot default to "[]", because
# then values set from a previous call to this
# function are remembered and used.
#----------------------------------------------------
## if (dtype_list == []):
## if (len(dtype_list) == 0):
if (dtype_list is None):
dtype_list = []
for k in xrange(len(words)):
dtype_list.append( dtype.lower() )
elif (len(dtype_list) > len(words)):
print 'ERROR in cfg_files.read_list_after_key().'
print ' Not enough values in the line.'
return
## print 'after: dtype =', dtype
## print 'after: dtype_list =', dtype_list
## print '--------------------------------'
k = 0
yes_words = get_yes_words()
var_list = []
for type_str in dtype_list:
vtype = type_str.lower()
word = words[k].strip()
if (vtype == 'string'):
var = word
elif (vtype == 'boolean'):
var = (word in yes_words)
else:
value = eval( word )
exec('var = numpy.' + vtype + '( value )')
var_list.append( var )
k += 1
return var_list
# read_list_after_key()
#---------------------------------------------------------------------
def read_value(file_unit, dtype='string', key_delim=':'):
#--------------------------------------------------------------
# Notes: Valid "var_types" are:
# 'file', 'string', 'boolean' and any numpy dtype,
# such as:
# 'uint8', 'int16', 'int32', 'float32', 'float64'
# If (var_type eq 'file'), then we want to read everything
# after the ":", which may contain space characters in
# the interior (e.g a directory), but with leading and
# trailing spaces removed.
#--------------------------------------------------------------
vtype = dtype.lower()
if (vtype == 'file'):
return read_line_after_key(file_unit, key_delim=key_delim)
words = read_words_after_key( file_unit )
#--------------------
# Return a string ?
#--------------------
if (vtype == 'string'):
return words[0]
#-------------------------------------
# Return a boolean (True or False) ?
#-------------------------------------
if (vtype == 'boolean'):
yes_words = get_yes_words()
return (words[0].lower() in yes_words)
#------------------------------------
# Try to convert string to a number
#------------------------------------
try:
value = eval(words[0])
except:
print 'ERROR in cfg_files.read_value():'
print ' Unable to convert string to number.'
return words[0]
#----------------------------------
# Return number of requested type
#----------------------------------
exec('result = numpy.' + vtype + '( value )')
return result
# read_value()
#---------------------------------------------------------------------
def var_type_code(var_type):
cmap = {'scalar': 0, \
'time_series': 1, \
'time series': 1, \
'grid' : 2, \
'grid_stack' : 3, \
'grid stack' : 3, \
'grid_sequence': 3, \
'grid sequence': 3 }
code = cmap[ var_type.lower() ]
return numpy.int16( code )
# var_type_code()
#---------------------------------------------------------------------
def read_input_option(file_unit, key_delim=':', word_delim=None):
words= read_words_after_key( file_unit, key_delim=key_delim,
word_delim=word_delim )
#-----------------------------------------------
# TopoFlow "var types" are:
# Scalar, Time_Series, Grid, Grid_Sequence
#-----------------------------------------------
var_type = words[0].lower()
if (var_type == 'scalar'):
type_code = numpy.int16(0)
scalar = numpy.float64( eval( words[1] ) )
filename = '' # (or use None ??)
else:
type_code = var_type_code( var_type )
scalar = None
filename = words[1]
return (type_code, scalar, filename)
# read_input_option()
#---------------------------------------------------------------------
def read_output_option(file_unit, key_delim=':', word_delim=None):
#-------------------------------
# Extract variables as strings
#-------------------------------
words = read_words_after_key( file_unit, key_delim=key_delim,
word_delim=word_delim )
count = len(words)
if (count == 0):
print 'ERROR in cfg_files.read_output_option():'
print ' No value found after delimiter.'
return ''
if (count == 1):
print 'ERROR in cfg_files.read_output_option():'
print ' No filename provided after option.'
return ''
yes_words = ['1','true','yes','on']
option = (words[0].lower() in yes_words)
filename = words[1]
return option, filename
# read_output_option()
#---------------------------------------------------------------------
| mdpiper/topoflow | topoflow/utils/cfg_files.py | Python | mit | 12,732 |
import os
import shutil
import subprocess
def which(program):
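    # Return the full path of an executable found on the PATH (like the Unix
    # `which` command), or None if it cannot be found.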
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def install(omp=False, mpi=False, phdf5=False, dagmc=False):
# Create build directory and change to it
shutil.rmtree('build', ignore_errors=True)
os.mkdir('build')
os.chdir('build')
# Build in debug mode by default
cmake_cmd = ['cmake', '-Ddebug=on']
# Turn off OpenMP if specified
if not omp:
cmake_cmd.append('-Dopenmp=off')
# Use MPI wrappers when building in parallel
if mpi:
os.environ['CXX'] = 'mpicxx'
# Tell CMake to prefer parallel HDF5 if specified
if phdf5:
if not mpi:
raise ValueError('Parallel HDF5 must be used in '
'conjunction with MPI.')
cmake_cmd.append('-DHDF5_PREFER_PARALLEL=ON')
else:
cmake_cmd.append('-DHDF5_PREFER_PARALLEL=OFF')
if dagmc:
cmake_cmd.append('-Ddagmc=ON')
# Build in coverage mode for coverage testing
cmake_cmd.append('-Dcoverage=on')
# Build and install
cmake_cmd.append('..')
print(' '.join(cmake_cmd))
subprocess.check_call(cmake_cmd)
subprocess.check_call(['make', '-j4'])
subprocess.check_call(['sudo', 'make', 'install'])
def main():
# Convert Travis matrix environment variables into arguments for install()
omp = (os.environ.get('OMP') == 'y')
mpi = (os.environ.get('MPI') == 'y')
phdf5 = (os.environ.get('PHDF5') == 'y')
dagmc = (os.environ.get('DAGMC') == 'y')
# Build and install
install(omp, mpi, phdf5, dagmc)
if __name__ == '__main__':
main()
| liangjg/openmc | tools/ci/travis-install.py | Python | mit | 2,019 |
#!/usr/bin/env python
"""
Apply a surface field to a shape
"""
from __future__ import print_function
import argparse
import time
import sys
import re
from numpy import linspace
from icqsol.shapes.icqShapeManager import ShapeManager
from icqsol import util
# time stamp
tid = re.sub(r'\.', '', str(time.time()))
description = 'Apply a surface field to a shape'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--input', dest='input', default='',
help='Input file (PLY or VTK)')
parser.add_argument('--expression', dest='expression', default='sin(pi*x)*cos(pi*y)*z',
help='Expression of x, y, z, and t')
parser.add_argument('--refine', dest='refine', default=0.0, type=float,
help='Maximum edge length (use 0 if no refinement)')
parser.add_argument('--name', dest='name', default='myField',
help='Set the name of the field')
parser.add_argument('--times', dest='times', default='',
help='Comma separated list of time values')
parser.add_argument('--location', dest='location', default='point',
help='"point" or "cell"')
parser.add_argument('--ascii', dest='ascii', action='store_true',
help='Save data in ASCII format (default is binary)')
parser.add_argument('--output', dest='output',
default='addSurfaceFieldFromExpressionToShape-{0}.vtk'.format(tid),
help='VTK Output file.')
args = parser.parse_args()
if not args.expression:
print('ERROR: must specify --expression <expression>')
sys.exit(2)
if not args.input:
print('ERROR: must specify input file: --input <file>')
sys.exit(3)
# make sure the field name contains no spaces
args.name = re.sub('\s', '_', args.name)
# Get the format of the input - either vtk or ply.
file_format = util.getFileFormat(args.input)
if file_format == util.PLY_FORMAT:
shape_mgr = ShapeManager(file_format=util.PLY_FORMAT)
else:
# We have a VTK file, so Get the dataset type.
vtk_dataset_type = util.getVtkDatasetType(args.input)
shape_mgr = ShapeManager(file_format=util.VTK_FORMAT,
vtk_dataset_type=vtk_dataset_type)
pdata = shape_mgr.loadAsVtkPolyData(args.input)
times = [0.0]
if args.times:
times = eval(args.times)
maxEdgeLength = float('inf')
if args.refine > 0:
maxEdgeLength = args.refine
pdataOut = shape_mgr.addSurfaceFieldFromExpressionToVtkPolyData(pdata, args.name,
args.expression, times,
max_edge_length=maxEdgeLength,
location=args.location)
if args.output:
# Always produce VTK POLYDATA.
shape_mgr.setWriter(file_format=util.VTK_FORMAT,
vtk_dataset_type=util.POLYDATA)
if args.ascii:
file_type = util.ASCII
else:
file_type = util.BINARY
shape_mgr.saveVtkPolyData(vtk_poly_data=pdataOut,
file_name=args.output,
file_type=file_type)
| gregvonkuster/icqsol | examples/addSurfaceFieldFromExpression.py | Python | mit | 3,166 |
#!/usr/bin/python3
"""
Simple wrapper to get the diff of two schedules.
It can compare extra task attributes (via the 'extra_compare_attributes'
keyword) and indicate missing phases.
Follows 'diff' exit codes:
0 - same
1 - different
2 - other trouble
Test as "python -m schedules_tools.batches.diff"
"""
import argparse
from datetime import datetime
import json
import logging
from schedules_tools import jsondate, discovery
from schedules_tools.converter import ScheduleConverter
from schedules_tools.models import Task, Schedule
import sys
log = logging.getLogger(__name__)
REPORT_NO_CHANGE = ''
REPORT_ADDED = '_added_'
REPORT_REMOVED = '_removed_'
REPORT_CHANGED = '_changed_'
REPORT_PREFIX_MAP = {
REPORT_ADDED: '[+]',
REPORT_REMOVED: '[-]',
REPORT_CHANGED: '[M]',
REPORT_NO_CHANGE: 3 * ' ',
}
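# Heuristic thresholds and weights used when pairing tasks between the two
# schedules: minimum name similarity, minimum overall match score, and the
# relative weight of name similarity versus task position.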
NAME_SIM_THRESHOLD = 0.8
TASK_SCORE_THRESHOLD = 0.45
NAME_SIM_WEIGHT = 0.5
TASK_POS_WEIGHT = 0.5
def strings_similarity(str1, str2, winkler=True, scaling=0.1):
"""
Find the Jaro-Winkler distance of 2 strings.
https://en.wikipedia.org/wiki/Jaro-Winkler_distance
:param winkler: add winkler adjustment to the Jaro distance
:param scaling: constant scaling factor for how much the score is adjusted
upwards for having common prefixes. Should not exceed 0.25
"""
if str1 == str2:
return 1.0
def num_of_char_matches(s1, len1, s2, len2):
count = 0
transpositions = 0 # number of matching chars w/ different sequence order
limit = int(max(len1, len2) / 2 - 1)
for i in range(len1):
start = i - limit
if start < 0:
start = 0
end = i + limit + 1
if end > len2:
end = len2
index = s2.find(s1[i], start, end)
if index > -1: # found common char
count += 1
if index != i:
transpositions += 1
return count, transpositions
len1 = len(str1)
len2 = len(str2)
num_of_matches, transpositions = num_of_char_matches(str1, len1, str2, len2)
if num_of_matches == 0:
return 0.0
m = float(num_of_matches)
t = transpositions / 2.0
dj = (m / float(len1) + m / float(len2) + (m - t) / m) / 3.0
if winkler:
length = 0
# length of common prefix at the start of the string (max = 4)
max_length = min(
len1,
len2,
4
)
while length < max_length and str1[length] == str2[length]:
length += 1
return dj + (length * scaling * (1.0 - dj))
return dj
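# For example, strings_similarity('martha', 'marhta') gives a Jaro score of about
# 0.944, lifted to roughly 0.96 by the Winkler common-prefix bonus.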
class ScheduleDiff(object):
result = []
hierarchy_attr = 'tasks'
subtree_hash_attr_name = 'subtree_hash'
""" Default list of attributes used to compare 2 tasks. """
default_tasks_match_attrs = ['name', 'dStart', 'dFinish']
def __init__(self, schedule_a, schedule_b, trim_time=False, extra_compare_attributes=None):
self.schedule_a = schedule_a
self.schedule_b = schedule_b
self.trim_time = trim_time
self.attributes_to_compare = self.default_tasks_match_attrs
if extra_compare_attributes:
# avoid using += to not modify class-level list
self.attributes_to_compare = self.attributes_to_compare + list(extra_compare_attributes)
self.result = self._diff()
def __str__(self):
return self.result_to_str()
def _get_subtree(self, item):
return getattr(item, self.hierarchy_attr)
def result_to_str(self, items=None, level=0):
""" Textual representation of the diff. """
res = ''
if items is None:
items = self.result
schedule = Schedule()
for item in items:
subtree = item['subtree']
state = item['item_state']
if state in [REPORT_CHANGED, REPORT_ADDED]:
task = item['right']
elif state is REPORT_REMOVED:
task = item['left']
else:
task = item['both']
task_obj = Task.load_from_dict(task, schedule)
res += '{} {}{}\n'.format(REPORT_PREFIX_MAP[state], level * ' ', str(task_obj))
if subtree:
res += self.result_to_str(subtree, level + 2)
return res
def _create_report(self,
item_state,
left=None,
right=None,
both=None,
subtree=[],
changed_attrs=[]):
"""
Returns a dictionary representing a possible change.
{
left: Task or None,
right: Task or None,
both: used instead of left and right, when the task are equal,
subtree: List of reports from the child Tasks,
changed_attr: List of changed attributes,
item_state: Type of change
}
"""
if both:
report = {
'both': both.dump_as_dict(recursive=False),
'subtree': subtree,
'changed_attrs': changed_attrs,
'item_state': item_state
}
else:
# No need to keep the whole structure,
# child tasks will be placed in report['tasks']
if left is not None:
left = left.dump_as_dict(recursive=False)
if right is not None:
right = right.dump_as_dict(recursive=False)
report = {
'left': left,
'right': right,
'subtree': subtree,
'changed_attrs': changed_attrs,
'item_state': item_state,
}
return report
def _set_subtree_items_state(self, items, state):
"""
Set the given state recursively on the subtree items
"""
def create_report(item):
kwargs = {
'subtree': self._set_subtree_items_state(self._get_subtree(item), state)
}
if state == REPORT_NO_CHANGE:
kwargs['both'] = item
elif state == REPORT_ADDED:
kwargs['right'] = item
elif state == REPORT_REMOVED:
kwargs['left'] = item
return self._create_report(state, **kwargs)
return [create_report(item) for item in items]
def get_changed_attrs(self, task_a, task_b):
"""
        Compares two tasks.
        Uses the attributes defined in `self.attributes_to_compare` plus the
        subtree hash, and returns a list of the attributes that don't match.
"""
changed_attributes = [attr for attr in self.attributes_to_compare
if not self._compare_tasks_attributes(task_a, task_b, attr)]
if task_a.get_subtree_hash(self.attributes_to_compare) \
!= task_b.get_subtree_hash(self.attributes_to_compare):
changed_attributes.append(self.subtree_hash_attr_name)
return changed_attributes
def _compare_tasks_attributes(self, task_a, task_b, attr_name):
"""
Compares tasks attributes.
Trims time from datetime objects if self.trim_time is set.
"""
attribute_a = getattr(task_a, attr_name)
attribute_b = getattr(task_b, attr_name)
if self.trim_time:
if isinstance(attribute_a, datetime):
attribute_a = attribute_a.date()
if isinstance(attribute_b, datetime):
attribute_b = attribute_b.date()
return attribute_a == attribute_b
def find_best_match(self, t1, possible_matches, start_at_index=0):
"""
Finds the best match for the given task in the list of possible matches.
Returns the index of the best match and a dict
with a state suggestion and list of changed attrs.
"""
match_index = None
best_match = {
'state': REPORT_REMOVED,
'changes': [],
'name_score': 0,
'score': TASK_SCORE_THRESHOLD
}
if start_at_index > 0:
possible_matches = possible_matches[start_at_index:]
for i, t2 in enumerate(possible_matches, start_at_index):
res = self.eval_tasks(t1, t2, i, name_threshold=best_match['name_score'])
if (res['state'] is REPORT_CHANGED
and res['score'] > best_match['score']):
match_index = i
best_match = res
if res['state'] is REPORT_NO_CHANGE:
match_index = i
best_match = res
break
return match_index, best_match
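    # Positional score: 1 / (2 * (index + 1)), so candidates closer to the current
    # position in the unmatched task list score higher.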
def _task_position_score(self, index):
return 1.0 / (2 * (index + 1))
def _task_score(self, name_score, position_score):
weight_sum = NAME_SIM_WEIGHT + TASK_POS_WEIGHT
name_score *= NAME_SIM_WEIGHT
position_score *= TASK_POS_WEIGHT
return (name_score + position_score) / weight_sum
def eval_tasks(self, t1, t2, t2_index, name_threshold=NAME_SIM_THRESHOLD):
name_score = 0.0
position_score = 0.0
changed_attrs = self.get_changed_attrs(t1, t2)
# different names
if 'name' in changed_attrs:
t1_subtree = t1.get_subtree_hash(self.attributes_to_compare)
t2_subtree = t2.get_subtree_hash(self.attributes_to_compare)
if t1_subtree and t2_subtree:
if t1_subtree == t2_subtree:
state = REPORT_CHANGED
position_score = 1.0
else:
name_score = strings_similarity(t1.name, t2.name)
if (name_score > name_threshold
and len(changed_attrs) < len(self.attributes_to_compare)):
state = REPORT_CHANGED
position_score = self._task_position_score(t2_index)
else:
state = REPORT_REMOVED
# no subtrees
else:
name_score = strings_similarity(t1.name, t2.name, winkler=False)
if name_score > name_threshold:
state = REPORT_CHANGED
position_score = self._task_position_score(t2_index)
else:
state = REPORT_REMOVED
# names are equal
else:
name_score = 1.0
if (not changed_attrs
or (len(changed_attrs) == 1
and self.subtree_hash_attr_name in changed_attrs)):
state = REPORT_NO_CHANGE
else:
state = REPORT_CHANGED
position_score = 1.0
return {
'state': state,
'changes': changed_attrs,
'name_score': name_score,
'position_score': position_score,
'score': self._task_score(name_score, position_score)
}
def _diff(self, tasks_a=None, tasks_b=None):
if tasks_a is None:
tasks_a = self.schedule_a.tasks
if tasks_b is None:
tasks_b = self.schedule_b.tasks
res = []
last_b_index = 0
# shortcut to create a report for an added task
def report_task_added(index, recursive=True):
task = tasks_b[index]
subtree = self._get_subtree(task)
if recursive:
subtree = self._set_subtree_items_state(subtree, REPORT_ADDED)
return self._create_report(REPORT_ADDED, right=task, subtree=subtree)
for task in tasks_a:
match_index, match = self.find_best_match(task, tasks_b, start_at_index=last_b_index)
report = {}
if match_index is None:
subtree = self._set_subtree_items_state(self._get_subtree(task), REPORT_REMOVED)
report = self._create_report(REPORT_REMOVED, left=task, subtree=subtree)
else:
# ALL elements between last_b_index and match_index => ADDED
res.extend([report_task_added(k) for k in range(last_b_index, match_index)])
# exact match => NO CHANGE
if not match['changes']:
subtree = self._set_subtree_items_state(self._get_subtree(task), match['state'])
report_kwargs = {'both': task, 'subtree': subtree}
# structural change => CHANGED / NO CHANGE
elif self.subtree_hash_attr_name in match['changes']:
# process child tasks
subtree = self._diff(
self._get_subtree(task),
self._get_subtree(tasks_b[match_index])
)
if len(match['changes']) > 1:
report_kwargs = {
'left': task,
'right': tasks_b[match_index],
'subtree': subtree
}
else:
report_kwargs = {
'both': task,
'subtree': subtree
}
# no structural changes => CHANGED
else:
subtree = self._set_subtree_items_state(
self._get_subtree(tasks_b[match_index]), REPORT_NO_CHANGE)
report_kwargs = {
'left': task,
'right': tasks_b[match_index],
'subtree': subtree
}
report = self._create_report(match['state'],
changed_attrs=match['changes'],
**report_kwargs)
last_b_index = match_index + 1
res.append(report)
# remaining tasks => ADDED
res.extend([report_task_added(k) for k in range(last_b_index, len(tasks_b))])
return res
def dump_json(self, **kwargs):
def _encoder(obj):
if isinstance(obj, Task):
return obj.dump_as_dict()
return jsondate._datetime_encoder(obj)
kwargs['default'] = _encoder
return json.dumps(self.result, **kwargs)
def setup_logging(level):
log_format = '%(name)-10s %(levelname)7s: %(message)s'
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(level)
formatter = logging.Formatter(log_format)
sh.setFormatter(formatter)
# setup root logger
inst = logging.getLogger('')
inst.setLevel(level)
inst.addHandler(sh)
def main():
setup_logging(logging.INFO)
parser = argparse.ArgumentParser(
description='Tool to show differences between two schedules.')
parser.add_argument('--simple-diff',
help='Simple comparison between two schedules.',
action='store_true',
default=False)
parser.add_argument(
'--handlers-path',
help='Add python-dot-notation path to discover handlers (needs to '
'be python module), can be called several times '
'(conflicting names will be overriden - the last '
'implementation will be used)',
action='append',
default=[])
parser.add_argument('--whole-days',
help='Compare just date part of timestamp (will '
'ignore differences in time)',
action='store_true',
default=False)
parser.add_argument('left')
parser.add_argument('right')
args = parser.parse_args()
for path in args.handlers_path:
discovery.search_paths.append(path)
left = ScheduleConverter()
left.import_schedule(args.left)
right = ScheduleConverter()
right.import_schedule(args.right)
if args.simple_diff:
diff_res = left.schedule.diff(right.schedule, whole_days=args.whole_days)
else:
diff_res = ScheduleDiff(left.schedule, right.schedule)
if diff_res:
print(diff_res)
sys.exit(1)
if __name__ == '__main__':
main()
| RedHat-Eng-PGM/schedules-tools | schedules_tools/diff.py | Python | mit | 16,312 |
from __future__ import print_function
class Sequence(object):
def __init__(self, name, seq):
"""
:param seq: the sequence
:type seq: string
"""
self.name = name
self.sequence = seq
def __len__(self):
return len(self.sequence)
def to_fasta(self):
id_ = self.name.replace(' ', '_')
fasta = '>{}\n'.format(id_)
start = 0
while start < len(self.sequence):
            # wrap the sequence at 80 characters per line
            end = start + 80
            fasta += self.sequence[start:end] + '\n'
            start = end
return fasta
class DNASequence(Sequence):
alphabet = 'ATGC'
def gc_percent(self):
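        # fraction of G and C bases in the sequence (a value between 0 and 1)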
return float(self.sequence.count('G') + self.sequence.count('C')) / len(self.sequence)
class AASequence(Sequence):
_water = 18.0153
_weight_table = {'A': 89.0932, 'C': 121.1582, 'E': 147.1293,
'D': 133.1027, 'G': 75.0666, 'F': 165.1891,
'I': 131.1729, 'H': 155.1546, 'K': 146.1876,
'M': 149.2113, 'L': 131.1729, 'O': 255.3134,
'N': 132.1179, 'Q': 146.1445, 'P': 115.1305,
'S': 105.0926, 'R': 174.201, 'U': 168.0532,
'T': 119.1192, 'W': 204.2252, 'V': 117.1463,
'Y': 181.1885}
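    # Peptide mass: the sum of the free amino-acid masses minus one water
    # (18.0153 Da) per peptide bond formed.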
def molecular_weight(self):
return sum([self._weight_table[aa] for aa in self.sequence]) - (len(self.sequence) - 1) * self._water
| C3BI-pasteur-fr/python-course-1 | source/_static/code/inheritance_sequence.py | Python | cc0-1.0 | 1,463 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo Marcxml mapping test."""
from __future__ import absolute_import, print_function
from datetime import datetime
from invenio_pidstore.models import PersistentIdentifier
from invenio_records import Record
from zenodo.modules.records.serializers import marcxml_v1
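# These tests serialize a full record and a minimal record with the MARC21
# serializer and compare the resulting JSON structure field by field.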
def test_full_record(app, db, full_record):
"""Test MARC21 serialization of full record."""
# Add embargo date and OAI-PMH set information.
full_record['embargo_date'] = '0900-12-31'
full_record['_oai'] = {
"id": "oai:zenodo.org:1",
"sets": ["user-zenodo", "user-ecfunded"]
}
# Create record and PID.
record = Record.create(full_record)
pid = PersistentIdentifier.create(
pid_type='recid',
pid_value='12345',
object_type='rec',
object_uuid=record.id,
)
assert record.validate() is None
expected = {
u'control_number': u'12345',
u'date_and_time_of_latest_transaction': (
record.model.updated.strftime("%Y%m%d%H%M%S.0")),
u'resource_type': {
u'subtype': u'book',
u'type': u'publication'
},
u'title_statement': {
u'title': u'Test title'
},
u'publication_distribution_imprint': [
{u'date_of_publication_distribution': u'2014-02-27'},
],
u'main_entry_personal_name': {
u'affiliation': u'CERN',
u'personal_name': u'Doe, John',
u'authority_record_control_number_or_standard_number': [
u'(gnd)170118215', u'(orcid)0000-0002-1694-233X'
]
},
u'added_entry_personal_name': [
{
u'affiliation': u'CERN',
u'personal_name': u'Doe, Jane',
u'authority_record_control_number_or_standard_number': [
u'(orcid)0000-0002-1825-0097'
]
},
{
u'affiliation': u'CERN',
u'personal_name': u'Smith, John',
},
{
u'affiliation': u'CERN',
u'personal_name': u'Nowak, Jack',
u'authority_record_control_number_or_standard_number': [
u'(gnd)170118215'
]
},
{
u'affiliation': u'CERN',
u'relator_code': [u'oth'],
u'personal_name': u'Smith, Other',
u'authority_record_control_number_or_standard_number': [
u'(orcid)0000-0002-1825-0097'
]
},
{
u'personal_name': u'Hansen, Viggo',
u'relator_code': [u'oth'],
},
{
u'affiliation': u'CERN',
u'relator_code': [u'dtm'],
u'personal_name': u'Kowalski, Manager'
},
{
u'relator_code': [u'ths'],
u'personal_name': u'Smith, Professor'
},
],
u'summary': {
u'summary': u'Test Description'
},
u'index_term_uncontrolled': [
{u'uncontrolled_term': u'kw1'},
{u'uncontrolled_term': u'kw2'},
{u'uncontrolled_term': u'kw3'},
],
u'subject_added_entry_topical_term': [
{
u'topical_term_or_geographic_name_entry_element': u'cc-by',
u'source_of_heading_or_term': u'opendefinition.org',
u'level_of_subject': u'Primary',
u'thesaurus': u'Source specified in subfield $2',
},
{
u'topical_term_or_geographic_name_entry_element': u'Astronomy',
u'authority_record_control_number_or_standard_number': (
u'(url)http://id.loc.gov/authorities/subjects/sh85009003'),
u'level_of_subject': u'Primary',
},
],
u'general_note': {
u'general_note': u'notes'
},
u'information_relating_to_copyright_status': {
u'copyright_status': u'open'
},
u'terms_governing_use_and_reproduction_note': {
u'uniform_resource_identifier':
u'https://creativecommons.org/licenses/by/4.0/',
u'terms_governing_use_and_reproduction':
u'Creative Commons Attribution 4.0'
},
u'communities': [
u'zenodo',
],
u'funding_information_note': [
{u'grant_number': u'1234', u'text_of_note': u'Grant Title'},
{u'grant_number': u'4321', u'text_of_note': u'Title Grant'}
],
u'host_item_entry': [
{
u'main_entry_heading': u'10.1234/foo.bar',
u'note': u'doi',
u'relationship_information': u'cites',
},
{
'main_entry_heading': u'1234.4325',
'note': u'arxiv',
'relationship_information': u'isIdenticalTo'
},
{
u'main_entry_heading': u'1234.4321',
u'note': u'arxiv',
u'relationship_information': u'cites',
},
{
'main_entry_heading': u'1234.4328',
'note': u'arxiv',
'relationship_information': u'references'
},
{
'main_entry_heading': u'10.1234/zenodo.4321',
'note': u'doi',
'relationship_information': u'isPartOf'
},
{
'main_entry_heading': u'10.1234/zenodo.1234',
'note': u'doi',
'relationship_information': u'hasPart'
},
{
u'main_entry_heading': u'Staszkowka',
u'edition': u'Jol',
u'title': u'Bum',
u'related_parts': u'1-2',
u'international_standard_book_number': u'978-0201633610',
},
],
u'other_standard_identifier': [
{
u'standard_number_or_code': u'10.1234/foo.bar',
u'source_of_number_or_code': u'doi',
},
{
u'standard_number_or_code': (
u'urn:lsid:ubio.org:namebank:11815'),
u'source_of_number_or_code': u'lsid',
u'qualifying_information': u'alternateidentifier',
},
{
u'standard_number_or_code': u'2011ApJS..192...18K',
u'source_of_number_or_code': u'ads',
u'qualifying_information': u'alternateidentifier',
},
{
u'standard_number_or_code': u'0317-8471',
u'source_of_number_or_code': u'issn',
u'qualifying_information': u'alternateidentifier',
},
{
u'standard_number_or_code': u'10.1234/alternate.doi',
u'source_of_number_or_code': u'doi',
u'qualifying_information': u'alternateidentifier',
}
],
u'references': [
{
u'raw_reference': u'Doe, John et al (2012). Some title. '
'Zenodo. 10.5281/zenodo.12'
}, {
u'raw_reference': u'Smith, Jane et al (2012). Some title. '
'Zenodo. 10.5281/zenodo.34'
}
],
u'added_entry_meeting_name': [{
u'date_of_meeting': u'23-25 June, 2014',
u'meeting_name_or_jurisdiction_name_as_entry_element':
u'The 13th Biennial HITRAN Conference',
u'number_of_part_section_meeting': u'VI',
u'miscellaneous_information': u'HITRAN13',
u'name_of_part_section_of_a_work': u'1',
u'location_of_meeting':
u'Harvard-Smithsonian Center for Astrophysics'
}],
u'conference_url': 'http://hitran.org/conferences/hitran-13-2014/',
u'dissertation_note': {
u'name_of_granting_institution': u'I guess important',
},
u'journal': {
'issue': '2',
'pages': '20',
'volume': '20',
'title': 'Bam',
'year': '2014',
},
u'embargo_date': '0900-12-31',
u'language_code': {
'language_code_of_text_sound_track_or_separate_title': 'eng',
},
u'_oai': {
u'sets': [u'user-zenodo', u'user-ecfunded'],
u'id': u'oai:zenodo.org:1'
},
u'_files': [
{
'uri': 'https://zenodo.org/record/12345/files/test',
'checksum': 'md5:11111111111111111111111111111111',
'type': 'txt',
'size': 4,
},
],
'leader': {
'base_address_of_data': '00000',
'bibliographic_level': 'monograph_item',
'character_coding_scheme': 'marc-8',
'descriptive_cataloging_form': 'unknown',
'encoding_level': 'unknown',
'indicator_count': 2,
'length_of_the_implementation_defined_portion': 0,
'length_of_the_length_of_field_portion': 4,
'length_of_the_starting_character_position_portion': 5,
'multipart_resource_record_level':
'not_specified_or_not_applicable',
'record_length': '00000',
'record_status': 'new',
'subfield_code_count': 2,
'type_of_control': 'no_specified_type',
'type_of_record': 'language_material',
'undefined': 0,
},
}
# Dump MARC21 JSON structure and compare against expected JSON.
preprocessed_record = marcxml_v1.preprocess_record(record=record, pid=pid)
data = marcxml_v1.schema_class().dump(preprocessed_record).data
assert expected == data
# Assert that we can output MARCXML.
assert marcxml_v1.serialize(record=record, pid=pid)
def test_minimal_record(app, db, minimal_record):
"""Test minimal record."""
# Create record and pid.
record = Record.create(minimal_record)
record.model.updated = datetime.utcnow()
pid = PersistentIdentifier.create(
pid_type='recid',
pid_value='123',
object_type='rec',
object_uuid=record.id)
assert record.validate() is None
expected = {
u'date_and_time_of_latest_transaction': (
record.model.updated.strftime("%Y%m%d%H%M%S.0")),
u'publication_distribution_imprint': [{
'date_of_publication_distribution': record['publication_date']
}],
u'control_number': '123',
u'other_standard_identifier': [
{
'source_of_number_or_code': u'doi',
'standard_number_or_code': u'10.5072/zenodo.123'
}
],
u'information_relating_to_copyright_status': {
'copyright_status': 'open'
},
u'summary': {
'summary': 'My description'
},
u'main_entry_personal_name': {
'personal_name': 'Test'
},
u'resource_type': {
'type': 'software'
},
u'title_statement': {
'title': 'Test'
},
u'leader': {
'base_address_of_data': '00000',
'bibliographic_level': 'monograph_item',
'character_coding_scheme': 'marc-8',
'descriptive_cataloging_form': 'unknown',
'encoding_level': 'unknown',
'indicator_count': 2,
'length_of_the_implementation_defined_portion': 0,
'length_of_the_length_of_field_portion': 4,
'length_of_the_starting_character_position_portion': 5,
'multipart_resource_record_level':
'not_specified_or_not_applicable',
'record_length': '00000',
'record_status': 'new',
'subfield_code_count': 2,
'type_of_control': 'no_specified_type',
'type_of_record': 'computer_file',
'undefined': 0,
},
}
data = marcxml_v1.schema_class().dump(marcxml_v1.preprocess_record(
pid=pid,
record=record)).data
assert expected == data
marcxml_v1.serialize(pid=pid, record=record)
def assert_array(a1, a2):
"""Check array."""
for i in range(0, len(a1)):
if isinstance(a1[i], dict):
assert_dict(a1[i], a2[i])
elif isinstance(a1[i], list) or isinstance(a1[i], tuple):
assert_array(a1[i], a2[i])
else:
assert a1[i] in a2
assert len(a1) == len(a2)
def assert_dict(a1, a2):
"""Check dict."""
for (k, v) in a1.items():
assert k in a2
if isinstance(v, dict):
assert_dict(v, a2[k])
elif isinstance(v, list) or isinstance(v, tuple):
assert_array(v, a2[k])
else:
assert a2[k] == v
assert len(a2) == len(a1)
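# Hedged usage sketch, not part of the original test module: assert_dict and
# assert_array above walk two nested dict/list structures and compare them
# key by key (scalars inside lists are checked by membership, and lengths
# must match). A minimal, hypothetical illustration of the call pattern:
#     expected = {u'journal': {'issue': '2', 'pages': '20'}}
#     actual = {u'journal': {'issue': '2', 'pages': '20'}}
#     assert_dict(expected, actual)  # passes; raises AssertionError on mismatch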
| slint/zenodo | tests/unit/records/test_schemas_marcxml.py | Python | gpl-2.0 | 13,950 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Dec 1 09:58:36 2011 by generateDS.py version 2.7a.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
(XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
# pylint: disable=E0602, E0611
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
# pylint: disable=E0602,E0611
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
                try:
                    int(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
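# Hedged customization sketch (an assumption, not part of the generated file):
# as noted above, this fallback GeneratedsSuper is only used when no module
# named generatedssuper.py can be imported. To override the gds_* hooks you
# could ship such a module yourself, for example:
#     # generatedssuper.py (hypothetical override module)
#     class GeneratedsSuper(object):
#         def gds_format_string(self, input_data, input_name=''):
#             return input_data.strip()
#         # ...plus the remaining gds_format_*/gds_validate_* hooks as needed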
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
# from IPython.Shell import IPShellEmbed
# args = ''
# ipshell = IPShellEmbed(args,
#     banner = 'Dropping into IPython',
#     exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' %
(self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' %
(self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write('model_.MixedContainer(%d, %d, "%s",\n' %
(self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_data_type(self, data_type):
self.data_type = data_type
def get_data_type_chain(self):
return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container):
self.container = container
def get_container(self):
return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class testsuites(GeneratedsSuper):
"""Contains an aggregation of testsuite results"""
subclass = None
superclass = None
def __init__(self, testsuite=None):
if testsuite is None:
self.testsuite = []
else:
self.testsuite = testsuite
def factory(*args_, **kwargs_):
if testsuites.subclass:
# pylint: disable=E1102
return testsuites.subclass(*args_, **kwargs_)
else:
return testsuites(*args_, **kwargs_)
factory = staticmethod(factory)
def get_testsuite(self):
return self.testsuite
def set_testsuite(self, testsuite):
self.testsuite = testsuite
def add_testsuite(self, value):
self.testsuite.append(value)
def insert_testsuite(self, index, value):
self.testsuite[index] = value
def export(self, outfile, level, namespace_='', name_='testsuites', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testsuites')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testsuites'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='testsuites', fromsubclass_=False):
for testsuite_ in self.testsuite:
testsuite_.export(outfile, level, namespace_, name_='testsuite')
def hasContent_(self):
if (
self.testsuite
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testsuites'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('testsuite=[\n')
level += 1
for testsuite_ in self.testsuite:
showIndent(outfile, level)
outfile.write('model_.testsuiteType(\n')
testsuite_.exportLiteral(outfile, level, name_='testsuiteType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'testsuite':
obj_ = testsuiteType.factory()
obj_.build(child_)
self.testsuite.append(obj_)
# end class testsuites
class testsuite(GeneratedsSuper):
"""Contains the results of exexuting a testsuiteFull class name of the
test for non-aggregated testsuite documents. Class name without
the package for aggregated testsuites documentswhen the test was
executed. Timezone may not be specified.Host on which the tests
were executed. 'localhost' should be used if the hostname cannot
be determined.The total number of tests in the suiteThe total
number of tests in the suite that failed. A failure is a test
which the code has explicitly failed by using the mechanisms for
that purpose. e.g., via an assertEqualsThe total number of tests
in the suite that errorrd. An errored test is one that had an
unanticipated problem. e.g., an unchecked throwable; or a
problem with the implementation of the test.Time taken (in
seconds) to execute the tests in the suite"""
subclass = None
superclass = None
def __init__(self, tests=None, errors=None, name=None, timestamp=None, hostname=None, time=None, failures=None, properties=None, testcase=None, system_out=None, system_err=None, extensiontype_=None):
self.tests = _cast(int, tests)
self.errors = _cast(int, errors)
self.name = _cast(None, name)
self.timestamp = _cast(None, timestamp)
self.hostname = _cast(None, hostname)
self.time = _cast(float, time)
self.failures = _cast(int, failures)
self.properties = properties
if testcase is None:
self.testcase = []
else:
self.testcase = testcase
self.system_out = system_out
self.system_err = system_err
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if testsuite.subclass:
# pylint: disable=E1102
return testsuite.subclass(*args_, **kwargs_)
else:
return testsuite(*args_, **kwargs_)
factory = staticmethod(factory)
def get_properties(self):
return self.properties
def set_properties(self, properties):
self.properties = properties
def get_testcase(self):
return self.testcase
def set_testcase(self, testcase):
self.testcase = testcase
def add_testcase(self, value):
self.testcase.append(value)
def insert_testcase(self, index, value):
self.testcase[index] = value
def get_system_out(self):
return self.system_out
def set_system_out(self, system_out):
self.system_out = system_out
def get_system_err(self):
return self.system_err
def set_system_err(self, system_err):
self.system_err = system_err
def get_tests(self):
return self.tests
def set_tests(self, tests):
self.tests = tests
def get_errors(self):
return self.errors
def set_errors(self, errors):
self.errors = errors
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_timestamp(self):
return self.timestamp
def set_timestamp(self, timestamp):
self.timestamp = timestamp
def validate_ISO8601_DATETIME_PATTERN(self, value):
# Validate type ISO8601_DATETIME_PATTERN, a restriction on xs:dateTime.
pass
def get_hostname(self):
return self.hostname
def set_hostname(self, hostname):
self.hostname = hostname
def get_time(self):
return self.time
def set_time(self, time):
self.time = time
def get_failures(self):
return self.failures
def set_failures(self, failures):
self.failures = failures
def get_extensiontype_(self):
return self.extensiontype_
def set_extensiontype_(self, extensiontype_):
self.extensiontype_ = extensiontype_
def export(self, outfile, level, namespace_='', name_='testsuite', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testsuite')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testsuite'):
if self.tests is not None and 'tests' not in already_processed:
already_processed.append('tests')
outfile.write(' tests="%s"' % self.gds_format_integer(self.tests, input_name='tests'))
if self.errors is not None and 'errors' not in already_processed:
already_processed.append('errors')
outfile.write(' errors="%s"' % self.gds_format_integer(self.errors, input_name='errors'))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.timestamp is not None and 'timestamp' not in already_processed:
already_processed.append('timestamp')
outfile.write(' timestamp=%s' % (quote_attrib(self.timestamp), ))
if self.hostname is not None and 'hostname' not in already_processed:
already_processed.append('hostname')
outfile.write(' hostname=%s' % (self.gds_format_string(quote_attrib(self.hostname).encode(ExternalEncoding), input_name='hostname'), ))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
outfile.write(' time="%s"' % self.gds_format_float(self.time, input_name='time'))
if self.failures is not None and 'failures' not in already_processed:
already_processed.append('failures')
outfile.write(' failures="%s"' % self.gds_format_integer(self.failures, input_name='failures'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='testsuite', fromsubclass_=False):
if self.properties is not None:
self.properties.export(outfile, level, namespace_, name_='properties', )
for testcase_ in self.testcase:
testcase_.export(outfile, level, namespace_, name_='testcase')
if self.system_out is not None:
showIndent(outfile, level)
outfile.write('<%ssystem-out>%s</%ssystem-out>\n' % (namespace_, self.gds_format_string(quote_xml(self.system_out).encode(ExternalEncoding), input_name='system-out'), namespace_))
if self.system_err is not None:
showIndent(outfile, level)
outfile.write('<%ssystem-err>%s</%ssystem-err>\n' % (namespace_, self.gds_format_string(quote_xml(self.system_err).encode(ExternalEncoding), input_name='system-err'), namespace_))
def hasContent_(self):
if (
self.properties is not None or
self.testcase or
self.system_out is not None or
self.system_err is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testsuite'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.tests is not None and 'tests' not in already_processed:
already_processed.append('tests')
showIndent(outfile, level)
outfile.write('tests = %d,\n' % (self.tests,))
if self.errors is not None and 'errors' not in already_processed:
already_processed.append('errors')
showIndent(outfile, level)
outfile.write('errors = %d,\n' % (self.errors,))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.timestamp is not None and 'timestamp' not in already_processed:
already_processed.append('timestamp')
showIndent(outfile, level)
outfile.write('timestamp = "%s",\n' % (self.timestamp,))
if self.hostname is not None and 'hostname' not in already_processed:
already_processed.append('hostname')
showIndent(outfile, level)
outfile.write('hostname = "%s",\n' % (self.hostname,))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
showIndent(outfile, level)
outfile.write('time = %f,\n' % (self.time,))
if self.failures is not None and 'failures' not in already_processed:
already_processed.append('failures')
showIndent(outfile, level)
outfile.write('failures = %d,\n' % (self.failures,))
def exportLiteralChildren(self, outfile, level, name_):
if self.properties is not None:
showIndent(outfile, level)
outfile.write('properties=model_.propertiesType(\n')
self.properties.exportLiteral(outfile, level, name_='properties')
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('testcase=[\n')
level += 1
for testcase_ in self.testcase:
showIndent(outfile, level)
outfile.write('model_.testcaseType(\n')
testcase_.exportLiteral(outfile, level, name_='testcaseType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.system_out is not None:
showIndent(outfile, level)
outfile.write('system_out=%s,\n' % quote_python(self.system_out).encode(ExternalEncoding))
if self.system_err is not None:
showIndent(outfile, level)
outfile.write('system_err=%s,\n' % quote_python(self.system_err).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('tests', node)
if value is not None and 'tests' not in already_processed:
already_processed.append('tests')
try:
self.tests = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('errors', node)
if value is not None and 'errors' not in already_processed:
already_processed.append('errors')
try:
self.errors = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
self.name = ' '.join(self.name.split())
value = find_attr_value_('timestamp', node)
if value is not None and 'timestamp' not in already_processed:
already_processed.append('timestamp')
self.timestamp = value
self.validate_ISO8601_DATETIME_PATTERN(self.timestamp) # validate type ISO8601_DATETIME_PATTERN
value = find_attr_value_('hostname', node)
if value is not None and 'hostname' not in already_processed:
already_processed.append('hostname')
self.hostname = value
self.hostname = ' '.join(self.hostname.split())
value = find_attr_value_('time', node)
if value is not None and 'time' not in already_processed:
already_processed.append('time')
try:
self.time = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (time): %s' % exp)
value = find_attr_value_('failures', node)
if value is not None and 'failures' not in already_processed:
already_processed.append('failures')
try:
self.failures = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'properties':
obj_ = propertiesType.factory()
obj_.build(child_)
self.set_properties(obj_)
elif nodeName_ == 'testcase':
obj_ = testcaseType.factory()
obj_.build(child_)
self.testcase.append(obj_)
elif nodeName_ == 'system-out':
system_out_ = child_.text
system_out_ = self.gds_validate_string(system_out_, node, 'system_out')
self.system_out = system_out_
elif nodeName_ == 'system-err':
system_err_ = child_.text
system_err_ = self.gds_validate_string(system_err_, node, 'system_err')
self.system_err = system_err_
# end class testsuite
class system_out(GeneratedsSuper):
"""Data that was written to standard out while the test was executed"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if system_out.subclass:
# pylint: disable=E1102
return system_out.subclass(*args_, **kwargs_)
else:
return system_out(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='system-out', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='system-out')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='system-out'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='system-out', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='system-out'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class system_out
class system_err(GeneratedsSuper):
"""Data that was written to standard error while the test was executed"""
subclass = None
superclass = None
def __init__(self):
pass
def factory(*args_, **kwargs_):
if system_err.subclass:
# pylint: disable=E1102
return system_err.subclass(*args_, **kwargs_)
else:
return system_err(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='', name_='system-err', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='system-err')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='system-err'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='system-err', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='system-err'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class system_err
class testsuiteType(testsuite):
"""Derived from testsuite/@name in the non-aggregated documentsStarts
at '0' for the first testsuite and is incremented by 1 for each
following testsuite"""
subclass = None
superclass = testsuite
def __init__(self, tests=None, errors=None, name=None, timestamp=None, hostname=None, time=None, failures=None, properties=None, testcase=None, system_out=None, system_err=None, id=None, package=None):
super(testsuiteType, self).__init__(tests, errors, name, timestamp, hostname, time, failures, properties, testcase, system_out, system_err, )
self.id = _cast(int, id)
self.package = _cast(None, package)
pass
def factory(*args_, **kwargs_):
if testsuiteType.subclass:
return testsuiteType.subclass(*args_, **kwargs_)
else:
return testsuiteType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self):
return self.id
def set_id(self, id):
self.id = id
def get_package(self):
return self.package
def set_package(self, package):
self.package = package
def export(self, outfile, level, namespace_='', name_='testsuiteType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testsuiteType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testsuiteType'):
super(testsuiteType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='testsuiteType')
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
if self.package is not None and 'package' not in already_processed:
already_processed.append('package')
outfile.write(' package=%s' % (self.gds_format_string(quote_attrib(self.package).encode(ExternalEncoding), input_name='package'), ))
def exportChildren(self, outfile, level, namespace_='', name_='testsuiteType', fromsubclass_=False):
super(testsuiteType, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(testsuiteType, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testsuiteType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = %d,\n' % (self.id,))
if self.package is not None and 'package' not in already_processed:
already_processed.append('package')
showIndent(outfile, level)
outfile.write('package = "%s",\n' % (self.package,))
super(testsuiteType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(testsuiteType, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
try:
self.id = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('package', node)
if value is not None and 'package' not in already_processed:
already_processed.append('package')
self.package = value
self.package = ' '.join(self.package.split())
super(testsuiteType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(testsuiteType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class testsuiteType
class propertiesType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, property=None):
if property is None:
self.property = []
else:
self.property = property
def factory(*args_, **kwargs_):
if propertiesType.subclass:
# pylint: disable=E1102
return propertiesType.subclass(*args_, **kwargs_)
else:
return propertiesType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_property(self):
return self.property
def set_property(self, property):
self.property = property
def add_property(self, value):
self.property.append(value)
def insert_property(self, index, value):
self.property[index] = value
def export(self, outfile, level, namespace_='', name_='propertiesType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertiesType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertiesType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='propertiesType', fromsubclass_=False):
for property_ in self.property:
property_.export(outfile, level, namespace_, name_='property')
def hasContent_(self):
if (
self.property
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='propertiesType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('property=[\n')
level += 1
for property_ in self.property:
showIndent(outfile, level)
outfile.write('model_.propertyType(\n')
property_.exportLiteral(outfile, level, name_='propertyType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'property':
obj_ = propertyType.factory()
obj_.build(child_)
self.property.append(obj_)
# end class propertiesType
class propertyType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, value=None):
self.name = _cast(None, name)
self.value = _cast(None, value)
pass
def factory(*args_, **kwargs_):
if propertyType.subclass:
# pylint: disable=E1102
return propertyType.subclass(*args_, **kwargs_)
else:
return propertyType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
def export(self, outfile, level, namespace_='', name_='propertyType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertyType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertyType'):
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.append('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
def exportChildren(self, outfile, level, namespace_='', name_='propertyType', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='propertyType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.value is not None and 'value' not in already_processed:
already_processed.append('value')
showIndent(outfile, level)
outfile.write('value = "%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
self.name = ' '.join(self.name.split())
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.append('value')
self.value = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class propertyType
class testcaseType(GeneratedsSuper):
"""Name of the test methodFull class name for the class the test method
is in.Time taken (in seconds) to execute the test"""
subclass = None
superclass = None
def __init__(self, classname=None, name=None, time=None, error=None, failure=None):
self.classname = _cast(None, classname)
self.name = _cast(None, name)
self.time = _cast(float, time)
self.error = error
self.failure = failure
def factory(*args_, **kwargs_):
if testcaseType.subclass:
# pylint: disable=E1102
return testcaseType.subclass(*args_, **kwargs_)
else:
return testcaseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_error(self):
return self.error
def set_error(self, error):
self.error = error
def get_failure(self):
return self.failure
def set_failure(self, failure):
self.failure = failure
def get_classname(self):
return self.classname
def set_classname(self, classname):
self.classname = classname
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_time(self):
return self.time
def set_time(self, time):
self.time = time
def export(self, outfile, level, namespace_='', name_='testcaseType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='testcaseType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='testcaseType'):
if self.classname is not None and 'classname' not in already_processed:
already_processed.append('classname')
outfile.write(' classname=%s' % (self.gds_format_string(quote_attrib(self.classname).encode(ExternalEncoding), input_name='classname'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
outfile.write(' time="%s"' % self.gds_format_float(self.time, input_name='time'))
def exportChildren(self, outfile, level, namespace_='', name_='testcaseType', fromsubclass_=False):
if self.error is not None:
self.error.export(outfile, level, namespace_, name_='error')
if self.failure is not None:
self.failure.export(outfile, level, namespace_, name_='failure')
def hasContent_(self):
if (
self.error is not None or
self.failure is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='testcaseType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.classname is not None and 'classname' not in already_processed:
already_processed.append('classname')
showIndent(outfile, level)
outfile.write('classname = "%s",\n' % (self.classname,))
if self.name is not None and 'name' not in already_processed:
already_processed.append('name')
showIndent(outfile, level)
outfile.write('name = "%s",\n' % (self.name,))
if self.time is not None and 'time' not in already_processed:
already_processed.append('time')
showIndent(outfile, level)
outfile.write('time = %f,\n' % (self.time,))
def exportLiteralChildren(self, outfile, level, name_):
if self.error is not None:
showIndent(outfile, level)
outfile.write('error=model_.errorType(\n')
self.error.exportLiteral(outfile, level, name_='error')
showIndent(outfile, level)
outfile.write('),\n')
if self.failure is not None:
showIndent(outfile, level)
outfile.write('failure=model_.failureType(\n')
self.failure.exportLiteral(outfile, level, name_='failure')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('classname', node)
if value is not None and 'classname' not in already_processed:
already_processed.append('classname')
self.classname = value
self.classname = ' '.join(self.classname.split())
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.append('name')
self.name = value
self.name = ' '.join(self.name.split())
value = find_attr_value_('time', node)
if value is not None and 'time' not in already_processed:
already_processed.append('time')
try:
self.time = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (time): %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'error':
obj_ = errorType.factory()
obj_.build(child_)
self.set_error(obj_)
elif nodeName_ == 'failure':
obj_ = failureType.factory()
obj_.build(child_)
self.set_failure(obj_)
# end class testcaseType
class errorType(GeneratedsSuper):
"""The error message. e.g., if a java exception is thrown, the return
value of getMessage()The type of error that occurred. e.g., if a
java execption is thrown the full class name of the exception."""
subclass = None
superclass = None
def __init__(self, message=None, type_=None, valueOf_=None):
self.message = _cast(None, message)
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if errorType.subclass:
# pylint: disable=E1102
return errorType.subclass(*args_, **kwargs_)
else:
return errorType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_message(self):
return self.message
def set_message(self, message):
self.message = message
def get_type(self):
return self.type_
def set_type(self, type_):
self.type_ = type_
def get_valueOf_(self):
return self.valueOf_
def set_valueOf_(self, valueOf_):
self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='errorType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='errorType')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='errorType'):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
outfile.write(' message=%s' % (self.gds_format_string(quote_attrib(self.message).encode(ExternalEncoding), input_name='message'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='errorType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='errorType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
showIndent(outfile, level)
outfile.write('message = "%s",\n' % (self.message,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('message', node)
if value is not None and 'message' not in already_processed:
already_processed.append('message')
self.message = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class errorType
class failureType(GeneratedsSuper):
"""The message specified in the assertThe type of the assert."""
subclass = None
superclass = None
def __init__(self, message=None, type_=None, valueOf_=None):
self.message = _cast(None, message)
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if failureType.subclass:
# pylint: disable=E1102
return failureType.subclass(*args_, **kwargs_)
else:
return failureType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_message(self):
return self.message
def set_message(self, message):
self.message = message
def get_type(self):
return self.type_
def set_type(self, type_):
self.type_ = type_
def get_valueOf_(self):
return self.valueOf_
def set_valueOf_(self, valueOf_):
self.valueOf_ = valueOf_
def export(self, outfile, level, namespace_='', name_='failureType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='failureType')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='failureType'):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
outfile.write(' message=%s' % (self.gds_format_string(quote_attrib(self.message).encode(ExternalEncoding), input_name='message'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='failureType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='failureType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.message is not None and 'message' not in already_processed:
already_processed.append('message')
showIndent(outfile, level)
outfile.write('message = "%s",\n' % (self.message,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
showIndent(outfile, level)
outfile.write('type_ = "%s",\n' % (self.type_,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('message', node)
if value is not None and 'message' not in already_processed:
already_processed.append('message')
self.message = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class failureType
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'testsuite'
rootClass = testsuite
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'testsuite'
rootClass = testsuite
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_="testsuite",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'testsuite'
rootClass = testsuite
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from JUnit_api import *\n\n')
sys.stdout.write('import JUnit_api as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
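# Hedged usage sketch (an assumption, not part of the generated module):
# besides parsing existing JUnit XML with parse()/parseString(), the classes
# above can build a report in memory and export it as XML, for example:
#     import sys
#     suites = testsuites()
#     suite = testsuiteType(name='client.example', tests=1, errors=0,
#                           failures=0, time=0.01, hostname='localhost',
#                           timestamp='2011-12-01T09:58:36')
#     suite.add_testcase(testcaseType(classname='example.TestCase',
#                                     name='test_ok', time=0.01))
#     suites.add_testsuite(suite)
#     suites.export(sys.stdout, 0)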
__all__ = [
"errorType",
"failureType",
"propertiesType",
"propertyType",
"system_err",
"system_out",
"testcaseType",
"testsuite",
"testsuiteType",
"testsuites"
]
| joyxu/autotest | client/tools/JUnit_api.py | Python | gpl-2.0 | 65,100 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
PostGISExecuteSQL.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya and Carterix Geomatics
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from sextante.core.GeoAlgorithm import GeoAlgorithm
__author__ = 'Victor Olaya, Carterix Geomatics'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Victor Olaya, Carterix Geomatics'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sextante.parameters.ParameterString import ParameterString
from sextante.admintools import postgis_utils
from sextante.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
class PostGISExecuteSQL(GeoAlgorithm):
DATABASE = "DATABASE"
SQL = "SQL"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + "/../images/postgis.png")
def processAlgorithm(self, progress):
connection = self.getParameterValue(self.DATABASE)
settings = QSettings()
mySettings = "/PostgreSQL/connections/"+ connection
try:
database = settings.value(mySettings+"/database").toString()
username = settings.value(mySettings+"/username").toString()
host = settings.value(mySettings+"/host").toString()
port = int(settings.value(mySettings+"/port").toString())
password = settings.value(mySettings+"/password").toString()
except Exception, e:
raise GeoAlgorithmExecutionException("Wrong database connection name: " + connection)
try:
self.db = postgis_utils.GeoDB(host=host, port=port, dbname=database, user=username, passwd=password)
except postgis_utils.DbError, e:
raise GeoAlgorithmExecutionException("Couldn't connect to database:\n"+e.message)
sql = self.getParameterValue(self.SQL).replace("\n", " ")
try:
self.db._exec_sql_and_commit(str(sql))
except postgis_utils.DbError, e:
raise GeoAlgorithmExecutionException("Error executing SQL:\n"+e.message)
def defineCharacteristics(self):
self.name = "PostGIS execute SQL"
self.group = "PostGIS management tools"
self.addParameter(ParameterString(self.DATABASE, "Database"))
self.addParameter(ParameterString(self.SQL, "SQL query", "", True))
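# Hedged usage note (an assumption, not part of the original plugin file):
# DATABASE is the name of a PostGIS connection already defined in QGIS, i.e.
# processAlgorithm() expects QSettings entries such as
# /PostgreSQL/connections/<name>/host, /port, /database, /username and
# /password to exist. A hypothetical run from the SEXTANTE toolbox would
# therefore supply that connection name plus the SQL text, e.g.
# DATABASE='local_postgis', SQL='VACUUM ANALYZE my_table;'.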
| bstroebl/QGIS | python/plugins/sextante/admintools/PostGISExecuteSQL.py | Python | gpl-2.0 | 3,170 |
# -*- coding: utf-8 -*-
from django.db import models
class Profiles(models.Model):
userid = models.AutoField(primary_key=True)
login_name = models.CharField(max_length=255, unique=True)
cryptpassword = models.CharField(max_length=128, blank=True)
realname = models.CharField(max_length=255)
disabledtext = models.TextField()
disable_mail = models.IntegerField(default=0)
mybugslink = models.IntegerField()
extern_id = models.IntegerField(blank=True)
class Meta:
db_table = "profiles"
def get_groups(self):
q = UserGroupMap.objects.filter(user__userid=self.userid)
q = q.select_related()
groups = [assoc.group for assoc in q.all()]
return groups
class Groups(models.Model):
name = models.CharField(unique=True, max_length=255)
description = models.TextField()
isbuggroup = models.IntegerField()
userregexp = models.TextField()
isactive = models.IntegerField()
class Meta:
db_table = "groups"
class UserGroupMap(models.Model):
user = models.ForeignKey(Profiles, on_delete=models.CASCADE) # user_id
# (actually has two primary keys)
group = models.ForeignKey(Groups, on_delete=models.CASCADE) # group_id
isbless = models.IntegerField(default=0)
grant_type = models.IntegerField(default=0)
class Meta:
db_table = "user_group_map"
unique_together = ("user", "group")
#
# Extra information for users
#
class UserProfile(models.Model):
user = models.OneToOneField(
"auth.User", unique=True, related_name="profile", on_delete=models.CASCADE
)
phone_number = models.CharField(blank=True, default="", max_length=128)
url = models.URLField(blank=True, default="")
im = models.CharField(blank=True, default="", max_length=128)
im_type_id = models.IntegerField(blank=True, default=1, null=True)
address = models.TextField(blank=True, default="")
notes = models.TextField(blank=True, default="")
class Meta:
db_table = "tcms_user_profiles"
def get_im(self):
from .forms import IM_CHOICES
if not self.im:
return None
for c in IM_CHOICES:
if self.im_type_id == c[0]:
return "[{}] {}".format(c[1], self.im)
@classmethod
def get_user_profile(cls, user):
return cls.objects.get(user=user)
| Nitrate/Nitrate | src/tcms/profiles/models.py | Python | gpl-2.0 | 2,379 |
# If you start collecting a wave and then regret it, you can use this
# to roll back the data collection. I would recommend duplicating the database
# first and letting this program loose on a copy, as you won't be able to
# get back any of the data you don't explicitly tell it to keep.
import sqlite3
import itertools
import add_data as ad
def rollback(db_path,waves_to_keep=[],waves_to_lose=[]):
'''waves_to_keep and waves_to_lose should be lists of names of wave
tables in the database currently being cleaned'''
conn=sqlite3.connect(db_path)
curs=conn.cursor()
'''
for wave in waves_to_lose:
curs.execute('DROP TABLE {}'.format(wave))
users_to_keep=[]
for wave in waves_to_keep:
curs.execute('SELECT id FROM {}'.format(wave))
users_to_keep.extend(curs.fetchall())
curs.execute('ALTER TABLE users RENAME TO old_users')
ad.create_table(curs,'users')
curs.execute('ALTER TABLE x_follows_y RENAME TO old_x_follows_y')
ad.create_table(curs,'x_follows_y')
follow_data=set([])
for n, user in enumerate(users_to_keep):
curs.execute('SELECT follower,followed FROM old_x_follows_y '
'WHERE follower=?',user)
follow_data.update(curs.fetchall())
curs.execute('SELECT follower,followed FROM old_x_follows_y '
'WHERE followed=?',user)
follow_data.update(curs.fetchall())
if n % 250 == 0: print "{} users' follow data read.".format(n)
curs.executemany('INSERT INTO x_follows_y VALUES (?,?)',
follow_data)
conn.commit()
print 'Cleaned x_follows_y table filled.'
'''
curs.execute('SELECT follower,followed FROM old_x_follows_y')
follow_data=curs.fetchall()
print 'Got follow data: {} follows'.format(len(follow_data))
users_to_keep = set(itertools.chain.from_iterable(follow_data))
print 'Got users from follow data: {} of them'.format(len(users_to_keep))
print list(users_to_keep)[:10]
n=0
curs.execute('SELECT * FROM old_users')
for i,user_data in enumerate(curs.fetchall()):
if user_data[0] in users_to_keep:
curs.execute('INSERT INTO users VALUES ('
'?,?,?,?,?,?,?,?,?,?,'
'?,?,?,?,?,?,?,?,?,?)',user_data)
n+=1
if i % 1000 == 0:
print '{}th user details checked.'.format(i)
if n % 1000 == 0:
print '{}th user\'s details copied.'.format(n)
print 'Gone through them all now'
conn.commit()
print 'Cleaned users table filled.'
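# Minimal usage sketch (not part of the original script): the database path and
# wave table names below are hypothetical placeholders. As the comment at the
# top of this file recommends, point this at a *copy* of the database.
if __name__ == '__main__':
    rollback('copy_of_collection.db',
             waves_to_keep=['wave_1', 'wave_2'],
             waves_to_lose=['wave_3'])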
| ValuingElectronicMusic/network-analysis | remove_waves.py | Python | gpl-2.0 | 2,612 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import os.path
import unittest
from io import StringIO
from suse_git import header
class TestHeaderChecker(unittest.TestCase):
def test_empty(self):
try:
self.header = header.Checker("")
except header.HeaderException as e:
self.assertEqual(4, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('patch-mainline'))
self.assertTrue(e.tag_is_missing('from'))
self.assertTrue(e.tag_is_missing('subject'))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(4, e.errors())
def test_subject_dupe(self):
text = """
From: [email protected]
Subject: some patch
Subject: some patch
Patch-mainline: v4.2-rc2
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.DuplicateTagError))
self.assertEqual(1, e.errors())
def test_patch_mainline_dupe(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Patch-mainline: v4.2-rc2
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.DuplicateTagError))
self.assertEqual(1, e.errors())
def test_patch_mainline_empty(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline:
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.EmptyTagError))
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('patch-mainline'))
self.assertEqual(2, e.errors())
def test_patch_mainline_version_no_ack_or_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
References: bsc#12345
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
"""
try:
self.header = header.Checker(text)
except header.HeaderException as e:
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('acked-by'))
self.assertTrue(e.tag_is_missing('signed-off-by'))
self.assertEqual(1, e.errors())
def test_patch_mainline_version_correct_multi_ack(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_multi_ack_ext_last(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_mixed_ack_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_ack(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_from(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_review(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Reviewed-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_multi_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
Signed-off-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_multi_sob_ext_last(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
Signed-off-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_na(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: n/a
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_submitted_correct_ml(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, 19 July 2015 - linux-btrfs
References: bsc#12345
Acked-by: [email protected]
"""
errors = self.header = header.Checker(text)
def test_patch_mainline_submitted_correct_url(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, https://lkml.org/archive/link-to-post
References: bsc#12345
Acked-by: [email protected]
"""
errors = self.header = header.Checker(text)
def test_patch_mainline_submitted_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
    def test_patch_mainline_submitted_detail_git_repo(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, https://lkml.org/archive/link-to-post
Git-repo: git://host/valid/path/to/repo
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.ExcludedTagError))
self.assertEqual(1, e.errors())
# Required/Excluded conflict between Patch-mainline (Submitted)
# and Git-commit
def test_patch_mainline_submitted_detail_git_commit(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, https://lkml.org/archive/link-to-post
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertEqual(1, e.errors(header.ExcludedTagError))
self.assertEqual(2, e.errors())
def test_patch_mainline_never_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Never
References: bsc#12345
Acked-by: [email protected]
"""
try:
self.header = header.Checker(text)
except header.HeaderException as e:
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_yes_with_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Yes, v4.1-rc1
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_yes_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Yes
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_not_yet_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Not yet
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_never_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Never, SLES-specific feature
References: FATE#123456
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: No, handled differently upstream
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_not_yet_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Not yet, rare reason
References: bsc#12345
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_git_commit_standalone(self):
text = """
From: [email protected]
Subject: some patch
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
try:
self.header = header.Checker(text)
except header.HeaderException as e:
# Both policy and Git-commit require Patch-mainline
self.assertEqual(2, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('patch-mainline'))
self.assertEqual(2, e.errors())
def test_patch_mainline_queued_correct(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
Git-repo: git://path/to/git/repo
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_queued_standalone(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(2, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('git-commit'))
self.assertTrue(e.tag_is_missing('git-repo'))
self.assertEqual(2, e.errors())
def test_patch_mainline_queued_with_git_repo(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
Git-repo: git://path/to/git/repo
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
# Required by both Patch-mainline (Queued) and
# Git-repo
self.assertEqual(2, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('git-commit'))
self.assertEqual(2, e.errors())
def test_patch_mainline_queued_with_git_commit(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('git-repo'))
self.assertEqual(1, e.errors())
def test_patch_mainline_invalid(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: n/a
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_diff_like_description(self):
text = """
From: [email protected]
Subject: blablah
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
This is a thing. I ran across it:
*** Testing resulted in failure
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_diff_like_description2(self):
text = """
From: [email protected]
Subject: blablah
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
This is a thing. I ran across it:
--- Testing resulted in failure
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_references_empty(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References:
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.EmptyTagError))
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(2, e.errors())
def test_patch_references_missing(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(1, e.errors())
def test_patch_references_multi(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
References: bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_references_multi2(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345 bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_references_multi3(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text)
    def test_patch_references_multi4(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
References: fix for blahblah
Acked-by: [email protected]
"""
self.header = header.Checker(text)
@unittest.skip("Enable this check when we want to require a real "
"References tag")
def test_patch_references_only_freeform(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: fix for blahblah
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(1, e.errors())
def test_patch_references_empty_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References:
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text, True)
e = cm.exception
self.assertEqual(1, e.errors(header.EmptyTagError))
self.assertEqual(1, e.errors())
def test_patch_references_missing_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
def test_patch_references_multi_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
References: bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
def test_patch_references_multi2_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345 bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
def test_patch_references_multi3_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
    def test_patch_references_multi4_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
References: fix for blahblah
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
@unittest.skip("Enable this check when we want to require a real "
"References tag")
def test_patch_references_only_freeform_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: fix for blahblah
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text, True)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(1, e.errors())
def test_no_patch_mainline_for_kabi(self):
text = """
From: [email protected]
Subject: some patch
References: FATE#123456
Acked-by: [email protected]
"""
self.header = header.Checker(text, False, "patches.kabi/FATE123456_fix_kabi.patch")
| kdave/kernel-source | scripts/python/tests/test_header.py | Python | gpl-2.0 | 20,987 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('QuickBooking', '0010_auto_20150704_1942'),
]
operations = [
migrations.AlterField(
model_name='seat',
name='seat_type',
field=models.CharField(max_length=10, primary_key=True),
),
]
| noorelden/QuickBooking | QuickBooking/migrations/0011_auto_20150704_2001.py | Python | gpl-2.0 | 426 |
import fill
import array
a = array.array('I')
a.append(1)
a.append(1)
a.append(3)
a.append(2)
a.append(2)
a.append(2)
a.append(2)
a.append(2)
a.append(2)
a.append(2)
a.append(2)
a.append(2)
print "before", a
b = fill.fill(a, 2, 2, 3, 3, 4278190080)
print "after", b
print "after 2", array.array('I', b)
| samdroid-apps/paint-activity | test_fill.py | Python | gpl-2.0 | 304 |
from miasm2.core.asmblock import disasmEngine
from miasm2.arch.aarch64.arch import mn_aarch64
cb_aarch64_funcs = []
def cb_aarch64_disasm(*args, **kwargs):
for func in cb_aarch64_funcs:
func(*args, **kwargs)
class dis_aarch64b(disasmEngine):
attrib = "b"
def __init__(self, bs=None, **kwargs):
super(dis_aarch64b, self).__init__(
mn_aarch64, self.attrib, bs,
dis_bloc_callback = cb_aarch64_disasm,
**kwargs)
class dis_aarch64l(disasmEngine):
attrib = "l"
def __init__(self, bs=None, **kwargs):
super(dis_aarch64l, self).__init__(
mn_aarch64, self.attrib, bs,
dis_bloc_callback = cb_aarch64_disasm,
**kwargs)
| stephengroat/miasm | miasm2/arch/aarch64/disasm.py | Python | gpl-2.0 | 731 |
# This file is part of BuhIRC.
#
# BuhIRC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BuhIRC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BuhIRC. If not, see <http://www.gnu.org/licenses/>.
import re
import random
from modules import Module
class VoreModule(Module):
name = "Vore"
description = "#vore-specific commands and chat responses."
# These regexes are borrowed from GyroTech's code
self_regex = "(?i)(((it|him|her|their|them)sel(f|ves))|Burpy\s?Hooves)"
all_regex = "(?i)every\s?(body|one|pony|pone|poni)"
def module_init(self, bot):
if "vore" not in bot.config:
return "Vore section in config is required."
self.config = bot.config["vore"]
self.reacts = self.config["react_messages"]
self.cmd_replies = self.config["command_replies"]
self.hook_command("eat", self.on_command_eat)
self.hook_command("cockvore", self.on_command_cockvore)
self.hook_command("inflate", self.on_command_inflate)
def do_command_reply(self, bot, target, replies):
# Replies is a 3-tuple of lists that looks like: (replies for target=me, replies for target=all, replies for target=user)
reply = None
if re.match(self.self_regex, target, re.IGNORECASE): # !eat BuhIRC
reply = random.choice(replies[0])
elif re.match(self.all_regex, target, re.IGNORECASE): # !eat everypony
reply = random.choice(replies[1])
else:
reply = random.choice(replies[2]) # !eat AppleDash (Or any other user.)
try:
bot.reply_act(reply % target)
except TypeError: # Format string wasn't filled. (No %s)
bot.reply_act(reply)
def on_command_eat(self, bot, ln, args):
eaten = ln.hostmask.nick
if len(args) > 0:
eaten = " ".join(args)
eaten = eaten.strip()
self.do_command_reply(bot, eaten, (self.cmd_replies["eat_self"], self.cmd_replies["eat_all"], self.cmd_replies["eat_user"]))
def on_command_cockvore(self, bot, ln, args):
cockvored = ln.hostmask.nick
if len(args) > 0:
cockvored = " ".join(args)
cockvored = cockvored.strip()
self.do_command_reply(bot, cockvored, (self.cmd_replies["cockvore_self"], self.cmd_replies["cockvore_all"], self.cmd_replies["cockvore_user"]))
def on_command_inflate(self, bot, ln, args):
inflated = ln.hostmask.nick
if len(args) > 0:
inflated = " ".join(args)
inflated = inflated.strip()
if re.match(self.all_regex, inflated, re.IGNORECASE): # Not implemented
return
self.do_command_reply(bot, inflated, (self.cmd_replies["inflate_self"], [], self.cmd_replies["inflate_user"]))
| AppleDash/burpyhooves | modules/vore.py | Python | gpl-3.0 | 3,226 |
"""
WSGI config for group_warning project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "group_warning.settings")
application = get_wsgi_application()
| fazlerabby7/group_warning | group_warning/wsgi.py | Python | gpl-3.0 | 404 |
#!/usr/bin/python3
from ansible.module_utils.arvados_common import process
def main():
additional_argument_spec={
"uuid": dict(required=True, type="str"),
"owner_uuid": dict(required=True, type="str"),
"name": dict(required=True, type="str"),
}
filter_property = "uuid"
filter_value_module_parameter = "uuid"
module_parameter_to_resource_parameter_map = {
"owner_uuid": "owner_uuid",
"name": "name",
}
process("repositories", additional_argument_spec, filter_property, filter_value_module_parameter,
module_parameter_to_resource_parameter_map)
if __name__ == "__main__":
main()
| wtsi-hgi/hgi-ansible | ansible/library/arvados_repository.py | Python | gpl-3.0 | 670 |
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""Use AppConf to store sensible defaults for settings. This also documents the
settings that lizard_damage defines. Each setting name automatically has
"FLOODING_LIB_" prepended to it.
By putting the AppConf in this module and importing the Django settings
here, it is possible to import Django's settings with `from flooding_lib.conf
import settings` and be certain that the AppConf
stuff has also been loaded."""
# Python 3 is coming
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
from django.conf import settings
settings # Pyflakes...
from appconf import AppConf
class MyAppConf(AppConf):
COLORMAP_DIR = os.path.join(
settings.FLOODING_SHARE, 'colormaps')
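# Hedged usage sketch (not from the original module): elsewhere in the project
# the defaults would be read roughly like this; the attribute name assumes the
# automatic "FLOODING_LIB_" prefix described in the module docstring.
#
#     from flooding_lib.conf import settings
#     colormap_dir = settings.FLOODING_LIB_COLORMAP_DIR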
| lizardsystem/flooding | flooding_lib/conf.py | Python | gpl-3.0 | 875 |
# -*- coding: utf-8 -*-
"""
<license>
CSPLN_MaryKeelerEdition; Manages images to which notes can be added.
Copyright (C) 2015, Thomas Kercheval
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
___________________________________________________________</license>
Description:
Updates README.txt file in the specified directory.
Inputs:
Functions that are in the specified directory.
discover_functions() detects them automatically.
Outputs:
README.txt file, with Scope&&Details listed.
Covers functions in specified directory.
Currently:
To Do:
Done:
Update readme file with current functions&&their docstrings.
"""
import os
def discover_functions(directory):
"""Discorvers python modules in current directory."""
function_names = []
curr_dir = os.listdir(directory)
for name in curr_dir:
if name[-3:] == '.py':
function_names.append(str(name))
return function_names
def grab_docstrings(directory):
""""Grabs the docstrings of all python modules specified."""
import ast
docstrings = {}
for name in discover_functions(directory):
path_name = os.path.join(directory, name)
thing = ast.parse(''.join(open(path_name)))
docstring = ast.get_docstring(thing)
docstrings[name] = docstring
return docstrings
def create_readme(doc_dic, directory):
"""Strips off license statement, formats readme, returns readme text."""
end_lisence = "</license>"
scope = '''Scope:
{}'''
details = '''Details:{}'''
scopelist = []
detaillist = []
scopestuff = ''
detailstuff = ''
# Now to create the contents of the README...
    for script in sorted(doc_dic.keys()):
print " Creating readme entry for: {}...".format(script)
        if doc_dic[script] is None:
print " But it has no docstring..."
continue
scopelist.append(script+'\n ')
docstring = doc_dic[script].replace('\n', '\n ')
doc_index = docstring.find(end_lisence) + 11
# Stripping off the license in the docstring...
docstring = docstring[doc_index:]
detaillist.append('\n\n'+script+'\n')
detaillist.append(' '+docstring)
for item in scopelist:
scopestuff += item
for ano_item in detaillist:
detailstuff += ano_item
# Now to put the contents in their correct place...
readme = (scope.format(scopestuff[:-4]) + '\n'
+ details.format(detailstuff) + '\n')
# And write the README in its directory...
write_readme(readme, directory)
return None
def write_readme(r_text, directory):
"""Writes the readme!"""
readme_path = os.path.join(directory, 'subreadme.txt')
with open(readme_path, 'w') as readme:
readme.write(r_text)
return None
def update_readme(directory):
"""Updates the readme everytime this script is called."""
documentation_dict = grab_docstrings(directory)
create_readme(documentation_dict, directory)
return None
if __name__ == "__main__":
CURR_DIR = os.path.abspath(os.path.dirname(__file__))
update_readme(CURR_DIR)
| jjs0sbw/CSPLN | test/update_test_subreadme.py | Python | gpl-3.0 | 3,717 |
# -*- coding: UTF-8 -*-
"""
Lastship Add-on (C) 2019
Credits to Lastship, Placenta and Covenant; our thanks go to their creators
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Addon Name: Lastship
# Addon id: plugin.video.lastship
# Addon Provider: Lastship
import re
import urlparse
from resources.lib.modules import cache
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
from resources.lib.modules import source_faultlog
from resources.lib.modules.handler.requestHandler import cRequestHandler
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['movie4k.sg', 'movie4k.lol', 'movie4k.pe', 'movie4k.tv', 'movie.to', 'movie4k.me', 'movie4k.org', 'movie2k.cm', 'movie2k.nu', 'movie4k.am', 'movie4k.io']
self._base_link = None
self.search_link = '/movies.php?list=search&search=%s'
@property
def base_link(self):
if not self._base_link:
self._base_link = cache.get(self.__get_base_url, 120, 'http://%s' % self.domains[0])
return self._base_link
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search(False, [localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search(False, [title] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search(True, [localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle:
url = self.__search(True, [tvshowtitle] + source_utils.aliases_to_array(aliases), year)
if url:
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url is None:
return
url = urlparse.urljoin(self.base_link, url)
oRequest = cRequestHandler(url)
oRequest.removeBreakLines(False)
oRequest.removeNewLines(False)
r = oRequest.request()
seasonMapping = dom_parser.parse_dom(r, 'select', attrs={'name': 'season'})
seasonMapping = dom_parser.parse_dom(seasonMapping, 'option', req='value')
seasonIndex = [i.attrs['value'] for i in seasonMapping if season in i.content]
seasonIndex = int(seasonIndex[0]) - 1
seasons = dom_parser.parse_dom(r, 'div', attrs={'id': re.compile('episodediv.+?')})
seasons = seasons[seasonIndex]
episodes = dom_parser.parse_dom(seasons, 'option', req='value')
url = [i.attrs['value'] for i in episodes if episode == re.findall('\d+', i.content)[0]]
if len(url) > 0:
return url[0]
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
url = urlparse.urljoin(self.base_link, url)
oRequest = cRequestHandler(url)
oRequest.removeBreakLines(False)
oRequest.removeNewLines(False)
r = oRequest.request()
r = r.replace('\\"', '"')
links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})
for i in links:
try:
host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
host = host.split()[0].rsplit('.', 1)[0].strip().lower()
host = host.encode('utf-8')
valid, host = source_utils.is_host_valid(host, hostDict)
if not valid: continue
link = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
link = client.replaceHTMLCodes(link)
link = urlparse.urljoin(self.base_link, link)
link = link.encode('utf-8')
sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})
except:
pass
if len(sources) == 0:
raise Exception()
return sources
except:
source_faultlog.logFault(__name__,source_faultlog.tagScrape, url)
return sources
def resolve(self, url):
try:
h = urlparse.urlparse(url.strip().lower()).netloc
oRequest = cRequestHandler(url)
oRequest.removeBreakLines(False)
oRequest.removeNewLines(False)
r = oRequest.request()
r = r.rsplit('"underplayer"')[0].rsplit("'underplayer'")[0]
u = re.findall('\'(.+?)\'', r) + re.findall('\"(.+?)\"', r)
u = [client.replaceHTMLCodes(i) for i in u]
u = [i for i in u if i.startswith('http') and not h in i]
url = u[-1].encode('utf-8')
if 'bit.ly' in url:
oRequest = cRequestHandler(url)
oRequest.removeBreakLines(False)
oRequest.removeNewLines(False)
oRequest.request()
url = oRequest.getHeaderLocationUrl()
elif 'nullrefer.com' in url:
url = url.replace('nullrefer.com/?', '')
return url
except:
source_faultlog.logFault(__name__,source_faultlog.tagResolve)
return
def __search(self, isSerieSearch, titles, year):
try:
q = self.search_link % titles[0]
q = urlparse.urljoin(self.base_link, q)
t = [cleantitle.get(i) for i in set(titles) if i]
oRequest = cRequestHandler(q)
oRequest.removeBreakLines(False)
oRequest.removeNewLines(False)
r = oRequest.request()
links = dom_parser.parse_dom(r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
tds = [dom_parser.parse_dom(i, 'td') for i in links]
tuples = [(dom_parser.parse_dom(i[0], 'a')[0], re.findall('>(\d{4})', i[1].content)) for i in tds if 'ger' in i[4].content]
tuplesSortByYear = [(i[0].attrs['href'], i[0].content) for i in tuples if year in i[1]]
if len(tuplesSortByYear) > 0 and not isSerieSearch:
tuples = tuplesSortByYear
elif isSerieSearch:
tuples = [(i[0].attrs['href'], i[0].content) for i in tuples if "serie" in i[0].content.lower()]
else:
tuples = [(i[0].attrs['href'], i[0].content) for i in tuples]
urls = [i[0] for i in tuples if cleantitle.get(i[1]) in t]
if len(urls) == 0:
urls = [i[0] for i in tuples if 'untertitel' not in i[1]]
if len(urls) > 0:
return source_utils.strip_domain(urls[0])
except:
try:
source_faultlog.logFault(__name__, source_faultlog.tagSearch, titles[0])
except:
return
return
def __get_base_url(self, fallback):
try:
for domain in self.domains:
try:
url = 'http://%s' % domain
oRequest = cRequestHandler(url)
oRequest.removeBreakLines(False)
oRequest.removeNewLines(False)
r = oRequest.request()
r = dom_parser.parse_dom(r, 'meta', attrs={'name': 'author'}, req='content')
if r and 'movie4k.io' in r[0].attrs.get('content').lower():
return url
except:
pass
except:
pass
return fallback
| lastship/plugin.video.lastship | resources/lib/sources/de/movie4k.py | Python | gpl-3.0 | 8,599 |
../../../../../share/pyshared/checkbox/reports/__init__.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/checkbox/reports/__init__.py | Python | gpl-3.0 | 58 |
######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Module for the Gaussian-Wishart and similar distributions.
"""
import numpy as np
from scipy import special
from .expfamily import (ExponentialFamily,
ExponentialFamilyDistribution,
useconstructor)
from .gaussian import GaussianMoments
from .gamma import GammaMoments
from .wishart import (WishartMoments,
WishartPriorMoments)
from .node import (Moments,
ensureparents)
from bayespy.utils import linalg
from bayespy.utils import random
from bayespy.utils import utils
class GaussianGammaISOMoments(Moments):
"""
Class for the moments of Gaussian-gamma-ISO variables.
"""
def compute_fixed_moments(self, x, alpha):
"""
Compute the moments for a fixed value
`x` is a mean vector.
`alpha` is a precision scale
"""
x = np.asanyarray(x)
alpha = np.asanyarray(alpha)
u0 = np.einsum('...,...i->...i', alpha, x)
u1 = np.einsum('...,...i,...j->...ij', alpha, x, x)
u2 = np.copy(alpha)
u3 = np.log(alpha)
u = [u0, u1, u2, u3]
return u
def compute_dims_from_values(self, x, alpha):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
D = np.shape(x)[-1]
return ( (D,), (D,D), (), () )
class GaussianGammaARDMoments(Moments):
"""
Class for the moments of Gaussian-gamma-ARD variables.
"""
def compute_fixed_moments(self, x, alpha):
"""
Compute the moments for a fixed value
`x` is a mean vector.
`alpha` is a precision scale
"""
x = np.asanyarray(x)
alpha = np.asanyarray(alpha)
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
if np.ndim(alpha) < 1:
raise ValueError("ARD scales must be a vector")
if np.shape(x)[-1] != np.shape(alpha)[-1]:
raise ValueError("Mean and ARD scales have inconsistent shapes")
u0 = np.einsum('...i,...i->...i', alpha, x)
u1 = np.einsum('...k,...k,...k->...k', alpha, x, x)
u2 = np.copy(alpha)
u3 = np.log(alpha)
u = [u0, u1, u2, u3]
return u
def compute_dims_from_values(self, x, alpha):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
if np.ndim(alpha) < 1:
raise ValueError("ARD scales must be a vector")
D = np.shape(x)[-1]
if np.shape(alpha)[-1] != D:
raise ValueError("Mean and ARD scales have inconsistent shapes")
return ( (D,), (D,), (D,), (D,) )
class GaussianWishartMoments(Moments):
"""
Class for the moments of Gaussian-Wishart variables.
"""
def compute_fixed_moments(self, x, Lambda):
"""
Compute the moments for a fixed value
`x` is a vector.
`Lambda` is a precision matrix
"""
x = np.asanyarray(x)
Lambda = np.asanyarray(Lambda)
u0 = np.einsum('...ik,...k->...i', Lambda, x)
u1 = np.einsum('...i,...ij,...j->...', x, Lambda, x)
u2 = np.copy(Lambda)
u3 = linalg.logdet_cov(Lambda)
return [u0, u1, u2, u3]
def compute_dims_from_values(self, x, Lambda):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
if np.ndim(Lambda) < 2:
raise ValueError("Precision must be a matrix")
D = np.shape(x)[-1]
if np.shape(Lambda)[-2:] != (D,D):
raise ValueError("Mean vector and precision matrix have "
"inconsistent shapes")
return ( (D,), (), (D,D), () )
class GaussianGammaISODistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of Gaussian-Gamma-ISO variables.
"""
def compute_message_to_parent(self, parent, index, u, u_mu_Lambda, u_a, u_b):
"""
Compute the message to a parent node.
"""
if index == 0:
raise NotImplementedError()
elif index == 1:
raise NotImplementedError()
elif index == 2:
raise NotImplementedError()
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_mu_Lambda, u_a, u_b, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
raise NotImplementedError()
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
raise NotImplementedError()
return (u, g)
def compute_cgf_from_parents(self, u_mu_Lambda, u_a, u_b):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
raise NotImplementedError()
return g
def compute_fixed_moments_and_f(self, x, alpha, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
raise NotImplementedError()
return (u, f)
class GaussianWishartDistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of Gaussian-Wishart variables.
"""
def compute_message_to_parent(self, parent, index, u, u_mu, u_alpha, u_V, u_n):
"""
Compute the message to a parent node.
"""
if index == 0:
raise NotImplementedError()
elif index == 1:
raise NotImplementedError()
elif index == 2:
raise NotImplementedError()
elif index == 3:
raise NotImplementedError()
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_mu, u_alpha, u_V, u_n, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
raise NotImplementedError()
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
raise NotImplementedError()
return (u, g)
def compute_cgf_from_parents(self, u_mu, u_alpha, u_V, u_n):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
raise NotImplementedError()
return g
def compute_fixed_moments_and_f(self, x, Lambda, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
raise NotImplementedError()
return (u, f)
class GaussianWishart(ExponentialFamily):
"""
Node for Gaussian-Wishart random variables.
The prior:
.. math::
        p(x, \Lambda | \mu, \alpha, V, n) = p(x | \Lambda, \mu, \alpha) p(\Lambda | V, n)

        p(x | \Lambda, \mu, \alpha) = \mathcal{N}(x | \mu, \alpha^{-1} \Lambda^{-1})

        p(\Lambda | V, n) = \mathcal{W}(\Lambda | n, V)
The posterior approximation :math:`q(x, \Lambda)` has the same Gaussian-Wishart form.
"""
_moments = GaussianWishartMoments()
    _parent_moments = (GaussianGammaISOMoments(),
GammaMoments(),
WishartMoments(),
WishartPriorMoments())
_distribution = GaussianWishartDistribution()
@classmethod
@ensureparents
def _constructor(cls, mu, alpha, V, n, plates_lambda=None, plates_x=None, **kwargs):
"""
Constructs distribution and moments objects.
This method is called if useconstructor decorator is used for __init__.
`mu` is the mean/location vector
`alpha` is the scale
`V` is the scale matrix
`n` is the degrees of freedom
"""
D = mu.dims[0][0]
# Check shapes
if mu.dims != ( (D,), (D,D), (), () ):
raise ValueError("Mean vector has wrong shape")
if alpha.dims != ( (), () ):
raise ValueError("Scale has wrong shape")
if V.dims != ( (D,D), () ):
raise ValueError("Precision matrix has wrong shape")
if n.dims != ( (), () ):
raise ValueError("Degrees of freedom has wrong shape")
dims = ( (D,), (), (D,D), () )
return (dims,
kwargs,
cls._total_plates(kwargs.get('plates'),
cls._distribution.plates_from_parent(0, mu.plates),
cls._distribution.plates_from_parent(1, alpha.plates),
cls._distribution.plates_from_parent(2, V.plates),
cls._distribution.plates_from_parent(3, n.plates)),
cls._distribution,
cls._moments,
cls._parent_moments)
def random(self):
"""
Draw a random sample from the distribution.
"""
raise NotImplementedError()
def show(self):
"""
Print the distribution using standard parameterization.
"""
raise NotImplementedError()
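# Illustrative sketch (not part of the library API): evaluates the fixed-moment
# formulas documented in GaussianGammaISOMoments.compute_fixed_moments, i.e.
# u0 = alpha*x, u1 = alpha*x*x^T, u2 = alpha, u3 = log(alpha), for arbitrary
# example values.
if __name__ == '__main__':
    x_example = np.array([1.0, 2.0])
    alpha_example = 0.5
    u_example = GaussianGammaISOMoments().compute_fixed_moments(x_example,
                                                                alpha_example)
    print(u_example[0])  # alpha * x  -> [ 0.5  1. ]
    print(u_example[3])  # log(alpha) -> about -0.693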
| nipunreddevil/bayespy | bayespy/inference/vmp/nodes/gaussian_wishart.py | Python | gpl-3.0 | 10,240 |
#
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
import os
import hashlib
import shutil
from stat import ST_MODE, S_IMODE, S_ISFIFO
def safe_change_mode(path, mode):
if not os.path.exists(path):
raise Exception("Path does not exist: %s" % path)
old_mode = os.stat(path)[ST_MODE]
if mode != S_IMODE(old_mode):
os.chmod(path, mode)
def safe_make_directory(path, mode=0o755):
if os.path.exists(path):
if not os.path.isdir(path):
raise Exception("Path is not a directory: %s" % path)
else:
os.makedirs(path, mode)
def safe_make_fifo(path, mode=0o666):
if os.path.exists(path):
mode = os.stat(path)[ST_MODE]
if not S_ISFIFO(mode):
raise Exception("Path is not a FIFO: %s" % path)
else:
os.mkfifo(path, mode)
def safe_remove_directory(path):
if os.path.exists(path):
if not os.path.isdir(path):
raise Exception("Path is not a directory: %s" % path)
shutil.rmtree(path)
def safe_remove_file(path):
if os.path.exists(path):
if not os.path.isfile(path):
raise Exception("Path is not a file: %s" % path)
os.remove(path)
def safe_rename(old, new):
if old != new:
if not os.path.exists(old):
raise Exception("Old path does not exist: %s" % old)
if os.path.exists(new):
raise Exception("New path exists already: %s" % new)
os.rename(old, new)
class safe_md5sum:
def __init__(self):
self.digest = hashlib.md5()
self.hexdigest = self.digest.hexdigest
def update(self, string):
self.digest.update(string.encode("utf-8"))
def safe_md5sum_file(name):
md5sum = None
if os.path.exists(name):
file = open(name)
digest = safe_md5sum()
while 1:
buf = file.read(4096)
if buf == "":
break
digest.update(buf)
file.close()
md5sum = digest.hexdigest()
return md5sum
def safe_close(file, safe=True):
if safe:
file.flush()
os.fsync(file.fileno())
file.close()
| jds2001/ocp-checkbox | checkbox/lib/safe.py | Python | gpl-3.0 | 2,778 |
from py2neo import Graph
from py2neo.ext.gremlin import Gremlin
import os
DEFAULT_GRAPHDB_URL = "http://localhost:7474/db/data/"
DEFAULT_STEP_DIR = os.path.dirname(__file__) + '/bjoernsteps/'
class BjoernSteps:
def __init__(self):
self._initJoernSteps()
self.initCommandSent = False
def setGraphDbURL(self, url):
""" Sets the graph database URL. By default,
http://localhost:7474/db/data/ is used."""
self.graphDbURL = url
def addStepsDir(self, stepsDir):
"""Add an additional directory containing steps to be injected
into the server"""
self.stepsDirs.append(stepsDir)
def connectToDatabase(self):
""" Connects to the database server."""
self.graphDb = Graph(self.graphDbURL)
self.gremlin = Gremlin(self.graphDb)
def runGremlinQuery(self, query):
""" Runs the specified gremlin query on the database. It is
assumed that a connection to the database has been
established. To allow the user-defined steps located in the
joernsteps directory to be used in the query, these step
definitions are prepended to the query."""
if not self.initCommandSent:
self.gremlin.execute(self._createInitCommand())
self.initCommandSent = True
return self.gremlin.execute(query)
def runCypherQuery(self, cmd):
""" Runs the specified cypher query on the graph database."""
        return self.graphDb.cypher.execute(cmd)
def getGraphDbURL(self):
return self.graphDbURL
"""
Create chunks from a list of ids.
This method is useful when you want to execute many independent
traversals on a large set of start nodes. In that case, you
can retrieve the set of start node ids first, then use 'chunks'
to obtain disjoint subsets that can be passed to idListToNodes.
"""
def chunks(self, idList, chunkSize):
for i in xrange(0, len(idList), chunkSize):
yield idList[i:i+chunkSize]
def _initJoernSteps(self):
self.graphDbURL = DEFAULT_GRAPHDB_URL
self.stepsDirs = [DEFAULT_STEP_DIR]
def _createInitCommand(self):
initCommand = ""
for stepsDir in self.stepsDirs:
for (root, dirs, files) in os.walk(stepsDir, followlinks=True):
files.sort()
for f in files:
filename = os.path.join(root, f)
if not filename.endswith('.groovy'): continue
initCommand += file(filename).read() + "\n"
return initCommand
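# Hedged usage sketch (not part of the module): assumes a Neo4j instance at the
# default URL already populated by bjoern, and that the injected steps provide
# idListToNodes() as mentioned in the chunks() docstring above; the Gremlin
# query strings are illustrative only.
if __name__ == '__main__':
    j = BjoernSteps()
    j.setGraphDbURL('http://localhost:7474/db/data/')
    j.connectToDatabase()
    node_ids = list(j.runGremlinQuery('g.V.id'))
    for id_chunk in j.chunks(node_ids, 256):
        j.runGremlinQuery('idListToNodes(%s)' % str(list(id_chunk)))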
| mrphrazer/bjoern | python-bjoern/bjoern/all.py | Python | gpl-3.0 | 2,645 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb service-group objects
(c) 2014, Mischa Peters <[email protected]>,
Eric Chou <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: a10_service_group
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' service groups.
description:
- Manage SLB (Server Load Balancing) service-group objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
- When a server doesn't exist and is added to the service-group the server will be created.
extends_documentation_fragment: a10
options:
partition:
version_added: "2.3"
description:
- set active-partition
required: false
default: null
service_group:
description:
- The SLB (Server Load Balancing) service-group name
required: true
default: null
aliases: ['service', 'pool', 'group']
service_group_protocol:
description:
- The SLB service-group protocol of TCP or UDP.
required: false
default: tcp
aliases: ['proto', 'protocol']
choices: ['tcp', 'udp']
service_group_method:
description:
- The SLB service-group load balancing method, such as round-robin or weighted-rr.
required: false
default: round-robin
aliases: ['method']
choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']
servers:
description:
- A list of servers to add to the service group. Each list item should be a
dictionary which specifies the C(server:) and C(port:), but can also optionally
specify the C(status:). See the examples below for details.
required: false
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new service-group
- a10_service_group:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
service_group: sg-80-tcp
servers:
- server: foo1.mydomain.com
port: 8080
- server: foo2.mydomain.com
port: 8080
- server: foo3.mydomain.com
port: 8080
- server: foo4.mydomain.com
port: 8080
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_service_group
returned: success
type: string
sample: "mynewservicegroup"
'''
VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
VALID_SERVER_FIELDS = ['server', 'port', 'status']
def validate_servers(module, servers):
for item in servers:
for key in item:
if key not in VALID_SERVER_FIELDS:
module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS)))
# validate the server name is present
if 'server' not in item:
module.fail_json(msg="server definitions must define the server field")
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except:
module.fail_json(msg="server port definitions must be integers")
else:
module.fail_json(msg="server definitions must define the port field")
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
def main():
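    """Create, update or delete the SLB service-group and its members on the
    target A10 device so that the configuration matches the requested state."""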
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True),
service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']),
service_group_method=dict(type='str', default='round-robin',
aliases=['method'],
choices=['round-robin',
'weighted-rr',
'least-connection',
'weighted-least-connection',
'service-least-connection',
'service-weighted-least-connection',
'fastest-response',
'least-request',
'round-robin-strict',
'src-ip-only-hash',
'src-ip-hash']),
servers=dict(type='list', aliases=['server', 'member'], default=[]),
            partition=dict(type='str', default=None),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_service_group = module.params['service_group']
slb_service_group_proto = module.params['service_group_protocol']
slb_service_group_method = module.params['service_group_method']
slb_servers = module.params['servers']
if slb_service_group is None:
module.fail_json(msg='service_group is required')
axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json'
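    # map the human-friendly load balancing method names to the integer codes aXAPI v2.1 expects in 'lb_method'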
load_balancing_methods = {'round-robin': 0,
'weighted-rr': 1,
'least-connection': 2,
'weighted-least-connection': 3,
'service-least-connection': 4,
'service-weighted-least-connection': 5,
'fastest-response': 6,
'least-request': 7,
'round-robin-strict': 8,
'src-ip-only-hash': 14,
'src-ip-hash': 15}
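    # aXAPI encodes the service-group protocol as an integer: 2 for TCP, 3 for UDP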
if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp':
protocol = 2
else:
protocol = 3
# validate the server data list structure
validate_servers(module, slb_servers)
json_post = {
'service_group': {
'name': slb_service_group,
'protocol': protocol,
'lb_method': load_balancing_methods[slb_service_group_method],
}
}
# first we authenticate to get a session id
session_url = axapi_authenticate(module, axapi_base_url, username, password)
# then we select the active-partition
slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
# then we check to see if the specified group exists
slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
slb_service_group_exist = not axapi_failure(slb_result)
changed = False
if state == 'present':
# before creating/updating we need to validate that servers
# defined in the servers list exist to prevent errors
checked_servers = []
for server in slb_servers:
result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']}))
if axapi_failure(result):
module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server'])
checked_servers.append(server['server'])
if not slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
else:
# check to see if the service group definition without the
# server members is different, and update that individually
# if it needs it
do_update = False
for field in VALID_SERVICE_GROUP_FIELDS:
if json_post['service_group'][field] != slb_result['service_group'][field]:
do_update = True
break
if do_update:
result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg=result['response']['err']['msg'])
changed = True
# next we pull the defined list of servers out of the returned
# results to make it a bit easier to iterate over
defined_servers = slb_result.get('service_group', {}).get('member_list', [])
# next we add/update new member servers from the user-specified
# list if they're different or not on the target device
for server in slb_servers:
found = False
different = False
for def_server in defined_servers:
if server['server'] == def_server['server']:
found = True
for valid_field in VALID_SERVER_FIELDS:
if server[valid_field] != def_server[valid_field]:
different = True
break
if found or different:
break
# add or update as required
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data))
changed = True
elif different:
result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data))
changed = True
# finally, remove any servers that are on the target
# device but were not specified in the list given
for server in defined_servers:
found = False
for slb_server in slb_servers:
if server['server'] == slb_server['server']:
found = True
break
# remove if not found
server_data = {
"name": slb_service_group,
"member": server,
}
if not found:
result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
else:
result = slb_result
elif state == 'absent':
if slb_service_group_exist:
result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
changed = True
else:
result = dict(msg="the service group was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
# standard ansible module imports
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled
if __name__ == '__main__':
main()
| kbrebanov/ansible-modules-extras | network/a10/a10_service_group.py | Python | gpl-3.0 | 13,531 |