from __future__ import absolute_import, division, print_function
import click
import os
import datetime
from incremental import Version
from twisted.python.filepath import FilePath
_VERSIONPY_TEMPLATE = '''"""
Provides %s version information.
"""
# This file is auto-generated! Do not edit!
# Use `python -m incremental.update %s` to change this file.
from incremental import Version
__version__ = %s
__all__ = ["__version__"]
'''
_YEAR_START = 2000
def _findPath(path, package):
cwd = FilePath(path)
src_dir = cwd.child("src").child(package.lower())
current_dir = cwd.child(package.lower())
if src_dir.isdir():
return src_dir
elif current_dir.isdir():
return current_dir
else:
raise ValueError(("Can't find under `./src` or `./`. Check the "
"package name is right (note that we expect your "
"package name to be lower cased), or pass it using "
"'--path'."))
def _existing_version(path):
version_info = {}
with path.child("_version.py").open('r') as f:
exec(f.read(), version_info)
return version_info["__version__"]
def _run(package, path, newversion, patch, rc, dev, create,
_date=None, _getcwd=None, _print=print):
if not _getcwd:
_getcwd = os.getcwd
if not _date:
_date = datetime.date.today()
if type(package) != str:
package = package.encode('utf8')
if not path:
path = _findPath(_getcwd(), package)
else:
path = FilePath(path)
if newversion and patch or newversion and dev or newversion and rc:
raise ValueError("Only give --newversion")
if dev and patch or dev and rc:
raise ValueError("Only give --dev")
if create and dev or create and patch or create and rc or \
create and newversion:
raise ValueError("Only give --create")
if newversion:
from pkg_resources import parse_version
existing = _existing_version(path)
st_version = parse_version(newversion)._version
release = list(st_version.release)
if len(release) == 1:
release.append(0)
if len(release) == 2:
release.append(0)
v = Version(
package, *release,
release_candidate=st_version.pre[1] if st_version.pre else None,
dev=st_version.dev[1] if st_version.dev else None)
elif create:
v = Version(package, _date.year - _YEAR_START, _date.month, 0)
existing = v
elif rc and not patch:
existing = _existing_version(path)
if existing.release_candidate:
v = Version(package, existing.major, existing.minor,
existing.micro, existing.release_candidate + 1)
else:
v = Version(package, _date.year - _YEAR_START, _date.month, 0, 1)
elif patch:
if rc:
rc = 1
else:
rc = None
existing = _existing_version(path)
v = Version(package, existing.major, existing.minor,
existing.micro + 1, rc)
elif dev:
existing = _existing_version(path)
if existing.dev is None:
_dev = 0
else:
_dev = existing.dev + 1
v = Version(package, existing.major, existing.minor,
existing.micro, existing.release_candidate, dev=_dev)
else:
existing = _existing_version(path)
if existing.release_candidate:
v = Version(package,
existing.major, existing.minor, existing.micro)
else:
raise ValueError(
"You need to issue a rc before updating the major/minor")
NEXT_repr = repr(Version(package, "NEXT", 0, 0)).split("#")[0]
NEXT_repr_bytes = NEXT_repr.encode('utf8')
version_repr = repr(v).split("#")[0]
version_repr_bytes = version_repr.encode('utf8')
existing_version_repr = repr(existing).split("#")[0]
existing_version_repr_bytes = existing_version_repr.encode('utf8')
_print("Updating codebase to %s" % (v.public()))
for x in path.walk():
if not x.isfile():
continue
original_content = x.getContent()
content = original_content
# Replace previous release_candidate calls to the new one
if existing.release_candidate:
content = content.replace(existing_version_repr_bytes,
version_repr_bytes)
content = content.replace(
(package.encode('utf8') + b" " +
existing.public().encode('utf8')),
(package.encode('utf8') + b" " +
v.public().encode('utf8')))
# Replace NEXT Version calls with the new one
content = content.replace(NEXT_repr_bytes,
version_repr_bytes)
content = content.replace(NEXT_repr_bytes.replace(b"'", b'"'),
version_repr_bytes)
# Replace <package> NEXT with <package> <public>
content = content.replace(package.encode('utf8') + b" NEXT",
(package.encode('utf8') + b" " +
v.public().encode('utf8')))
if content != original_content:
_print("Updating %s" % (x.path,))
with x.open('w') as f:
f.write(content)
_print("Updating %s/_version.py" % (path.path))
with path.child("_version.py").open('w') as f:
f.write(
(_VERSIONPY_TEMPLATE % (
package, package, version_repr)).encode('utf8'))
@click.command()
@click.argument('package')
@click.option('--path', default=None)
@click.option('--newversion', default=None)
@click.option('--patch', is_flag=True)
@click.option('--rc', is_flag=True)
@click.option('--dev', is_flag=True)
@click.option('--create', is_flag=True)
def run(*args, **kwargs):
return _run(*args, **kwargs)
if __name__ == '__main__': # pragma: no cover
run()
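# Illustrative usage of this CLI, not part of the original module (the package
# name "mypkg" below is hypothetical):
#
#   python -m incremental.update mypkg --create           # write an initial _version.py
#   python -m incremental.update mypkg --rc               # start or bump a release candidate
#   python -m incremental.update mypkg --newversion 17.1.0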
| {
"content_hash": "2bb50314ad7e6dc4c2896160e5044298",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 78,
"avg_line_length": 29.5,
"alnum_prop": 0.5655751193022873,
"repo_name": "EricMuller/mynotes-backend",
"id": "bc10d3768382ea235a16bde905e01f84c96eeeeb",
"size": "6150",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/.eggs/incremental-16.10.1-py3.5.egg/incremental/update.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
} |
import os
import Queue
import sys
import subprocess
import threading
import unittest
import urllib2
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(TOOLS_DIR)))
sys.path.append(TOOLS_DIR)
import httpd
from mock import patch, Mock
class HTTPDTest(unittest.TestCase):
def setUp(self):
patcher = patch('BaseHTTPServer.BaseHTTPRequestHandler.log_message')
patcher.start()
self.addCleanup(patcher.stop)
self.server = httpd.LocalHTTPServer('.', 0)
self.addCleanup(self.server.Shutdown)
def testQuit(self):
urllib2.urlopen(self.server.GetURL('?quit=1'))
self.server.process.join(10) # Wait 10 seconds for the process to finish.
self.assertFalse(self.server.process.is_alive())
class MainTest(unittest.TestCase):
@patch('httpd.LocalHTTPServer')
@patch('sys.stdout', Mock())
def testArgs(self, mock_server_ctor):
mock_server = Mock()
mock_server_ctor.return_value = mock_server
httpd.main(['-p', '123', '-C', 'dummy'])
mock_server_ctor.assert_called_once_with('dummy', 123)
class RunTest(unittest.TestCase):
def setUp(self):
self.process = None
def tearDown(self):
if self.process and self.process.returncode is None:
self.process.kill()
@staticmethod
def _SubprocessThread(process, queue):
stdout, stderr = process.communicate()
queue.put((process.returncode, stdout, stderr))
def _Run(self, args=None, timeout=None):
args = args or []
cmd = [sys.executable, os.path.join(TOOLS_DIR, 'run.py'), '--port=5555']
cmd.extend(args)
self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
queue = Queue.Queue()
thread = threading.Thread(target=RunTest._SubprocessThread,
args=(self.process, queue))
thread.daemon = True
thread.start()
thread.join(timeout)
self.assertFalse(thread.is_alive(), "Thread still running after timeout")
returncode, stdout, stderr = queue.get(False)
return returncode, stdout, stderr
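  # Illustrative call (the arguments are hypothetical, not from the original
  # tests): run run.py against a page with a 30 second timeout.
  #
  #   returncode, stdout, stderr = self._Run(['-P', 'page.html'], timeout=30)
  #
  # The thread/queue pattern above is simply a portable way to put a timeout
  # around Popen.communicate() on Python 2.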
@staticmethod
def _GetChromeMockArgs(page, http_request_type, sleep,
expect_to_be_killed=True):
args = []
if page:
args.extend(['-P', page])
args.append('--')
args.extend([sys.executable, os.path.join(SCRIPT_DIR, 'chrome_mock.py')])
if http_request_type:
args.append('--' + http_request_type)
if sleep:
args.extend(['--sleep', str(sleep)])
if expect_to_be_killed:
args.append('--expect-to-be-killed')
return args
def testQuit(self):
args = self._GetChromeMockArgs('?quit=1', 'get', sleep=10)
rtn, stdout, _ = self._Run(args, timeout=20)
self.assertEqual(rtn, 0)
self.assertIn('Starting', stdout)
self.assertNotIn('Expected to be killed', stdout)
def testSubprocessDies(self):
args = self._GetChromeMockArgs(page=None, http_request_type=None, sleep=0,
expect_to_be_killed=False)
returncode, stdout, _ = self._Run(args, timeout=10)
self.assertNotEqual(-1, returncode)
self.assertIn('Starting', stdout)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "63eaab1278c5e38996224e854f249512",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 30.50467289719626,
"alnum_prop": 0.6602328431372549,
"repo_name": "chromium/chromium",
"id": "2b95875898de969fd92e17239185b66d30930b58",
"size": "3429",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "native_client_sdk/src/tools/tests/httpd_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import typing as t
from contextlib import contextmanager
from contextlib import ExitStack
from copy import copy
from types import TracebackType
import werkzeug.test
from click.testing import CliRunner
from werkzeug.test import Client
from werkzeug.urls import url_parse
from werkzeug.wrappers import Request as BaseRequest
from .cli import ScriptInfo
from .globals import _cv_request
from .sessions import SessionMixin
if t.TYPE_CHECKING: # pragma: no cover
from werkzeug.test import TestResponse
from .app import Flask
class EnvironBuilder(werkzeug.test.EnvironBuilder):
"""An :class:`~werkzeug.test.EnvironBuilder`, that takes defaults from the
application.
:param app: The Flask application to configure the environment from.
:param path: URL path being requested.
:param base_url: Base URL where the app is being served, which
``path`` is relative to. If not given, built from
:data:`PREFERRED_URL_SCHEME`, ``subdomain``,
:data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
:param subdomain: Subdomain name to append to :data:`SERVER_NAME`.
:param url_scheme: Scheme to use instead of
:data:`PREFERRED_URL_SCHEME`.
:param json: If given, this is serialized as JSON and passed as
``data``. Also defaults ``content_type`` to
``application/json``.
:param args: other positional arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
:param kwargs: other keyword arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
"""
def __init__(
self,
app: "Flask",
path: str = "/",
base_url: t.Optional[str] = None,
subdomain: t.Optional[str] = None,
url_scheme: t.Optional[str] = None,
*args: t.Any,
**kwargs: t.Any,
) -> None:
assert not (base_url or subdomain or url_scheme) or (
base_url is not None
) != bool(
subdomain or url_scheme
), 'Cannot pass "subdomain" or "url_scheme" with "base_url".'
if base_url is None:
http_host = app.config.get("SERVER_NAME") or "localhost"
app_root = app.config["APPLICATION_ROOT"]
if subdomain:
http_host = f"{subdomain}.{http_host}"
if url_scheme is None:
url_scheme = app.config["PREFERRED_URL_SCHEME"]
url = url_parse(path)
base_url = (
f"{url.scheme or url_scheme}://{url.netloc or http_host}"
f"/{app_root.lstrip('/')}"
)
path = url.path
if url.query:
sep = b"?" if isinstance(url.query, bytes) else "?"
path += sep + url.query
self.app = app
super().__init__(path, base_url, *args, **kwargs)
def json_dumps(self, obj: t.Any, **kwargs: t.Any) -> str: # type: ignore
"""Serialize ``obj`` to a JSON-formatted string.
The serialization will be configured according to the config associated
with this EnvironBuilder's ``app``.
"""
return self.app.json.dumps(obj, **kwargs)
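# A minimal sketch, not part of Flask: how the EnvironBuilder above can be
# driven directly in a test. The helper name, the path, and the query string
# are assumptions made up for the example.
def _example_build_environ(app: "Flask") -> dict:
    builder = EnvironBuilder(app, path="/search", query_string={"q": "flask"})
    try:
        # Build a plain WSGI environ dict using the app's configured defaults.
        return builder.get_environ()
    finally:
        builder.close()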
class FlaskClient(Client):
"""Works like a regular Werkzeug test client but has knowledge about
Flask's contexts to defer the cleanup of the request context until
the end of a ``with`` block. For general information about how to
use this class refer to :class:`werkzeug.test.Client`.
.. versionchanged:: 0.12
`app.test_client()` includes preset default environment, which can be
set after instantiation of the `app.test_client()` object in
`client.environ_base`.
Basic usage is outlined in the :doc:`/testing` chapter.
"""
application: "Flask"
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
super().__init__(*args, **kwargs)
self.preserve_context = False
self._new_contexts: t.List[t.ContextManager[t.Any]] = []
self._context_stack = ExitStack()
self.environ_base = {
"REMOTE_ADDR": "127.0.0.1",
"HTTP_USER_AGENT": f"werkzeug/{werkzeug.__version__}",
}
@contextmanager
def session_transaction(
self, *args: t.Any, **kwargs: t.Any
) -> t.Generator[SessionMixin, None, None]:
"""When used in combination with a ``with`` statement this opens a
session transaction. This can be used to modify the session that
the test client uses. Once the ``with`` block is left the session is
stored back.
::
with client.session_transaction() as session:
session['value'] = 42
        Internally this is implemented by going through a temporary test
        request context, and since session handling could depend on
        request variables, this function accepts the same arguments as
        :meth:`~flask.Flask.test_request_context`, which are directly
        passed through.
"""
if self.cookie_jar is None:
raise RuntimeError(
"Session transactions only make sense with cookies enabled."
)
app = self.application
environ_overrides = kwargs.setdefault("environ_overrides", {})
self.cookie_jar.inject_wsgi(environ_overrides)
outer_reqctx = _cv_request.get(None)
with app.test_request_context(*args, **kwargs) as c:
session_interface = app.session_interface
sess = session_interface.open_session(app, c.request)
if sess is None:
raise RuntimeError(
"Session backend did not open a session. Check the configuration"
)
# Since we have to open a new request context for the session
            # handling we want to make sure that we hide our own context
# from the caller. By pushing the original request context
# (or None) on top of this and popping it we get exactly that
# behavior. It's important to not use the push and pop
# methods of the actual request context object since that would
# mean that cleanup handlers are called
token = _cv_request.set(outer_reqctx) # type: ignore[arg-type]
try:
yield sess
finally:
_cv_request.reset(token)
resp = app.response_class()
if not session_interface.is_null_session(sess):
session_interface.save_session(app, sess, resp)
headers = resp.get_wsgi_headers(c.request.environ)
self.cookie_jar.extract_wsgi(c.request.environ, headers)
def _copy_environ(self, other):
out = {**self.environ_base, **other}
if self.preserve_context:
out["werkzeug.debug.preserve_context"] = self._new_contexts.append
return out
def _request_from_builder_args(self, args, kwargs):
kwargs["environ_base"] = self._copy_environ(kwargs.get("environ_base", {}))
builder = EnvironBuilder(self.application, *args, **kwargs)
try:
return builder.get_request()
finally:
builder.close()
def open(
self,
*args: t.Any,
buffered: bool = False,
follow_redirects: bool = False,
**kwargs: t.Any,
) -> "TestResponse":
if args and isinstance(
args[0], (werkzeug.test.EnvironBuilder, dict, BaseRequest)
):
if isinstance(args[0], werkzeug.test.EnvironBuilder):
builder = copy(args[0])
builder.environ_base = self._copy_environ(builder.environ_base or {})
request = builder.get_request()
elif isinstance(args[0], dict):
request = EnvironBuilder.from_environ(
args[0], app=self.application, environ_base=self._copy_environ({})
).get_request()
else:
# isinstance(args[0], BaseRequest)
request = copy(args[0])
request.environ = self._copy_environ(request.environ)
else:
# request is None
request = self._request_from_builder_args(args, kwargs)
# Pop any previously preserved contexts. This prevents contexts
# from being preserved across redirects or multiple requests
# within a single block.
self._context_stack.close()
response = super().open(
request,
buffered=buffered,
follow_redirects=follow_redirects,
)
response.json_module = self.application.json # type: ignore[misc]
# Re-push contexts that were preserved during the request.
while self._new_contexts:
cm = self._new_contexts.pop()
self._context_stack.enter_context(cm)
return response
def __enter__(self) -> "FlaskClient":
if self.preserve_context:
raise RuntimeError("Cannot nest client invocations")
self.preserve_context = True
return self
def __exit__(
self,
exc_type: t.Optional[type],
exc_value: t.Optional[BaseException],
tb: t.Optional[TracebackType],
) -> None:
self.preserve_context = False
self._context_stack.close()
class FlaskCliRunner(CliRunner):
"""A :class:`~click.testing.CliRunner` for testing a Flask app's
CLI commands. Typically created using
:meth:`~flask.Flask.test_cli_runner`. See :ref:`testing-cli`.
"""
def __init__(self, app: "Flask", **kwargs: t.Any) -> None:
self.app = app
super().__init__(**kwargs)
def invoke( # type: ignore
self, cli: t.Any = None, args: t.Any = None, **kwargs: t.Any
) -> t.Any:
"""Invokes a CLI command in an isolated environment. See
:meth:`CliRunner.invoke <click.testing.CliRunner.invoke>` for
full method documentation. See :ref:`testing-cli` for examples.
If the ``obj`` argument is not given, passes an instance of
:class:`~flask.cli.ScriptInfo` that knows how to load the Flask
app being tested.
:param cli: Command object to invoke. Default is the app's
:attr:`~flask.app.Flask.cli` group.
:param args: List of strings to invoke the command with.
:return: a :class:`~click.testing.Result` object.
"""
if cli is None:
cli = self.app.cli # type: ignore
if "obj" not in kwargs:
kwargs["obj"] = ScriptInfo(create_app=lambda: self.app)
return super().invoke(cli, args, **kwargs)
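# A minimal sketch, not part of Flask: invoking a CLI command through the
# runner above. The command name "hello" is an assumption; it would have to be
# registered on the app via @app.cli.command().
def _example_invoke_cli(app: "Flask") -> int:
    runner = FlaskCliRunner(app)
    # ``cli`` defaults to app.cli, and ``obj`` defaults to a ScriptInfo that
    # loads this app, as documented in invoke() above.
    result = runner.invoke(args=["hello"])
    return result.exit_code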
| {
"content_hash": "ee7ffc841df373decc9512ddaea968b0",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 86,
"avg_line_length": 37.04895104895105,
"alnum_prop": 0.6006983767459418,
"repo_name": "pallets/flask",
"id": "ec9ebb9def2bc981f11da4384507260986e172e6",
"size": "10596",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/flask/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "405"
},
{
"name": "Python",
"bytes": "552628"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
import pymc
import theano.tensor as T
np.random.seed(42)
theta_true = (25, 0.5)
xdata = 100 * np.random.random(20)
ydata = theta_true[0] + theta_true[1] * xdata
# add scatter to points
xdata = np.random.normal(xdata, 10)
ydata = np.random.normal(ydata, 10)
data = {'x': xdata, 'y': ydata}
with pymc.Model() as model:
alpha = pymc.Uniform('intercept', -100, 100)
# Create custom densities
beta = pymc.DensityDist('slope', lambda value: -1.5 * T.log(1 + value**2), testval=0)
sigma = pymc.DensityDist('sigma', lambda value: -T.log(T.abs_(value)), testval=1)
# Create likelihood
like = pymc.Normal('y_est', mu=alpha + beta * xdata, sd=sigma, observed=ydata)
start = pymc.find_MAP()
step = pymc.NUTS(scaling=start) # Instantiate sampler
trace = pymc.sample(10000, step, start=start)
#################################################
# Create some convenience routines for plotting
# All functions below written by Jake Vanderplas
def compute_sigma_level(trace1, trace2, nbins=20):
"""From a set of traces, bin by number of standard deviations"""
L, xbins, ybins = np.histogram2d(trace1, trace2, nbins)
L[L == 0] = 1E-16
logL = np.log(L)
shape = L.shape
L = L.ravel()
# obtain the indices to sort and unsort the flattened array
i_sort = np.argsort(L)[::-1]
i_unsort = np.argsort(i_sort)
L_cumsum = L[i_sort].cumsum()
L_cumsum /= L_cumsum[-1]
xbins = 0.5 * (xbins[1:] + xbins[:-1])
ybins = 0.5 * (ybins[1:] + ybins[:-1])
return xbins, ybins, L_cumsum[i_unsort].reshape(shape)
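# A small sketch, not in the original script: the grid returned by
# compute_sigma_level is meant to be fed straight into a contour plot, which is
# exactly what plot_MCMC_trace below does. ``trace1``/``trace2`` are assumed to
# be 1-D arrays of posterior samples.
def example_sigma_contour(ax, trace1, trace2):
    xbins, ybins, sigma = compute_sigma_level(trace1, trace2)
    # Draw the approximate 1-sigma and 2-sigma credible-region contours.
    ax.contour(xbins, ybins, sigma.T, levels=[0.683, 0.955])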
def plot_MCMC_trace(ax, xdata, ydata, trace, scatter=False, **kwargs):
"""Plot traces and contours"""
xbins, ybins, sigma = compute_sigma_level(trace[0], trace[1])
ax.contour(xbins, ybins, sigma.T, levels=[0.683, 0.955], **kwargs)
if scatter:
ax.plot(trace[0], trace[1], ',k', alpha=0.1)
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$\beta$')
def plot_MCMC_model(ax, xdata, ydata, trace):
"""Plot the linear model and 2sigma contours"""
ax.plot(xdata, ydata, 'ok')
alpha, beta = trace[:2]
xfit = np.linspace(-20, 120, 10)
yfit = alpha[:, None] + beta[:, None] * xfit
mu = yfit.mean(0)
sig = 2 * yfit.std(0)
ax.plot(xfit, mu, '-k')
ax.fill_between(xfit, mu - sig, mu + sig, color='lightgray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_MCMC_results(xdata, ydata, trace, colors='k'):
"""Plot both the trace and the model together"""
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
plot_MCMC_trace(ax[0], xdata, ydata, trace, True, colors=colors)
plot_MCMC_model(ax[1], xdata, ydata, trace)
pymc_trace = [trace['intercept'],
trace['slope'],
trace['sigma']]
plot_MCMC_results(xdata, ydata, pymc_trace)
plt.show()
| {
"content_hash": "70ec2fd4f72a0a7e0db5ae120e372c5a",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 89,
"avg_line_length": 30.47872340425532,
"alnum_prop": 0.6178010471204188,
"repo_name": "nmmarquez/pymc",
"id": "bb50417eba3bf4d61c564536f724abacc22b5e0f",
"size": "3251",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pymc/examples/custom_dists.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Brushing Scatter Plot to show data on a table
---------------------------------------------
A scatter plot of the cars dataset, with data tables for horsepower, MPG, and origin.
The tables update to reflect the selection on the scatter plot.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.cars()
# Brush for selection
brush = alt.selection(type='interval')
# Scatter Plot
points = alt.Chart(source).mark_point().encode(
x='Horsepower:Q',
y='Miles_per_Gallon:Q',
color=alt.condition(brush, 'Cylinders:O', alt.value('grey'))
).add_selection(brush)
# Base chart for data tables
ranked_text = alt.Chart(source).mark_text().encode(
y=alt.Y('row_number:O',axis=None)
).transform_window(
row_number='row_number()'
).transform_filter(
brush
).transform_window(
rank='rank(row_number)'
).transform_filter(
alt.datum.rank<20
)
# Data Tables
horsepower = ranked_text.encode(text='Horsepower:N').properties(title='Horsepower')
mpg = ranked_text.encode(text='Miles_per_Gallon:N').properties(title='MPG')
origin = ranked_text.encode(text='Origin:N').properties(title='Origin')
text = alt.hconcat(horsepower, mpg, origin) # Combine data tables
# Build chart
alt.hconcat(
points,
text
).resolve_legend(
color="independent"
)
| {
"content_hash": "bc544a8e526d2376a92abbac58c7e75f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 86,
"avg_line_length": 27.816326530612244,
"alnum_prop": 0.6647101980924431,
"repo_name": "jakevdp/altair",
"id": "3073f3397927072c05fefbf870b989e4c75401e8",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altair/examples/scatter_linked_table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "5353045"
},
{
"name": "TeX",
"bytes": "2684"
}
],
"symlink_target": ""
} |
import re
from knack.util import CLIError
from knack.log import get_logger
from .custom import get_docker_command
from ._docker_utils import _get_aad_token
from .helm import get_helm_command
from ._utils import get_registry_by_name, resolve_identity_client_id
from ._errors import ErrorClass
from ._format import add_timestamp
logger = get_logger(__name__)
DOCKER_PULL_SUCCEEDED = "Downloaded newer image for {}"
DOCKER_IMAGE_UP_TO_DATE = "Image is up to date for {}"
IMAGE = "mcr.microsoft.com/mcr/hello-world:latest"
FAQ_MESSAGE = "\nPlease refer to https://aka.ms/acr/health-check for more information."
ERROR_MSG_DEEP_LINK = "\nPlease refer to https://aka.ms/acr/errors#{} for more information."
MIN_HELM_VERSION = "2.11.0"
HELM_VERSION_REGEX = re.compile(r'(SemVer|Version):"v([.\d]+)"')
ACR_CHECK_HEALTH_MSG = "Try running 'az acr check-health -n {} --yes' to diagnose this issue."
RECOMMENDED_NOTARY_VERSION = "0.6.0"
NOTARY_VERSION_REGEX = re.compile(r'Version:\s+([.\d]+)')
DOCKER_PULL_WRONG_PLATFORM = 'cannot be used on this platform'
# Utilities functions
def print_pass(message):
logger.warning("%s : OK", str(message))
def _handle_error(error, ignore_errors):
if ignore_errors:
logger.error(error.get_error_message())
else:
error_msg = ERROR_MSG_DEEP_LINK.format(error.error_title.lower())
raise CLIError(error.get_error_message(error_msg))
def _subprocess_communicate(command_parts, shell=False):
from subprocess import PIPE, Popen, CalledProcessError
output, stderr = "", ""
try:
p = Popen(command_parts, stdout=PIPE, stderr=PIPE, shell=shell)
output, stderr = p.communicate()
output = output.decode('UTF-8').rstrip()
stderr = stderr.decode('UTF-8').rstrip()
except CalledProcessError as e:
stderr = str(e)
warning = None
if stderr.lower().startswith("warning"):
warning = stderr
stderr = None
if stderr:
stderr = "Failed to run command '{}'. {}".format(
' '.join(command_parts),
stderr
)
return output, warning, stderr
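# A minimal sketch, not part of the module: how the helper above is typically
# consumed by the checks below, here for a hypothetical plain "docker version"
# probe.
def _example_probe_docker_version():
    output, warning, stderr = _subprocess_communicate(["docker", "version"])
    if warning:
        logger.warning(warning)
    # ``stderr`` is non-empty when the command failed; ``output`` holds stdout.
    return output, stderr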
# Checks for the environment
# Checks docker command, docker daemon, docker version and docker pull
def _get_docker_status_and_version(ignore_errors, yes):
from ._errors import DOCKER_DAEMON_ERROR, DOCKER_PULL_ERROR, DOCKER_VERSION_ERROR
# Docker command and docker daemon check
docker_command, error = get_docker_command(is_diagnostics_context=True)
docker_daemon_available = True
if error:
_handle_error(error, ignore_errors)
if error.error_title != DOCKER_DAEMON_ERROR.error_title:
return # We cannot proceed if the error is unexpected or with docker command
docker_daemon_available = False
if docker_daemon_available:
logger.warning("Docker daemon status: available")
# Docker version check
output, warning, stderr = _subprocess_communicate(
[docker_command, "version", "--format", "'Docker version {{.Server.Version}}, "
"build {{.Server.GitCommit}}, platform {{.Server.Os}}/{{.Server.Arch}}'"])
if stderr:
_handle_error(DOCKER_VERSION_ERROR.append_error_message(stderr), ignore_errors)
else:
if warning:
logger.warning(warning)
logger.warning("Docker version: %s", output)
# Docker pull check - only if docker daemon is available
if docker_daemon_available:
if not yes:
from knack.prompting import prompt_y_n
confirmation = prompt_y_n("This will pull the image {}. Proceed?".format(IMAGE))
if not confirmation:
logger.warning("Skipping pull check.")
return
output, warning, stderr = _subprocess_communicate([docker_command, "pull", IMAGE])
if stderr:
if DOCKER_PULL_WRONG_PLATFORM in stderr:
print_pass("Docker pull of '{}'".format(IMAGE))
logger.warning("Image '%s' can be pulled but cannot be used on this platform", IMAGE)
else:
_handle_error(DOCKER_PULL_ERROR.append_error_message(stderr), ignore_errors)
else:
if warning:
logger.warning(warning)
if output.find(DOCKER_PULL_SUCCEEDED.format(IMAGE)) != -1 or \
output.find(DOCKER_IMAGE_UP_TO_DATE.format(IMAGE)) != -1:
print_pass("Docker pull of '{}'".format(IMAGE))
else:
_handle_error(DOCKER_PULL_ERROR, ignore_errors)
# Get current CLI version
def _get_cli_version():
from azure.cli.core import __version__ as core_version
logger.warning('Azure CLI version: %s', core_version)
# Get helm versions
def _get_helm_version(ignore_errors):
from ._errors import HELM_VERSION_ERROR
from packaging.version import parse # pylint: disable=import-error,no-name-in-module
# Helm command check
helm_command, error = get_helm_command(is_diagnostics_context=True)
if error:
_handle_error(error, ignore_errors)
return
# Helm version check
output, warning, stderr = _subprocess_communicate([helm_command, "version", "--client"])
if stderr:
_handle_error(HELM_VERSION_ERROR.append_error_message(stderr), ignore_errors)
return
if warning:
logger.warning(warning)
# Retrieve the helm version if regex pattern is found
match_obj = HELM_VERSION_REGEX.search(output)
if match_obj:
output = match_obj.group(2)
logger.warning("Helm version: %s", output)
# Display an error message if the current helm version < min required version
if match_obj and parse(output) < parse(MIN_HELM_VERSION):
obsolete_ver_error = HELM_VERSION_ERROR.set_error_message(
"Current Helm client version is not recommended. Please upgrade your Helm client to at least version {}."
.format(MIN_HELM_VERSION))
_handle_error(obsolete_ver_error, ignore_errors)
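# A minimal sketch (the sample string is an assumption, not real command
# output): how HELM_VERSION_REGEX extracts the client version that
# _get_helm_version compares against MIN_HELM_VERSION.
def _example_parse_helm_version():
    sample = 'Client: &version.Version{SemVer:"v2.16.1", GitCommit:"abc"}'
    match_obj = HELM_VERSION_REGEX.search(sample)
    # group(2) is the numeric part captured after the leading "v".
    return match_obj.group(2) if match_obj else None  # -> "2.16.1"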
def _get_notary_version(ignore_errors):
from ._errors import NOTARY_VERSION_ERROR
from .notary import get_notary_command
from packaging.version import parse # pylint: disable=import-error,no-name-in-module
# Notary command check
notary_command, error = get_notary_command(is_diagnostics_context=True)
if error:
_handle_error(error, ignore_errors)
return
# Notary version check
output, warning, stderr = _subprocess_communicate([notary_command, "version"])
if stderr:
_handle_error(NOTARY_VERSION_ERROR.append_error_message(stderr), ignore_errors)
return
if warning:
logger.warning(warning)
# Retrieve the notary version if regex pattern is found
match_obj = NOTARY_VERSION_REGEX.search(output)
if match_obj:
output = match_obj.group(1)
logger.warning("Notary version: %s", output)
# Display error if the current version does not match the recommended version
if match_obj and parse(output) != parse(RECOMMENDED_NOTARY_VERSION):
version_msg = "upgrade"
if parse(output) > parse(RECOMMENDED_NOTARY_VERSION):
version_msg = "downgrade"
obsolete_ver_error = NOTARY_VERSION_ERROR.set_error_message(
"Current notary version is not recommended. Please {} your notary client to version {}."
.format(version_msg, RECOMMENDED_NOTARY_VERSION))
_handle_error(obsolete_ver_error, ignore_errors)
# Checks for the connectivity
# Check DNS lookup and access to challenge endpoint
def _get_registry_status(login_server, registry_name, ignore_errors):
import socket
registry_ip = None
try:
registry_ip = socket.gethostbyname(login_server)
except (socket.gaierror, UnicodeError):
# capture UnicodeError for https://github.com/Azure/azure-cli/issues/12936
pass
if not registry_ip:
from ._errors import CONNECTIVITY_DNS_ERROR
_handle_error(CONNECTIVITY_DNS_ERROR.format_error_message(login_server), ignore_errors)
return False
print_pass("DNS lookup to {} at IP {}".format(login_server, registry_ip))
import requests
from requests.exceptions import SSLError, RequestException
from azure.cli.core.util import should_disable_connection_verify
try:
request_url = 'https://' + login_server + '/v2/'
logger.debug(add_timestamp("Sending a HTTP GET request to {}".format(request_url)))
challenge = requests.get(request_url, verify=(not should_disable_connection_verify()))
except SSLError:
from ._errors import CONNECTIVITY_SSL_ERROR
_handle_error(CONNECTIVITY_SSL_ERROR.format_error_message(login_server), ignore_errors)
return False
except RequestException:
from ._errors import CONNECTIVITY_CHALLENGE_ERROR
_handle_error(CONNECTIVITY_CHALLENGE_ERROR.format_error_message(login_server), ignore_errors)
return False
if challenge.status_code == 403:
from ._errors import CONNECTIVITY_FORBIDDEN_ERROR
_handle_error(CONNECTIVITY_FORBIDDEN_ERROR.format_error_message(login_server, registry_name), ignore_errors)
return False
return True
def _get_endpoint_and_token_status(cmd, login_server, ignore_errors):
from ._errors import CONNECTIVITY_CHALLENGE_ERROR, CONNECTIVITY_AAD_LOGIN_ERROR, \
CONNECTIVITY_REFRESH_TOKEN_ERROR, CONNECTIVITY_ACCESS_TOKEN_ERROR
# Check access to login endpoint
url = 'https://' + login_server + '/v2/'
result_from_token = _get_aad_token(cmd.cli_ctx, login_server, False, is_diagnostics_context=True)
if isinstance(result_from_token, ErrorClass):
if result_from_token.error_title == CONNECTIVITY_CHALLENGE_ERROR.error_title:
_handle_error(result_from_token, ignore_errors)
return
print_pass("Challenge endpoint {}".format(url))
if result_from_token.error_title == CONNECTIVITY_AAD_LOGIN_ERROR.error_title:
_handle_error(result_from_token, ignore_errors)
return
if result_from_token.error_title == CONNECTIVITY_REFRESH_TOKEN_ERROR.error_title:
_handle_error(result_from_token, ignore_errors)
return
print_pass("Fetch refresh token for registry '{}'".format(login_server))
if result_from_token.error_title == CONNECTIVITY_ACCESS_TOKEN_ERROR.error_title:
_handle_error(result_from_token, ignore_errors)
return
print_pass("Fetch access token for registry '{}'".format(login_server))
return
# If return is not of type ErrorClass, then it is the token
print_pass("Challenge endpoint {}".format(url))
print_pass("Fetch refresh token for registry '{}'".format(login_server))
print_pass("Fetch access token for registry '{}'".format(login_server))
def _check_registry_health(cmd, registry_name, ignore_errors):
from azure.cli.core.profiles import ResourceType
if registry_name is None:
logger.warning("Registry name must be provided to check connectivity.")
return
registry = None
# Connectivity
try:
registry, _ = get_registry_by_name(cmd.cli_ctx, registry_name)
login_server = registry.login_server.rstrip('/')
except CLIError:
from ._docker_utils import get_login_server_suffix
suffix = get_login_server_suffix(cmd.cli_ctx)
if not suffix:
from ._errors import LOGIN_SERVER_ERROR
_handle_error(LOGIN_SERVER_ERROR.format_error_message(registry_name), ignore_errors)
return
login_server = registry_name + suffix
status_validated = _get_registry_status(login_server, registry_name, ignore_errors)
if status_validated:
_get_endpoint_and_token_status(cmd, login_server, ignore_errors)
if cmd.supported_api_version(min_api='2020-11-01-preview', resource_type=ResourceType.MGMT_CONTAINERREGISTRY): # pylint: disable=too-many-nested-blocks
# CMK settings
if registry and registry.encryption and registry.encryption.key_vault_properties: # pylint: disable=too-many-nested-blocks
client_id = registry.encryption.key_vault_properties.identity
valid_identity = False
if registry.identity:
valid_identity = ((client_id == 'system') and
bool(registry.identity.principal_id)) # use system identity?
if not valid_identity and registry.identity.user_assigned_identities:
for k, v in registry.identity.user_assigned_identities.items():
if v.client_id == client_id:
from msrestazure.azure_exceptions import CloudError
try:
valid_identity = (resolve_identity_client_id(cmd.cli_ctx, k) == client_id)
except CloudError:
pass
if not valid_identity:
from ._errors import CMK_MANAGED_IDENTITY_ERROR
_handle_error(CMK_MANAGED_IDENTITY_ERROR.format_error_message(registry_name), ignore_errors)
def _check_private_endpoint(cmd, registry_name, vnet_of_private_endpoint): # pylint: disable=too-many-locals, too-many-statements
import socket
from msrestazure.tools import parse_resource_id, is_valid_resource_id, resource_id
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
if registry_name is None:
raise CLIError("Registry name must be provided to verify DNS routings of its private endpoints")
registry = None
# retrieve registry
registry, _ = get_registry_by_name(cmd.cli_ctx, registry_name)
if not registry.private_endpoint_connections:
raise CLIError('Registry "{}" doesn\'t have private endpoints to verify DNS routings.'.format(registry_name))
if is_valid_resource_id(vnet_of_private_endpoint):
res = parse_resource_id(vnet_of_private_endpoint)
if not res.get("type") or res.get("type").lower() != 'virtualnetworks' or not res.get('name'):
raise CLIError('"{}" is not a valid resource id of a virtual network'.format(vnet_of_private_endpoint))
else:
res = parse_resource_id(registry.id)
vnet_of_private_endpoint = resource_id(name=vnet_of_private_endpoint, resource_group=res['resource_group'],
namespace='Microsoft.Network', type='virtualNetworks',
subscription=res['subscription'])
# retrieve FQDNs for registry and its data endpoint
pe_ids = [e.private_endpoint.id for e in registry.private_endpoint_connections if e.private_endpoint]
dns_mappings = {}
for pe_id in pe_ids:
res = parse_resource_id(pe_id)
network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK,
subscription_id=res['subscription'])
pe = network_client.private_endpoints.get(res['resource_group'], res['name'])
if pe.subnet.id.lower().startswith(vnet_of_private_endpoint.lower()):
nic_id = pe.network_interfaces[0].id
nic_res = parse_resource_id(nic_id)
nic = network_client.network_interfaces.get(nic_res['resource_group'], nic_res['name'])
for dns_config in nic.ip_configurations:
if dns_config.private_link_connection_properties.fqdns[0] in dns_mappings:
err = ('Registry "{}" has more than one private endpoint in the vnet of "{}".'
' DNS routing will be unreliable')
raise CLIError(err.format(registry_name, vnet_of_private_endpoint))
dns_mappings[dns_config.private_link_connection_properties.fqdns[0]] = dns_config.private_ip_address
dns_ok = True
if not dns_mappings:
err = ('Registry "{}" doesn\'t have private endpoints in the vnet of "{}".'
               ' Please make sure you provided the correct vnet')
raise CLIError(err.format(registry_name, vnet_of_private_endpoint))
for fqdn in dns_mappings:
try:
result = socket.gethostbyname(fqdn)
if result != dns_mappings[fqdn]:
err = 'DNS routing to registry "%s" through private IP is incorrect. Expect: %s, Actual: %s'
logger.warning(err, registry_name, dns_mappings[fqdn], result)
dns_ok = False
except Exception as e: # pylint: disable=broad-except
logger.warning('Error resolving DNS for %s. Ex: %s', fqdn, e)
dns_ok = False
if dns_ok:
print_pass('DNS routing to private endpoint')
else:
raise CLIError('DNS routing verification failed')
# General command
def acr_check_health(cmd,  # pylint: disable=useless-return
vnet=None,
ignore_errors=False,
yes=False,
registry_name=None):
from azure.cli.core.util import in_cloud_console
in_cloud_console = in_cloud_console()
if in_cloud_console:
logger.warning("Environment checks are not supported in Azure Cloud Shell.")
else:
_get_docker_status_and_version(ignore_errors, yes)
_get_cli_version()
_check_registry_health(cmd, registry_name, ignore_errors)
if vnet:
_check_private_endpoint(cmd, registry_name, vnet)
if not in_cloud_console:
_get_helm_version(ignore_errors)
_get_notary_version(ignore_errors)
logger.warning(FAQ_MESSAGE)
| {
"content_hash": "a025df26d0752fcae6f29dcd7d518a94",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 156,
"avg_line_length": 41.548235294117646,
"alnum_prop": 0.652848567221656,
"repo_name": "yugangw-msft/azure-cli",
"id": "54ddc7ca2a070f0a023f3211c36b44cbe025cf5c",
"size": "18004",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/acr/check_health.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
import collections
import itertools
import json
import os
import posixpath
import re
import time
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.files.storage import default_storage as storage
from django.db import models, transaction
from django.dispatch import receiver
from django.db.models import Max, Q, signals as dbsignals
from django.utils.translation import trans_real as translation
import caching.base as caching
import commonware.log
import json_field
from django_statsd.clients import statsd
from jinja2.filters import do_dictsort
from tower import ugettext_lazy as _
from addons.utils import get_creatured_ids, get_featured_ids
import amo
import amo.models
from access import acl
from amo import helpers
from amo.decorators import use_master, write
from amo.fields import DecimalCharField
from amo.utils import (attach_trans_dict, cache_ns_key, chunked, find_language,
JSONEncoder, send_mail, slugify, sorted_groupby, timer,
to_language, urlparams)
from amo.urlresolvers import get_outgoing_url, reverse
from files.models import File
from reviews.models import Review
import sharing.utils as sharing
from stats.models import AddonShareCountTotal
from tags.models import Tag
from translations.fields import (LinkifiedField, PurifiedField, save_signal,
TranslatedField, Translation)
from translations.query import order_by_translation
from users.models import UserForeignKey, UserProfile
from versions.compare import version_int
from versions.models import inherit_nomination, Version
from . import query, signals
log = commonware.log.getLogger('z.addons')
def clean_slug(instance, slug_field='slug'):
"""Cleans a model instance slug.
This strives to be as generic as possible as it's used by Addons
and Collections, and maybe more in the future.
"""
slug = getattr(instance, slug_field, None) or instance.name
if not slug:
# Initialize the slug with what we have available: a name translation,
        # or the id of the instance, or as a last resort the model name.
translations = Translation.objects.filter(id=instance.name_id)
if translations.exists():
slug = translations[0]
elif instance.id:
slug = str(instance.id)
else:
slug = instance.__class__.__name__
max_length = instance._meta.get_field_by_name(slug_field)[0].max_length
slug = slugify(slug)[:max_length]
if BlacklistedSlug.blocked(slug):
slug = slug[:max_length - 1] + '~'
# The following trick makes sure we are using a manager that returns
# all the objects, as otherwise we could have a slug clash on our hands.
# Eg with the "Addon.objects" manager, which doesn't list deleted addons,
# we could have a "clean" slug which is in fact already assigned to an
# already existing (deleted) addon. Also, make sure we use the base class.
manager = models.Manager()
manager.model = instance._meta.proxy_for_model or instance.__class__
qs = manager.values_list(slug_field, flat=True) # Get list of all slugs.
if instance.id:
qs = qs.exclude(pk=instance.id) # Can't clash with itself.
# We first need to make sure there's a clash, before trying to find a
# suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
# available.
clash = qs.filter(**{slug_field: slug})
if clash.exists():
# Leave space for 99 clashes.
slug = slugify(slug)[:max_length - 2]
# There is a clash, so find a suffix that will make this slug unique.
lookup = {'%s__startswith' % slug_field: slug}
clashes = qs.filter(**lookup)
# Try numbers between 1 and the number of clashes + 1 (+ 1 because we
# start the range at 1, not 0):
# if we have two clashes "foo1" and "foo2", we need to try "foox"
# for x between 1 and 3 to be absolutely sure to find an available one.
for idx in range(1, len(clashes) + 2):
new = ('%s%s' % (slug, idx))[:max_length]
if new not in clashes:
slug = new
break
else:
# This could happen. The current implementation (using
# ``[:max_length -3]``) only works for the first 100 clashes in the
            # worst case (if the slug is equal to or longer than
# ``max_length - 3`` chars).
# After that, {verylongslug}-100 will be trimmed down to
# {verylongslug}-10, which is already assigned, but it's the last
# solution tested.
raise RuntimeError
setattr(instance, slug_field, slug)
return instance
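# A minimal sketch, not part of the original module (it assumes a configured
# database in which an add-on with the slug 'cool-tool' is already saved):
# clean_slug slugifies the name and appends a numeric suffix on a clash.
def _example_clean_slug_clash():
    addon = Addon(name='Cool Tool')
    clean_slug(addon)
    # With 'cool-tool' already taken, addon.slug ends up as e.g. 'cool-tool1'.
    return addon.slug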
class AddonManager(amo.models.ManagerBase):
def __init__(self, include_deleted=False, include_unlisted=False):
# DO NOT change the default value of include_deleted and
# include_unlisted unless you've read through the comment just above
        # the Addon managers declaration/instantiation and understand the
# consequences.
amo.models.ManagerBase.__init__(self)
self.include_deleted = include_deleted
self.include_unlisted = include_unlisted
def get_query_set(self):
qs = super(AddonManager, self).get_query_set()
qs = qs._clone(klass=query.IndexQuerySet)
if not self.include_deleted:
qs = qs.exclude(status=amo.STATUS_DELETED)
if not self.include_unlisted:
qs = qs.exclude(is_listed=False)
return qs.transform(Addon.transformer)
def id_or_slug(self, val):
if isinstance(val, basestring) and not val.isdigit():
return self.filter(slug=val)
return self.filter(id=val)
def enabled(self):
return self.filter(disabled_by_user=False)
def public(self):
"""Get public add-ons only"""
return self.filter(self.valid_q([amo.STATUS_PUBLIC]))
def reviewed(self):
"""Get add-ons with a reviewed status"""
return self.filter(self.valid_q(amo.REVIEWED_STATUSES))
def unreviewed(self):
"""Get only unreviewed add-ons"""
return self.filter(self.valid_q(amo.UNREVIEWED_STATUSES))
def valid(self):
"""Get valid, enabled add-ons only"""
return self.filter(self.valid_q(amo.LISTED_STATUSES))
def valid_and_disabled_and_pending(self):
"""
Get valid, pending, enabled and disabled add-ons.
Used to allow pending theme pages to still be viewed.
"""
statuses = list(amo.LISTED_STATUSES) + [amo.STATUS_DISABLED,
amo.STATUS_PENDING]
return (self.filter(Q(status__in=statuses) | Q(disabled_by_user=True))
.exclude(type=amo.ADDON_EXTENSION,
_current_version__isnull=True))
def featured(self, app, lang=None, type=None):
"""
Filter for all featured add-ons for an application in all locales.
"""
ids = get_featured_ids(app, lang, type)
return amo.models.manual_order(self.listed(app), ids, 'addons.id')
def listed(self, app, *status):
"""
Listed add-ons have a version with a file matching ``status`` and are
not disabled. Personas and self-hosted add-ons will be returned too.
"""
if len(status) == 0:
status = [amo.STATUS_PUBLIC]
return self.filter(self.valid_q(status), appsupport__app=app.id)
def valid_q(self, status=[], prefix=''):
"""
Return a Q object that selects a valid Addon with the given statuses.
An add-on is valid if not disabled and has a current version.
``prefix`` can be used if you're not working with Addon directly and
need to hop across a join, e.g. ``prefix='addon__'`` in
CollectionAddon.
"""
if not status:
status = [amo.STATUS_PUBLIC]
def q(*args, **kw):
if prefix:
kw = dict((prefix + k, v) for k, v in kw.items())
return Q(*args, **kw)
return q(q(_current_version__isnull=False),
disabled_by_user=False, status__in=status)
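# A minimal sketch, grounded in the valid_q docstring above: filtering a
# related model across a join. CollectionAddon is passed in here because it
# lives in another app; the call itself is hypothetical.
def _example_valid_q_across_join(CollectionAddon):
    return CollectionAddon.objects.filter(
        Addon.objects.valid_q([amo.STATUS_PUBLIC], prefix='addon__'))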
class Addon(amo.models.OnChangeMixin, amo.models.ModelBase):
STATUS_CHOICES = amo.STATUS_CHOICES_ADDON
guid = models.CharField(max_length=255, unique=True, null=True)
slug = models.CharField(max_length=30, unique=True, null=True)
name = TranslatedField(default=None)
default_locale = models.CharField(max_length=10,
default=settings.LANGUAGE_CODE,
db_column='defaultlocale')
type = models.PositiveIntegerField(db_column='addontype_id', default=0)
status = models.PositiveIntegerField(
choices=STATUS_CHOICES.items(), db_index=True, default=0)
highest_status = models.PositiveIntegerField(
choices=STATUS_CHOICES.items(), default=0,
help_text="An upper limit for what an author can change.",
db_column='higheststatus')
icon_type = models.CharField(max_length=25, blank=True,
db_column='icontype')
homepage = TranslatedField()
support_email = TranslatedField(db_column='supportemail')
support_url = TranslatedField(db_column='supporturl')
description = PurifiedField(short=False)
summary = LinkifiedField()
developer_comments = PurifiedField(db_column='developercomments')
eula = PurifiedField()
privacy_policy = PurifiedField(db_column='privacypolicy')
the_reason = PurifiedField()
the_future = PurifiedField()
average_rating = models.FloatField(max_length=255, default=0, null=True,
db_column='averagerating')
bayesian_rating = models.FloatField(default=0, db_index=True,
db_column='bayesianrating')
total_reviews = models.PositiveIntegerField(default=0,
db_column='totalreviews')
weekly_downloads = models.PositiveIntegerField(
default=0, db_column='weeklydownloads', db_index=True)
total_downloads = models.PositiveIntegerField(
default=0, db_column='totaldownloads')
hotness = models.FloatField(default=0, db_index=True)
average_daily_downloads = models.PositiveIntegerField(default=0)
average_daily_users = models.PositiveIntegerField(default=0)
share_count = models.PositiveIntegerField(default=0, db_index=True,
db_column='sharecount')
last_updated = models.DateTimeField(
db_index=True, null=True,
help_text='Last time this add-on had a file/version update')
ts_slowness = models.FloatField(
db_index=True, null=True,
help_text='How much slower this add-on makes browser ts tests. '
'Read as {addon.ts_slowness}% slower.')
disabled_by_user = models.BooleanField(default=False, db_index=True,
db_column='inactive')
trusted = models.BooleanField(default=False)
view_source = models.BooleanField(default=True, db_column='viewsource')
public_stats = models.BooleanField(default=False, db_column='publicstats')
prerelease = models.BooleanField(default=False)
admin_review = models.BooleanField(default=False, db_column='adminreview')
admin_review_type = models.PositiveIntegerField(
choices=amo.ADMIN_REVIEW_TYPES.items(), default=amo.ADMIN_REVIEW_FULL)
site_specific = models.BooleanField(default=False,
db_column='sitespecific')
external_software = models.BooleanField(default=False,
db_column='externalsoftware')
dev_agreement = models.BooleanField(
default=False, help_text="Has the dev agreement been signed?")
auto_repackage = models.BooleanField(
default=True, help_text='Automatically upgrade jetpack add-on to a '
'new sdk version?')
nomination_message = models.TextField(null=True,
db_column='nominationmessage')
target_locale = models.CharField(
max_length=255, db_index=True, blank=True, null=True,
help_text="For dictionaries and language packs")
locale_disambiguation = models.CharField(
max_length=255, blank=True, null=True,
help_text="For dictionaries and language packs")
wants_contributions = models.BooleanField(default=False)
paypal_id = models.CharField(max_length=255, blank=True)
charity = models.ForeignKey('Charity', null=True)
# TODO(jbalogh): remove nullify_invalid once remora dies.
suggested_amount = DecimalCharField(
max_digits=8, decimal_places=2, nullify_invalid=True, blank=True,
null=True, help_text=_(u'Users have the option of contributing more '
'or less than this amount.'))
total_contributions = DecimalCharField(max_digits=8, decimal_places=2,
nullify_invalid=True, blank=True,
null=True)
annoying = models.PositiveIntegerField(
choices=amo.CONTRIB_CHOICES, default=0,
help_text=_(u'Users will always be asked in the Add-ons'
u' Manager (Firefox 4 and above)'))
enable_thankyou = models.BooleanField(
default=False, help_text='Should the thank you note be sent to '
'contributors?')
thankyou_note = TranslatedField()
authors = models.ManyToManyField('users.UserProfile', through='AddonUser',
related_name='addons')
categories = models.ManyToManyField('Category', through='AddonCategory')
dependencies = models.ManyToManyField('self', symmetrical=False,
through='AddonDependency',
related_name='addons')
premium_type = models.PositiveIntegerField(
choices=amo.ADDON_PREMIUM_TYPES.items(), default=amo.ADDON_FREE)
manifest_url = models.URLField(max_length=255, blank=True, null=True)
app_domain = models.CharField(max_length=255, blank=True, null=True,
db_index=True)
_current_version = models.ForeignKey(Version, db_column='current_version',
related_name='+', null=True,
on_delete=models.SET_NULL)
_latest_version = models.ForeignKey(Version, db_column='latest_version',
on_delete=models.SET_NULL,
null=True, related_name='+')
make_public = models.DateTimeField(null=True)
mozilla_contact = models.EmailField(blank=True)
vip_app = models.BooleanField(default=False)
# Whether the app is packaged or not (aka hosted).
is_packaged = models.BooleanField(default=False, db_index=True)
# This gets overwritten in the transformer.
share_counts = collections.defaultdict(int)
enable_new_regions = models.BooleanField(default=False, db_index=True)
whiteboard = models.TextField(blank=True)
# Whether the add-on is listed on AMO or not.
is_listed = models.BooleanField(default=True, db_index=True)
# The order of those managers is very important:
# The first one discovered, if it has "use_for_related_fields = True"
# (which it has if it's inheriting from caching.base.CachingManager), will
# be used for relations like `version.addon`. We thus want one that is NOT
# filtered in any case, we don't want a 500 if the addon is not found
# (because it has the status amo.STATUS_DELETED for example).
# The CLASS of the first one discovered will also be used for "many to many
# relations" like `collection.addons`. In that case, we do want the
# filtered version by default, to make sure we're not displaying stuff by
# mistake. You thus want the CLASS of the first one to be filtered by
# default.
# We don't control the instantiation, but AddonManager sets include_deleted
# and include_unlisted to False by default, so filtering is enabled by
# default. This is also why it's not repeated for 'objects' below.
unfiltered = AddonManager(include_deleted=True, include_unlisted=True)
with_unlisted = AddonManager(include_unlisted=True)
objects = AddonManager()
class Meta:
db_table = 'addons'
@staticmethod
def __new__(cls, *args, **kw):
try:
type_idx = Addon._meta._type_idx
except AttributeError:
type_idx = (idx for idx, f in enumerate(Addon._meta.fields)
if f.attname == 'type').next()
Addon._meta._type_idx = type_idx
return object.__new__(cls)
def __unicode__(self):
return u'%s: %s' % (self.id, self.name)
def __init__(self, *args, **kw):
super(Addon, self).__init__(*args, **kw)
self._first_category = {}
if self.type == amo.ADDON_PERSONA:
self.STATUS_CHOICES = Persona.STATUS_CHOICES
def save(self, **kw):
self.clean_slug()
super(Addon, self).save(**kw)
# Like the above Manager objects (`objects`, `with_unlisted`, ...), but
# for ElasticSearch queries.
@classmethod
def search_public(cls):
return cls.search_with_unlisted().filter(is_listed=True)
@classmethod
def search_with_unlisted(cls):
return cls.search().filter(
is_disabled=False, status__in=amo.REVIEWED_STATUSES)
@use_master
def clean_slug(self, slug_field='slug'):
if self.status == amo.STATUS_DELETED:
return
clean_slug(self, slug_field)
@transaction.commit_on_success
def delete(self, msg='', reason=''):
# To avoid a circular import.
from . import tasks
# Check for soft deletion path. Happens only if the addon status isn't
# 0 (STATUS_INCOMPLETE).
soft_deletion = self.highest_status or self.status
if soft_deletion and self.status == amo.STATUS_DELETED:
# We're already done.
return
id = self.id
# Fetch previews before deleting the addon instance, so that we can
# pass the list of files to delete to the delete_preview_files task
# after the addon is deleted.
previews = list(Preview.objects.filter(addon__id=id)
.values_list('id', flat=True))
if soft_deletion:
# /!\ If we ever stop using soft deletion, and remove this code, we
# need to make sure that the logs created below aren't cascade
# deleted!
if self.guid:
log.debug('Adding guid to blacklist: %s' % self.guid)
BlacklistedGuid(guid=self.guid, comments=msg).save()
log.debug('Deleting add-on: %s' % self.id)
to = [settings.FLIGTAR]
user = amo.get_user()
context = {
'atype': amo.ADDON_TYPE.get(self.type).upper(),
'authors': [u.email for u in self.authors.all()],
'adu': self.average_daily_users,
'guid': self.guid,
'id': self.id,
'msg': msg,
'reason': reason,
'name': self.name,
'slug': self.slug,
'total_downloads': self.total_downloads,
'url': helpers.absolutify(self.get_url_path()),
'user_str': ("%s, %s (%s)" % (user.display_name or
user.username, user.email,
user.id) if user else "Unknown"),
}
email_msg = u"""
The following %(atype)s was deleted.
%(atype)s: %(name)s
URL: %(url)s
DELETED BY: %(user_str)s
ID: %(id)s
GUID: %(guid)s
AUTHORS: %(authors)s
TOTAL DOWNLOADS: %(total_downloads)s
AVERAGE DAILY USERS: %(adu)s
NOTES: %(msg)s
REASON GIVEN BY USER FOR DELETION: %(reason)s
""" % context
log.debug('Sending delete email for %(atype)s %(id)s' % context)
subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context
# Update or NULL out various fields.
models.signals.pre_delete.send(sender=Addon, instance=self)
self._reviews.all().delete()
# The last parameter is needed to automagically create an AddonLog.
amo.log(amo.LOG.DELETE_ADDON, self.pk, unicode(self.guid), self)
self.update(status=amo.STATUS_DELETED, slug=None, app_domain=None,
_current_version=None, guid=None)
models.signals.post_delete.send(sender=Addon, instance=self)
send_mail(subject, email_msg, recipient_list=to)
else:
# Real deletion path.
super(Addon, self).delete()
for preview in previews:
tasks.delete_preview_files.delay(preview)
# Remove from search index.
tasks.unindex_addons.delay([id])
return True
@classmethod
def from_upload(cls, upload, platforms, is_packaged=False, source=None,
is_listed=True, data=None):
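        # Build a bare Addon from the parsed upload data: keep only keys that
        # map to real model fields, start it at STATUS_NULL, fall back to the
        # active request language if the declared default_locale isn't usable,
        # flag it for admin review when validation timed out, then create the
        # first Version from the upload.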
from files.utils import parse_addon
if not data:
data = parse_addon(upload)
fields = cls._meta.get_all_field_names()
addon = Addon(**dict((k, v) for k, v in data.items() if k in fields))
addon.status = amo.STATUS_NULL
addon.is_listed = is_listed
locale_is_set = (addon.default_locale and
addon.default_locale in (
settings.AMO_LANGUAGES +
settings.HIDDEN_LANGUAGES) and
data.get('default_locale') == addon.default_locale)
if not locale_is_set:
addon.default_locale = to_language(translation.get_language())
if upload.validation_timeout:
addon.admin_review = True
addon.save()
Version.from_upload(upload, addon, platforms, source=source)
amo.log(amo.LOG.CREATE_ADDON, addon)
log.debug('New addon %r from %r' % (addon, upload))
return addon
def flush_urls(self):
urls = ['*/addon/%s/' % self.slug, # Doesn't take care of api
'*/addon/%s/developers/' % self.slug,
'*/addon/%s/eula/*' % self.slug,
'*/addon/%s/privacy/' % self.slug,
'*/addon/%s/versions/*' % self.slug,
'*/api/*/addon/%s' % self.slug,
self.icon_url,
self.thumbnail_url,
]
urls.extend('*/user/%d/' % u.id for u in self.listed_authors)
return urls
def get_url_path(self, more=False, add_prefix=True):
if not self.is_listed: # Not listed? Doesn't have a public page.
return ''
# If more=True you get the link to the ajax'd middle chunk of the
# detail page.
view = 'addons.detail_more' if more else 'addons.detail'
return reverse(view, args=[self.slug], add_prefix=add_prefix)
def get_api_url(self):
# Used by Piston in output.
return helpers.absolutify(self.get_url_path())
def get_dev_url(self, action='edit', args=None, prefix_only=False):
args = args or []
prefix = 'devhub'
type_ = 'themes' if self.type == amo.ADDON_PERSONA else 'addons'
if not prefix_only:
prefix += '.%s' % type_
view_name = '{prefix}.{action}'.format(prefix=prefix,
action=action)
return reverse(view_name, args=[self.slug] + args)
def get_detail_url(self, action='detail', args=[]):
return reverse('addons.%s' % action, args=[self.slug] + args)
def meet_the_dev_url(self):
return reverse('addons.meet', args=[self.slug])
@property
def reviews_url(self):
return helpers.url('addons.reviews.list', self.slug)
def get_ratings_url(self, action='list', args=None, add_prefix=True):
return reverse('ratings.themes.%s' % action,
args=[self.slug] + (args or []),
add_prefix=add_prefix)
@classmethod
def get_type_url(cls, type):
try:
type = amo.ADDON_SLUGS[type]
except KeyError:
return None
return reverse('browse.%s' % type)
def type_url(self):
"""The url for this add-on's type."""
return Addon.get_type_url(self.type)
def share_url(self):
return reverse('addons.share', args=[self.slug])
@property
def automated_signing(self):
# We allow automated signing for add-ons which are not listed, and
# have not asked for side-loading privileges (full review).
# Beta versions are a special case for listed add-ons, and are dealt
# with on a file-by-file basis.
return not (self.is_listed or
self.status in (amo.STATUS_NOMINATED, amo.STATUS_PUBLIC))
@amo.cached_property(writable=True)
def listed_authors(self):
return UserProfile.objects.filter(
addons=self,
addonuser__listed=True).order_by('addonuser__position')
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@property
def reviews(self):
return Review.objects.filter(addon=self, reply_to=None)
def get_category(self, app):
if app in getattr(self, '_first_category', {}):
return self._first_category[app]
categories = list(self.categories.filter(application=app))
return categories[0] if categories else None
def language_ascii(self):
lang = translation.to_language(self.default_locale)
return settings.LANGUAGES.get(lang)
@property
def valid_file_statuses(self):
if self.status == amo.STATUS_PUBLIC:
return [amo.STATUS_PUBLIC]
if self.status in (amo.STATUS_LITE,
amo.STATUS_LITE_AND_NOMINATED):
return [amo.STATUS_PUBLIC, amo.STATUS_LITE,
amo.STATUS_LITE_AND_NOMINATED]
return amo.VALID_STATUSES
def get_version(self):
"""Retrieve the latest public version of an addon."""
if self.type == amo.ADDON_PERSONA:
return
try:
status = self.valid_file_statuses
status_list = ','.join(map(str, status))
fltr = {'files__status__in': status}
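            # The NOT EXISTS clause below drops any version that still has a
            # file whose status falls outside `status`, so only versions whose
            # files are all in an acceptable state are returned.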
return self.versions.no_cache().filter(**fltr).extra(
where=["""
NOT EXISTS (
SELECT 1 FROM files AS f2
WHERE f2.version_id = versions.id AND
f2.status NOT IN (%s))
""" % status_list])[0]
except (IndexError, Version.DoesNotExist):
return None
@write
def update_version(self, ignore=None, _signal=True):
"""
Returns true if we updated the field.
        The optional ``ignore`` parameter, if present, is a version
to not consider as part of the update, since it may be in the
process of being deleted.
        Pass ``_signal=False`` if you want no signals fired at all.
"""
if self.is_persona():
# Versions are not as critical on themes.
# If there are no versions, just create one and go.
if not self._current_version:
if self._latest_version:
self.update(_current_version=self._latest_version,
_signal=False)
return True
return False
current = self.get_version()
try:
latest_qs = self.versions.exclude(files__status=amo.STATUS_BETA)
if ignore is not None:
latest_qs = latest_qs.exclude(pk=ignore.pk)
latest = latest_qs.latest()
except Version.DoesNotExist:
latest = None
latest_id = latest and latest.id
diff = [self._current_version, current]
# Sometimes the DB is in an inconsistent state when this
# signal is dispatched.
try:
if self._latest_version:
# Make sure stringifying this does not trigger
# Version.DoesNotExist before trying to use it for
# logging.
unicode(self._latest_version)
diff += [self._latest_version, latest]
except Version.DoesNotExist:
diff += [self._latest_version_id, latest_id]
updated = {}
send_signal = False
if self._current_version != current:
updated.update({'_current_version': current})
send_signal = True
# Don't use self.latest_version here. It may throw Version.DoesNotExist
# if we're called from a post_delete signal. We also don't set
# send_signal since we only want this fired if the public version
# changes.
if self._latest_version_id != latest_id:
updated.update({'_latest_version': latest})
# update_version can be called by a post_delete signal (such
# as File's) when deleting a version. If so, we should avoid putting
# that version-being-deleted in any fields.
if ignore is not None:
updated = dict([(k, v) for (k, v) in updated.iteritems()
if v != ignore])
if updated:
# Pass along _signal to the .update() to prevent it from firing
# signals if we don't want them.
updated['_signal'] = _signal
try:
self.update(**updated)
if send_signal and _signal:
signals.version_changed.send(sender=self)
log.info(u'Version changed from current: %s to %s, '
u'latest: %s to %s for addon %s'
% tuple(diff + [self]))
except Exception, e:
log.error(u'Could not save version changes current: %s to %s, '
u'latest: %s to %s for addon %s (%s)' %
tuple(diff + [self, e]))
return bool(updated)
def compatible_version(self, app_id, app_version=None, platform=None,
compat_mode='strict'):
"""Returns the newest compatible version given the input."""
if not app_id:
return None
if platform:
            # We always include platform_id=1 in the SQL, so we skip it here.
platform = platform.lower()
if platform != 'all' and platform in amo.PLATFORM_DICT:
platform = amo.PLATFORM_DICT[platform].id
else:
platform = None
log.debug(u'Checking compatibility for add-on ID:%s, APP:%s, V:%s, '
u'OS:%s, Mode:%s' % (self.id, app_id, app_version, platform,
compat_mode))
valid_file_statuses = ','.join(map(str, self.valid_file_statuses))
data = dict(id=self.id, app_id=app_id, platform=platform,
valid_file_statuses=valid_file_statuses)
if app_version:
data.update(version_int=version_int(app_version))
else:
# We can't perform the search queries for strict or normal without
# an app version.
compat_mode = 'ignore'
ns_key = cache_ns_key('d2c-versions:%s' % self.id)
cache_key = '%s:%s:%s:%s:%s' % (ns_key, app_id, app_version, platform,
compat_mode)
version_id = cache.get(cache_key)
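        # A cached value of 0 is a sentinel for "no compatible version": the
        # negative result is cached too (see the cache.set() at the end of
        # this method), so repeated misses don't re-run the raw query.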
if version_id is not None:
log.debug(u'Found compatible version in cache: %s => %s' % (
cache_key, version_id))
if version_id == 0:
return None
else:
try:
return Version.objects.get(pk=version_id)
except Version.DoesNotExist:
pass
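        # Cache miss: build a single raw query joining versions to their
        # application compatibility ranges and files, narrow it according to
        # compat_mode, and keep only the newest match
        # (ORDER BY versions.id DESC LIMIT 1).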
raw_sql = ["""
SELECT versions.*
FROM versions
INNER JOIN addons
ON addons.id = versions.addon_id AND addons.id = %(id)s
INNER JOIN applications_versions
ON applications_versions.version_id = versions.id
INNER JOIN appversions appmin
ON appmin.id = applications_versions.min
AND appmin.application_id = %(app_id)s
INNER JOIN appversions appmax
ON appmax.id = applications_versions.max
AND appmax.application_id = %(app_id)s
INNER JOIN files
ON files.version_id = versions.id AND
(files.platform_id = 1"""]
if platform:
raw_sql.append(' OR files.platform_id = %(platform)s')
raw_sql.append(') WHERE files.status IN (%(valid_file_statuses)s) ')
if app_version:
raw_sql.append('AND appmin.version_int <= %(version_int)s ')
if compat_mode == 'ignore':
pass # No further SQL modification required.
elif compat_mode == 'normal':
raw_sql.append("""AND
CASE WHEN files.strict_compatibility = 1 OR
files.binary_components = 1
THEN appmax.version_int >= %(version_int)s ELSE 1 END
""")
# Filter out versions that don't have the minimum maxVersion
# requirement to qualify for default-to-compatible.
d2c_max = amo.D2C_MAX_VERSIONS.get(app_id)
if d2c_max:
data['d2c_max_version'] = version_int(d2c_max)
raw_sql.append(
"AND appmax.version_int >= %(d2c_max_version)s ")
# Filter out versions found in compat overrides
raw_sql.append("""AND
NOT versions.id IN (
SELECT version_id FROM incompatible_versions
WHERE app_id=%(app_id)s AND
(min_app_version='0' AND
max_app_version_int >= %(version_int)s) OR
(min_app_version_int <= %(version_int)s AND
max_app_version='*') OR
(min_app_version_int <= %(version_int)s AND
max_app_version_int >= %(version_int)s)) """)
else: # Not defined or 'strict'.
raw_sql.append('AND appmax.version_int >= %(version_int)s ')
raw_sql.append('ORDER BY versions.id DESC LIMIT 1;')
version = Version.objects.raw(''.join(raw_sql) % data)
if version:
version = version[0]
version_id = version.id
else:
version = None
version_id = 0
log.debug(u'Caching compat version %s => %s' % (cache_key, version_id))
cache.set(cache_key, version_id, None)
return version
def increment_version(self):
"""Increment version number by 1."""
version = self.latest_version or self.current_version
version.version = str(float(version.version) + 1)
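        # Note: the float() bump raises the leading component, e.g. '1.2'
        # becomes '2.2'; a version string that doesn't parse as a float
        # (such as '1.2.3') would raise ValueError here.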
# Set the current version.
self.update(_current_version=version.save())
def invalidate_d2c_versions(self):
"""Invalidates the cache of compatible versions.
Call this when there is an event that may change what compatible
versions are returned so they are recalculated.
"""
key = cache_ns_key('d2c-versions:%s' % self.id, increment=True)
log.info('Incrementing d2c-versions namespace for add-on [%s]: %s' % (
self.id, key))
@property
def current_version(self):
"""Returns the current_version or None if the app is deleted or not
created yet"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._current_version
except ObjectDoesNotExist:
pass
return None
@property
def latest_version(self):
"""Returns the latest_version or None if the app is deleted or not
created yet"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._latest_version
except ObjectDoesNotExist:
pass
return None
@amo.cached_property
def binary(self):
"""Returns if the current version has binary files."""
version = self.current_version
if version:
return version.files.filter(binary=True).exists()
return False
@amo.cached_property
def binary_components(self):
"""Returns if the current version has files with binary_components."""
version = self.current_version
if version:
return version.files.filter(binary_components=True).exists()
return False
def get_icon_dir(self):
return os.path.join(helpers.user_media_path('addon_icons'),
'%s' % (self.id / 1000))
def get_icon_url(self, size, use_default=True):
"""
        Returns the addon's icon url, or a default.
        If this is not a theme or persona and there is no
        icon for the addon, then:
        if use_default is True, a default icon is returned;
        if use_default is False, None is returned.
"""
icon_type_split = []
if self.icon_type:
icon_type_split = self.icon_type.split('/')
# Get the closest allowed size without going over
if (size not in amo.ADDON_ICON_SIZES
and size >= amo.ADDON_ICON_SIZES[0]):
size = [s for s in amo.ADDON_ICON_SIZES if s < size][-1]
elif size < amo.ADDON_ICON_SIZES[0]:
size = amo.ADDON_ICON_SIZES[0]
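        # e.g., assuming ADDON_ICON_SIZES is an ascending list such as
        # [32, 48, 64]: a requested size of 50 snaps down to 48, while a
        # requested size of 16 snaps up to 32.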
# Figure out what to return for an image URL
if self.type == amo.ADDON_PERSONA:
return self.persona.icon_url
if not self.icon_type:
if self.type == amo.ADDON_THEME:
icon = amo.ADDON_ICONS[amo.ADDON_THEME]
return "%simg/icons/%s" % (settings.STATIC_URL, icon)
else:
if not use_default:
return None
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL,
'default',
size
)
elif icon_type_split[0] == 'icon':
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL,
icon_type_split[1],
size
)
else:
# [1] is the whole ID, [2] is the directory
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
modified = int(time.mktime(self.modified.timetuple()))
path = '/'.join([
split_id.group(2) or '0',
'{0}-{1}.png?modified={2}'.format(self.id, size, modified),
])
return helpers.user_media_url('addon_icons') + path
@write
def update_status(self, ignore_version=None):
self.reload()
if (self.status in [amo.STATUS_NULL, amo.STATUS_DELETED]
or self.is_disabled or self.is_persona()):
self.update_version(ignore=ignore_version)
return
def logit(reason, old=self.status):
log.info('Changing add-on status [%s]: %s => %s (%s).'
% (self.id, old, self.status, reason))
amo.log(amo.LOG.CHANGE_STATUS, self.get_status_display(), self)
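        # Walk the downgrade paths in order: no versions at all, no version
        # with a valid file, a public add-on without a public file, and
        # finally a reviewed add-on whose newest file is back under review
        # (which simply re-saves the current status so watch_status fires).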
versions = self.versions.all()
status = None
if not versions.exists():
status = amo.STATUS_NULL
logit('no versions')
elif not versions.filter(
files__status__in=amo.VALID_STATUSES).exists():
status = amo.STATUS_NULL
logit('no version with valid file')
elif (self.status == amo.STATUS_PUBLIC and
not versions.filter(files__status=amo.STATUS_PUBLIC).exists()):
if versions.filter(files__status=amo.STATUS_LITE).exists():
status = amo.STATUS_LITE
logit('only lite files')
else:
status = amo.STATUS_UNREVIEWED
logit('no reviewed files')
elif (self.status in amo.REVIEWED_STATUSES
and self.latest_version
and self.latest_version.has_files
and (self.latest_version.all_files[0].status
in amo.UNDER_REVIEW_STATUSES)):
            # Addon is public, but its latest file is not (this is the case
            # on a new file upload). So call update() to trigger watch_status,
            # which takes care of setting the nomination time when needed.
status = self.status
if status is not None:
self.update(status=status)
self.update_version(ignore=ignore_version)
@staticmethod
def attach_related_versions(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
current_ids = filter(None, (a._current_version_id for a in addons))
latest_ids = filter(None, (a._latest_version_id for a in addons))
all_ids = set(current_ids) | set(latest_ids)
versions = list(Version.objects.filter(id__in=all_ids).order_by())
for version in versions:
try:
addon = addon_dict[version.addon_id]
except KeyError:
log.debug('Version %s has an invalid add-on id.' % version.id)
continue
if addon._current_version_id == version.id:
addon._current_version = version
if addon._latest_version_id == version.id:
addon._latest_version = version
version.addon = addon
@staticmethod
def attach_listed_authors(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
q = (UserProfile.objects.no_cache()
.filter(addons__in=addons, addonuser__listed=True)
.extra(select={'addon_id': 'addons_users.addon_id',
'position': 'addons_users.position'}))
q = sorted(q, key=lambda u: (u.addon_id, u.position))
for addon_id, users in itertools.groupby(q, key=lambda u: u.addon_id):
addon_dict[addon_id].listed_authors = list(users)
# FIXME: set listed_authors to empty list on addons without listed
# authors.
@staticmethod
def attach_previews(addons, addon_dict=None, no_transforms=False):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = Preview.objects.filter(addon__in=addons,
position__gte=0).order_by()
if no_transforms:
qs = qs.no_transforms()
qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
for addon, previews in itertools.groupby(qs, lambda x: x.addon_id):
addon_dict[addon].all_previews = list(previews)
# FIXME: set all_previews to empty list on addons without previews.
@staticmethod
@timer
def transformer(addons):
if not addons:
return
addon_dict = dict((a.id, a) for a in addons)
personas = [a for a in addons if a.type == amo.ADDON_PERSONA]
addons = [a for a in addons if a.type != amo.ADDON_PERSONA]
# Set _latest_version, _current_version
Addon.attach_related_versions(addons, addon_dict=addon_dict)
# Attach listed authors.
Addon.attach_listed_authors(addons, addon_dict=addon_dict)
for persona in Persona.objects.no_cache().filter(addon__in=personas):
addon = addon_dict[persona.addon_id]
addon.persona = persona
addon.weekly_downloads = persona.popularity
# Personas need categories for the JSON dump.
Category.transformer(personas)
# Attach sharing stats.
sharing.attach_share_counts(AddonShareCountTotal, 'addon', addon_dict)
# Attach previews.
Addon.attach_previews(addons, addon_dict=addon_dict)
# Attach _first_category for Firefox.
cats = dict(AddonCategory.objects.values_list('addon', 'category')
.filter(addon__in=addon_dict,
category__application=amo.FIREFOX.id))
qs = Category.objects.filter(id__in=set(cats.values()))
categories = dict((c.id, c) for c in qs)
for addon in addons:
category = categories[cats[addon.id]] if addon.id in cats else None
addon._first_category[amo.FIREFOX.id] = category
return addon_dict
@property
def show_beta(self):
return self.status == amo.STATUS_PUBLIC and self.current_beta_version
def show_adu(self):
return self.type != amo.ADDON_SEARCH
@amo.cached_property
def current_beta_version(self):
"""Retrieves the latest version of an addon, in the beta channel."""
versions = self.versions.filter(files__status=amo.STATUS_BETA)[:1]
if versions:
return versions[0]
@property
def icon_url(self):
return self.get_icon_url(32)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
if app:
qs = Addon.objects.listed(app)
else:
qs = Addon.objects.valid()
return (qs.exclude(id=self.id)
.filter(addonuser__listed=True,
authors__in=self.listed_authors)
.distinct())
@property
def contribution_url(self, lang=settings.LANGUAGE_CODE,
app=settings.DEFAULT_APP):
return reverse('addons.contribute', args=[self.slug])
@property
def thumbnail_url(self):
"""
Returns the addon's thumbnail url or a default.
"""
try:
preview = self.all_previews[0]
return preview.thumbnail_url
except IndexError:
return settings.STATIC_URL + '/img/icons/no-preview.png'
def can_request_review(self):
"""Return the statuses an add-on can request."""
if not File.objects.filter(version__addon=self):
return ()
if (self.is_disabled or
self.status in (amo.STATUS_PUBLIC,
amo.STATUS_LITE_AND_NOMINATED,
amo.STATUS_DELETED) or
not self.latest_version or
not self.latest_version.files.exclude(
status=amo.STATUS_DISABLED)):
return ()
elif self.status == amo.STATUS_NOMINATED:
return (amo.STATUS_LITE,)
elif self.status in [amo.STATUS_UNREVIEWED, amo.STATUS_LITE]:
return (amo.STATUS_PUBLIC,)
else:
return (amo.STATUS_LITE, amo.STATUS_PUBLIC)
def is_persona(self):
return self.type == amo.ADDON_PERSONA
@property
def is_disabled(self):
"""True if this Addon is disabled.
It could be disabled by an admin or disabled by the developer
"""
return self.status == amo.STATUS_DISABLED or self.disabled_by_user
@property
def is_deleted(self):
return self.status == amo.STATUS_DELETED
@property
def is_under_review(self):
return self.status in amo.UNDER_REVIEW_STATUSES
def is_unreviewed(self):
return self.status in amo.UNREVIEWED_STATUSES
def is_public(self):
return self.status == amo.STATUS_PUBLIC and not self.disabled_by_user
def is_incomplete(self):
from devhub.models import SubmitStep # Avoid import loop.
return SubmitStep.objects.filter(addon=self).exists()
def is_pending(self):
return self.status == amo.STATUS_PENDING
def is_rejected(self):
return self.status == amo.STATUS_REJECTED
def can_be_deleted(self):
return not self.is_deleted
@classmethod
def featured_random(cls, app, lang):
return get_featured_ids(app, lang)
def is_no_restart(self):
"""Is this a no-restart add-on?"""
files = self.current_version and self.current_version.all_files
return bool(files and files[0].no_restart)
def is_featured(self, app, lang=None):
"""Is add-on globally featured for this app and language?"""
if app:
return self.id in get_featured_ids(app, lang)
def has_full_profile(self):
"""Is developer profile public (completed)?"""
return self.the_reason and self.the_future
def has_profile(self):
"""Is developer profile (partially or entirely) completed?"""
return self.the_reason or self.the_future
@amo.cached_property
def tags_partitioned_by_developer(self):
"""Returns a tuple of developer tags and user tags for this addon."""
tags = self.tags.not_blacklisted()
        if self.is_persona():  # is_persona is a method, so call it.
return [], tags
user_tags = tags.exclude(addon_tags__user__in=self.listed_authors)
dev_tags = tags.exclude(id__in=[t.id for t in user_tags])
return dev_tags, user_tags
@amo.cached_property
def compatible_apps(self):
"""Shortcut to get compatible apps for the current version."""
# Search providers and personas don't list their supported apps.
if self.type in amo.NO_COMPAT:
return dict((app, None) for app in
amo.APP_TYPE_SUPPORT[self.type])
if self.current_version:
return self.current_version.compatible_apps
else:
return {}
def accepts_compatible_apps(self):
"""True if this add-on lists compatible apps."""
return self.type not in amo.NO_COMPAT
def incompatible_latest_apps(self):
"""Returns a list of applications with which this add-on is
incompatible (based on the latest version).
"""
return [a for a, v in self.compatible_apps.items() if v and
version_int(v.max.version) < version_int(a.latest_version)]
def has_author(self, user, roles=None):
"""True if ``user`` is an author with any of the specified ``roles``.
``roles`` should be a list of valid roles (see amo.AUTHOR_ROLE_*). If
not specified, has_author will return true if the user has any role.
"""
if user is None or user.is_anonymous():
return False
if roles is None:
roles = dict(amo.AUTHOR_CHOICES).keys()
return AddonUser.objects.filter(addon=self, user=user,
role__in=roles).exists()
@property
def takes_contributions(self):
return (self.status == amo.STATUS_PUBLIC and self.wants_contributions
and (self.paypal_id or self.charity_id))
@property
def has_eula(self):
return self.eula
@classmethod
def _last_updated_queries(cls):
"""
Get the queries used to calculate addon.last_updated.
"""
status_change = Max('versions__files__datestatuschanged')
public = (
Addon.objects.no_cache().filter(
status=amo.STATUS_PUBLIC,
versions__files__status=amo.STATUS_PUBLIC)
.exclude(type=amo.ADDON_PERSONA)
.values('id').annotate(last_updated=status_change))
lite = (Addon.objects.no_cache()
.filter(status__in=amo.LISTED_STATUSES,
versions__files__status=amo.STATUS_LITE)
.values('id').annotate(last_updated=status_change))
stati = amo.LISTED_STATUSES + (amo.STATUS_PUBLIC,)
exp = (Addon.objects.no_cache().exclude(status__in=stati)
.filter(versions__files__status__in=amo.VALID_STATUSES)
.values('id')
.annotate(last_updated=Max('versions__files__created')))
personas = (Addon.objects.no_cache().filter(type=amo.ADDON_PERSONA)
.extra(select={'last_updated': 'created'}))
return dict(public=public, exp=exp, personas=personas,
lite=lite)
@amo.cached_property(writable=True)
def all_categories(self):
return list(self.categories.all())
@amo.cached_property(writable=True)
def all_previews(self):
return list(self.get_previews())
def get_previews(self):
"""Exclude promo graphics."""
return self.previews.exclude(position=-1)
@property
def app_categories(self):
categories = sorted_groupby(order_by_translation(self.categories.all(),
'name'),
key=lambda x: x.application)
app_cats = []
for app_id, cats in categories:
app = amo.APP_IDS.get(app_id)
if app_id and not app:
# Skip retired applications like Sunbird.
continue
app_cats.append((app, list(cats)))
return app_cats
def remove_locale(self, locale):
"""NULLify strings in this locale for the add-on and versions."""
for o in itertools.chain([self], self.versions.all()):
Translation.objects.remove_for(o, locale)
def app_perf_results(self):
"""Generator of (AppVersion, [list of perf results contexts]).
A performance result context is a dict that has these keys:
**baseline**
The baseline of the result. For startup time this is the
time it takes to start up with no addons.
**startup_is_too_slow**
True/False if this result is slower than the threshold.
**result**
Actual result object
"""
res = collections.defaultdict(list)
baselines = {}
for result in (self.performance
.select_related('osversion', 'appversion')
.order_by('-created')[:20]):
k = (result.appversion.id, result.osversion.id, result.test)
if k not in baselines:
baselines[k] = result.get_baseline()
baseline = baselines[k]
appver = result.appversion
slow = result.startup_is_too_slow(baseline=baseline)
res[appver].append({'baseline': baseline,
'startup_is_too_slow': slow,
'result': result})
return res.iteritems()
def get_localepicker(self):
"""For language packs, gets the contents of localepicker."""
if (self.type == amo.ADDON_LPAPP and self.status == amo.STATUS_PUBLIC
and self.current_version):
files = (self.current_version.files
.filter(platform=amo.PLATFORM_ANDROID.id))
try:
return unicode(files[0].get_localepicker(), 'utf-8')
except IndexError:
pass
return ''
def get_mozilla_contacts(self):
return [x.strip() for x in self.mozilla_contact.split(',')]
def can_review(self, user):
return not(user and self.has_author(user))
@property
def all_dependencies(self):
"""Return all the add-ons this add-on depends on."""
return list(self.dependencies.all()[:3])
def has_installed(self, user):
if not user or not isinstance(user, UserProfile):
return False
return self.installed.filter(user=user).exists()
def get_latest_file(self):
"""Get the latest file from the current version."""
cur = self.current_version
if cur:
res = cur.files.order_by('-created')
if res:
return res[0]
def in_escalation_queue(self):
return self.escalationqueue_set.exists()
def update_names(self, new_names):
"""
Adds, edits, or removes names to match the passed in new_names dict.
Will not remove the translation of the default_locale.
`new_names` is a dictionary mapping of locales to names.
Returns a message that can be used in logs showing what names were
added or updated.
Note: This method doesn't save the changes made to the addon object.
Don't forget to call save() in your calling method.
"""
updated_locales = {}
locales = dict(Translation.objects.filter(id=self.name_id)
.values_list('locale',
'localized_string'))
msg_c = [] # For names that were created.
msg_d = [] # For deletes.
msg_u = [] # For updates.
# Normalize locales.
names = {}
for locale, name in new_names.iteritems():
loc = find_language(locale)
if loc and loc not in names:
names[loc] = name
        # Null out names that are no longer in `names` but still exist in
        # the database.
for locale in set(locales) - set(names):
names[locale] = None
for locale, name in names.iteritems():
if locale in locales:
if not name and locale.lower() == self.default_locale.lower():
pass # We never want to delete the default locale.
elif not name: # A deletion.
updated_locales[locale] = None
msg_d.append(u'"%s" (%s).' % (locales.get(locale), locale))
elif name != locales[locale]:
updated_locales[locale] = name
msg_u.append(u'"%s" -> "%s" (%s).' % (
locales[locale], name, locale))
else:
updated_locales[locale] = names.get(locale)
msg_c.append(u'"%s" (%s).' % (name, locale))
if locales != updated_locales:
self.name = updated_locales
return {
'added': ' '.join(msg_c),
'deleted': ' '.join(msg_d),
'updated': ' '.join(msg_u),
}
def update_default_locale(self, locale):
"""
Updates default_locale if it's different and matches one of our
supported locales.
Returns tuple of (old_locale, new_locale) if updated. Otherwise None.
"""
old_locale = self.default_locale
locale = find_language(locale)
if locale and locale != old_locale:
self.update(default_locale=locale)
return old_locale, locale
return None
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the addon.
"""
if require_author:
require_owner = False
ignore_disabled = True
admin = False
return acl.check_addon_ownership(request, self, admin=admin,
viewer=(not require_owner),
ignore_disabled=ignore_disabled)
dbsignals.pre_save.connect(save_signal, sender=Addon,
dispatch_uid='addon_translations')
class AddonDeviceType(amo.models.ModelBase):
addon = models.ForeignKey(Addon, db_constraint=False)
device_type = models.PositiveIntegerField(
default=amo.DEVICE_DESKTOP, choices=do_dictsort(amo.DEVICE_TYPES),
db_index=True)
class Meta:
db_table = 'addons_devicetypes'
unique_together = ('addon', 'device_type')
def __unicode__(self):
return u'%s: %s' % (self.addon.name, self.device.name)
@property
def device(self):
return amo.DEVICE_TYPES[self.device_type]
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
from . import tasks
tasks.version_changed.delay(sender.id)
@receiver(dbsignals.post_save, sender=Addon,
dispatch_uid='addons.search.index')
def update_search_index(sender, instance, **kw):
from . import tasks
if not kw.get('raw'):
tasks.index_addons.delay([instance.id])
@Addon.on_change
def watch_status(old_attr={}, new_attr={}, instance=None,
sender=None, **kw):
"""
Set nomination date if the addon is new in queue or updating.
The nomination date cannot be reset, say, when a developer cancels
their request for full review and re-requests full review.
If a version is rejected after nomination, the developer has
to upload a new version.
"""
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if (new_status not in amo.UNDER_REVIEW_STATUSES + amo.REVIEWED_STATUSES
or not new_status or not instance.latest_version):
return
if old_status not in amo.UNDER_REVIEW_STATUSES:
# New: will (re)set nomination only if it's None.
instance.latest_version.reset_nomination_time()
elif instance.latest_version.has_files:
# Updating: inherit nomination from last nominated version.
# Calls `inherit_nomination` manually given that signals are
# deactivated to avoid circular calls.
inherit_nomination(None, instance.latest_version)
@Addon.on_change
def watch_disabled(old_attr={}, new_attr={}, instance=None, sender=None, **kw):
attrs = dict((k, v) for k, v in old_attr.items()
if k in ('disabled_by_user', 'status'))
if Addon(**attrs).is_disabled and not instance.is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.unhide_disabled_file()
if instance.is_disabled and not Addon(**attrs).is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.hide_disabled_file()
@Addon.on_change
def watch_developer_notes(old_attr={}, new_attr={}, instance=None, sender=None,
**kw):
whiteboard_changed = (
new_attr.get('whiteboard') and
old_attr.get('whiteboard') != new_attr.get('whiteboard'))
developer_comments_changed = (new_attr.get('_developer_comments_cache') and
old_attr.get('_developer_comments_cache') !=
new_attr.get('_developer_comments_cache'))
if whiteboard_changed or developer_comments_changed:
instance.versions.update(has_info_request=False)
def attach_categories(addons):
"""Put all of the add-on's categories into a category_ids list."""
addon_dict = dict((a.id, a) for a in addons)
categories = (Category.objects.filter(addoncategory__addon__in=addon_dict)
.values_list('addoncategory__addon', 'id'))
for addon, cats in sorted_groupby(categories, lambda x: x[0]):
addon_dict[addon].category_ids = [c[1] for c in cats]
def attach_translations(addons):
"""Put all translations into a translations dict."""
attach_trans_dict(Addon, addons)
def attach_tags(addons):
addon_dict = dict((a.id, a) for a in addons)
qs = (Tag.objects.not_blacklisted().filter(addons__in=addon_dict)
.values_list('addons__id', 'tag_text'))
for addon, tags in sorted_groupby(qs, lambda x: x[0]):
addon_dict[addon].tag_list = [t[1] for t in tags]
class Persona(caching.CachingMixin, models.Model):
"""Personas-specific additions to the add-on model."""
STATUS_CHOICES = amo.STATUS_CHOICES_PERSONA
addon = models.OneToOneField(Addon)
persona_id = models.PositiveIntegerField(db_index=True)
# name: deprecated in favor of Addon model's name field
# description: deprecated, ditto
header = models.CharField(max_length=64, null=True)
footer = models.CharField(max_length=64, null=True)
accentcolor = models.CharField(max_length=10, null=True)
textcolor = models.CharField(max_length=10, null=True)
author = models.CharField(max_length=255, null=True)
display_username = models.CharField(max_length=255, null=True)
submit = models.DateTimeField(null=True)
approve = models.DateTimeField(null=True)
movers = models.FloatField(null=True, db_index=True)
popularity = models.IntegerField(null=False, default=0, db_index=True)
license = models.PositiveIntegerField(
choices=amo.PERSONA_LICENSES_CHOICES, null=True, blank=True)
# To spot duplicate submissions.
checksum = models.CharField(max_length=64, blank=True, default='')
dupe_persona = models.ForeignKey('self', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'personas'
def __unicode__(self):
return unicode(self.addon.name)
def is_new(self):
return self.persona_id == 0
def flush_urls(self):
urls = ['*/addon/%d/' % self.addon_id,
'*/api/*/addon/%d' % self.addon_id,
self.thumb_url,
self.icon_url,
self.preview_url,
self.header_url,
self.footer_url,
self.update_url]
return urls
def _image_url(self, filename):
return self.get_mirror_url(filename)
def _image_path(self, filename):
return os.path.join(helpers.user_media_path('addons'),
str(self.addon.id), filename)
def get_mirror_url(self, filename):
host = (settings.PRIVATE_MIRROR_URL if self.addon.is_disabled
else helpers.user_media_url('addons'))
image_url = posixpath.join(host, str(self.addon.id), filename or '')
# TODO: Bust the cache on the hash of the image contents or something.
if self.addon.modified is not None:
modified = int(time.mktime(self.addon.modified.timetuple()))
else:
modified = 0
return '%s?%s' % (image_url, modified)
@amo.cached_property
def thumb_url(self):
"""
Handles deprecated GetPersonas URL.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview.jpg')
@amo.cached_property
def thumb_path(self):
"""
Handles deprecated GetPersonas path.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview.jpg')
@amo.cached_property
def icon_url(self):
"""URL to personas square preview."""
if self.is_new():
return self._image_url('icon.png')
else:
return self._image_url('preview_small.jpg')
@amo.cached_property
def icon_path(self):
"""Path to personas square preview."""
if self.is_new():
return self._image_path('icon.png')
else:
return self._image_path('preview_small.jpg')
@amo.cached_property
def preview_url(self):
"""URL to Persona's big, 680px, preview."""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview_large.jpg')
@amo.cached_property
def preview_path(self):
"""Path to Persona's big, 680px, preview."""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview_large.jpg')
@amo.cached_property
def header_url(self):
return self._image_url(self.header)
@amo.cached_property
def footer_url(self):
return self.footer and self._image_url(self.footer) or ''
@amo.cached_property
def header_path(self):
return self._image_path(self.header)
@amo.cached_property
def footer_path(self):
return self.footer and self._image_path(self.footer) or ''
@amo.cached_property
def update_url(self):
locale = settings.LANGUAGE_URL_MAP.get(translation.get_language())
return settings.NEW_PERSONAS_UPDATE_URL % {
'locale': locale or settings.LANGUAGE_CODE,
'id': self.addon.id
}
@amo.cached_property
def theme_data(self):
"""Theme JSON Data for Browser/extension preview."""
def hexcolor(color):
return '#%s' % color
addon = self.addon
return {
'id': unicode(self.addon.id), # Personas dislikes ints
'name': unicode(addon.name),
'accentcolor': hexcolor(self.accentcolor),
'textcolor': hexcolor(self.textcolor),
'category': (unicode(addon.all_categories[0].name) if
addon.all_categories else ''),
# TODO: Change this to be `addons_users.user.display_name`.
'author': self.display_username,
'description': unicode(addon.description),
'header': self.header_url,
'footer': self.footer_url or '',
'headerURL': self.header_url,
'footerURL': self.footer_url or '',
'previewURL': self.thumb_url,
'iconURL': self.icon_url,
'updateURL': self.update_url,
'detailURL': helpers.absolutify(self.addon.get_url_path()),
'version': '1.0'
}
@property
def json_data(self):
"""Persona JSON Data for Browser/extension preview."""
return json.dumps(self.theme_data,
separators=(',', ':'), cls=JSONEncoder)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
qs = (Addon.objects.valid()
.exclude(id=self.addon.id)
.filter(type=amo.ADDON_PERSONA))
return (qs.filter(addonuser__listed=True,
authors__in=self.addon.listed_authors)
.distinct())
@amo.cached_property(writable=True)
def listed_authors(self):
return self.addon.listed_authors
class AddonCategory(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon)
category = models.ForeignKey('Category')
feature = models.BooleanField(default=False)
feature_locales = models.CharField(max_length=255, default='', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'addons_categories'
unique_together = ('addon', 'category')
def flush_urls(self):
urls = ['*/addon/%d/' % self.addon_id,
'*%s' % self.category.get_url_path(), ]
return urls
@classmethod
def creatured_random(cls, category, lang):
return get_creatured_ids(category, lang)
class AddonRecommendation(models.Model):
"""
Add-on recommendations. For each `addon`, a group of `other_addon`s
is recommended with a score (= correlation coefficient).
"""
addon = models.ForeignKey(Addon, related_name="addon_recommendations")
other_addon = models.ForeignKey(Addon, related_name="recommended_for")
score = models.FloatField()
class Meta:
db_table = 'addon_recommendations'
ordering = ('-score',)
@classmethod
def scores(cls, addon_ids):
"""Get a mapping of {addon: {other_addon: score}} for each add-on."""
d = {}
q = (AddonRecommendation.objects.filter(addon__in=addon_ids)
.values('addon', 'other_addon', 'score'))
for addon, rows in sorted_groupby(q, key=lambda x: x['addon']):
d[addon] = dict((r['other_addon'], r['score']) for r in rows)
return d
class AddonUser(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon)
user = UserForeignKey()
role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
choices=amo.AUTHOR_CHOICES)
listed = models.BooleanField(_(u'Listed'), default=True)
position = models.IntegerField(default=0)
objects = caching.CachingManager()
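    # __init__ below stashes the original role/user; presumably so that
    # change-watching handlers elsewhere can compare them against the saved
    # values (an assumption -- the consumers aren't shown in this file).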
def __init__(self, *args, **kwargs):
super(AddonUser, self).__init__(*args, **kwargs)
self._original_role = self.role
self._original_user_id = self.user_id
class Meta:
db_table = 'addons_users'
def flush_urls(self):
return self.addon.flush_urls() + self.user.flush_urls()
class AddonDependency(models.Model):
addon = models.ForeignKey(Addon, related_name='addons_dependencies')
dependent_addon = models.ForeignKey(Addon, related_name='dependent_on')
class Meta:
db_table = 'addons_dependencies'
unique_together = ('addon', 'dependent_addon')
class BlacklistedGuid(amo.models.ModelBase):
guid = models.CharField(max_length=255, unique=True)
comments = models.TextField(default='', blank=True)
class Meta:
db_table = 'blacklisted_guids'
def __unicode__(self):
return self.guid
class Category(amo.models.OnChangeMixin, amo.models.ModelBase):
name = TranslatedField()
slug = amo.models.SlugField(max_length=50,
help_text='Used in Category URLs.')
type = models.PositiveIntegerField(db_column='addontype_id',
choices=do_dictsort(amo.ADDON_TYPE))
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
null=True, blank=True,
db_column='application_id')
count = models.IntegerField('Addon count', default=0)
weight = models.IntegerField(
default=0, help_text='Category weight used in sort ordering')
misc = models.BooleanField(default=False)
addons = models.ManyToManyField(Addon, through='AddonCategory')
class Meta:
db_table = 'categories'
verbose_name_plural = 'Categories'
def __unicode__(self):
return unicode(self.name)
def flush_urls(self):
urls = ['*%s' % self.get_url_path(), ]
return urls
def get_url_path(self):
try:
type = amo.ADDON_SLUGS[self.type]
except KeyError:
type = amo.ADDON_SLUGS[amo.ADDON_EXTENSION]
return reverse('browse.%s' % type, args=[self.slug])
@staticmethod
def transformer(addons):
qs = (Category.objects.no_cache().filter(addons__in=addons)
.extra(select={'addon_id': 'addons_categories.addon_id'}))
cats = dict((addon_id, list(cs))
for addon_id, cs in sorted_groupby(qs, 'addon_id'))
for addon in addons:
addon.all_categories = cats.get(addon.id, [])
def clean(self):
if self.slug.isdigit():
raise ValidationError('Slugs cannot be all numbers.')
dbsignals.pre_save.connect(save_signal, sender=Category,
dispatch_uid='category_translations')
class Feature(amo.models.ModelBase):
addon = models.ForeignKey(Addon)
start = models.DateTimeField()
end = models.DateTimeField()
locale = models.CharField(max_length=10, default='', blank=True, null=True)
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='application_id')
class Meta:
db_table = 'features'
def __unicode__(self):
        # `application` stores the raw app id, so index APP_IDS directly.
        app = amo.APP_IDS[self.application].pretty
return '%s (%s: %s)' % (self.addon.name, app, self.locale)
class Preview(amo.models.ModelBase):
addon = models.ForeignKey(Addon, related_name='previews')
filetype = models.CharField(max_length=25)
thumbtype = models.CharField(max_length=25)
caption = TranslatedField()
position = models.IntegerField(default=0)
sizes = json_field.JSONField(max_length=25, default={})
class Meta:
db_table = 'previews'
ordering = ('position', 'created')
def flush_urls(self):
urls = ['*/addon/%d/' % self.addon_id,
self.thumbnail_url,
self.image_url, ]
return urls
def _image_url(self, url_template):
if self.modified is not None:
modified = int(time.mktime(self.modified.timetuple()))
else:
modified = 0
args = [self.id / 1000, self.id, modified]
if '.png' not in url_template:
args.insert(2, self.file_extension)
return url_template % tuple(args)
def _image_path(self, url_template):
args = [self.id / 1000, self.id]
if '.png' not in url_template:
args.append(self.file_extension)
return url_template % tuple(args)
def as_dict(self, src=None):
d = {'full': urlparams(self.image_url, src=src),
'thumbnail': urlparams(self.thumbnail_url, src=src),
'caption': unicode(self.caption)}
return d
@property
def is_landscape(self):
size = self.image_size
if not size:
return False
return size[0] > size[1]
@property
def file_extension(self):
# Assume that blank is an image.
if not self.filetype:
return 'png'
return self.filetype.split('/')[1]
@property
def thumbnail_url(self):
template = (
helpers.user_media_url('previews') +
'thumbs/%s/%d.png?modified=%s')
return self._image_url(template)
@property
def image_url(self):
template = (
helpers.user_media_url('previews') +
'full/%s/%d.%s?modified=%s')
return self._image_url(template)
@property
def thumbnail_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'thumbs',
'%s',
'%d.png'
)
return self._image_path(template)
@property
def image_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'full',
'%s',
'%d.%s'
)
return self._image_path(template)
@property
def thumbnail_size(self):
return self.sizes.get('thumbnail', []) if self.sizes else []
@property
def image_size(self):
return self.sizes.get('image', []) if self.sizes else []
dbsignals.pre_save.connect(save_signal, sender=Preview,
dispatch_uid='preview_translations')
def delete_preview_files(sender, instance, **kw):
"""On delete of the Preview object from the database, unlink the image
and thumb on the file system """
for filename in [instance.image_path, instance.thumbnail_path]:
if storage.exists(filename):
log.info('Removing filename: %s for preview: %s'
% (filename, instance.pk))
storage.delete(filename)
models.signals.post_delete.connect(delete_preview_files,
sender=Preview,
dispatch_uid='delete_preview_files')
class AppSupport(amo.models.ModelBase):
"""Cache to tell us if an add-on's current version supports an app."""
addon = models.ForeignKey(Addon)
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min = models.BigIntegerField("Minimum app version", null=True)
max = models.BigIntegerField("Maximum app version", null=True)
class Meta:
db_table = 'appsupport'
unique_together = ('addon', 'app')
class Charity(amo.models.ModelBase):
name = models.CharField(max_length=255)
url = models.URLField()
paypal = models.CharField(max_length=255)
class Meta:
db_table = 'charities'
@property
def outgoing_url(self):
if self.pk == amo.FOUNDATION_ORG:
return self.url
return get_outgoing_url(unicode(self.url))
class BlacklistedSlug(amo.models.ModelBase):
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'addons_blacklistedslug'
def __unicode__(self):
return self.name
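    # blocked() rejects all-numeric slugs as well as blacklisted ones, in
    # line with Category.clean() above; presumably numeric slugs would be
    # ambiguous with numeric IDs in URLs.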
@classmethod
def blocked(cls, slug):
return slug.isdigit() or cls.objects.filter(name=slug).exists()
class FrozenAddon(models.Model):
"""Add-ons in this table never get a hotness score."""
addon = models.ForeignKey(Addon)
class Meta:
db_table = 'frozen_addons'
def __unicode__(self):
return 'Frozen: %s' % self.addon_id
@receiver(dbsignals.post_save, sender=FrozenAddon)
def freezer(sender, instance, **kw):
# Adjust the hotness of the FrozenAddon.
if instance.addon_id:
Addon.objects.get(id=instance.addon_id).update(hotness=0)
class AddonUpsell(amo.models.ModelBase):
free = models.ForeignKey(Addon, related_name='_upsell_from')
premium = models.ForeignKey(Addon, related_name='_upsell_to')
class Meta:
db_table = 'addon_upsell'
unique_together = ('free', 'premium')
class CompatOverride(amo.models.ModelBase):
"""Helps manage compat info for add-ons not hosted on AMO."""
name = models.CharField(max_length=255, blank=True, null=True)
guid = models.CharField(max_length=255, unique=True)
addon = models.ForeignKey(Addon, blank=True, null=True,
help_text='Fill this out to link an override '
'to a hosted add-on')
class Meta:
db_table = 'compat_override'
unique_together = ('addon', 'guid')
def save(self, *args, **kw):
if not self.addon:
qs = Addon.objects.filter(guid=self.guid)
if qs:
self.addon = qs[0]
return super(CompatOverride, self).save(*args, **kw)
def __unicode__(self):
if self.addon:
return unicode(self.addon)
elif self.name:
return '%s (%s)' % (self.name, self.guid)
else:
return self.guid
def is_hosted(self):
"""Am I talking about an add-on on AMO?"""
return bool(self.addon_id)
@staticmethod
def transformer(overrides):
if not overrides:
return
id_map = dict((o.id, o) for o in overrides)
qs = CompatOverrideRange.objects.filter(compat__in=id_map)
for compat_id, ranges in sorted_groupby(qs, 'compat_id'):
id_map[compat_id].compat_ranges = list(ranges)
# May be filled in by a transformer for performance.
@amo.cached_property(writable=True)
def compat_ranges(self):
return list(self._compat_ranges.all())
def collapsed_ranges(self):
"""Collapse identical version ranges into one entity."""
Range = collections.namedtuple('Range', 'type min max apps')
AppRange = collections.namedtuple('AppRange', 'app min max')
rv = []
def sort_key(x):
return (x.min_version, x.max_version, x.type)
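        # Group ranges sharing (min_version, max_version, type) into a single
        # Range entry, collecting one AppRange per affected application.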
for key, compats in sorted_groupby(self.compat_ranges, key=sort_key):
compats = list(compats)
first = compats[0]
item = Range(first.override_type(), first.min_version,
first.max_version, [])
for compat in compats:
app = AppRange(amo.APPS_ALL[compat.app],
compat.min_app_version, compat.max_app_version)
item.apps.append(app)
rv.append(item)
return rv
OVERRIDE_TYPES = (
(0, 'Compatible (not supported)'),
(1, 'Incompatible'),
)
class CompatOverrideRange(amo.models.ModelBase):
"""App compatibility for a certain version range of a RemoteAddon."""
compat = models.ForeignKey(CompatOverride, related_name='_compat_ranges')
type = models.SmallIntegerField(choices=OVERRIDE_TYPES, default=1)
min_version = models.CharField(
max_length=255, default='0',
help_text=u'If not "0", version is required to exist for the override'
u' to take effect.')
max_version = models.CharField(
max_length=255, default='*',
help_text=u'If not "*", version is required to exist for the override'
u' to take effect.')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, default='0')
max_app_version = models.CharField(max_length=255, default='*')
class Meta:
db_table = 'compat_override_range'
def override_type(self):
"""This is what Firefox wants to see in the XML output."""
return {0: 'compatible', 1: 'incompatible'}[self.type]
class IncompatibleVersions(amo.models.ModelBase):
"""
Denormalized table to join against for fast compat override filtering.
    This was created so we can join against a specific version record, since
    a CompatOverrideRange can be wildcarded (e.g. 0 to *, or 1.0 to 1.*) and
    add-on versioning isn't consistent enough (unlike Firefox versioning) to
    trust `version_int` in all cases. Extra logic is therefore needed to
    decide when a particular version falls within the range of a
    compatibility override.
"""
version = models.ForeignKey(Version, related_name='+')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, blank=True, default='0')
max_app_version = models.CharField(max_length=255, blank=True, default='*')
min_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
max_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
class Meta:
db_table = 'incompatible_versions'
def __unicode__(self):
        # `app` stores the raw app id, so use it directly.
        return u'<IncompatibleVersion V:%s A:%s %s-%s>' % (
            self.version.id, self.app, self.min_app_version,
            self.max_app_version)
def save(self, *args, **kw):
self.min_app_version_int = version_int(self.min_app_version)
self.max_app_version_int = version_int(self.max_app_version)
return super(IncompatibleVersions, self).save(*args, **kw)
def update_incompatible_versions(sender, instance, **kw):
if not instance.compat.addon_id:
return
if not instance.compat.addon.type == amo.ADDON_EXTENSION:
return
from . import tasks
versions = instance.compat.addon.versions.values_list('id', flat=True)
for chunk in chunked(versions, 50):
tasks.update_incompatible_appversions.delay(chunk)
models.signals.post_save.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
models.signals.post_delete.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
def track_new_status(sender, instance, *args, **kw):
if kw.get('raw'):
        # The addon is being loaded from a fixture.
return
if kw.get('created'):
track_addon_status_change(instance)
models.signals.post_save.connect(track_new_status,
sender=Addon,
dispatch_uid='track_new_addon_status')
@Addon.on_change
def track_status_change(old_attr={}, new_attr={}, **kw):
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if new_status != old_status:
track_addon_status_change(kw['instance'])
def track_addon_status_change(addon):
statsd.incr('addon_status_change.all.status_{}'
.format(addon.status))
listed_tag = 'listed' if addon.is_listed else 'unlisted'
statsd.incr('addon_status_change.{}.status_{}'
.format(listed_tag, addon.status))
| {
"content_hash": "b21f6fe77b7a54855473f5987c3e3f97",
"timestamp": "",
"source": "github",
"line_count": 2352,
"max_line_length": 79,
"avg_line_length": 37.79506802721088,
"alnum_prop": 0.5887123990370554,
"repo_name": "Witia1/olympia",
"id": "c605a0c6e9001c7bfc777be0fabf4b5f88aefbcd",
"size": "88894",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "apps/addons/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "656811"
},
{
"name": "HTML",
"bytes": "1635245"
},
{
"name": "JavaScript",
"bytes": "1287516"
},
{
"name": "Makefile",
"bytes": "4009"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3947149"
},
{
"name": "Shell",
"bytes": "10335"
},
{
"name": "Smarty",
"bytes": "2229"
}
],
"symlink_target": ""
} |
"""
Liquid Web DNS Driver
"""
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.liquidweb import LiquidWebResponse, LiquidWebConnection
from libcloud.common.liquidweb import APIException
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import RecordAlreadyExistsError
__all__ = [
'LiquidWebDNSDriver'
]
class LiquidWebDNSResponse(LiquidWebResponse):
pass
class LiquidWebDNSConnection(LiquidWebConnection):
responseCls = LiquidWebDNSResponse
class LiquidWebDNSDriver(DNSDriver):
type = Provider.LIQUIDWEB
name = 'Liquidweb DNS'
website = 'https://www.liquidweb.com'
connectionCls = LiquidWebDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SOA: 'SOA',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT'
}
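    # Illustrative usage only (not part of this file); the credentials,
    # domain and record data are placeholders, and the constructor call
    # assumes the generic libcloud DNSDriver(key, secret) signature:
    #
    #     driver = LiquidWebDNSDriver('my-username', 'my-api-key')
    #     zone = driver.create_zone(domain='example.com')
    #     record = driver.create_record(name='www', zone=zone,
    #                                   type=RecordType.A, data='192.0.2.10')
    #     driver.delete_zone(zone)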
def list_zones(self):
"""
Return a list of zones.
:return: ``list`` of :class:`Zone`
"""
action = '/v1/Network/DNS/Zone/list'
response = self.connection.request(action=action,
method='POST')
zones = self._to_zones(response.objects[0])
return zones
def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
action = '/v1/Network/DNS/Record/list'
data = json.dumps({'params': {'zone_id': zone.id}})
response = self.connection.request(action=action,
method='POST',
data=data)
records = self._to_records(response.objects[0], zone=zone)
return records
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
action = '/v1/Network/DNS/Zone/details'
data = json.dumps({'params': {'id': zone_id}})
try:
response = self.connection.request(action=action,
method='POST',
data=data)
except APIException as e:
if e.error_class == 'LW::Exception::RecordNotFound':
raise ZoneDoesNotExistError(zone_id=zone_id,
value=e.value, driver=self)
else:
raise e
zone = self._to_zone(response.objects[0])
return zone
def get_record(self, zone_id, record_id):
"""
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
zone = self.get_zone(zone_id=zone_id)
action = '/v1/Network/DNS/Record/details'
data = json.dumps({'params': {'id': record_id}})
try:
response = self.connection.request(action=action,
method='POST',
data=data)
except APIException as e:
if e.error_class == 'LW::Exception::RecordNotFound':
raise RecordDoesNotExistError(record_id=record_id, driver=self,
value=e.value)
else:
raise e
record = self._to_record(response.objects[0], zone=zone)
return record
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param type: Zone type (This is not really used. See API docs for extra
parameters).
:type type: ``str``
:param ttl: TTL for new records. (This is not really used)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). ('region_support',
'zone_data')
:type extra: ``dict``
:rtype: :class:`Zone`
For more info, please see:
https://www.liquidweb.com/storm/api/docs/v1/Network/DNS/Zone.html
"""
action = '/v1/Network/DNS/Zone/create'
data = {'params': {'name': domain}}
if extra is not None:
data['params'].update(extra)
try:
data = json.dumps(data)
response = self.connection.request(action=action,
method='POST',
data=data)
except APIException as e:
if e.error_class == 'LW::Exception::DuplicateRecord':
raise ZoneAlreadyExistsError(zone_id=domain,
value=e.value,
driver=self)
else:
raise e
zone = self._to_zone(response.objects[0])
return zone
def create_record(self, name, zone, type, data, extra=None):
"""
Create a record.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone which the records will be created for.
:type zone: :class:`Zone`
:param type: DNS record type ( 'A', 'AAAA', 'CNAME', 'MX', 'NS',
'PTR', 'SOA', 'SRV', 'TXT').
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: (optional) Extra attributes ('prio', 'ttl').
:type extra: ``dict``
:rtype: :class:`Record`
"""
action = '/v1/Network/DNS/Record/create'
to_post = {'params': {'name': name,
'rdata': data,
'type': type,
'zone': zone.domain,
'zone_id': zone.id
}
}
if extra is not None:
to_post['params'].update(extra)
data = json.dumps(to_post)
try:
response = self.connection.request(action=action,
method='POST',
data=data)
except APIException as e:
if e.error_class == 'LW::Exception::DuplicateRecord':
raise RecordAlreadyExistsError(record_id=name,
value=e.value,
driver=self)
else:
raise e
record = self._to_record(response.objects[0], zone=zone)
return record
def update_record(self, record, name, type, data, extra=None):
"""
Update an existing record.
:param record: Record to update.
:type record: :class:`Record`
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param type: DNS record type ( 'A', 'AAAA', 'CNAME', 'MX', 'NS',
'PTR', 'SOA', 'SRV', 'TXT').
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: (optional) Extra attributes ('name', 'rdata', 'prio',
'ttl').
:type extra: ``dict``
:rtype: :class:`Record`
"""
zone = record.zone
action = '/v1/Network/DNS/Record/update'
to_post = {'params': {'id': int(record.id),
'name': name,
'rdata': data}}
if extra is not None:
to_post['params'].update(extra)
j_data = json.dumps(to_post)
try:
response = self.connection.request(action=action,
method='PUT',
data=j_data)
except APIException as e:
if e.error_class == 'LW::Exception::RecordNotFound':
raise RecordDoesNotExistError(record_id=record.id, driver=self,
value=e.value)
else:
raise e
record = self._to_record(response.objects[0], zone=zone)
return record
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
action = '/v1/Network/DNS/Zone/delete'
data = json.dumps({'params': {'id': zone.id}})
try:
response = self.connection.request(action=action,
method='POST',
data=data)
except APIException as e:
if e.error_class == 'LW::Exception::RecordNotFound':
raise ZoneDoesNotExistError(zone_id=zone.id,
value=e.value, driver=self)
else:
raise e
return zone.domain in response.objects
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
action = '/v1/Network/DNS/Record/delete'
data = json.dumps({'params': {'id': record.id}})
try:
response = self.connection.request(action=action,
method='POST',
data=data)
except APIException as e:
if e.error_class == 'LW::Exception::RecordNotFound':
raise RecordDoesNotExistError(record_id=record.id, driver=self,
value=e.value)
else:
raise e
return record.id in response.objects
def _to_zone(self, item):
common_attr = ['id', 'name', 'type']
extra = {}
for key in item:
if key not in common_attr:
extra[key] = item.get(key)
zone = Zone(domain=item['name'], id=item['id'], type=item['type'],
ttl=None, driver=self, extra=extra)
return zone
def _to_zones(self, items):
zones = []
for item in items:
zones.append(self._to_zone(item))
return zones
def _to_record(self, item, zone):
common_attr = ['id', 'rdata', 'name', 'type']
extra = {}
for key in item:
if key not in common_attr:
extra[key] = item.get(key)
record = Record(id=item['id'], name=item['name'], type=item['type'],
data=item['rdata'], zone=zone, driver=self,
extra=extra)
return record
def _to_records(self, items, zone):
records = []
for item in items:
records.append(self._to_record(item, zone))
return records
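# Usage sketch: listing zones and creating a record with this driver. The
# credentials and hostnames below are placeholders, not values from the
# driver itself, and error handling is omitted for brevity.
#
#     from libcloud.dns.providers import get_driver
#     from libcloud.dns.types import Provider, RecordType
#
#     cls = get_driver(Provider.LIQUIDWEB)
#     driver = cls('api_user', 'api_password')
#     zone = driver.create_zone(domain='example.com')
#     driver.create_record(name='www', zone=zone,
#                          type=RecordType.A, data='192.0.2.10')
#     print([z.domain for z in driver.list_zones()])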
| {
"content_hash": "f7cdcfc927079471c30872cb29a82bcc",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 79,
"avg_line_length": 33.131147540983605,
"alnum_prop": 0.49488701962724724,
"repo_name": "Kami/libcloud",
"id": "15a8295455cde354121f162df68c8ea40be0ec08",
"size": "12907",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "libcloud/dns/drivers/liquidweb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9122888"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from pyquery import PyQuery as pq
from olympia.addons.models import Addon
from olympia.abuse.models import AbuseReport
from olympia.amo.tests import TestCase, user_factory
from olympia.users.models import UserProfile
class TestAbuse(TestCase):
fixtures = ['base/addon_3615', 'base/user_999']
def test_list(self):
addon = Addon.objects.get(pk=3615)
user = UserProfile.objects.get(pk=999)
# Create a few abuse reports
AbuseReport.objects.create(addon=addon, message='Foo')
AbuseReport.objects.create(
addon=addon, ip_address='1.2.3.4', reporter=user_factory(),
message='Bar')
AbuseReport.objects.create(user=user_factory(), message='Eheheheh')
url = reverse('admin:abuse_abusereport_changelist')
self.grant_permission(user, '*:*')
self.client.login(email=user.email)
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody tr').length == 3
response = self.client.get(url, {'type': 'addon'})
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody tr').length == 2
response = self.client.get(url, {'type': 'user'})
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody tr').length == 1
| {
"content_hash": "963cd7016745e7f8437a186a83737bff",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 36.146341463414636,
"alnum_prop": 0.6518218623481782,
"repo_name": "harikishen/addons-server",
"id": "89650417a029602638095fe5c3ab4cb4f0f3eb57",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/abuse/tests/test_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "822508"
},
{
"name": "HTML",
"bytes": "698554"
},
{
"name": "JavaScript",
"bytes": "1087360"
},
{
"name": "Makefile",
"bytes": "811"
},
{
"name": "PLSQL",
"bytes": "990"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4560536"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "7564"
},
{
"name": "Smarty",
"bytes": "1859"
}
],
"symlink_target": ""
} |
import threading
import urllib
class MultiUrl(threading.Thread):
def __init__(self, url):
threading.Thread.__init__(self)
self.url = url
def run(self):
urllib.urlopen(self.url).read()
background = MultiUrl('http://slashdot.org')
background.start()
print 'main continues'
background.join()
print 'main is done.'
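# A rough Python 3 equivalent of the snippet above (urllib.urlopen moved to
# urllib.request.urlopen and print became a function); behaviour is assumed
# to be otherwise identical:
#
#     import threading
#     import urllib.request
#
#     class MultiUrl(threading.Thread):
#         def __init__(self, url):
#             super().__init__()
#             self.url = url
#
#         def run(self):
#             urllib.request.urlopen(self.url).read()
#
#     background = MultiUrl('http://slashdot.org')
#     background.start()
#     print('main continues')
#     background.join()
#     print('main is done.')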
| {
"content_hash": "a809092b4878cb3bbe397b69023637d2",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 44,
"avg_line_length": 23,
"alnum_prop": 0.6695652173913044,
"repo_name": "beqa2323/learntosolveit",
"id": "26309e578be174bee823d5dfaab17b199810c6eb",
"size": "345",
"binary": false,
"copies": "4",
"ref": "refs/heads/version1",
"path": "languages/python/software_engineering_simple_threading1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "284320"
},
{
"name": "CSS",
"bytes": "3038"
},
{
"name": "HTML",
"bytes": "6727"
},
{
"name": "Java",
"bytes": "138605"
},
{
"name": "JavaScript",
"bytes": "722"
},
{
"name": "Makefile",
"bytes": "3889"
},
{
"name": "Python",
"bytes": "156544"
},
{
"name": "Ruby",
"bytes": "4290"
},
{
"name": "Scala",
"bytes": "8545"
}
],
"symlink_target": ""
} |
import logging
import endpoints
from protorpc import messages
from protorpc import remote
from google.appengine.api import users
from models import UserModel
from kenix.core.api import kenix_core_api
log = logging.getLogger(__name__)
class AuthRequest(messages.Message):
email = messages.StringField(1)
password = messages.StringField(2)
class AuthToken(messages.Message):
"""
Authentication Token
"""
auth_token = messages.StringField(1)
user = messages.StringField(2)
logout_status = messages.BooleanField(3)
@kenix_core_api.api_class(resource_name='users')
class UserService(remote.Service):
"""
Users API v1
"""
@UserModel.query_method(query_fields=('limit', 'pageToken', 'email'),
path='users', name='index')
def index(self, query):
"""
List of users
"""
return query
@UserModel.method(path='users/{id}', http_method='GET', name='get')
def get(self, user):
"""
Get a user
@param user:
@return:
"""
if not user.from_datastore:
raise endpoints.NotFoundException('User not found')
return user
@UserModel.method(path='users', name='create')
def create(self, user):
"""
Create a user
"""
# do some validation
user.put()
return user
@UserModel.method(path='users/{id}', http_method='PUT', name='update')
def update(self, user):
"""
Update a user
@param user:
@return user:
"""
if not user.from_datastore:
raise endpoints.NotFoundException('User not found')
user.put()
return user
# @UserModel.method(path='users', http_method='POST',
# name='_auth')
# def _auth(self, query):
# """
# Authenticate user by user id and password, or cookie.
# """
# log.error(query)
# current_user = endpoints.get_current_user()
# if not current_user:
# raise endpoints.NotFoundException('User not authenticated')
# return current_user
# request_message=message_types.VoidMessage,
# response_message=message_types.VoidMessage,
# name=None,
# path=None,
# http_method='POST',
# cache_control=None,
# scopes=None,
# audiences=None,
# allowed_client_ids=None,
# auth_level=None
@endpoints.method(AuthRequest, AuthToken,
path='users/auth', http_method='POST',
name='auth')
def auth(self, *args, **kw):
"""
Authenticate a user by email and password
@param args:
@param kw:
@return:
"""
user = users.get_current_user()
token = AuthToken()
if user:
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Hello, ' + user.nickname())
#else:
# self.redirect(users.create_login_url(self.request.uri))
log.error(args)
log.error(kw)
token.auth_token = 'aaa'
token.user = 'kenji'
return token
@endpoints.method(AuthRequest, AuthToken,
path='users/logout', http_method='POST',
name='logout')
def logout(self, *args, **kw):
"""
Logout a user
@param self:
@param args:
@param kw:
@return:
"""
token = AuthToken()
token.auth_token = ''
token.user = ''
token.logout_status = True
return token
| {
"content_hash": "7d8e19aaa33fe9ac53762898da53e9c7",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 74,
"avg_line_length": 26,
"alnum_prop": 0.5575539568345323,
"repo_name": "knoguchi/kenix-scm",
"id": "bf1c5a1cdad1ecdcd62e709e8ca9c1fdbddf80b3",
"size": "3614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/kenix/core/users/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "100207"
},
{
"name": "JavaScript",
"bytes": "76864"
},
{
"name": "Python",
"bytes": "3601755"
}
],
"symlink_target": ""
} |
from flask_restful import Api
from flask import Blueprint
from .views import UeditorView
ueditor = Blueprint('ueditor', __name__)
ueditor_wrap = Api(ueditor)
ueditor_wrap.add_resource(UeditorView, '/api/ueditor/')
| {
"content_hash": "53a588061b24f8993eb36aa0e356678c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 21.8,
"alnum_prop": 0.7614678899082569,
"repo_name": "iceihehe/easy-note",
"id": "c59684d034a05d820250c691781bc5c879337e19",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/ueditor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import sys
verbose = False
def count_syllables(word):
vowels = ['a', 'e', 'i', 'o', 'u']
on_vowel = False
in_diphthong = False
minsyl = 0
maxsyl = 0
lastchar = None
word = word.lower()
for c in word:
is_vowel = c in vowels
        if on_vowel is None:
on_vowel = is_vowel
# y is a special case
if c == 'y':
is_vowel = not on_vowel
if is_vowel:
if not on_vowel:
# We weren't on a vowel before.
# Seeing a new vowel bumps the syllable count.
minsyl += 1
maxsyl += 1
elif on_vowel and not in_diphthong and c != lastchar:
# We were already in a vowel.
# Don't increment anything except the max count,
# and only do that once per diphthong.
in_diphthong = True
maxsyl += 1
on_vowel = is_vowel
lastchar = c
# Some special cases:
if word[-1] == 'e':
minsyl -= 1
# if it ended with a consonant followed by y, count that as a syllable.
if word[-1] == 'y' and not on_vowel:
maxsyl += 1
    return minsyl
| {
"content_hash": "a77dcc9992924aefc4327977941cf16d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 25.145833333333332,
"alnum_prop": 0.4971002485501243,
"repo_name": "hyperreality/Poetry-Tools",
"id": "8c4a011763d00108f9f34bc6f99fff3d29d1b4ed",
"size": "1819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "poetrytools/countsyl.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14409"
}
],
"symlink_target": ""
} |
from blog.orm import db
from blog.security.models import user_datastore
from blog.application import create_app
from argparse import ArgumentParser
def main(email, password, role_name, alias):
app = create_app()
with app.app_context():
user = user_datastore.create_user(email=email, password=password, alias=alias)
role = user_datastore.find_or_create_role(role_name)
user_datastore.add_role_to_user(user, role)
db.session.commit()
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('email', help='users email address')
parser.add_argument('password', help='users password')
parser.add_argument('alias', help='users display name')
parser.add_argument('role', help='users role')
args = parser.parse_args()
main(args.email, args.password, args.role, args.alias)
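# Example invocation (sketch): run from the backend directory so the 'blog'
# package is importable; every value below is a placeholder.
#
#     python -m blog.init admin@example.com s3cret "Site Admin" admin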
| {
"content_hash": "f5b6f396bf434d70a812acd203eae9b5",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 86,
"avg_line_length": 32.96153846153846,
"alnum_prop": 0.691948658109685,
"repo_name": "pbecotte/devblog",
"id": "bc51dc22f64e52a986f9c4b4ccc9cb32a32993dd",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/blog/init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5026"
},
{
"name": "Dockerfile",
"bytes": "601"
},
{
"name": "HCL",
"bytes": "1107"
},
{
"name": "HTML",
"bytes": "29380"
},
{
"name": "JavaScript",
"bytes": "1461"
},
{
"name": "Makefile",
"bytes": "2136"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "29446"
},
{
"name": "TypeScript",
"bytes": "25304"
}
],
"symlink_target": ""
} |
from fabric.api import task, local
import build
__all__ = ['upload', 'register']
@task
def upload():
"""upload the dist to pypi"""
build.sdist()
local("python setup.py sdist upload")
@task
def register():
"""register with pypi. Needs only to be done once."""
local("python setup.py register")
| {
"content_hash": "37fa624d3331714db446e088093cef58",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 18.705882352941178,
"alnum_prop": 0.6446540880503144,
"repo_name": "rajpushkar83/cloudmesh",
"id": "3da75916d411a377c1d341b79eaae4f714b5fa84",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/pypi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
} |
from gi.repository import GLib
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
import json
import os
import crypt
import subprocess
import signal
import re
import random
import socket
class System_DBus(dbus.service.Object):
def __init__(self):
bus_name = dbus.service.BusName('com.postgrespro.PGManager', bus=dbus.SystemBus())
dbus.service.Object.__init__(self, bus_name, '/com/postgrespro/PGManager')
def generatePassword(self):
length = 10
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
return ''.join(random.choice(chars) for _ in range(length))
def getFreePort(self):
port = 0
numTries = 10
while numTries > 0:
numTries -= 1
port = random.randint(40000, 49999)
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
tcp.bind(('', port))
break
except OSError:
if numTries == 0:
raise Exception("Could not choose a port number.")
finally:
tcp.close()
return port
@dbus.service.method('com.postgrespro.PGManager')
def CreateCluster(self, username, program_path):
pg_version = ''
if not program_path:
version_output = ''
try:
version_output = subprocess.check_output(
"perl -e \"use PgCommon; print join('\n', get_versions())\"",
stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as ex:
return json.JSONEncoder(ensure_ascii=False).encode({
"error": ('get_versions failed: %s (%s)' % (str(ex), ex.output.decode('utf-8', 'backslashreplace')))});
pg_version = version_output.decode('utf-8').split('\n')[0]
try:
program_path = subprocess.check_output(
"perl -e \"use PgCommon; print get_program_path '', '%s';\"" % pg_version,
stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as ex:
return json.JSONEncoder(ensure_ascii=False).encode({
"error": ('get_program_path failed: %s (%s)' % (str(ex), ex.output.decode('utf-8', 'backslashreplace')))});
program_path = program_path.decode('utf-8')
if program_path == '':
return json.JSONEncoder(ensure_ascii=False).encode({
"error": ('unable to get program path for postgres version: %s' % (pg_version))});
if not os.path.exists(program_path + 'pg_ctl'):
return json.JSONEncoder(ensure_ascii=False).encode({"error": ('invalid server program path: %s' % (program_path))})
password = self.generatePassword()
encPass = crypt.crypt(password,"22")
try:
subprocess.check_output(
                ['/usr/sbin/useradd', username, '-m', '-p', encPass],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
return json.JSONEncoder(ensure_ascii=False).encode({
"error": ('useradd failed: %s (%s)' % (str(ex), ex.output.decode('utf-8', 'backslashreplace')))});
port_number = 0
postgres_password = self.generatePassword()
try:
passfile = os.path.expanduser("~%s/.postgres.pass" % username)
with open(passfile, "w") as fpass: fpass.write(postgres_password)
port_number = self.getFreePort()
except Exception as ex:
return json.JSONEncoder(ensure_ascii=False).encode({"error": ('preparation failed: %s' % (str(ex)))})
db_path = os.path.expanduser("~%s/pgdb" % username)
try:
output = subprocess.check_output([
'/bin/su', username, '-l', '-c',
"""
rm -rf /tmp/pg_{2}.log /tmp/{2}; \
{0}initdb --auth=md5 --username=postgres --pwfile={3} {1} && \
mkdir /tmp/{2} && \
{0}pg_ctl start -w -t 10 -D {1} -o "--listen_addresses='*' -p {4} -k /tmp/{2}" >/tmp/pg_{2}.log""" \
.format(program_path, db_path, username, passfile, port_number)], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
return json.JSONEncoder(ensure_ascii=False).encode({"error": ('initdb & start failed: %s (%s)' % (str(ex), ex.output.decode('utf-8', 'backslashreplace')))});
os.remove(passfile)
return json.JSONEncoder(ensure_ascii=False).encode({"result":
{"user_name": username,
"os_user_password": password,
"postgres_password": postgres_password,
"pg_version": pg_version,
"program_path": program_path,
"db_path": db_path,
"port_number": port_number
}});
@dbus.service.method('com.postgrespro.PGManager')
def DropCluster(self, username, program_path):
if not os.path.exists(program_path + 'pg_ctl'):
return json.JSONEncoder(ensure_ascii=False).encode({"error": ('invalid server program path: %s' % (program_path))})
messages = ''
db_path = os.path.expanduser("~%s/pgdb" % username)
if program_path != '':
try:
output = subprocess.check_output([
'/bin/su', username, '-l', '-c',
"""{0}pg_ctl stop -w -t 10 -D {1} -m immediate && rm -rf /tmp/{2}""" \
.format(program_path, db_path, username)],
stderr=subprocess.STDOUT)
messages += 'pg_ctl returned: ' + output.decode('utf-8')
except subprocess.CalledProcessError as ex:
messages += 'pg stop failed with messages: %s (%s)' % (str(ex), ex.output.decode('utf-8', 'backslashreplace'))
try:
subprocess.check_output(['/usr/sbin/userdel', username, '-f', '-r'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
return json.JSONEncoder(ensure_ascii=False).encode({"error": ('userdel failed: %s (%s)' % (str(ex), ex.output.decode('utf-8', 'backslashreplace'))) });
return json.JSONEncoder(ensure_ascii=False).encode({"result": {"removed_user_name" : username,
"messages": messages} });
DBusGMainLoop(set_as_default=True)
dbus_service = System_DBus()
try:
GLib.MainLoop().run()
except KeyboardInterrupt:
print("\nThe MainLoop will close...")
GLib.MainLoop().quit()
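# Example client call (sketch): invoking CreateCluster over the system bus.
# This assumes a matching D-Bus policy is installed; the username and empty
# program path below are placeholder arguments.
#
#     import dbus
#     bus = dbus.SystemBus()
#     proxy = bus.get_object('com.postgrespro.PGManager',
#                            '/com/postgrespro/PGManager')
#     manager = dbus.Interface(proxy, dbus_interface='com.postgrespro.PGManager')
#     print(manager.CreateCluster('student1', ''))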
| {
"content_hash": "51861596481dd3db31d04dfcafe2df47",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 169,
"avg_line_length": 46.13513513513514,
"alnum_prop": 0.5489162272993556,
"repo_name": "alexanderlaw/sqlfiddle2",
"id": "1cc3f0b354d2d5c8c368ee9e1f0f34cd107e8701",
"size": "6883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/resources/db/postgresql/pgmanager/pgmanager.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18891"
},
{
"name": "Groovy",
"bytes": "89662"
},
{
"name": "HTML",
"bytes": "70535"
},
{
"name": "Java",
"bytes": "17981"
},
{
"name": "JavaScript",
"bytes": "188614"
},
{
"name": "PowerShell",
"bytes": "172"
},
{
"name": "Python",
"bytes": "6883"
},
{
"name": "SQLPL",
"bytes": "68451"
},
{
"name": "Shell",
"bytes": "24391"
},
{
"name": "VCL",
"bytes": "1374"
}
],
"symlink_target": ""
} |
import datetime
from bottle import Bottle, request, response, HTTPResponse
from bottle.ext import sqlalchemy
from bottle import static_file
from gisela.model import engine, Base, Tag, Timelog, Timer
from gisela.response import Response
# --------------------------------
# Add SQLAlchemy app
# --------------------------------
app = Bottle()
plugin = sqlalchemy.Plugin(
engine,
Base.metadata,
keyword='db',
create=True,
commit=True,
use_kwargs=False
)
app.install(plugin)
@app.hook('after_request')
def enable_cors():
"""
You need to add some headers to each request.
Don't use the wildcard '*' for Access-Control-Allow-Origin in production.
"""
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
@app.route('/demo/<filename>')
def server_static(filename):
return static_file(filename, root='./demo')
@app.get("/")
def index(db):
return "My name is Gisela."
@app.route("/tags", method=["OPTIONS"])
@app.route("/tags/<id>", method=["OPTIONS"])
@app.route("/times", method=["OPTIONS"])
@app.route("/times/<id>", method=["OPTIONS"])
@app.route("/timers", method=["OPTIONS"])
@app.route("/timers/<id>", method=["OPTIONS"])
def allow_options(id=None):
return {}
@app.post("/timers")
def timer_create(db):
timer = Timer(request.json.get("description", ""))
db.add(timer)
db.commit()
return Response(timer)
@app.get("/timers")
def timer_list(db):
tags = db.query(Timer).all()
return Response(tags)
@app.put("/timers/<id>")
def timer_update(id, db):
timer = db.query(Timer).filter(Timer.id == id).one()
timer.description = request.json.get("description", timer.description)
timer.tags = []
tags = request.json.get("tags", [])
for tag in tags:
tag = db.query(Tag).filter(Tag.id == tag.get("id")).one()
timer.tags.append(tag)
db.commit()
return Response(timer)
@app.delete("/timers/<id>")
def timer_delete(id, db):
timer = db.query(Timer).filter(Timer.id == id).delete()
db.commit()
return Response(timer)
@app.get("/tags")
def tag_list(db):
tags = db.query(Tag).all()
return Response(tags)
@app.post("/tags")
def tag_create(db):
tag = Tag(request.json.get("name"),
request.json.get("description", ""))
db.add(tag)
db.commit()
return Response(tag)
@app.get("/tags/<id>")
def tag_read(id, db):
tag = db.query(Tag).filter(Tag.id == id).one()
return Response(tag)
@app.put("/tags/<id>")
def tag_update(id, db):
tag = db.query(Tag).filter(Tag.id == id).one()
tag.name = request.json.get("name", tag.name)
tag.description = request.json.get("description", tag.description)
db.commit()
return Response(tag)
@app.delete("/tags/<id>")
def tag_delete(id, db):
tag = db.query(Tag).filter(Tag.id == id).delete()
db.commit()
return Response(tag)
@app.get("/times")
def time_list(db):
times = db.query(Timelog).order_by(Timelog.id.desc(),
Timelog.start_date.desc()).all()
return Response(times)
@app.get("/times/export")
def time_export(db):
data = []
times = []
# Header
data.append("Datum Zeit S B [AP] Zusammenfassung")
data.append("==============================================================================")
for id in sorted([int(id) for id in request.GET.get("times").split(",")]):
times.append(db.query(Timelog).filter(Timelog.id == id).one())
data.append("\n".join(zeiterfassung(times)))
data.append("==============================================================================")
out = "\n".join(data)
response.set_header("Content-type", "text/plain")
response.set_header("Content-Disposition", "attachment; filename=export.txt")
response.set_header("Content-Length", len(out))
return out
@app.post("/times")
def time_create(db):
time = Timelog(request.json.get("start_date"),
request.json.get("duration"),
request.json.get("description"))
# Add tags to the timelog
for tagdata in request.json.get("tags", []):
tag = db.query(Tag).filter(Tag.id == tagdata.get("id")).one()
time.tags.append(tag)
db.add(time)
db.commit()
return Response(time)
#@app.get("/times/<id>")
#def time_read(id, db):
# time = db.query(Timelog).filter(Timelog.id == id).one()
# return Response(time)
#
#
@app.put("/times/<id>")
def time_update(id, db):
time = db.query(Timelog).filter(Timelog.id == id).one()
start_date = request.json.get("start_date")
if start_date:
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
time.start_date = start_date
time.duration = int(request.json.get("duration", time.duration))
time.description = request.json.get("description", time.description)
# Add/Remove tags
taglist = request.json.get("tags", [])
if taglist:
time.tags = []
for tagdata in taglist:
tag = db.query(Tag).filter(Tag.id == tagdata.get("id")).one()
time.tags.append(tag)
db.commit()
return Response(time)
@app.delete("/times/<id>")
def time_delete(id, db):
time = db.query(Timelog).filter(Timelog.id == id).delete()
db.commit()
return Response(time)
#
#@app.put("/times/<id>/start")
#def time_start(id, db):
# time = db.query(Timelog).filter(Timelog.id == id).one()
# time.start()
# db.commit()
# return Response(time)
#
#@app.put("/times/<id>/pause")
#def time_pause(id, db):
# time = db.query(Timelog).filter(Timelog.id == id).one()
# time.pause()
# db.commit()
# return Response(time)
#
#@app.put("/times/<id>/stop")
#def time_stop(id, db):
# time = db.query(Timelog).filter(Timelog.id == id).one()
# time.stop()
# db.commit()
# return Response(time)
def week_magic(day):
day_of_week = day.weekday()
to_beginning_of_week = datetime.timedelta(days=day_of_week)
beginning_of_week = day - to_beginning_of_week
to_end_of_week = datetime.timedelta(days=6 - day_of_week)
end_of_week = day + to_end_of_week
return (beginning_of_week, end_of_week)
@app.get("/report")
def report(db):
sw, ew = week_magic(datetime.date.today())
start_date = request.params.get("start")
end_date = request.params.get("end")
if start_date:
y,m,d = map(int, start_date.split("-"))
start_date = datetime.date(y,m,d)
else:
start_date = sw
if end_date:
y,m,d = map(int, end_date.split("-"))
end_date = datetime.date(y,m,d)
else:
end_date = ew
tags = request.params.get("tags")
times = []
for time in db.query(Timelog).all():
if time.start_date.date() <= end_date and time.start_date.date() >= start_date:
times.append(time)
return "\n".join(zeiterfassung(times))
def zeiterfassung(times):
out = []
#05.01.2015 0:15h a ab [2379-100-100] Material zu Wasquik ansehen
def format_duration(duration):
m = duration/60
h = m/60
m = m%60
return "{0:02d}:{1:02d}".format(h, m)
total = 0
for time in times:
total += time.duration
out.append(u"{date} {duration}h a {author:3} [{tags}] {description}"
.format(date=time.start_date.date().strftime("%d.%m.%Y"),
duration=format_duration(time.duration),
author="xxx",
tags=", ".join([t.name for t in time.tags]),
description=time.description))
out.append("\nTotal: {0}h".format(format_duration(total)))
return out
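# Example (sketch): serving this API locally with Bottle's built-in server;
# the host and port are arbitrary choices, not values from this project.
#
#     from gisela.service import app
#     app.run(host='127.0.0.1', port=8080, debug=True)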
| {
"content_hash": "720cccb0a274cd9b26c9ffe21bb6e41d",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 117,
"avg_line_length": 29.088560885608857,
"alnum_prop": 0.5879741215273373,
"repo_name": "toirl/gisela",
"id": "27df5e5db350f26375a0dc9a32d5cba02f84e894",
"size": "7883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gisela/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "API Blueprint",
"bytes": "7469"
},
{
"name": "HTML",
"bytes": "1977"
},
{
"name": "JavaScript",
"bytes": "1083"
},
{
"name": "Python",
"bytes": "27817"
}
],
"symlink_target": ""
} |
import traceback, types
class pyloader(object):
def __init__(self, log):
self.log = log
def load(self, name, path):
try:
m = types.ModuleType(name)
exec open(path).read() in m.__dict__
m.__file__ = path
return m
except:
self.log.logError("Pyloader", "Load module %s [path: %s] error: %s!!!",
name, path, traceback.format_exc())
return None
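# Usage sketch (names are illustrative): 'log' must expose a
# logError(tag, fmt, *args) method, as used in the except branch above.
#
#     loader = pyloader(log)
#     plugin = loader.load('myplugin', '/path/to/myplugin.py')
#     if plugin is not None:
#         plugin.main()  # hypothetical attribute of the loaded module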
| {
"content_hash": "003008797bb6fc1a91a6e1d9dcb10ef4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 83,
"avg_line_length": 26.555555555555557,
"alnum_prop": 0.48326359832635984,
"repo_name": "AlexWoo/pyed",
"id": "93c5199f66bd4d2e253e91760de64855e4136b6c",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysys/pyloader.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "57906"
}
],
"symlink_target": ""
} |
"""
Setup file for django-errordite.
"""
import os
from os.path import join, dirname, normpath, abspath
from setuptools import setup
# allow setup.py to be run from any path
os.chdir(normpath(join(abspath(__file__), os.pardir)))
# we can't import errordite as it hasn't been installed yet, so instead
# read information in as text. This will read in the __init__ file, and
# create a dict containing any lines beginning with '__' as keys, with
# whatever is after the '=' as the value,
# __desc__ = 'hello'
# would give {'desc': 'hello'}
meta = {}
for l in [line for line in tuple(open('django_errordite/__init__.py', 'r')) if line[:2] == '__']:
t = l.split('=')
meta[t[0].strip().strip('__')] = t[1].strip().strip('\'')
setup(
name=meta['title'],
version=meta['version'],
packages=['django_errordite'],
install_requires=['errordite>=0.4'],
include_package_data=True,
license=open(join(dirname(__file__), 'LICENCE.md')).read(),
description=meta['description'],
long_description=open(join(dirname(__file__), 'README.rst')).read(),
url='https://github.com/hugorodgerbrown/django-errordite',
author=meta['author'],
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| {
"content_hash": "0aa7bd72be3892263b8247782005056c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 97,
"avg_line_length": 36.355555555555554,
"alnum_prop": 0.6289731051344744,
"repo_name": "yunojuno/django-errordite",
"id": "ba6a15e3d54cd5781db4bce84569718aa41080d2",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9202"
}
],
"symlink_target": ""
} |
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Meteorers helps ...
"""
#<DefineConcept>
import ShareYourSystem as SYS
SYS.setConceptModule(globals())
#</DefineConcept>
| {
"content_hash": "55a20561f5cced5eabb13446b9b96d6f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 35,
"avg_line_length": 13.529411764705882,
"alnum_prop": 0.7,
"repo_name": "Ledoux/ShareYourSystem",
"id": "7d458c1c3ece9b1035940cf85dbec9f91932ae3a",
"size": "254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pythonlogy/build/lib/ShareYourSystem/Standards/Viewers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
from django.http import FileResponse
from django.core.files.base import ContentFile
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import NotFound, PermissionDenied, ValidationError
from framework.auth.oauth_scopes import CoreScopes
from osf.models import (
Guid,
BaseFileNode,
FileVersion,
QuickFilesNode,
)
from api.base.exceptions import Gone
from api.base.permissions import PermissionWithGetter
from api.base.throttling import CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle
from api.base import utils
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ReadOnlyIfRegistration
from api.files.permissions import IsPreprintFile
from api.files.permissions import CheckedOutOrAdmin
from api.files.permissions import FileMetadataRecordPermission
from api.files.serializers import FileSerializer
from api.files.serializers import FileDetailSerializer, QuickFilesDetailSerializer
from api.files.serializers import FileMetadataRecordSerializer
from api.files.serializers import FileVersionSerializer
from osf.utils.permissions import ADMIN
class FileMixin(object):
"""Mixin with convenience methods for retrieving the current file based on the
current URL. By default, fetches the file based on the file_id kwarg.
"""
serializer_class = FileSerializer
file_lookup_url_kwarg = 'file_id'
def get_file(self, check_permissions=True):
try:
obj = utils.get_object_or_error(BaseFileNode, self.kwargs[self.file_lookup_url_kwarg], self.request, display_name='file')
except NotFound:
obj = utils.get_object_or_error(Guid, self.kwargs[self.file_lookup_url_kwarg], self.request).referent
if not isinstance(obj, BaseFileNode):
raise NotFound
if obj.is_deleted:
raise Gone(detail='The requested file is no longer available.')
if getattr(obj.target, 'deleted', None):
raise Gone(detail='The requested file is no longer available')
if getattr(obj.target, 'is_quickfiles', False) and getattr(obj.target, 'creator'):
if obj.target.creator.is_disabled:
raise Gone(detail='This user has been deactivated and their quickfiles are no longer available.')
if check_permissions:
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
class FileDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_detail).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
IsPreprintFile,
CheckedOutOrAdmin,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'target'),
PermissionWithGetter(ReadOnlyIfRegistration, 'target'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileDetailSerializer
throttle_classes = (CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle, )
view_category = 'files'
view_name = 'file-detail'
def get_serializer_class(self):
try:
target = self.get_target()
except (NotFound, Gone, PermissionDenied):
return FileDetailSerializer
else:
if isinstance(target, QuickFilesNode):
return QuickFilesDetailSerializer
return FileDetailSerializer
def get_target(self):
return self.get_file().target
# overrides RetrieveAPIView
def get_object(self):
user = utils.get_user_auth(self.request).user
file = self.get_file()
if self.request.GET.get('create_guid', False):
# allows quickfiles to be given guids when another user wants a permanent link to it
if (self.get_target().has_permission(user, ADMIN) and utils.has_admin_scope(self.request)) or getattr(file.target, 'is_quickfiles', False):
file.get_guid(create=True)
return file
class FileVersionsList(JSONAPIBaseView, generics.ListAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_versions).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'target'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'file-versions'
ordering = ('-modified',)
def get_queryset(self):
self.file = self.get_file()
return self.file.versions.all()
def get_serializer_context(self):
context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
def node_from_version(request, view, obj):
return view.get_file(check_permissions=False).target
class FileVersionDetail(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_version_detail).
"""
version_lookup_url_kwarg = 'version_id'
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, node_from_version),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'version-detail'
# overrides RetrieveAPIView
def get_object(self):
self.file = self.get_file()
maybe_version = self.file.get_version(self.kwargs[self.version_lookup_url_kwarg])
# May raise a permission denied
# Kinda hacky but versions have no reference to node or file
self.check_object_permissions(self.request, self.file)
return utils.get_object_or_error(FileVersion, getattr(maybe_version, '_id', ''), self.request)
def get_serializer_context(self):
context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
class FileMetadataRecordsList(JSONAPIBaseView, generics.ListAPIView, FileMixin):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'target'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = FileMetadataRecordSerializer
view_category = 'files'
view_name = 'metadata-records'
ordering = ('-created',)
def get_queryset(self):
return self.get_file().records.all()
class FileMetadataRecordDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, FileMixin):
record_lookup_url_kwarg = 'record_id'
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
FileMetadataRecordPermission(ContributorOrPublic),
FileMetadataRecordPermission(ReadOnlyIfRegistration),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileMetadataRecordSerializer
view_category = 'files'
view_name = 'metadata-record-detail'
def get_object(self):
return utils.get_object_or_error(
self.get_file().records.filter(_id=self.kwargs[self.record_lookup_url_kwarg]),
request=self.request,
)
class FileMetadataRecordDownload(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
record_lookup_url_kwarg = 'record_id'
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'target'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NULL]
view_category = 'files'
view_name = 'metadata-record-download'
def get_serializer_class(self):
return None
def get_object(self):
return utils.get_object_or_error(
self.get_file().records.filter(_id=self.kwargs[self.record_lookup_url_kwarg]).select_related('schema', 'file'),
request=self.request,
)
def get(self, request, **kwargs):
file_type = self.request.query_params.get('export', 'json')
record = self.get_object()
try:
response = FileResponse(ContentFile(record.serialize(format=file_type)))
except ValueError as e:
detail = str(e).replace('.', '')
raise ValidationError(detail='{} for metadata file export.'.format(detail))
file_name = 'file_metadata_{}_{}.{}'.format(record.schema._id, record.file.name, file_type)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(file_name)
response['Content-Type'] = 'application/{}'.format(file_type)
return response
| {
"content_hash": "b370e80debed52e49644738449162a81",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 151,
"avg_line_length": 37.125,
"alnum_prop": 0.7003367003367004,
"repo_name": "mattclark/osf.io",
"id": "f0a33b32450a6393afacad5efe2c144efd395b41",
"size": "9504",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/files/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "317371"
},
{
"name": "JavaScript",
"bytes": "1792241"
},
{
"name": "Mako",
"bytes": "654772"
},
{
"name": "Python",
"bytes": "10166997"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import sublime, sys, os
from ArcticTypescript.lib.ArcticTestCase import ArcticTestCase
from sublime_unittest import TestCase
from unittest.mock import MagicMock as MM
Project = sys.modules["ArcticTypescript.lib.system.Project"]
class test_project_opening(ArcticTestCase):
def setUp(self):
# No settings or other files
self.clear_files_except_sublimeproject()
#self.assert_no_typescript_project_settings()
def test_opening_ts_file_should_trigger_dialog_if_no_project_settings(self):
""" Dialog is only shown when no .sublimets or *.sublime-project is found
or if these files do not specify any root files"""
return
# mock show method
tmp_show = Project.ProjectError.show
Project.ProjectError.show = MM()
self.create_ts_file()
self.open_and_focus_tsfile()
yield 10 # pause 10 ms
self.assertTrue(Project.ProjectError.show.called)
# reset mocked method
Project.ProjectError.show = tmp_show
self.close_view()
self.rm_file()
def test_opening_ts_file_should_init_project(self):
self.create_settings()
self.create_ts_file()
tmp_init = Project.OpenedProject.__init__
Project.OpenedProject.__init__ = MM()
self.open_and_focus_tsfile()
yield 10
        self.assertTrue(Project.OpenedProject.__init__.called)
# reset mocked method
Project.OpenedProject.__init__ = tmp_init
#self.close_view()
#self.rm_file()
def test_opening_ts_file_should_create_projectclass(self):
pass
#sublime.active_window().project_data()
#x = Project.project_by_view(1)
#self.assertEqual(x, 13)
# for testing sublime command
class test_helloworld_command(TestCase):
def setUp(self):
self.view = sublime.active_window().new_file()
def tearDown(self):
if self.view:
self.view.set_scratch(True)
self.view.window().focus_view(self.view)
self.view.window().run_command("close_file")
def setText(self, string):
pass
#self.view.run_command("insert", {"characters": string})
def getRow(self, row):
return self.view.substr(self.view.line(self.view.text_point(row,0)))
def test_hello_world_st3(self):
#self.view.run_command("hello_world")
first_row = self.getRow(0)
#self.assertEqual(first_row,"hello world")
def test_hello_world(self):
self.setText("new ")
#self.view.run_command("hello_world")
first_row = self.getRow(0)
#self.assertEqual(first_row,"new hello world")
Project = sys.modules["ArcticTypescript.lib.system.Project"]
class test_internal_functions(TestCase):
def test_foo(self):
x = Project.project_by_view(1)
self.assertEqual(x, 13)
| {
"content_hash": "4c5ae4611a99880b7bb5c4ed4027040a",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 81,
"avg_line_length": 28.366336633663366,
"alnum_prop": 0.6387434554973822,
"repo_name": "nimzco/Environment",
"id": "88fa5b897a22bc52dda81190fd5c0938be922908",
"size": "2880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Sublime/Packages/ArcticTypescript/lib/test/test_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "332445"
},
{
"name": "Python",
"bytes": "3101171"
},
{
"name": "Shell",
"bytes": "26630"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(name='Rpi-Central',
version='0.0.1',
description='Python project for Raspberry Pi',
author='Julian Kaltenhofer',
author_email='[email protected]',
url='https://github.com/kajuten/rpi-central',
license='MIT',
packages=find_packages(),
long_description=open('README.md').read(),
      install_requires=['nose'],
      )
| {
"content_hash": "33e1edd99d8616f76edf9e90725bdad2",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 52,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.647191011235955,
"repo_name": "kajuten/rpi-central",
"id": "68610fcfb63545301ee407af5be440f423c7a292",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/beta",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
def do_not_use_static_url(request):
def exception():
raise Exception(
"Do not use STATIC_URL in templates. Use the {% static %} templatetag (or {% versioned_static %} within admin templates) instead."
)
return {
"STATIC_URL": lambda: exception(),
}
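# Sketch of how a context processor like this is activated: it is listed in
# the template engine's OPTIONS in settings (the exact settings layout here
# is an assumption, not taken from this repository).
#
#     TEMPLATES = [{
#         'BACKEND': 'django.template.backends.django.DjangoTemplates',
#         'APP_DIRS': True,
#         'OPTIONS': {
#             'context_processors': [
#                 'wagtail.test.context_processors.do_not_use_static_url',
#             ],
#         },
#     }]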
| {
"content_hash": "4942cbaae39cd5a8cc575ed4804186c7",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 142,
"avg_line_length": 33.111111111111114,
"alnum_prop": 0.5973154362416108,
"repo_name": "zerolab/wagtail",
"id": "6d8f37bb2cc64d042eb0d87dbded3b1c01d8009b",
"size": "298",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/test/context_processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593037"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6560334"
},
{
"name": "SCSS",
"bytes": "219204"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288102"
}
],
"symlink_target": ""
} |
__author__ = "Markus Gumbel"
__copyright__ = "The authors"
__license__ = "Apache 2"
__email__ = "[email protected]"
__status__ = "Test"
from Core.ExecConfig import ExecConfig
from Test.Scalability.VerifyGrowth import VerifyGrowth
class VerifyGrowthT1000D4(VerifyGrowth):
def __init__(self, sim, simthread):
VerifyGrowth.__init__(self, sim, simthread)
self.name = "VerifyGrowthT1000D2"
self._initModel()
def _createExecConfig(self):
return ExecConfig(simDurationDays=2,
xLength=100, yLength=100, zLength=0,
voxelDensity=4, MCSperDay=1000)
| {
"content_hash": "da3ca72b7ea4a6ff62e9b41c019a739b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 32,
"alnum_prop": 0.6375,
"repo_name": "informatik-mannheim/Moduro-CC3D",
"id": "d7c2c7eeb560e7f9198683ac51906b245479fbfa",
"size": "1254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Simulation/Test/Scalability/VerifyGrowthT1000D4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "215449"
}
],
"symlink_target": ""
} |
from model.group import Group
class GroupHelper:
def __init__(self,app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/groups.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# init group creation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
self.group_cache = None
def fill_group_form(self, group):
# fill group form
wd = self.app.wd
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def edit_first_group(self):
wd = self.app.wd
self.open_groups_page()
# select first group
wd.find_element_by_name("selected[]").click()
wd.find_element_by_xpath("//div[@id='content']/form/input[6]").click()
# fill group form
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("coaches")
# submit editing
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def delete_first_group(self):
self.delete_group_by_index(0)
def delete_group_by_index(self,index):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_index(index)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def delete_group_by_id(self,id):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_group_by_index(self,index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_group_by_id(self,id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def modify_first_group(self):
self.modify_group_by_index(0)
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_index(index)
        #open modification form
wd.find_element_by_name("edit").click()
#fill group form
self.fill_group_form(new_group_data)
#submit modification
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def modify_group_by_id(self, new_group_data):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_id(new_group_data.id)
        #open modification form
wd.find_element_by_name("edit").click()
#fill group form
self.fill_group_form(new_group_data)
#submit modification
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def count(self):
wd = self.app.wd
self.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_groups_page()
self.group_cache =[]
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
| {
"content_hash": "cf2c0ae056b4ed6df13a196e090df397",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 101,
"avg_line_length": 31.79020979020979,
"alnum_prop": 0.5948086229652442,
"repo_name": "Manolaru/Python_Study",
"id": "cedcf5eacf44745fdd4a4c36931f601b6354d245",
"size": "4546",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "fixture/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "528"
},
{
"name": "Python",
"bytes": "42970"
}
],
"symlink_target": ""
} |
import pytest
import os
from statico.utils import sorted_list_dir, parse_metadata
@pytest.mark.utils
class TestUtils:
def test_sorted_list_dir(self, tmpdir):
for i in range(50):
f = tmpdir.join('file' + str(i) + '.txt')
f.write('test')
srtd = [f.stat().st_mtime for f in sorted_list_dir(str(tmpdir))]
num_files = len(srtd)
assert num_files == 50
assert sorted(srtd, reverse=True) == srtd
def test_parse_metadata(self, tmpdir):
# Success 1
f_success_1 = tmpdir.join('success1.md')
f_success_1.write('---\ntitle: My Test\nauthor : Dan Brown \n category: f:ood: \n------')
content, metadata = parse_metadata(f_success_1.open())
assert content == []
assert metadata.get('title') == 'My Test'
assert metadata.get('author') == 'Dan Brown'
assert metadata.get('category') == 'f:ood:'
# Success 2 (with content)
f_success_2 = tmpdir.join('success2.md')
f_success_2.write('---\ntitle: My Test\n---\nHello how are you?\nMamma mia')
content, metadata = parse_metadata(f_success_2.open())
assert len(content) == 2
assert content[0] == 'Hello how are you?'
assert content[1] == 'Mamma mia'
assert len(metadata) == 1
| {
"content_hash": "e2957d97b8b4ab1b868e2138340a1e20",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 100,
"avg_line_length": 32.9,
"alnum_prop": 0.5828267477203647,
"repo_name": "oss6/statico",
"id": "df5296fd64df3bb7b796008825ab05dd95cd6cb6",
"size": "1341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23780"
},
{
"name": "HTML",
"bytes": "8469"
},
{
"name": "JavaScript",
"bytes": "426"
},
{
"name": "Python",
"bytes": "37651"
}
],
"symlink_target": ""
} |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,shlex,sys
from waflib import Build,Utils,Task,Options,Logs,Errors,ConfigSet,Runner
from waflib.TaskGen import after_method,feature
from waflib.Configure import conf
WAF_CONFIG_H='config.h'
DEFKEYS='define_key'
INCKEYS='include_key'
cfg_ver={'atleast-version':'>=','exact-version':'==','max-version':'<=',}
SNIP_FUNCTION='''
int main(int argc, char **argv) {
void *p;
(void)argc; (void)argv;
p=(void*)(%s);
return 0;
}
'''
SNIP_TYPE='''
int main(int argc, char **argv) {
(void)argc; (void)argv;
if ((%(type_name)s *) 0) return 0;
if (sizeof (%(type_name)s)) return 0;
return 1;
}
'''
SNIP_EMPTY_PROGRAM='''
int main(int argc, char **argv) {
(void)argc; (void)argv;
return 0;
}
'''
SNIP_FIELD='''
int main(int argc, char **argv) {
char *off;
(void)argc; (void)argv;
off = (char*) &((%(type_name)s*)0)->%(field_name)s;
return (size_t) off < sizeof(%(type_name)s);
}
'''
MACRO_TO_DESTOS={'__linux__':'linux','__GNU__':'gnu','__FreeBSD__':'freebsd','__NetBSD__':'netbsd','__OpenBSD__':'openbsd','__sun':'sunos','__hpux':'hpux','__sgi':'irix','_AIX':'aix','__CYGWIN__':'cygwin','__MSYS__':'msys','_UWIN':'uwin','_WIN64':'win32','_WIN32':'win32','__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__':'darwin','__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__':'darwin','__QNX__':'qnx','__native_client__':'nacl'}
MACRO_TO_DEST_CPU={'__x86_64__':'x86_64','__amd64__':'x86_64','__i386__':'x86','__ia64__':'ia','__mips__':'mips','__sparc__':'sparc','__alpha__':'alpha','__aarch64__':'aarch64','__thumb__':'thumb','__arm__':'arm','__hppa__':'hppa','__powerpc__':'powerpc','__ppc__':'powerpc','__convex__':'convex','__m68k__':'m68k','__s390x__':'s390x','__s390__':'s390','__sh__':'sh',}
@conf
def parse_flags(self,line,uselib_store,env=None,force_static=False):
assert(isinstance(line,str))
env=env or self.env
app=env.append_value
appu=env.append_unique
lex=shlex.shlex(line,posix=False)
lex.whitespace_split=True
lex.commenters=''
lst=list(lex)
uselib=uselib_store
while lst:
x=lst.pop(0)
st=x[:2]
ot=x[2:]
if st=='-I'or st=='/I':
if not ot:ot=lst.pop(0)
appu('INCLUDES_'+uselib,[ot])
elif st=='-include':
tmp=[x,lst.pop(0)]
app('CFLAGS',tmp)
app('CXXFLAGS',tmp)
elif st=='-D'or(env.CXX_NAME=='msvc'and st=='/D'):
if not ot:ot=lst.pop(0)
app('DEFINES_'+uselib,[ot])
elif st=='-l':
if not ot:ot=lst.pop(0)
prefix=force_static and'STLIB_'or'LIB_'
appu(prefix+uselib,[ot])
elif st=='-L':
if not ot:ot=lst.pop(0)
appu('LIBPATH_'+uselib,[ot])
elif x.startswith('/LIBPATH:'):
appu('LIBPATH_'+uselib,[x.replace('/LIBPATH:','')])
elif x=='-pthread'or x.startswith('+')or x.startswith('-std'):
app('CFLAGS_'+uselib,[x])
app('CXXFLAGS_'+uselib,[x])
app('LINKFLAGS_'+uselib,[x])
elif x=='-framework':
appu('FRAMEWORK_'+uselib,[lst.pop(0)])
elif x.startswith('-F'):
appu('FRAMEWORKPATH_'+uselib,[x[2:]])
elif x.startswith('-Wl'):
app('LINKFLAGS_'+uselib,[x])
elif x.startswith('-m')or x.startswith('-f')or x.startswith('-dynamic'):
app('CFLAGS_'+uselib,[x])
app('CXXFLAGS_'+uselib,[x])
elif x.startswith('-bundle'):
app('LINKFLAGS_'+uselib,[x])
elif x.startswith('-undefined'):
arg=lst.pop(0)
app('LINKFLAGS_'+uselib,[x,arg])
elif x.startswith('-arch')or x.startswith('-isysroot'):
tmp=[x,lst.pop(0)]
app('CFLAGS_'+uselib,tmp)
app('CXXFLAGS_'+uselib,tmp)
app('LINKFLAGS_'+uselib,tmp)
elif x.endswith('.a')or x.endswith('.so')or x.endswith('.dylib')or x.endswith('.lib'):
appu('LINKFLAGS_'+uselib,[x])
@conf
def ret_msg(self,f,kw):
if isinstance(f,str):
return f
return f(kw)
@conf
def validate_cfg(self,kw):
if not'path'in kw:
if not self.env.PKGCONFIG:
self.find_program('pkg-config',var='PKGCONFIG')
kw['path']=self.env.PKGCONFIG
if'atleast_pkgconfig_version'in kw:
if not'msg'in kw:
kw['msg']='Checking for pkg-config version >= %r'%kw['atleast_pkgconfig_version']
return
if not'okmsg'in kw:
kw['okmsg']='yes'
if not'errmsg'in kw:
kw['errmsg']='not found'
if'modversion'in kw:
if not'msg'in kw:
kw['msg']='Checking for %r version'%kw['modversion']
return
for x in cfg_ver.keys():
y=x.replace('-','_')
if y in kw:
if not'package'in kw:
raise ValueError('%s requires a package'%x)
if not'msg'in kw:
kw['msg']='Checking for %r %s %s'%(kw['package'],cfg_ver[x],kw[y])
return
if not'msg'in kw:
kw['msg']='Checking for %r'%(kw['package']or kw['path'])
@conf
def exec_cfg(self,kw):
def define_it():
self.define(self.have_define(kw.get('uselib_store',kw['package'])),1,0)
if'atleast_pkgconfig_version'in kw:
cmd=[kw['path'],'--atleast-pkgconfig-version=%s'%kw['atleast_pkgconfig_version']]
self.cmd_and_log(cmd)
if not'okmsg'in kw:
kw['okmsg']='yes'
return
for x in cfg_ver:
y=x.replace('-','_')
if y in kw:
self.cmd_and_log([kw['path'],'--%s=%s'%(x,kw[y]),kw['package']])
if not'okmsg'in kw:
kw['okmsg']='yes'
define_it()
break
if'modversion'in kw:
version=self.cmd_and_log([kw['path'],'--modversion',kw['modversion']]).strip()
self.define('%s_VERSION'%Utils.quote_define_name(kw.get('uselib_store',kw['modversion'])),version)
return version
lst=[kw['path']]
defi=kw.get('define_variable',None)
if not defi:
defi=self.env.PKG_CONFIG_DEFINES or{}
for key,val in defi.items():
lst.append('--define-variable=%s=%s'%(key,val))
if'variables'in kw:
env=kw.get('env',self.env)
uselib=kw.get('uselib_store',kw['package'].upper())
vars=Utils.to_list(kw['variables'])
for v in vars:
val=self.cmd_and_log(lst+['--variable='+v]).strip()
var='%s_%s'%(uselib,v)
env[var]=val
if not'okmsg'in kw:
kw['okmsg']='yes'
return
static=False
if'args'in kw:
args=Utils.to_list(kw['args'])
if'--static'in args or'--static-libs'in args:
static=True
lst+=args
lst.extend(Utils.to_list(kw['package']))
ret=self.cmd_and_log(lst)
if not'okmsg'in kw:
kw['okmsg']='yes'
define_it()
self.parse_flags(ret,kw.get('uselib_store',kw['package'].upper()),kw.get('env',self.env),force_static=static)
return ret
@conf
def check_cfg(self,*k,**kw):
if k:
lst=k[0].split()
kw['package']=lst[0]
kw['args']=' '.join(lst[1:])
self.validate_cfg(kw)
if'msg'in kw:
self.start_msg(kw['msg'])
ret=None
try:
ret=self.exec_cfg(kw)
except self.errors.WafError:
if'errmsg'in kw:
self.end_msg(kw['errmsg'],'YELLOW')
if Logs.verbose>1:
raise
else:
self.fatal('The configuration failed')
else:
kw['success']=ret
if'okmsg'in kw:
self.end_msg(self.ret_msg(kw['okmsg'],kw))
return ret
@conf
def validate_c(self,kw):
if not'env'in kw:
kw['env']=self.env.derive()
env=kw['env']
if not'compiler'in kw and not'features'in kw:
kw['compiler']='c'
if env['CXX_NAME']and Task.classes.get('cxx',None):
kw['compiler']='cxx'
if not self.env['CXX']:
self.fatal('a c++ compiler is required')
else:
if not self.env['CC']:
self.fatal('a c compiler is required')
if not'compile_mode'in kw:
kw['compile_mode']='c'
if'cxx'in Utils.to_list(kw.get('features',[]))or kw.get('compiler','')=='cxx':
kw['compile_mode']='cxx'
if not'type'in kw:
kw['type']='cprogram'
if not'features'in kw:
kw['features']=[kw['compile_mode'],kw['type']]
else:
kw['features']=Utils.to_list(kw['features'])
if not'compile_filename'in kw:
kw['compile_filename']='test.c'+((kw['compile_mode']=='cxx')and'pp'or'')
def to_header(dct):
if'header_name'in dct:
dct=Utils.to_list(dct['header_name'])
return''.join(['#include <%s>\n'%x for x in dct])
return''
if'framework_name'in kw:
fwkname=kw['framework_name']
if not'uselib_store'in kw:
kw['uselib_store']=fwkname.upper()
if not kw.get('no_header',False):
if not'header_name'in kw:
kw['header_name']=[]
fwk='%s/%s.h'%(fwkname,fwkname)
if kw.get('remove_dot_h',None):
fwk=fwk[:-2]
kw['header_name']=Utils.to_list(kw['header_name'])+[fwk]
kw['msg']='Checking for framework %s'%fwkname
kw['framework']=fwkname
if'function_name'in kw:
fu=kw['function_name']
if not'msg'in kw:
kw['msg']='Checking for function %s'%fu
kw['code']=to_header(kw)+SNIP_FUNCTION%fu
if not'uselib_store'in kw:
kw['uselib_store']=fu.upper()
if not'define_name'in kw:
kw['define_name']=self.have_define(fu)
elif'type_name'in kw:
tu=kw['type_name']
if not'header_name'in kw:
kw['header_name']='stdint.h'
if'field_name'in kw:
field=kw['field_name']
kw['code']=to_header(kw)+SNIP_FIELD%{'type_name':tu,'field_name':field}
if not'msg'in kw:
kw['msg']='Checking for field %s in %s'%(field,tu)
if not'define_name'in kw:
kw['define_name']=self.have_define((tu+'_'+field).upper())
else:
kw['code']=to_header(kw)+SNIP_TYPE%{'type_name':tu}
if not'msg'in kw:
kw['msg']='Checking for type %s'%tu
if not'define_name'in kw:
kw['define_name']=self.have_define(tu.upper())
elif'header_name'in kw:
if not'msg'in kw:
kw['msg']='Checking for header %s'%kw['header_name']
l=Utils.to_list(kw['header_name'])
assert len(l)>0,'list of headers in header_name is empty'
kw['code']=to_header(kw)+SNIP_EMPTY_PROGRAM
if not'uselib_store'in kw:
kw['uselib_store']=l[0].upper()
if not'define_name'in kw:
kw['define_name']=self.have_define(l[0])
if'lib'in kw:
if not'msg'in kw:
kw['msg']='Checking for library %s'%kw['lib']
if not'uselib_store'in kw:
kw['uselib_store']=kw['lib'].upper()
if'stlib'in kw:
if not'msg'in kw:
kw['msg']='Checking for static library %s'%kw['stlib']
if not'uselib_store'in kw:
kw['uselib_store']=kw['stlib'].upper()
if'fragment'in kw:
kw['code']=kw['fragment']
if not'msg'in kw:
kw['msg']='Checking for code snippet'
if not'errmsg'in kw:
kw['errmsg']='no'
for(flagsname,flagstype)in[('cxxflags','compiler'),('cflags','compiler'),('linkflags','linker')]:
if flagsname in kw:
if not'msg'in kw:
kw['msg']='Checking for %s flags %s'%(flagstype,kw[flagsname])
if not'errmsg'in kw:
kw['errmsg']='no'
if not'execute'in kw:
kw['execute']=False
if kw['execute']:
kw['features'].append('test_exec')
if not'errmsg'in kw:
kw['errmsg']='not found'
if not'okmsg'in kw:
kw['okmsg']='yes'
if not'code'in kw:
kw['code']=SNIP_EMPTY_PROGRAM
if self.env[INCKEYS]:
kw['code']='\n'.join(['#include <%s>'%x for x in self.env[INCKEYS]])+'\n'+kw['code']
if not kw.get('success'):kw['success']=None
if'define_name'in kw:
self.undefine(kw['define_name'])
assert'msg'in kw,'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
@conf
def post_check(self,*k,**kw):
is_success=0
if kw['execute']:
if kw['success']is not None:
if kw.get('define_ret',False):
is_success=kw['success']
else:
is_success=(kw['success']==0)
else:
is_success=(kw['success']==0)
if'define_name'in kw:
if'header_name'in kw or'function_name'in kw or'type_name'in kw or'fragment'in kw:
if kw['execute']and kw.get('define_ret',None)and isinstance(is_success,str):
self.define(kw['define_name'],is_success,quote=kw.get('quote',1))
else:
self.define_cond(kw['define_name'],is_success)
else:
self.define_cond(kw['define_name'],is_success)
if'header_name'in kw:
if kw.get('auto_add_header_name',False):
self.env.append_value(INCKEYS,Utils.to_list(kw['header_name']))
if is_success and'uselib_store'in kw:
from waflib.Tools import ccroot
_vars=set([])
for x in kw['features']:
if x in ccroot.USELIB_VARS:
_vars|=ccroot.USELIB_VARS[x]
for k in _vars:
lk=k.lower()
if k=='INCLUDES':lk='includes'
if k=='DEFINES':lk='defines'
if lk in kw:
val=kw[lk]
if isinstance(val,str):
val=val.rstrip(os.path.sep)
self.env.append_unique(k+'_'+kw['uselib_store'],val)
return is_success
@conf
def check(self,*k,**kw):
self.validate_c(kw)
self.start_msg(kw['msg'])
ret=None
try:
ret=self.run_c_code(*k,**kw)
except self.errors.ConfigurationError:
self.end_msg(kw['errmsg'],'YELLOW')
if Logs.verbose>1:
raise
else:
self.fatal('The configuration failed')
else:
kw['success']=ret
ret=self.post_check(*k,**kw)
if not ret:
self.end_msg(kw['errmsg'],'YELLOW')
self.fatal('The configuration failed %r'%ret)
else:
self.end_msg(self.ret_msg(kw['okmsg'],kw))
return ret
class test_exec(Task.Task):
color='PINK'
def run(self):
if getattr(self.generator,'rpath',None):
if getattr(self.generator,'define_ret',False):
self.generator.bld.retval=self.generator.bld.cmd_and_log([self.inputs[0].abspath()])
else:
self.generator.bld.retval=self.generator.bld.exec_command([self.inputs[0].abspath()])
else:
env=self.env.env or{}
env.update(dict(os.environ))
for var in('LD_LIBRARY_PATH','DYLD_LIBRARY_PATH','PATH'):
env[var]=self.inputs[0].parent.abspath()+os.path.pathsep+env.get(var,'')
if getattr(self.generator,'define_ret',False):
self.generator.bld.retval=self.generator.bld.cmd_and_log([self.inputs[0].abspath()],env=env)
else:
self.generator.bld.retval=self.generator.bld.exec_command([self.inputs[0].abspath()],env=env)
@feature('test_exec')
@after_method('apply_link')
def test_exec_fun(self):
self.create_task('test_exec',self.link_task.outputs[0])
CACHE_RESULTS=1
COMPILE_ERRORS=2
@conf
def run_c_code(self,*k,**kw):
lst=[str(v)for(p,v)in kw.items()if p!='env']
h=Utils.h_list(lst)
dir=self.bldnode.abspath()+os.sep+(not Utils.is_win32 and'.'or'')+'conf_check_'+Utils.to_hex(h)
try:
os.makedirs(dir)
except OSError:
pass
try:
os.stat(dir)
except OSError:
self.fatal('cannot use the configuration test folder %r'%dir)
cachemode=getattr(Options.options,'confcache',None)
if cachemode==CACHE_RESULTS:
try:
proj=ConfigSet.ConfigSet(os.path.join(dir,'cache_run_c_code'))
except OSError:
pass
else:
ret=proj['cache_run_c_code']
if isinstance(ret,str)and ret.startswith('Test does not build'):
self.fatal(ret)
return ret
bdir=os.path.join(dir,'testbuild')
if not os.path.exists(bdir):
os.makedirs(bdir)
self.test_bld=bld=Build.BuildContext(top_dir=dir,out_dir=bdir)
bld.init_dirs()
bld.progress_bar=0
bld.targets='*'
if kw['compile_filename']:
node=bld.srcnode.make_node(kw['compile_filename'])
node.write(kw['code'])
bld.logger=self.logger
bld.all_envs.update(self.all_envs)
bld.env=kw['env']
o=bld(features=kw['features'],source=kw['compile_filename'],target='testprog')
for k,v in kw.items():
setattr(o,k,v)
self.to_log("==>\n%s\n<=="%kw['code'])
bld.targets='*'
ret=-1
try:
try:
bld.compile()
except Errors.WafError:
ret='Test does not build: %s'%Utils.ex_stack()
self.fatal(ret)
else:
ret=getattr(bld,'retval',0)
finally:
proj=ConfigSet.ConfigSet()
proj['cache_run_c_code']=ret
proj.store(os.path.join(dir,'cache_run_c_code'))
return ret
@conf
def check_cxx(self,*k,**kw):
kw['compiler']='cxx'
return self.check(*k,**kw)
@conf
def check_cc(self,*k,**kw):
kw['compiler']='c'
return self.check(*k,**kw)
@conf
def define(self,key,val,quote=True):
assert key and isinstance(key,str)
if val is True:
val=1
elif val in(False,None):
val=0
if isinstance(val,int)or isinstance(val,float):
s='%s=%s'
else:
s=quote and'%s="%s"'or'%s=%s'
app=s%(key,str(val))
ban=key+'='
lst=self.env['DEFINES']
for x in lst:
if x.startswith(ban):
lst[lst.index(x)]=app
break
else:
self.env.append_value('DEFINES',app)
self.env.append_unique(DEFKEYS,key)
@conf
def undefine(self,key):
assert key and isinstance(key,str)
ban=key+'='
lst=[x for x in self.env['DEFINES']if not x.startswith(ban)]
self.env['DEFINES']=lst
self.env.append_unique(DEFKEYS,key)
@conf
def define_cond(self,key,val):
assert key and isinstance(key,str)
if val:
self.define(key,1)
else:
self.undefine(key)
@conf
def is_defined(self,key):
assert key and isinstance(key,str)
ban=key+'='
for x in self.env['DEFINES']:
if x.startswith(ban):
return True
return False
@conf
def get_define(self,key):
assert key and isinstance(key,str)
ban=key+'='
for x in self.env['DEFINES']:
if x.startswith(ban):
return x[len(ban):]
return None
@conf
def have_define(self,key):
return(self.env.HAVE_PAT or'HAVE_%s')%Utils.quote_define_name(key)
@conf
def write_config_header(self,configfile='',guard='',top=False,env=None,defines=True,headers=False,remove=True,define_prefix=''):
if env:
Logs.warn('Cannot pass env to write_config_header')
if not configfile:configfile=WAF_CONFIG_H
waf_guard=guard or'W_%s_WAF'%Utils.quote_define_name(configfile)
node=top and self.bldnode or self.path.get_bld()
node=node.make_node(configfile)
node.parent.mkdir()
lst=['/* WARNING! All changes made to this file will be lost! */\n']
lst.append('#ifndef %s\n#define %s\n'%(waf_guard,waf_guard))
lst.append(self.get_config_header(defines,headers,define_prefix=define_prefix))
lst.append('\n#endif /* %s */\n'%waf_guard)
node.write('\n'.join(lst))
self.env.append_unique(Build.CFG_FILES,[node.abspath()])
if remove:
for key in self.env[DEFKEYS]:
self.undefine(key)
self.env[DEFKEYS]=[]
@conf
def get_config_header(self,defines=True,headers=False,define_prefix=''):
lst=[]
if headers:
for x in self.env[INCKEYS]:
lst.append('#include <%s>'%x)
if defines:
for x in self.env[DEFKEYS]:
if self.is_defined(x):
val=self.get_define(x)
lst.append('#define %s %s'%(define_prefix+x,val))
else:
lst.append('/* #undef %s */'%(define_prefix+x))
return"\n".join(lst)
@conf
def cc_add_flags(conf):
conf.add_os_flags('CPPFLAGS','CFLAGS')
conf.add_os_flags('CFLAGS')
@conf
def cxx_add_flags(conf):
conf.add_os_flags('CPPFLAGS','CXXFLAGS')
conf.add_os_flags('CXXFLAGS')
@conf
def link_add_flags(conf):
conf.add_os_flags('LINKFLAGS')
conf.add_os_flags('LDFLAGS','LINKFLAGS')
@conf
def cc_load_tools(conf):
if not conf.env.DEST_OS:
conf.env.DEST_OS=Utils.unversioned_sys_platform()
conf.load('c')
@conf
def cxx_load_tools(conf):
if not conf.env.DEST_OS:
conf.env.DEST_OS=Utils.unversioned_sys_platform()
conf.load('cxx')
@conf
def get_cc_version(conf,cc,gcc=False,icc=False):
cmd=cc+['-dM','-E','-']
env=conf.env.env or None
try:
p=Utils.subprocess.Popen(cmd,stdin=Utils.subprocess.PIPE,stdout=Utils.subprocess.PIPE,stderr=Utils.subprocess.PIPE,env=env)
p.stdin.write('\n')
out=p.communicate()[0]
except Exception:
conf.fatal('Could not determine the compiler version %r'%cmd)
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding or'iso8859-1')
if gcc:
if out.find('__INTEL_COMPILER')>=0:
conf.fatal('The intel compiler pretends to be gcc')
if out.find('__GNUC__')<0:
conf.fatal('Could not determine the compiler type')
if icc and out.find('__INTEL_COMPILER')<0:
conf.fatal('Not icc/icpc')
k={}
if icc or gcc:
out=out.splitlines()
for line in out:
lst=shlex.split(line)
if len(lst)>2:
key=lst[1]
val=lst[2]
k[key]=val
def isD(var):
return var in k
def isT(var):
return var in k and k[var]!='0'
if not conf.env.DEST_OS:
conf.env.DEST_OS=''
for i in MACRO_TO_DESTOS:
if isD(i):
conf.env.DEST_OS=MACRO_TO_DESTOS[i]
break
else:
if isD('__APPLE__')and isD('__MACH__'):
conf.env.DEST_OS='darwin'
elif isD('__unix__'):
conf.env.DEST_OS='generic'
if isD('__ELF__'):
conf.env.DEST_BINFMT='elf'
elif isD('__WINNT__')or isD('__CYGWIN__'):
conf.env.DEST_BINFMT='pe'
conf.env.LIBDIR=conf.env['PREFIX']+'/bin'
elif isD('__APPLE__'):
conf.env.DEST_BINFMT='mac-o'
if not conf.env.DEST_BINFMT:
conf.env.DEST_BINFMT=Utils.destos_to_binfmt(conf.env.DEST_OS)
for i in MACRO_TO_DEST_CPU:
if isD(i):
conf.env.DEST_CPU=MACRO_TO_DEST_CPU[i]
break
Logs.debug('ccroot: dest platform: '+' '.join([conf.env[x]or'?'for x in('DEST_OS','DEST_BINFMT','DEST_CPU')]))
if icc:
ver=k['__INTEL_COMPILER']
conf.env['CC_VERSION']=(ver[:-2],ver[-2],ver[-1])
else:
if isD('__clang__'):
conf.env['CC_VERSION']=(k['__clang_major__'],k['__clang_minor__'],k['__clang_patchlevel__'])
else:
conf.env['CC_VERSION']=(k['__GNUC__'],k['__GNUC_MINOR__'],k['__GNUC_PATCHLEVEL__'])
return k
@conf
def get_xlc_version(conf,cc):
cmd=cc+['-qversion']
try:
out,err=conf.cmd_and_log(cmd,output=0)
except Errors.WafError:
conf.fatal('Could not find xlc %r'%cmd)
for v in(r"IBM XL C/C\+\+.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re=re.compile(v,re.I).search
match=version_re(out or err)
if match:
k=match.groupdict()
conf.env['CC_VERSION']=(k['major'],k['minor'])
break
else:
conf.fatal('Could not determine the XLC version.')
@conf
def add_as_needed(self):
if self.env.DEST_BINFMT=='elf'and'gcc'in(self.env.CXX_NAME,self.env.CC_NAME):
self.env.append_unique('LINKFLAGS','--as-needed')
class cfgtask(Task.TaskBase):
def display(self):
return''
def runnable_status(self):
return Task.RUN_ME
def uid(self):
return Utils.SIG_NIL
def run(self):
conf=self.conf
bld=Build.BuildContext(top_dir=conf.srcnode.abspath(),out_dir=conf.bldnode.abspath())
bld.env=conf.env
bld.init_dirs()
bld.in_msg=1
bld.logger=self.logger
try:
bld.check(**self.args)
except Exception:
return 1
@conf
def multicheck(self,*k,**kw):
self.start_msg(kw.get('msg','Executing %d configuration tests'%len(k)))
class par(object):
def __init__(self):
self.keep=False
self.cache_global=Options.cache_global
self.nocache=Options.options.nocache
self.returned_tasks=[]
self.task_sigs={}
def total(self):
return len(tasks)
def to_log(self,*k,**kw):
return
bld=par()
tasks=[]
for dct in k:
x=cfgtask(bld=bld)
tasks.append(x)
x.args=dct
x.bld=bld
x.conf=self
x.args=dct
x.logger=Logs.make_mem_logger(str(id(x)),self.logger)
def it():
yield tasks
while 1:
yield[]
p=Runner.Parallel(bld,Options.options.jobs)
p.biter=it()
p.start()
for x in tasks:
x.logger.memhandler.flush()
for x in tasks:
if x.hasrun!=Task.SUCCESS:
self.end_msg(kw.get('errmsg','no'),color='YELLOW')
self.fatal(kw.get('fatalmsg',None)or'One of the tests has failed, see the config.log for more information')
self.end_msg('ok')
| {
"content_hash": "48b538e9dc41bc5cfdf94a6caba091cb",
"timestamp": "",
"source": "github",
"line_count": 728,
"max_line_length": 430,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.6517695919104369,
"repo_name": "pipsiscool/audacity",
"id": "e6d3b5d4d81a47de6dba471319900cc7973b391b",
"size": "22152",
"binary": false,
"copies": "47",
"ref": "refs/heads/master",
"path": "lib-src/lv2/sord/waflib/Tools/c_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "141298"
},
{
"name": "Awk",
"bytes": "2350"
},
{
"name": "C",
"bytes": "16931951"
},
{
"name": "C++",
"bytes": "21277015"
},
{
"name": "CMake",
"bytes": "102838"
},
{
"name": "CSS",
"bytes": "87696"
},
{
"name": "Common Lisp",
"bytes": "533537"
},
{
"name": "Groff",
"bytes": "65243"
},
{
"name": "HTML",
"bytes": "2177363"
},
{
"name": "Inno Setup",
"bytes": "19531"
},
{
"name": "Java",
"bytes": "84589"
},
{
"name": "M",
"bytes": "6242"
},
{
"name": "Makefile",
"bytes": "141297"
},
{
"name": "Matlab",
"bytes": "2467"
},
{
"name": "NewLisp",
"bytes": "2831"
},
{
"name": "Objective-C",
"bytes": "17554"
},
{
"name": "Pascal",
"bytes": "17208"
},
{
"name": "Perl",
"bytes": "129212"
},
{
"name": "Prolog",
"bytes": "939"
},
{
"name": "Python",
"bytes": "3636067"
},
{
"name": "QMake",
"bytes": "971"
},
{
"name": "R",
"bytes": "305850"
},
{
"name": "Shell",
"bytes": "6354469"
},
{
"name": "Smarty",
"bytes": "172490"
},
{
"name": "TeX",
"bytes": "146115"
}
],
"symlink_target": ""
} |
import warnings
import functools
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
filename=func.func_code.co_filename,
lineno=func.func_code.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func
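# Minimal usage sketch (hypothetical function name). Note that `func.func_code`
# above is the Python 2 spelling; on Python 3 the equivalent attribute is
# `func.__code__`.
#
#   @deprecated
#   def old_api():
#       return 42
#
#   old_api()  # warns: "Call to deprecated function old_api."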
| {
"content_hash": "790da91e18ed78d1f237b45d1c06a969",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 31,
"alnum_prop": 0.6366723259762309,
"repo_name": "hamaxx/uasparser2",
"id": "a35ed77f56574ec417dcc5314f4ce27684f3651c",
"size": "589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uasparser2/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17479"
}
],
"symlink_target": ""
} |
import unittest
__author__ = 'Dirk Dittert'
CONTENT = """\
-------------------------------------------------------------------------------
PdId: 1
Model Number: Hitachi HDS72302
Drive Type: SATA
SMART Status: Enable
SMART Health Status: OK
SCT Status Version: 3
SCT Version (vendor specific): 256 (0x0100)
SCT Support Level: 1
Device State: SMART Off-line Data Collection executing in background (4)
Current Temperature: 38 Celsius
Power Cycle Min/Max Temperature: 37/39 Celsius
Lifetime Min/Max Temperature: 21/48 Celsius
Under/Over Temperature Limit Count: 0/0
Self-test execution status: ( 0) The previous self-test routine
completed without error or no self-test
has ever been run.
has ever been run.
Error logging capability: (0x01) Error logging supported.
Short self-test routine
recommended polling time: ( 1) minutes.
Extended self-test routine
recommended polling time: ( 255) minutes.
SCT capabilities: (0x003d) SCT Status supported.
SCT Feature Control supported.
SCT Data Table supported.
SMART Self-test log structure revision number: 1
No self-tests have been logged. [To run self-tests, use: smartctl -t]
SMART Error Log Version: 1
No Errors Logged
SMART Attributes Data Structure revision number: 16
Vendor Specific SMART Attributes with Thresholds:
==============================================================================
ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED
WHEN_FAILED RAW_VALUE
==============================================================================
1 Raw_Read_Error_Rate 0x000b 100 100 016 Pre-fail Always
- 0
2 Throughput_Performance 0x0005 130 130 054 Pre-fail Offline
- 101
3 Spin_Up_Time 0x0007 134 134 024 Pre-fail Always
- 425 (Average 429)
4 Start_Stop_Count 0x0012 100 100 000 Old_age Always
- 34
5 Reallocated_Sector_Ct 0x0033 100 100 005 Pre-fail Always
- 0
7 Seek_Error_Rate 0x000b 100 100 067 Pre-fail Always
- 0
8 Seek_Time_Performance 0x0005 138 138 020 Pre-fail Offline
- 25
9 Power_On_Hours 0x0012 098 098 000 Old_age Always
- 17623
10 Spin_Retry_Count 0x0013 100 100 060 Pre-fail Always
- 0
12 Power_Cycle_Count 0x0032 100 100 000 Old_age Always
- 20
192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always
- 178
193 Load_Cycle_Count 0x0012 100 100 000 Old_age Always
- 178
194 Temperature_Celsius 0x0002 157 157 000 Old_age Always
- 38 (Lifetime Min/Max 21/48)
196 Reallocated_Event_Count 0x0032 100 100 000 Old_age Always
- 0
197 Current_Pending_Sector 0x0022 100 100 000 Old_age Always
- 0
198 Offline_Uncorrectable 0x0008 100 100 000 Old_age Offline
- 0
199 UDMA_CRC_Error_Count 0x000a 200 200 000 Old_age Always
- 0
"""
EXPECTED = """\
-------------------------------------------------------------------------------
PdId: 1
Model Number: Hitachi HDS72302
Drive Type: SATA
SMART Status: Enable
SMART Health Status: OK
SCT Status Version: 3
SCT Version (vendor specific): 256 (0x0100)
SCT Support Level: 1
Device State: SMART Off-line Data Collection executing in background (4)
Current Temperature: 38 Celsius
Power Cycle Min/Max Temperature: 37/39 Celsius
Lifetime Min/Max Temperature: 21/48 Celsius
Under/Over Temperature Limit Count: 0/0
Self-test execution status: ( 0) The previous self-test routine
completed without error or no self-test
has ever been run.
has ever been run.
Error logging capability: (0x01) Error logging supported.
Short self-test routine
recommended polling time: ( 1) minutes.
Extended self-test routine
recommended polling time: ( 255) minutes.
SCT capabilities: (0x003d) SCT Status supported.
SCT Feature Control supported.
SCT Data Table supported.
SMART Self-test log structure revision number: 1
No self-tests have been logged. [To run self-tests, use: smartctl -t]
SMART Error Log Version: 1
No Errors Logged
SMART Attributes Data Structure revision number: 16
Vendor Specific SMART Attributes with Thresholds:
ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
1 Raw_Read_Error_Rate 0x000b 100 100 016 Pre-fail Always - 0
2 Throughput_Performance 0x0005 130 130 054 Pre-fail Offline - 101
3 Spin_Up_Time 0x0007 134 134 024 Pre-fail Always - 425
4 Start_Stop_Count 0x0012 100 100 000 Old_age Always - 34
5 Reallocated_Sector_Ct 0x0033 100 100 005 Pre-fail Always - 0
7 Seek_Error_Rate 0x000b 100 100 067 Pre-fail Always - 0
8 Seek_Time_Performance 0x0005 138 138 020 Pre-fail Offline - 25
9 Power_On_Hours 0x0012 098 098 000 Old_age Always - 17623
10 Spin_Retry_Count 0x0013 100 100 060 Pre-fail Always - 0
12 Power_Cycle_Count 0x0032 100 100 000 Old_age Always - 20
192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always - 178
193 Load_Cycle_Count 0x0012 100 100 000 Old_age Always - 178
194 Temperature_Celsius 0x0002 157 157 000 Old_age Always - 38
196 Reallocated_Event_Count 0x0032 100 100 000 Old_age Always - 0
197 Current_Pending_Sector 0x0022 100 100 000 Old_age Always - 0
198 Offline_Uncorrectable 0x0008 100 100 000 Old_age Offline - 0
199 UDMA_CRC_Error_Count 0x000a 200 200 000 Old_age Always - 0
"""
from pyprobe.sensors.pegasus.helper import reformat_smart_values
class PegasusSmartReformatterTest(unittest.TestCase):
def test_empty(self):
result = reformat_smart_values()
self.assertIsNone(result)
def test_reformat(self):
result = reformat_smart_values(CONTENT)
self.assertEqual(EXPECTED, result) | {
"content_hash": "87259efd823757f4653939d90e7bb1e1",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 101,
"avg_line_length": 43.92857142857143,
"alnum_prop": 0.5742793791574279,
"repo_name": "dittert/pyprobe",
"id": "12aaa8b561b16ed8767fe8b2d3cba9c95cc68583",
"size": "6780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sensors/pegasus/PegasusSmartReformatterTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "55"
},
{
"name": "Python",
"bytes": "251844"
},
{
"name": "Shell",
"bytes": "2744"
}
],
"symlink_target": ""
} |
import ssl
import tempfile
import pytest
from ...utils.data import get_pkg_data_filename
from ..hub import SAMPHubServer
from ..integrated_client import SAMPIntegratedClient
from ..errors import SAMPProxyError
# By default, tests should not use the internet.
from .. import conf
from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY
def setup_module(module):
conf.use_internet = False
class TestStandardProfile(object):
@property
def hub_init_kwargs(self):
return {}
@property
def client_init_kwargs(self):
return {}
@property
def client_connect_kwargs(self):
return {}
def setup_method(self, method):
self.tmpdir = tempfile.mkdtemp()
self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1,
**self.hub_init_kwargs)
self.hub.start()
self.client1 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
self.client2 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
def teardown_method(self, method):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
def test_main(self):
self.client1_id = self.client1.get_public_id()
self.client2_id = self.client2.get_public_id()
self.metadata1 = {"samp.name": "Client 1",
"samp.description.text": "Client 1 Description",
"client.version": "1.1"}
self.metadata2 = {"samp.name": "Client 2",
"samp.description.text": "Client 2 Description",
"client.version": "1.2"}
# Check that the clients are connected
assert self.client1.is_connected
assert self.client2.is_connected
# Check that ping works
self.client1.ping()
self.client2.ping()
# Check that get_registered_clients works as expected.
assert self.client1_id not in self.client1.get_registered_clients()
assert self.client2_id in self.client1.get_registered_clients()
assert self.client1_id in self.client2.get_registered_clients()
assert self.client2_id not in self.client2.get_registered_clients()
# Check that get_metadata works as expected
assert self.client1.get_metadata(self.client1_id) == {}
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client1_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client1.declare_metadata(self.metadata1)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client2.declare_metadata(self.metadata2)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == self.metadata2
assert self.client2.get_metadata(self.client2_id) == self.metadata2
# Check that, without subscriptions, sending a notification from one
# client to another raises an error.
message = {}
message['samp.mtype'] = "table.load.votable"
message['samp.params'] = {}
with pytest.raises(SAMPProxyError):
self.client1.notify(self.client2_id, message)
# Check that there are no currently active subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
# We now test notifications and calls
rec1 = Receiver(self.client1)
rec2 = Receiver(self.client2)
self.client2.bind_receive_notification('table.load.votable',
rec2.receive_notification)
self.client2.bind_receive_call('table.load.votable',
rec2.receive_call)
self.client1.bind_receive_response('test-tag', rec1.receive_response)
# Check resulting subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id)
assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id)
# Once we have finished with the calls and notifications, we will
# check the data got across correctly.
# Test notify
params = random_params(self.tmpdir)
self.client1.notify(self.client2.get_public_id(),
{'samp.mtype':'table.load.votable',
'samp.params':params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify(self.client2.get_public_id(),
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test notify_all
params = random_params(self.tmpdir)
self.client1.notify_all({'samp.mtype':'table.load.votable',
'samp.params':params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify_all("table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call
params = random_params(self.tmpdir)
self.client1.call(self.client2.get_public_id(), 'test-tag',
{'samp.mtype':'table.load.votable',
'samp.params':params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall(self.client2.get_public_id(), 'test-tag',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_all
params = random_params(self.tmpdir)
self.client1.call_all('tag1',
{'samp.mtype':'table.load.votable',
'samp.params':params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall_all('tag2',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_and_wait
params = random_params(self.tmpdir)
result = self.client1.call_and_wait(self.client2.get_public_id(),
{'samp.mtype':'table.load.votable',
'samp.params':params}, timeout=5)
assert result == TEST_REPLY
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
result = self.client1.ecall_and_wait(self.client2.get_public_id(),
"table.load.votable", timeout=5, **params)
assert result == TEST_REPLY
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# TODO: check that receive_response received the right data
| {
"content_hash": "8b93bd8a913234890de81b516d1e1e80",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 97,
"avg_line_length": 36.5531914893617,
"alnum_prop": 0.6011641443538999,
"repo_name": "kelle/astropy",
"id": "758ee9e6484762999bc5a1a321e1f95763ffefbc",
"size": "8590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astropy/samp/tests/test_standard_profile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366877"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "8072264"
},
{
"name": "Shell",
"bytes": "446"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
} |
import os
from neupy import storage, architectures
from imagenet_tools import (CURRENT_DIR, FILES_DIR, load_image,
print_top_n, download_file)
RESNET50_WEIGHTS_FILE = os.path.join(FILES_DIR, 'resnet50.hdf5')
DOG_IMAGE_PATH = os.path.join(CURRENT_DIR, 'images', 'german-shepherd.jpg')
def download_resnet50_weights():
if not os.path.exists(RESNET50_WEIGHTS_FILE):
download_file(
url="http://neupy.s3.amazonaws.com/tensorflow/imagenet-models/resnet50.hdf5",
filepath=RESNET50_WEIGHTS_FILE,
description='Downloading weights')
print("File with ResNet-50 weights: {}".format(RESNET50_WEIGHTS_FILE))
return RESNET50_WEIGHTS_FILE
if __name__ == '__main__':
resnet50_weights_filename = download_resnet50_weights()
resnet50 = architectures.resnet50()
print("Recovering ResNet-50 parameters...")
storage.load(resnet50, resnet50_weights_filename)
print("Making prediction...")
dog_image = load_image(
DOG_IMAGE_PATH,
image_size=(256, 256),
crop_size=(224, 224))
output = resnet50.predict(dog_image)
print_top_n(output, n=5)
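    # Note (illustrative): load_image and print_top_n come from the local
    # imagenet_tools helper module; the 224x224 crop above is the usual input
    # size for ResNet-50 ImageNet models.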
| {
"content_hash": "3550d23708581cdfea89b77b69004aaa",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 89,
"avg_line_length": 30.657894736842106,
"alnum_prop": 0.663519313304721,
"repo_name": "itdxer/neupy",
"id": "056cdeec0443c7457997a3233ebab49d48640220",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/cnn/resnet50.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13847"
},
{
"name": "JavaScript",
"bytes": "7460"
},
{
"name": "Python",
"bytes": "16002521"
},
{
"name": "Shell",
"bytes": "434"
}
],
"symlink_target": ""
} |
"""This code example gets all active placements by using a statement. To create
a placement, run create_placements.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201206')
# Create a statement to only select active placements.
values = [{
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'ACTIVE'
}
}]
filter_statement = {'query': 'WHERE status = :status LIMIT 500',
'values': values}
# Get placements by statement.
response = placement_service.GetPlacementsByStatement(filter_statement)[0]
placements = []
if 'results' in response:
placements = response['results']
# Display results.
for placement in placements:
print ('Placement with id \'%s\', name \'%s\', and status \'%s\' was found.'
% (placement['id'], placement['name'], placement['status']))
print
print 'Number of results found: %s' % len(placements)
| {
"content_hash": "4681d87b01a8947656f71ccf80e46226",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 80,
"avg_line_length": 30.8,
"alnum_prop": 0.6724386724386724,
"repo_name": "krux/adspygoogle",
"id": "9c4367cc0e7f7d855af43029b8cdfb501e7bd3ca",
"size": "2004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201206/get_placements_by_statement.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "2263332"
}
],
"symlink_target": ""
} |
"""Misc. useful functionality used by the rest of this package.
This module provides common functionality used by the other modules in
this package.
"""
import sys
import os
import subprocess
# Whether or not to show debug messages
DEBUG = False
def notify(msg, *args):
"""Print a message to stderr."""
print >> sys.stderr, msg % args
def debug (msg, *args):
"""Print a debug message to stderr when DEBUG is enabled."""
if DEBUG:
print >> sys.stderr, msg % args
def error (msg, *args):
"""Print an error message to stderr."""
print >> sys.stderr, "ERROR:", msg % args
def warn(msg, *args):
"""Print a warning message to stderr."""
print >> sys.stderr, "warning:", msg % args
def die (msg, *args):
"""Print as error message to stderr and exit the program."""
error(msg, *args)
sys.exit(1)
class ProgressIndicator(object):
"""Simple progress indicator.
Displayed as a spinning character by default, but can be customized
by passing custom messages that overrides the spinning character.
"""
States = ("|", "/", "-", "\\")
def __init__ (self, prefix = "", f = sys.stdout):
"""Create a new ProgressIndicator, bound to the given file object."""
self.n = 0 # Simple progress counter
self.f = f # Progress is written to this file object
self.prev_len = 0 # Length of previous msg (to be overwritten)
self.prefix = prefix # Prefix prepended to each progress message
self.prefix_lens = [] # Stack of prefix string lengths
def pushprefix (self, prefix):
"""Append the given prefix onto the prefix stack."""
self.prefix_lens.append(len(self.prefix))
self.prefix += prefix
def popprefix (self):
"""Remove the last prefix from the prefix stack."""
prev_len = self.prefix_lens.pop()
self.prefix = self.prefix[:prev_len]
def __call__ (self, msg = None, lf = False):
"""Indicate progress, possibly with a custom message."""
if msg is None:
msg = self.States[self.n % len(self.States)]
msg = self.prefix + msg
print >> self.f, "\r%-*s" % (self.prev_len, msg),
self.prev_len = len(msg.expandtabs())
if lf:
print >> self.f
self.prev_len = 0
self.n += 1
def finish (self, msg = "done", noprefix = False):
"""Finalize progress indication with the given message."""
if noprefix:
self.prefix = ""
self(msg, True)
def start_command (args, cwd = None, shell = False, add_env = None,
stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.PIPE):
"""Start the given command, and return a subprocess object.
This provides a simpler interface to the subprocess module.
"""
env = None
if add_env is not None:
env = os.environ.copy()
env.update(add_env)
return subprocess.Popen(args, bufsize = 1, stdin = stdin, stdout = stdout,
stderr = stderr, cwd = cwd, shell = shell,
env = env, universal_newlines = True)
def run_command (args, cwd = None, shell = False, add_env = None,
flag_error = True):
"""Run the given command to completion, and return its results.
This provides a simpler interface to the subprocess module.
The results are formatted as a 3-tuple: (exit_code, output, errors)
If flag_error is enabled, Error messages will be produced if the
subprocess terminated with a non-zero exit code and/or stderr
output.
The other arguments are passed on to start_command().
"""
process = start_command(args, cwd, shell, add_env)
(output, errors) = process.communicate()
exit_code = process.returncode
if flag_error and errors:
error("'%s' returned errors:\n---\n%s---", " ".join(args), errors)
if flag_error and exit_code:
error("'%s' returned exit code %i", " ".join(args), exit_code)
return (exit_code, output, errors)
def file_reader_method (missing_ok = False):
"""Decorator for simplifying reading of files.
If missing_ok is True, a failure to open a file for reading will
not raise the usual IOError, but instead the wrapped method will be
called with f == None. The method must in this case properly
handle f == None.
"""
def _wrap (method):
"""Teach given method to handle both filenames and file objects.
The given method must take a file object as its second argument
(the first argument being 'self', of course). This decorator
will take a filename given as the second argument and promote
it to a file object.
"""
def _wrapped_method (self, filename, *args, **kwargs):
if isinstance(filename, file):
f = filename
else:
try:
f = open(filename, 'r')
except IOError:
if missing_ok:
f = None
else:
raise
try:
return method(self, f, *args, **kwargs)
finally:
if not isinstance(filename, file) and f:
f.close()
return _wrapped_method
return _wrap
def file_writer_method (method):
"""Decorator for simplifying writing of files.
Enables the given method to handle both filenames and file objects.
The given method must take a file object as its second argument
(the first argument being 'self', of course). This decorator will
take a filename given as the second argument and promote it to a
file object.
"""
def _new_method (self, filename, *args, **kwargs):
if isinstance(filename, file):
f = filename
else:
# Make sure the containing directory exists
parent_dir = os.path.dirname(filename)
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
f = open(filename, 'w')
try:
return method(self, f, *args, **kwargs)
finally:
if not isinstance(filename, file):
f.close()
return _new_method
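# Usage sketch for ProgressIndicator (illustrative only):
#   progress = ProgressIndicator(prefix = "Checking objects ")
#   for obj in objects:
#       progress()              # cycles through |, /, -, \
#   progress.finish("done")     # prints the final message and a newline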
| {
"content_hash": "6abe50d6776e54693ad892680157ba54",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 78,
"avg_line_length": 32.864583333333336,
"alnum_prop": 0.5949286846275753,
"repo_name": "racker/omnibus",
"id": "dce83e60660825ba115c503ce0776955d90c1a44",
"size": "6333",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "source/git-1.7.6.4/git_remote_helpers/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "21896"
},
{
"name": "ActionScript",
"bytes": "7811"
},
{
"name": "Ada",
"bytes": "913692"
},
{
"name": "Assembly",
"bytes": "546596"
},
{
"name": "Awk",
"bytes": "147229"
},
{
"name": "C",
"bytes": "118056858"
},
{
"name": "C#",
"bytes": "1871806"
},
{
"name": "C++",
"bytes": "28581121"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CSS",
"bytes": "162089"
},
{
"name": "Clojure",
"bytes": "79070"
},
{
"name": "D",
"bytes": "4925"
},
{
"name": "DOT",
"bytes": "1898"
},
{
"name": "Emacs Lisp",
"bytes": "625560"
},
{
"name": "Erlang",
"bytes": "79712366"
},
{
"name": "FORTRAN",
"bytes": "3755"
},
{
"name": "Java",
"bytes": "5632652"
},
{
"name": "JavaScript",
"bytes": "1240931"
},
{
"name": "Logos",
"bytes": "119270"
},
{
"name": "Objective-C",
"bytes": "1088478"
},
{
"name": "PHP",
"bytes": "39064"
},
{
"name": "Pascal",
"bytes": "66389"
},
{
"name": "Perl",
"bytes": "4971637"
},
{
"name": "PowerShell",
"bytes": "1885"
},
{
"name": "Prolog",
"bytes": "5214"
},
{
"name": "Python",
"bytes": "912999"
},
{
"name": "R",
"bytes": "4009"
},
{
"name": "Racket",
"bytes": "2713"
},
{
"name": "Ragel in Ruby Host",
"bytes": "24585"
},
{
"name": "Rebol",
"bytes": "106436"
},
{
"name": "Ruby",
"bytes": "27360215"
},
{
"name": "Scala",
"bytes": "5487"
},
{
"name": "Scheme",
"bytes": "5036"
},
{
"name": "Scilab",
"bytes": "771"
},
{
"name": "Shell",
"bytes": "8793006"
},
{
"name": "Tcl",
"bytes": "3330919"
},
{
"name": "Visual Basic",
"bytes": "10926"
},
{
"name": "XQuery",
"bytes": "4276"
},
{
"name": "XSLT",
"bytes": "2003063"
},
{
"name": "eC",
"bytes": "4568"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.append("../tools")
import mergejs
import optparse
def build(config_file = None, output_file = None, options = None):
have_compressor = []
try:
import jsmin
have_compressor.append("jsmin")
except ImportError:
print "No jsmin"
try:
# tools/closure_library_jscompiler.py from:
# http://code.google.com/p/closure-library/source/browse/trunk/closure/bin/build/jscompiler.py
import closure_library_jscompiler as closureCompiler
have_compressor.append("closure")
except Exception, E:
print "No closure (%s)" % E
try:
import closure_ws
have_compressor.append("closure_ws")
except ImportError:
print "No closure_ws"
try:
import minimize
have_compressor.append("minimize")
except ImportError:
print "No minimize"
use_compressor = None
if options.compressor and options.compressor in have_compressor:
use_compressor = options.compressor
sourceDirectory = "../lib"
configFilename = "full.cfg"
outputFilename = "OpenLayers.js"
if config_file:
configFilename = config_file
extension = configFilename[-4:]
if extension != ".cfg":
configFilename = config_file + ".cfg"
if output_file:
outputFilename = output_file
print "Merging libraries."
try:
if use_compressor == "closure":
sourceFiles = mergejs.getNames(sourceDirectory, configFilename)
else:
merged = mergejs.run(sourceDirectory, None, configFilename)
except mergejs.MissingImport, E:
print "\nAbnormal termination."
sys.exit("ERROR: %s" % E)
if options.amdname:
options.amdname = "'" + options.amdname + "',"
else:
options.amdname = ""
if options.amd == 'pre':
print "\nAdding AMD function."
merged = "define(%sfunction(){%sreturn OpenLayers;});" % (options.amdname, merged)
print "Compressing using %s" % use_compressor
if use_compressor == "jsmin":
minimized = jsmin.jsmin(merged)
elif use_compressor == "minimize":
minimized = minimize.minimize(merged)
elif use_compressor == "closure_ws":
if len(merged) > 1000000: # The maximum file size for this web service is 1000 KB.
print "\nPre-compressing using jsmin"
merged = jsmin.jsmin(merged)
print "\nIs being compressed using Closure Compiler Service."
try:
minimized = closure_ws.minimize(merged)
except Exception, E:
print "\nAbnormal termination."
sys.exit("ERROR: Closure Compilation using Web service failed!\n%s" % E)
if len(minimized) <= 2:
print "\nAbnormal termination due to compilation errors."
sys.exit("ERROR: Closure Compilation using Web service failed!")
else:
print "Closure Compilation using Web service has completed successfully."
elif use_compressor == "closure":
jscompilerJar = "../tools/closure-compiler.jar"
if not os.path.isfile(jscompilerJar):
print "\nNo closure-compiler.jar; read README.txt!"
sys.exit("ERROR: Closure Compiler \"%s\" does not exist! Read README.txt" % jscompilerJar)
minimized = closureCompiler.Compile(
jscompilerJar,
sourceFiles, [
"--externs", "closure-compiler/Externs.js",
"--jscomp_warning", "checkVars", # To enable "undefinedVars"
"--jscomp_error", "checkRegExp", # Also necessary to enable "undefinedVars"
"--jscomp_error", "undefinedVars"
]
)
if minimized is None:
print "\nAbnormal termination due to compilation errors."
sys.exit("ERROR: Closure Compilation failed! See compilation errors.")
print "Closure Compilation has completed successfully."
else: # fallback
minimized = merged
if options.amd == 'post':
print "\nAdding AMD function."
minimized = "define(%sfunction(){%sreturn OpenLayers;});" % (options.amdname, minimized)
if options.status:
print "\nAdding status file."
minimized = "// status: " + file(options.status).read() + minimized
print "\nAdding license file."
minimized = file("license.txt").read() + minimized
print "Writing to %s." % outputFilename
file(outputFilename, "w").write(minimized)
print "Done."
if __name__ == '__main__':
opt = optparse.OptionParser(usage="%s [options] [config_file] [output_file]\n Default config_file is 'full.cfg', Default output_file is 'OpenLayers.js'")
opt.add_option("-c", "--compressor", dest="compressor", help="compression method: one of 'jsmin' (default), 'minimize', 'closure_ws', 'closure', or 'none'", default="jsmin")
opt.add_option("-s", "--status", dest="status", help="name of a file whose contents will be added as a comment at the front of the output file. For example, when building from a git repo, you can save the output of 'git describe --tags' in this file. Default is no file.", default=False)
opt.add_option("--amd", dest="amd", help="output should be AMD module; wrap merged files in define function; can be either 'pre' (before compilation) or 'post' (after compilation). Wrapping the OpenLayers var in a function means the filesize can be reduced by the closure compiler using 'pre', but be aware that a few functions depend on the OpenLayers variable being present. Either option can be used with jsmin or minimize compression. Default false, not AMD.", default=False)
opt.add_option("--amdname", dest="amdname", help="only useful with amd option. Name of AMD module. Default no name, anonymous module.", default=False)
(options, args) = opt.parse_args()
if not len(args):
build(options=options)
elif len(args) == 1:
build(args[0], options=options)
elif len(args) == 2:
build(args[0], args[1], options=options)
else:
print "Wrong number of arguments" | {
"content_hash": "87f6f333c2d313f77692173643e3d1ea",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 481,
"avg_line_length": 43.234042553191486,
"alnum_prop": 0.6389435695538058,
"repo_name": "boundlessgeo/openlayers",
"id": "da539a29ab4514ebfc812a11961fd8c4e1ca8bea",
"size": "6119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/build.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "21272"
},
{
"name": "HTML",
"bytes": "3229854"
},
{
"name": "JavaScript",
"bytes": "3568325"
},
{
"name": "Python",
"bytes": "78933"
},
{
"name": "Shell",
"bytes": "206"
}
],
"symlink_target": ""
} |
import binascii
import collections
import json
def formatting():
string = "Number 1: {0}, Number 2: {1:+012b}, Number 3: {2:010d}"
string = string.format(4, 6, -9)
assert string == "Number 1: 4, Number 2: +00000000110, Number 3: -000000009"
## The 2.7-style `str.encode('hex')` codec didn't make its way into 3.x.
## This is how you create hex representations of byte strings instead.
def encode_decode_hex():
string_binary = b"this is a test"
hex_string_binary = binascii.hexlify(string_binary)
assert str(hex_string_binary) == "b'7468697320697320612074657374'"
assert str(hex_string_binary, "ascii") == "7468697320697320612074657374"
string_binary = binascii.unhexlify(hex_string_binary)
assert str(string_binary) == "b'this is a test'"
assert str(string_binary, "ascii") == "this is a test"
Person = collections.namedtuple("Person", "name age")
def named_tuples():
person = Person("eric", 56)
print("Name: {0} Age: {1}".format(person.name, person.age))
def enum(file_path):
with open(file_path, "r") as f:
for lineno, line in enumerate(f, start=1):
print("Line {0}, Line Number: {1}".format(line, lineno))
def default_dictionary(word_list):
    counter = collections.defaultdict(int)  # By default, `int` returns 0
    for word in word_list:
        counter[word] += 1
    return counter
def set():
set1 = {1, 2, 3, 4}
set2 = {3, 4, 5, 6}
set1 |= set2 # union
def set_comp(max_int):
return {num for num in range(0, max_int)}
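## Quick sanity examples for set_comp (illustrative only):
##   set_comp(4) == {0, 1, 2, 3}
##   set_comp(1) == {0}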
## Lambdas work as unnamed functions applied to each element of a list. Here
## each element (`e`) is a tuple whose fields are reversed to form the sort
## key, so the sort runs on (second, first) while the stored values keep
## their original form.
def lambda_example():
x = [(1, 10), (2, 9), (3, 8)]
x.sort(key=lambda e: (e[1], e[0]))
assert x == [(3, 8), (2, 9), (1, 10)]
def lambda_example2():
    x = [1, 2, 3, 4, 5]
    x.sort(key=lambda e: 1 if e <= 3 else 0)
    assert x == [4, 5, 1, 2, 3]
def json_eval():
    x = json.loads('{"1":"a","2":"b"}')
    assert x['1'] == 'a'
    str_x = json.dumps(x, sort_keys=True)
    assert str_x == '{"1": "a", "2": "b"}'
def enumerate_dictionary():
    x = {1: 'a', 2: 'z'}
    for count, key in enumerate(x):
        # count starts at 0
        print("{}: {} -> {}".format(count, key, x[key]))
| {
"content_hash": "4871bce2deb39a32652106cf35a0ebd1",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 29.240506329113924,
"alnum_prop": 0.606926406926407,
"repo_name": "holycrap872/til",
"id": "b021d43a5e9aace54623d10131b89ce02d9c10bb",
"size": "2330",
"binary": false,
"copies": "1",
"ref": "refs/heads/mainline",
"path": "python3/quick_tricks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "9016"
},
{
"name": "C",
"bytes": "2205"
},
{
"name": "C++",
"bytes": "3545"
},
{
"name": "E",
"bytes": "902619"
},
{
"name": "Python",
"bytes": "16628"
},
{
"name": "Ruby",
"bytes": "3150"
},
{
"name": "Shell",
"bytes": "4093"
},
{
"name": "Vim script",
"bytes": "1369"
}
],
"symlink_target": ""
} |
"""Parse web services description language to get SOAP methods.
Rudimentary support."""
ident = '$Id: WSDL.py 1467 2008-05-16 23:32:51Z warnes $'
from version import __version__
import wstools
import xml
from Errors import Error
from Client import SOAPProxy, SOAPAddress
from Config import Config
import urllib
class Proxy:
"""WSDL Proxy.
SOAPProxy wrapper that parses method names, namespaces, soap actions from
the web service description language (WSDL) file passed into the
constructor. The WSDL reference can be passed in as a stream, an url, a
file name, or a string.
Loads info into self.methods, a dictionary with methodname keys and values
of WSDLTools.SOAPCallinfo.
For example,
url = 'http://www.xmethods.org/sd/2001/TemperatureService.wsdl'
wsdl = WSDL.Proxy(url)
print len(wsdl.methods) # 1
print wsdl.methods.keys() # getTemp
See WSDLTools.SOAPCallinfo for more info on each method's attributes.
"""
def __init__(self, wsdlsource, config=Config, **kw ):
reader = wstools.WSDLTools.WSDLReader()
self.wsdl = None
# From Mark Pilgrim's "Dive Into Python" toolkit.py--open anything.
if self.wsdl is None and hasattr(wsdlsource, "read"):
print 'stream:', wsdlsource
try:
self.wsdl = reader.loadFromStream(wsdlsource)
except xml.parsers.expat.ExpatError, e:
newstream = urllib.URLopener(key_file=config.SSL.key_file, cert_file=config.SSL.cert_file).open(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
# NOT TESTED (as of April 17, 2003)
#if self.wsdl is None and wsdlsource == '-':
# import sys
# self.wsdl = reader.loadFromStream(sys.stdin)
# print 'stdin'
if self.wsdl is None:
try:
file(wsdlsource)
self.wsdl = reader.loadFromFile(wsdlsource)
#print 'file'
except (IOError, OSError): pass
except xml.parsers.expat.ExpatError, e:
newstream = urllib.urlopen(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
if self.wsdl is None:
try:
stream = urllib.URLopener(key_file=config.SSL.key_file, cert_file=config.SSL.cert_file).open(wsdlsource)
self.wsdl = reader.loadFromStream(stream, wsdlsource)
except (IOError, OSError): pass
except xml.parsers.expat.ExpatError, e:
newstream = urllib.urlopen(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
if self.wsdl is None:
import StringIO
self.wsdl = reader.loadFromString(str(wsdlsource))
#print 'string'
# Package wsdl info as a dictionary of remote methods, with method name
# as key (based on ServiceProxy.__init__ in ZSI library).
self.methods = {}
service = self.wsdl.services[0]
port = service.ports[0]
name = service.name
binding = port.getBinding()
portType = binding.getPortType()
for operation in portType.operations:
callinfo = wstools.WSDLTools.callInfoFromWSDL(port, operation.name)
self.methods[callinfo.methodName] = callinfo
self.soapproxy = SOAPProxy('http://localhost/dummy.webservice',
config=config, **kw)
def __str__(self):
s = ''
for method in self.methods.values():
s += str(method)
return s
def __getattr__(self, name):
"""Set up environment then let parent class handle call.
        Raises AttributeError if the method name is not found."""
if not self.methods.has_key(name): raise AttributeError, name
callinfo = self.methods[name]
self.soapproxy.proxy = SOAPAddress(callinfo.location)
self.soapproxy.namespace = callinfo.namespace
self.soapproxy.soapaction = callinfo.soapAction
return self.soapproxy.__getattr__(name)
def show_methods(self):
for key in self.methods.keys():
method = self.methods[key]
print "Method Name:", key.ljust(15)
print
inps = method.inparams
for parm in range(len(inps)):
details = inps[parm]
print " In #%d: %s (%s)" % (parm, details.name, details.type)
print
outps = method.outparams
for parm in range(len(outps)):
details = outps[parm]
print " Out #%d: %s (%s)" % (parm, details.name, details.type)
print
| {
"content_hash": "d3eae120bb6e43b4defeea0966e81b94",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 123,
"avg_line_length": 37.22627737226277,
"alnum_prop": 0.5798039215686275,
"repo_name": "wpa/jirash",
"id": "84f7d3f5c85b9dda001f59a923a7a846ef86dac4",
"size": "5100",
"binary": false,
"copies": "294",
"ref": "refs/heads/master",
"path": "deps/SOAPpy/WSDL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109591"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
} |
import os
import sqlite3
import re
from flask import Flask
from flask import send_from_directory
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
DATABASE = './dream.db'
app = Flask(__name__)
@app.route('/')
def go_home():
return redirect(url_for('handle_search'))
#remember to replace form.html with actual path
@app.route('/form.html', methods=['POST'])
def handle_search():
descrp = request.form['project_description']
return generate_result_page(search_dream(descrp))
def re_fn(expr, item):
reg = re.compile(expr, re.I)
return reg.search(item) is not None
def search_dream(dream):
conn = sqlite3.connect(DATABASE)
conn.create_function("REGEXP", 2, re_fn)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS dreams (dream text, user text)")
conn.commit()
#c.execute("SELECT * FROM dreams WHERE dream=?", [dream,])
c.execute("SELECT * FROM dreams WHERE dream REGEXP ?", [r"[^=]{0,255}"+dream+r"[^=]{0,255}",])
return c.fetchall()
def generate_result_page(dream_list):
print(dream_list)
return render_template('result.html', dreams=dream_list)
@app.route('/<path>_profile')
def handle_profile(path):
conn = sqlite3.connect(DATABASE)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS profiles (name text, profile text)")
c.execute("SELECT profile FROM profiles WHERE name=?", [path,])
smr = c.fetchone()[0]
c.execute("SELECT dream FROM dreams WHERE user=?", [path,])
lis = c.fetchall()
return render_template('profile.html', name=path, summary=smr, dreamlist=lis)
@app.route('/<path>_profile', methods=['POST'])
def handle_insert(path):
dr = request.form['dream']
return create_dream(dr, path)
def create_dream(dream, user):
if len(dream)<1:
return redirect(url_for("handle_profile", path=user))
conn = sqlite3.connect(DATABASE)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS dreams (dream text, user text)")
c.execute("INSERT INTO dreams VALUES (?, ?)", [dream, user,])
conn.commit()
return redirect(url_for("handle_profile", path=user))
@app.route('/delete_<dream>_<user>')
def delete_dream(dream, user):
conn = sqlite3.connect(DATABASE)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS dreams (dream text, user text)")
c.execute("DELETE FROM dreams WHERE dream=?", [dream,])
conn.commit()
return redirect(url_for("handle_profile", path=user))
@app.route('/newprofile')
def return_new():
return render_template("newprofile.html")
@app.route('/newprofile', methods=['POST'])
def create_profile():
descrp = request.form['user']
if len(descrp) < 1:
return render_template("newprofile.html")
smr = request.form['smr']
if len(smr) < 1:
return render_template("newprofile.html")
print(descrp)
conn = sqlite3.connect(DATABASE)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS dreams (dream text, user text)")
c.execute("INSERT INTO profiles VALUES (?, ?)", [descrp, smr,])
c.execute("INSERT INTO dreams VALUES (?, ?)", ["complete all dreams", descrp,])
conn.commit()
return redirect(url_for("handle_profile", path=descrp))
#A catch all function :) goal-keeper
@app.route('/<path:path>')
def catch_all(path):
    return send_from_directory('../', path)
if __name__ == '__main__':
os.chdir('.')
app.run(host='0.0.0.0', port=80)
| {
"content_hash": "a656d15ae14dccaab10867969aaabf55",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 98,
"avg_line_length": 33.114285714285714,
"alnum_prop": 0.6617773943054357,
"repo_name": "GitHubdeWill/teamyourdreams",
"id": "fc7bf86a3872d387eb39cea07ea44f52845beb04",
"size": "3499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BackendServer/Server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "310726"
},
{
"name": "HTML",
"bytes": "17308"
},
{
"name": "JavaScript",
"bytes": "618412"
},
{
"name": "Python",
"bytes": "3499"
}
],
"symlink_target": ""
} |
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import UnaryMaths
def test_UnaryMaths_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
operation=dict(argstr='-%s',
mandatory=True,
position=4,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(mandatory=True,
nohash=True,
),
)
inputs = UnaryMaths.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_UnaryMaths_outputs():
output_map = dict(out_file=dict(),
)
outputs = UnaryMaths.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "d216a567c2e4d7b6c1e70348c0c72ae7",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.6184012066365008,
"repo_name": "rameshvs/nipype",
"id": "540decac1ce1f8cd58929ddda98d1ef9eb6bbaee",
"size": "1380",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10156"
},
{
"name": "JavaScript",
"bytes": "25919"
},
{
"name": "Matlab",
"bytes": "5018"
},
{
"name": "Python",
"bytes": "3855189"
},
{
"name": "Shell",
"bytes": "2959"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
from AccessControl import ClassSecurityInfo
from Products.Archetypes.public import *
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.config import PROJECTNAME
from bika.lims.interfaces import IReportFolder, IHaveNoBreadCrumbs
from plone.app.folder.folder import ATFolder, ATFolderSchema
from zope.interface import implements
schema = ATFolderSchema.copy()
class ReportFolder(ATFolder):
implements(IReportFolder, IHaveNoBreadCrumbs)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
registerType(ReportFolder, PROJECTNAME)
| {
"content_hash": "bc6f0bf17a2e8c75687bcab98cb95740",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.8223684210526315,
"repo_name": "hocinebendou/bika.gsoc",
"id": "83c7621926813583b6d47b1d770d965e8ce92c5b",
"size": "608",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bika/lims/content/reportfolder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
import nova.conf
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = nova.conf.CONF
class FlavorManageSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = 'flavor-manage'
def _get_flags(self):
f = super(FlavorManageSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavormanage.'
'Flavormanage')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavor_disabled.'
'Flavor_disabled')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavor_access.'
'Flavor_access')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavorextradata.'
'Flavorextradata')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavor_swap.'
'Flavor_swap')
return f
def _create_flavor(self):
"""Create a flavor."""
subs = {
'flavor_id': '10',
'flavor_name': "test_flavor"
}
response = self._do_post("flavors",
"flavor-create-post-req",
subs)
self._verify_response("flavor-create-post-resp", subs, response, 200)
# TODO(sdague): remove duplication
def test_create_flavor(self):
# Get api sample to create a flavor.
self._create_flavor()
def test_delete_flavor(self):
# Get api sample to delete a flavor.
self._create_flavor()
response = self._do_delete("flavors/10")
self.assertEqual(202, response.status_code)
self.assertEqual('', response.content)
| {
"content_hash": "d247bd9c271433839134a9bfa317dc70",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 36.55769230769231,
"alnum_prop": 0.599158337716991,
"repo_name": "bigswitch/nova",
"id": "aee25d13fc3dc311ace8f113d55bc733ae900d55",
"size": "2533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/test_flavor_manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17220528"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.conf.urls import include, url
from wagtail.api.v2 import urls as wagtailapi2_urls
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.contrib.wagtailsitemaps.views import sitemap
from wagtail.tests.testapp import urls as testapp_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailimages import urls as wagtailimages_urls
from wagtail.wagtailimages.tests import urls as wagtailimages_test_urls
from wagtail.wagtailsearch import urls as wagtailsearch_urls
urlpatterns = [
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^search/', include(wagtailsearch_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^testimages/', include(wagtailimages_test_urls)),
url(r'^images/', include(wagtailimages_urls)),
url(r'^api/', include(wagtailapi_urls)),
url(r'^api/', include(wagtailapi2_urls)),
url(r'^sitemap\.xml$', sitemap),
url(r'^testapp/', include(testapp_urls)),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
]
| {
"content_hash": "2afba4670f0723054905a4f600d7f0c3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 73,
"avg_line_length": 40.5,
"alnum_prop": 0.7569444444444444,
"repo_name": "hamsterbacke23/wagtail",
"id": "b1d4a5198be2b17dd4701ce8d7f1365842685514",
"size": "1296",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/tests/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "172736"
},
{
"name": "HTML",
"bytes": "291553"
},
{
"name": "JavaScript",
"bytes": "116387"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "2243460"
},
{
"name": "Shell",
"bytes": "7387"
}
],
"symlink_target": ""
} |
from TwitterSearch import *
import time
import requests
from tweet2geoJSON import format2geoJSON
# see: https://dev.twitter.com/rest/reference/get/search/tweets
# BASE_URL = "https://api.twitter.com/1.1/search/tweets.json"
# and https://github.com/ckoepp/TwitterSearch
from API_KEYS import consumer_key, consumer_secret, access_token, access_token_secret
keywordsThere = True
keywords = ["#earthquake", "#quake", "#shake", "#quakeAlert", "shakeAlert", "earthquake", "quake", "shake", "Prelim M"]
keyword_filesThere = False
keyword_files = ["corpori/animals_unique_translated.txt", "corpori/adjectives_unique_translated.txt", "corpori/earthquakes_unique_translated.txt"]
outputfile = "tweets.txt"
outputgeo = "tweets.geojson"
def get_keywords(filename):
keywords = []
with open(filename, 'r') as inF:
for line in inF:
keywords.append(line)
inF.close()
return keywords #(filename, keywords)
def test_language(ts, tso, language):
    try:
        # load currently supported languages by Twitter and store them in the TwitterSearchOrder object
        ts.set_supported_languages(tso)
        # try to set language (see ISO 639-1) on the search order
        tso.set_language(language)
print('{0} seems to be officially supported by Twitter. Yay!\n'.format(language))
return 0
except TwitterSearchException as e:
        # if we get a 1002 code it means that <language> is not supported (see TwitterSearchException)
if e.code == 1002:
print('Oh no - {0} is not supported :(\n'.format(language))
print(e)
return 0
def searchTweets(keywordLists=None, keywords=None, language=None, geo_lat=None, geo_lng=None, geo_rad=None, timeStart=None, timeStop=None, no_entities=False, no_retweets=False, no_links=False, no_answers=False):
tweetsFound = []
tweetsCount = 0
tso = TwitterSearchOrder()
    # remove all restrictions from previous calls:
tso.remove_all_filters()
# this makes sure no videos/pics are commented
tso.set_keywords(["-video", "-pic", "-foto", "-funny", "-clip", "-vid", "-movie", "-song"]) # append more synonyms and other languages TODO
    try:
if keywordLists != None:
for keywordList in keywordLists:
tso.add_keyword(keywordList, or_operator=True)
if keywords != None:
for keyword in keywords:
tso.add_keyword(keyword, or_operator=True)
if language != None:
tso.set_language(str(language))
if geo_rad != None and geo_lat != None and geo_lng != None:
tso.set_geocode(geo_lat, geo_lng, geo_rad, imperial_metric=True) # must be of format: str(lat,lng,radius) + 'km'/'mi'
if timeStart != None:
tso.add_keyword('since:' + str(timeStart)) # time has to be of the format: YYYY-MM-DD
if timeStop != None:
tso.add_keyword('until:' + str(timeStop)) # time has to be of the format: YYYY-MM-DD
if no_entities:
tso.set_include_entities(False)
if no_retweets:
pass #tso.set_include_rts(False) #TODO
if no_links:
pass #TODO
if no_answers:
pass #tso.set_exclude_replies(True) #TODO
# Maybe use sentiment analysis? // tso.set_negative_attitude_filter()
ts = TwitterSearch(
consumer_key = consumer_key,
consumer_secret = consumer_secret,
access_token = access_token,
access_token_secret = access_token_secret)
for tweet in ts.search_tweets_iterable(tso, callback=my_callback):
#tweetsFound.append(tweet)
tweetsCount += 1
# write to .txt file
with open(outputfile, 'a+') as outP:
outP.write(str(tweet))
outP.write('\n')
outP.close()
# convert and write as geoJSON:
with open(outputgeo, 'a+') as outPgeo:
outPgeo.write(format2geoJSON(tweet))
outPgeo.close()
print( '@%s tweeted: %s\n' % ( tweet['user']['screen_name'], tweet['text'] ) )
except TwitterSearchException as e:
print(e)
except requests.exceptions.SSLError as e:
print(e)
return tweetsCount
def my_callback(current_ts_instance): # accepts ONE argument: an instance of TwitterSearch
queries, tweets_seen = current_ts_instance.get_statistics()
if queries > 0 and (queries % 10) == 0: # trigger delay every 10th query
time.sleep(10) # sleep for 10 seconds
if __name__=='__main__':
keywordLists = []
if keyword_filesThere:
for keyword_file in keyword_files:
keywordLists.append(get_keywords(keyword_file))
searchTweets(keywordLists=keywordLists, no_retweets=True, no_links=True, no_answers=True) #SF Area: geo_lat=34.0, geo_lng=-118.0, geo_rad=10,
elif keywordsThere:
searchTweets(keywords=keywords, no_retweets=True, no_links=False, no_answers=True)
| {
"content_hash": "9a750aa7c7ab9c1d40e7b7fce5a8be83",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 211,
"avg_line_length": 42.315384615384616,
"alnum_prop": 0.57607707689511,
"repo_name": "passiweinberger/passiweinberger.github.io",
"id": "4796b1f02c2797a386d1203113c2c52c0b7145e4",
"size": "5526",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "projects/nostradamIQ/services/twitter/get_tweets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import sys
class InvalidDocumentTree(Exception):
pass
class InternalError(Exception):
pass
class UnexpectedMethodCall(InternalError):
def __init__(self, cls, method_name=None):
super(UnexpectedMethodCall, self).__init__()
# If no method name is supplied, then
# assume it is from the function raising the
# exception. Technique taken from:
# http://code.activestate.com/recipes/66062-determining-current-function-name/
if method_name is None:
method_name = sys._getframe(1).f_code.co_name
self.cls = cls
self.method_name = method_name
def __str__(self):
return 'UnexpectedMethodCall: %s.%s' % \
(self.cls, self.method_name)
| {
"content_hash": "587387198220a729667a42156dc2224b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 86,
"avg_line_length": 27.74074074074074,
"alnum_prop": 0.6328437917222964,
"repo_name": "mikehulluk/mredoc",
"id": "daebf0c0dd999572e5654764e3413a44e556baf9",
"size": "2487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mredoc/errors.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "121764"
}
],
"symlink_target": ""
} |
import asyncio
import atexit
import codecs
import contextlib
import io
import os
import subprocess
import sys
import traceback
from .error import PrintableError
# Prior to Python 3.8 (which switched to the ProactorEventLoop by default on
# Windows), the default event loop on Windows doesn't support subprocesses, so
# we need to use the proactor loop. See:
# https://docs.python.org/3/library/asyncio-eventloops.html#available-event-loops
# Because the event loop is essentially a global variable, we have to set this
# at import time. Otherwise asyncio objects that get instantiated early
# (particularly Locks and Semaphores) could grab a reference to the wrong loop.
# TODO: Importing for side effects isn't very clean. Find a better way.
if os.name == 'nt':
EVENT_LOOP = asyncio.ProactorEventLoop()
else:
EVENT_LOOP = asyncio.new_event_loop()
asyncio.set_event_loop(EVENT_LOOP)
# We also need to make sure the event loop is explicitly closed, to avoid a bug
# in _UnixSelectorEventLoop.__del__. See http://bugs.python.org/issue23548.
atexit.register(EVENT_LOOP.close)
def run_task(coro):
return EVENT_LOOP.run_until_complete(coro)
class GatheredExceptions(PrintableError):
def __init__(self, exceptions, reprs):
assert len(exceptions) > 0
self.exceptions = []
self.reprs = []
for e, st in zip(exceptions, reprs):
# Flatten in the exceptions list of any other GatheredExceptions we
# see. (This happens, for example, if something throws inside a
# recursive module.)
if isinstance(e, GatheredExceptions):
self.exceptions.extend(e.exceptions)
else:
self.exceptions.append(e)
# Don't flatten the reprs. This would make us lose PrintableError
# context. TODO: Represent context in a more structured way?
self.reprs.append(st)
self.message = "\n\n".join(self.reprs)
async def gather_coalescing_exceptions(coros, display, *, verbose):
'''The tricky thing about running multiple coroutines in parallel is what
we're supposed to do when one of them raises an exception. The approach
we're using here is to catch exceptions and keep waiting for other tasks to
finish. At the end, we reraise a GatheredExceptions error, if any
exceptions were caught.
Another minor detail: We also want to make sure to start coroutines in the
order given, so that they end up appearing to the user alphabetically in
the fancy display. Note that asyncio.gather() puts coroutines in a set
internally, so we schedule coroutines *before* we give them to gather().
'''
exceptions = []
reprs = []
async def catching_wrapper(coro):
try:
return (await coro)
except Exception as e:
exceptions.append(e)
if isinstance(e, PrintableError) and not verbose:
reprs.append(e.message)
else:
reprs.append(traceback.format_exc())
return None
# Suppress a deprecation warning in Python 3.5, while continuing to support
# 3.3 and early 3.4 releases.
if hasattr(asyncio, 'ensure_future'):
schedule = getattr(asyncio, 'ensure_future')
else:
schedule = getattr(asyncio, 'async')
futures = [schedule(catching_wrapper(coro)) for coro in coros]
results = await asyncio.gather(*futures)
if exceptions:
raise GatheredExceptions(exceptions, reprs)
else:
return results
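# A minimal usage sketch of the helper above (illustrative only; the coroutine,
# its inputs, and the display object are assumptions, not part of this module):
#
#     async def fetch(name):
#         ...
#     coros = [fetch(n) for n in ['a', 'b', 'c']]
#     results = run_task(
#         gather_coalescing_exceptions(coros, display, verbose=False))
#
# If any coroutine raised, a single GatheredExceptions is raised at the end,
# carrying every caught exception together with its formatted traceback.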
async def create_subprocess_with_handle(command,
display_handle,
*,
shell=False,
cwd,
**kwargs):
'''Writes subprocess output to a display handle as it comes in, and also
returns a copy of it as a string. Throws if the subprocess returns an
    error. Note that cwd is a required keyword-only argument, on the theory that
peru should never start child processes "wherever I happen to be running
right now."'''
# We're going to get chunks of bytes from the subprocess, and it's possible
# that one of those chunks ends in the middle of a unicode character. An
# incremental decoder keeps those dangling bytes around until the next
# chunk arrives, so that split characters get decoded properly. Use
# stdout's encoding, but provide a default for the case where stdout has
# been redirected to a StringIO. (This happens in tests.)
encoding = sys.stdout.encoding or 'utf8'
decoder_factory = codecs.getincrementaldecoder(encoding)
decoder = decoder_factory(errors='replace')
output_copy = io.StringIO()
# Display handles are context managers. Entering and exiting the display
# handle lets the display know when the job starts and stops.
with display_handle:
stdin = asyncio.subprocess.DEVNULL
stdout = asyncio.subprocess.PIPE
stderr = asyncio.subprocess.STDOUT
if shell:
proc = await asyncio.create_subprocess_shell(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
**kwargs)
else:
proc = await asyncio.create_subprocess_exec(
*command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd,
**kwargs)
        # Read all the output from the subprocess as it comes in.
while True:
outputbytes = await proc.stdout.read(4096)
if not outputbytes:
break
outputstr = decoder.decode(outputbytes)
outputstr_unified = _unify_newlines(outputstr)
display_handle.write(outputstr_unified)
output_copy.write(outputstr_unified)
returncode = await proc.wait()
if returncode != 0:
raise subprocess.CalledProcessError(returncode, command,
output_copy.getvalue())
if hasattr(decoder, 'buffer'):
# The utf8 decoder has this attribute, but some others don't.
assert not decoder.buffer, 'decoder nonempty: ' + repr(decoder.buffer)
return output_copy.getvalue()
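# Hypothetical call site for the function above (the display handle is assumed
# to be a context manager with a write() method, and the command and cwd are
# placeholder values, not taken from this module):
#
#     output = await create_subprocess_with_handle(
#         ['git', 'status'], display_handle, cwd='/tmp/somewhere')
#
# The command's combined stdout/stderr is streamed to the handle as it arrives
# and is also returned as a single string; a non-zero exit raises
# subprocess.CalledProcessError.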
def _unify_newlines(s):
r'''Because all asyncio subprocess output is read in binary mode, we don't
get universal newlines for free. But it's the right thing to do, because we
do all our printing with strings in text mode, which translates "\n" back
into the platform-appropriate line separator. So for example, "\r\n" in a
string on Windows will become "\r\r\n" when it gets printed. This function
ensures that all newlines are represented as "\n" internally, which solves
that problem and also helps our tests work on Windows. Right now we only
handle Windows, but we can expand this if there's ever another newline
style we have to support.'''
return s.replace('\r\n', '\n')
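# Doctest-style illustration of the helper above (the example input is chosen
# here, not taken from the original docstring):
#
#     >>> _unify_newlines('line one\r\nline two\n')
#     'line one\nline two\n'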
async def safe_communicate(process, input=None):
'''Asyncio's communicate method has a bug where `communicate(input=b"")` is
    treated the same as `communicate()`. That means that child processes can
hang waiting for input, when their stdin should be closed. See
https://bugs.python.org/issue26848. The issue is fixed upstream in
https://github.com/python/asyncio/commit/915b6eaa30e1e3744e6f8223f996e197c1c9b91d,
but we will probably always need this workaround for old versions.'''
if input is not None and len(input) == 0:
process.stdin.close()
return (await process.communicate())
else:
return (await process.communicate(input))
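# Sketch of how this wrapper might be called (the process object is assumed to
# come from asyncio.create_subprocess_exec with stdin=PIPE):
#
#     stdout, stderr = await safe_communicate(proc, input=b'')
#
# With an empty bytes input, stdin is closed explicitly instead of being left
# open, which is the whole point of the workaround described above.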
class RaisesGatheredContainer:
def __init__(self):
self.exception = None
@contextlib.contextmanager
def raises_gathered(error_type):
'''For use in tests. Many tests expect a single error to be thrown, and
want it to be of a specific type. This is a helper method for when that
type is inside a gathered exception.'''
container = RaisesGatheredContainer()
try:
yield container
except GatheredExceptions as e:
# Make sure there is exactly one exception.
if len(e.exceptions) != 1:
raise
inner = e.exceptions[0]
# Make sure the exception is the right type.
if not isinstance(inner, error_type):
raise
# Success.
container.exception = inner
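# Minimal test-style sketch of the context manager above (the error type and
# the failing coroutine are placeholders for illustration):
#
#     with raises_gathered(ValueError) as caught:
#         run_task(gather_coalescing_exceptions([boom()], display, verbose=True))
#     assert isinstance(caught.exception, ValueError)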
| {
"content_hash": "9d26885a800f345e00018f01152e740a",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 86,
"avg_line_length": 38.626126126126124,
"alnum_prop": 0.652128279883382,
"repo_name": "buildinspace/peru",
"id": "06099064d315ec59325572d580c685f56982f056",
"size": "8575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peru/async_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "325"
},
{
"name": "Makefile",
"bytes": "929"
},
{
"name": "Python",
"bytes": "289631"
},
{
"name": "Shell",
"bytes": "4200"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from django.contrib import admin
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
from .models import Event, EventType, EventImage
event_fieldsets = deepcopy(PageAdmin.fieldsets)
event_fieldsets[0][1]['fields'].remove('in_menus')
event_fieldsets[0][1]['fields'].remove('login_required')
event_fieldsets[0][1]['fields'].remove('status')
event_fieldsets[0][1]['fields'].remove(('publish_date', 'expiry_date'))
event_fieldsets[1][1]['fields'].append('status')
event_fieldsets[1][1]['fields'].append(('publish_date', 'expiry_date'))
event_fieldsets[0][1]["fields"].extend([
"content", "start", "end", "type", "zip_import", "in_menus", 'login_required'])
class EventImageInline(TabularDynamicInlineAdmin):
model = EventImage
class EventAdmin(PageAdmin):
inlines = (EventImageInline,)
fieldsets = event_fieldsets
def get_form(self, request, obj=None, **kwargs):
"""Don't require content even when status is published"""
form = super(PageAdmin, self).get_form(request, obj, **kwargs)
def clean_content(form):
content = form.cleaned_data.get("content")
return content
form.clean_content = clean_content
return form
admin.site.register(Event, EventAdmin)
admin.site.register(EventType)
| {
"content_hash": "8251cb461547225d6451be369b2e3e2c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 83,
"avg_line_length": 33,
"alnum_prop": 0.7036215816703622,
"repo_name": "shurik/mezzanine.calendar",
"id": "03ced7e54f19dedae5890305eeb76515115aca40",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/calendar/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1120"
}
],
"symlink_target": ""
} |
import string
import random
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.forms import PasswordChangeForm, SetPasswordForm
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit, Field, HTML, Div
from crispy_forms.bootstrap import FormActions
class LoginForm(forms.Form):
"""
Super simple login form
"""
email = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
# Form Layout
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-4'
helper.field_class = 'col-md-8'
helper.layout = Layout(
Fieldset(
'Please Login',
Field('email', placeholder='[email protected]'),
Field('password', placeholder='123456'),
),
FormActions(
Submit('login', 'Login', css_class="button pull-right"),
#HTML('<br/><a href="{% url \'password_reset\' %}">Recover Password</a>'),
)
)
class UserChangeForm(forms.ModelForm):
"""
A form for updating users.
"""
user_level = forms.ChoiceField(choices=Group.objects.all().values_list(), label='User Level')
is_active = forms.ChoiceField(choices=((True, 'Active'), (False, 'Disabled')), label='Status')
# Form Layout
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-4'
helper.field_class = 'col-md-8'
helper.layout = Layout(
Field('email'),
Field('first_name'),
Field('last_name'),
Field('user_level'),
Field('is_active'),
Field('template'),
FormActions(
Submit('btnSubmit', 'Submit', css_class="button btn-primary pull-right"),
),
)
class Meta:
model = get_user_model()
def __init__(self, *args, **kwargs):
# form instance and initial values
initial = kwargs.get('initial', {})
instance = kwargs.get('instance', {})
user_is_admin = kwargs.pop('user_is_admin', False)
# Set initial values for the non-model questions
if instance:
# Get user's group
groups = instance.groups.all()
initial['user_level'] = groups[0].id if groups.exists() else None
# Map is_active question to model property
initial['is_active'] = instance.is_active
kwargs['initial'] = initial
super(UserChangeForm, self).__init__(*args, **kwargs)
self.fields['password'].required = False
self.fields['last_login'].required = False
self.fields['date_joined'].required = False
self.fields['template'].required = False
if not user_is_admin:
self.fields.pop('user_level', None)
self.fields.pop('is_active', None)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
    ## Set the hidden inputs to their initial values since we don't want them tampered with
def clean_password(self):
return self.initial["password"]
def clean_last_login(self):
return self.initial["last_login"]
def clean_date_joined(self):
return self.initial["date_joined"]
def save(self, commit=True):
"""
Save the model instance with the correct Auth Group based on the user_level question
"""
instance = super(UserChangeForm, self).save(commit=commit)
if commit:
instance.save()
# Assign user to selected group
if self.cleaned_data.get('user_level', False):
instance.groups.clear()
instance.groups.add(Group.objects.get(id=self.cleaned_data['user_level']))
# Set staff status based on user group
instance.is_staff = instance.user_is_admin()
instance.save()
return instance
class UserCreationForm(UserChangeForm):
"""
A form for creating new users. Includes all the required fields, plus a
repeated password.
"""
error_messages = {'duplicate_email': _("A user with that email already exists."), }
class Meta:
model = get_user_model()
def clean_date_joined(self):
return now()
def clean_last_login(self):
return now()
def clean_email(self):
"""
Set a nicer error message than the ORM.
"""
email = self.cleaned_data["email"]
try:
get_user_model()._default_manager.get(email=email)
except get_user_model().DoesNotExist:
return email
raise forms.ValidationError(self.error_messages['duplicate_email'])
def clean_password(self):
"""
Generate a random 32 char password for this user
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
def save(self, commit=True):
"""
Save the model instance with the correct Auth Group based on the user_level question
"""
instance = super(UserCreationForm, self).save(commit=commit)
instance.set_password(self.cleaned_data['password'])
instance.save()
return instance
class UserPasswordChangeForm(PasswordChangeForm):
# Form Layout
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-md-4'
helper.field_class = 'col-md-8'
helper.layout = Layout(
Field('old_password'),
Field('new_password1'),
Field('new_password2'),
FormActions(
Submit('btnSubmit', 'Submit', css_class="button btn-primary pull-right"),
),
)
class UserPasswordCreateForm(SetPasswordForm):
class Meta:
fields = ('new_password1', 'new_password2')
labels = {'new_password1': 'Password', 'new_password2': 'Confirm'}
help_texts = {'new_password1': 'Create a password for you account.'} | {
"content_hash": "7e19d564403170c47c4c951dfc17d6d7",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 98,
"avg_line_length": 30.85,
"alnum_prop": 0.6144246353322529,
"repo_name": "Hedde/fabric-bolt",
"id": "eae80c77bde93613c10ff0a4e9dd0f93ec5e2648",
"size": "6170",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/venv_support",
"path": "fabric_bolt/accounts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "779"
},
{
"name": "JavaScript",
"bytes": "102422"
},
{
"name": "Python",
"bytes": "128175"
}
],
"symlink_target": ""
} |
"""
This module redefines ``str`` on Python 2.x to be a subclass of the Py2
``unicode`` type that behaves like the Python 3.x ``str``.
The main differences between ``newstr`` and Python 2.x's ``unicode`` type are
the stricter type-checking and absence of a `u''` prefix in the representation.
It is designed to be used together with the ``unicode_literals`` import
as follows:
>>> from __future__ import unicode_literals
>>> from builtins import str, isinstance
On Python 3.x and normally on Python 2.x, these expressions hold
>>> str('blah') is 'blah'
True
>>> isinstance('blah', str)
True
However, on Python 2.x, with this import:
>>> from __future__ import unicode_literals
the same expressions are False:
>>> str('blah') is 'blah'
False
>>> isinstance('blah', str)
False
This module is designed to be imported together with ``unicode_literals`` on
Python 2 to bring the meaning of ``str`` back into alignment with unprefixed
string literals (i.e. ``unicode`` subclasses).
Note that ``str()`` (and ``print()``) would then normally call the
``__unicode__`` method on objects in Python 2. To define string
representations of your objects portably across Py3 and Py2, use the
:func:`python_2_unicode_compatible` decorator in :mod:`future.utils`.
"""
from numbers import Number
from future.utils import PY3, istext, with_metaclass, isnewbytes
from future.types import no, issubset
from future.types.newobject import newobject
if PY3:
# We'll probably never use newstr on Py3 anyway...
unicode = str
from collections.abc import Iterable
else:
from collections import Iterable
class BaseNewStr(type):
def __instancecheck__(cls, instance):
if cls == newstr:
return isinstance(instance, unicode)
else:
return issubclass(instance.__class__, cls)
class newstr(with_metaclass(BaseNewStr, unicode)):
"""
A backport of the Python 3 str object to Py2
"""
no_convert_msg = "Can't convert '{0}' object to str implicitly"
def __new__(cls, *args, **kwargs):
"""
From the Py3 str docstring:
str(object='') -> str
str(bytes_or_buffer[, encoding[, errors]]) -> str
Create a new string object from the given object. If encoding or
errors is specified, then the object must expose a data buffer
that will be decoded using the given encoding and error handler.
Otherwise, returns the result of object.__str__() (if defined)
or repr(object).
encoding defaults to sys.getdefaultencoding().
errors defaults to 'strict'.
"""
if len(args) == 0:
return super(newstr, cls).__new__(cls)
# Special case: If someone requests str(str(u'abc')), return the same
# object (same id) for consistency with Py3.3. This is not true for
# other objects like list or dict.
elif type(args[0]) == newstr and cls == newstr:
return args[0]
elif isinstance(args[0], unicode):
value = args[0]
elif isinstance(args[0], bytes): # i.e. Py2 bytes or newbytes
if 'encoding' in kwargs or len(args) > 1:
value = args[0].decode(*args[1:], **kwargs)
else:
value = args[0].__str__()
else:
value = args[0]
return super(newstr, cls).__new__(cls, value)
def __repr__(self):
"""
Without the u prefix
"""
value = super(newstr, self).__repr__()
# assert value[0] == u'u'
return value[1:]
def __getitem__(self, y):
"""
Warning: Python <= 2.7.6 has a bug that causes this method never to be called
when y is a slice object. Therefore the type of newstr()[:2] is wrong
(unicode instead of newstr).
"""
return newstr(super(newstr, self).__getitem__(y))
def __contains__(self, key):
errmsg = "'in <string>' requires string as left operand, not {0}"
# Don't use isinstance() here because we only want to catch
# newstr, not Python 2 unicode:
if type(key) == newstr:
newkey = key
elif isinstance(key, unicode) or isinstance(key, bytes) and not isnewbytes(key):
newkey = newstr(key)
else:
raise TypeError(errmsg.format(type(key)))
return issubset(list(newkey), list(self))
@no('newbytes')
def __add__(self, other):
return newstr(super(newstr, self).__add__(other))
@no('newbytes')
def __radd__(self, left):
" left + self "
try:
return newstr(left) + self
except:
return NotImplemented
def __mul__(self, other):
return newstr(super(newstr, self).__mul__(other))
def __rmul__(self, other):
return newstr(super(newstr, self).__rmul__(other))
def join(self, iterable):
errmsg = 'sequence item {0}: expected unicode string, found bytes'
for i, item in enumerate(iterable):
# Here we use type() rather than isinstance() because
# __instancecheck__ is being overridden. E.g.
# isinstance(b'abc', newbytes) is True on Py2.
if isnewbytes(item):
raise TypeError(errmsg.format(i))
# Support use as a staticmethod: str.join('-', ['a', 'b'])
if type(self) == newstr:
return newstr(super(newstr, self).join(iterable))
else:
return newstr(super(newstr, newstr(self)).join(iterable))
@no('newbytes')
def find(self, sub, *args):
return super(newstr, self).find(sub, *args)
@no('newbytes')
def rfind(self, sub, *args):
return super(newstr, self).rfind(sub, *args)
@no('newbytes', (1, 2))
def replace(self, old, new, *args):
return newstr(super(newstr, self).replace(old, new, *args))
def decode(self, *args):
raise AttributeError("decode method has been disabled in newstr")
def encode(self, encoding='utf-8', errors='strict'):
"""
Returns bytes
Encode S using the codec registered for encoding. Default encoding
is 'utf-8'. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle UnicodeEncodeErrors.
"""
from future.types.newbytes import newbytes
# Py2 unicode.encode() takes encoding and errors as optional parameter,
# not keyword arguments as in Python 3 str.
# For the surrogateescape error handling mechanism, the
# codecs.register_error() function seems to be inadequate for an
# implementation of it when encoding. (Decoding seems fine, however.)
# For example, in the case of
# u'\udcc3'.encode('ascii', 'surrogateescape_handler')
# after registering the ``surrogateescape_handler`` function in
# future.utils.surrogateescape, both Python 2.x and 3.x raise an
# exception anyway after the function is called because the unicode
# string it has to return isn't encodable strictly as ASCII.
if errors == 'surrogateescape':
if encoding == 'utf-16':
# Known to fail here. See test_encoding_works_normally()
raise NotImplementedError('FIXME: surrogateescape handling is '
'not yet implemented properly')
# Encode char by char, building up list of byte-strings
mybytes = []
for c in self:
code = ord(c)
if 0xD800 <= code <= 0xDCFF:
mybytes.append(newbytes([code - 0xDC00]))
else:
mybytes.append(c.encode(encoding=encoding))
return newbytes(b'').join(mybytes)
return newbytes(super(newstr, self).encode(encoding, errors))
@no('newbytes', 1)
def startswith(self, prefix, *args):
if isinstance(prefix, Iterable):
for thing in prefix:
if isnewbytes(thing):
raise TypeError(self.no_convert_msg.format(type(thing)))
return super(newstr, self).startswith(prefix, *args)
@no('newbytes', 1)
def endswith(self, prefix, *args):
# Note we need the decorator above as well as the isnewbytes()
# check because prefix can be either a bytes object or e.g. a
# tuple of possible prefixes. (If it's a bytes object, each item
# in it is an int.)
if isinstance(prefix, Iterable):
for thing in prefix:
if isnewbytes(thing):
raise TypeError(self.no_convert_msg.format(type(thing)))
return super(newstr, self).endswith(prefix, *args)
@no('newbytes', 1)
def split(self, sep=None, maxsplit=-1):
# Py2 unicode.split() takes maxsplit as an optional parameter,
# not as a keyword argument as in Python 3 str.
parts = super(newstr, self).split(sep, maxsplit)
return [newstr(part) for part in parts]
@no('newbytes', 1)
def rsplit(self, sep=None, maxsplit=-1):
# Py2 unicode.rsplit() takes maxsplit as an optional parameter,
# not as a keyword argument as in Python 3 str.
parts = super(newstr, self).rsplit(sep, maxsplit)
return [newstr(part) for part in parts]
@no('newbytes', 1)
def partition(self, sep):
parts = super(newstr, self).partition(sep)
return tuple(newstr(part) for part in parts)
@no('newbytes', 1)
def rpartition(self, sep):
parts = super(newstr, self).rpartition(sep)
return tuple(newstr(part) for part in parts)
@no('newbytes', 1)
def index(self, sub, *args):
"""
Like newstr.find() but raise ValueError when the substring is not
found.
"""
pos = self.find(sub, *args)
if pos == -1:
raise ValueError('substring not found')
return pos
def splitlines(self, keepends=False):
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
# Py2 unicode.splitlines() takes keepends as an optional parameter,
# not as a keyword argument as in Python 3 str.
parts = super(newstr, self).splitlines(keepends)
return [newstr(part) for part in parts]
def __eq__(self, other):
if (isinstance(other, unicode) or
isinstance(other, bytes) and not isnewbytes(other)):
return super(newstr, self).__eq__(other)
else:
return NotImplemented
def __hash__(self):
if (isinstance(self, unicode) or
isinstance(self, bytes) and not isnewbytes(self)):
return super(newstr, self).__hash__()
else:
raise NotImplementedError()
def __ne__(self, other):
if (isinstance(other, unicode) or
isinstance(other, bytes) and not isnewbytes(other)):
return super(newstr, self).__ne__(other)
else:
return True
unorderable_err = 'unorderable types: str() and {0}'
def __lt__(self, other):
if (isinstance(other, unicode) or
isinstance(other, bytes) and not isnewbytes(other)):
return super(newstr, self).__lt__(other)
raise TypeError(self.unorderable_err.format(type(other)))
def __le__(self, other):
if (isinstance(other, unicode) or
isinstance(other, bytes) and not isnewbytes(other)):
return super(newstr, self).__le__(other)
raise TypeError(self.unorderable_err.format(type(other)))
def __gt__(self, other):
if (isinstance(other, unicode) or
isinstance(other, bytes) and not isnewbytes(other)):
return super(newstr, self).__gt__(other)
raise TypeError(self.unorderable_err.format(type(other)))
def __ge__(self, other):
if (isinstance(other, unicode) or
isinstance(other, bytes) and not isnewbytes(other)):
return super(newstr, self).__ge__(other)
raise TypeError(self.unorderable_err.format(type(other)))
def __getattribute__(self, name):
"""
        A trick to cause the ``hasattr`` builtin function to return False for
the 'decode' method on Py2.
"""
if name in ['decode', u'decode']:
raise AttributeError("decode method has been disabled in newstr")
return super(newstr, self).__getattribute__(name)
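    # Illustrative effect of the override above, assuming Python 2 with this
    # class in place of the built-in str:
    #
    #     >>> hasattr(newstr('abc'), 'decode')
    #     False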
def __native__(self):
"""
A hook for the future.utils.native() function.
"""
return unicode(self)
@staticmethod
def maketrans(x, y=None, z=None):
"""
Return a translation table usable for str.translate().
If there is only one argument, it must be a dictionary mapping Unicode
ordinals (integers) or characters to Unicode ordinals, strings or None.
        Character keys will then be converted to ordinals.
If there are two arguments, they must be strings of equal length, and
in the resulting dictionary, each character in x will be mapped to the
character at the same position in y. If there is a third argument, it
must be a string, whose characters will be mapped to None in the result.
"""
if y is None:
assert z is None
if not isinstance(x, dict):
raise TypeError('if you give only one argument to maketrans it must be a dict')
result = {}
for (key, value) in x.items():
if len(key) > 1:
raise ValueError('keys in translate table must be strings or integers')
result[ord(key)] = value
else:
            if not (isinstance(x, unicode) and isinstance(y, unicode)):
raise TypeError('x and y must be unicode strings')
if not len(x) == len(y):
raise ValueError('the first two maketrans arguments must have equal length')
result = {}
for (xi, yi) in zip(x, y):
if len(xi) > 1:
raise ValueError('keys in translate table must be strings or integers')
result[ord(xi)] = ord(yi)
if z is not None:
for char in z:
result[ord(char)] = None
return result
def translate(self, table):
"""
S.translate(table) -> str
Return a copy of the string S, where all characters have been mapped
through the given translation table, which must be a mapping of
Unicode ordinals to Unicode ordinals, strings, or None.
Unmapped characters are left untouched. Characters mapped to None
are deleted.
"""
l = []
for c in self:
if ord(c) in table:
val = table[ord(c)]
if val is None:
continue
elif isinstance(val, unicode):
l.append(val)
else:
l.append(chr(val))
else:
l.append(c)
return ''.join(l)
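    # Combined illustration of maketrans() and translate() above (the values
    # are chosen for this example; they do not come from the original file):
    #
    #     >>> table = newstr.maketrans('ab', 'xy', 'c')
    #     >>> newstr('abcabc').translate(table)
    #     'xyxy'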
def isprintable(self):
raise NotImplementedError('fixme')
def isidentifier(self):
raise NotImplementedError('fixme')
def format_map(self):
raise NotImplementedError('fixme')
__all__ = ['newstr']
| {
"content_hash": "f857d2f75fd2f5eead2391c1420f5e92",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 95,
"avg_line_length": 36.990610328638496,
"alnum_prop": 0.5940474679527858,
"repo_name": "4shadoww/usploit",
"id": "8ca191f97867d8efefab951f55eaf0caacb3d7ad",
"size": "15758",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/future/types/newstr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7981066"
}
],
"symlink_target": ""
} |
from dice.operators import Multiply
import pytest
def test_init():
operator = Multiply(5, 1)
assert operator.original_operands == (5, 1)
assert operator.operands == (5, 1)
def test_repr():
"""
Test that the string representation of the operator is what is
expected.
Given an instance of the Multiply operator on operands 4 and 2
When the method __repr__ is called
Then the result should be "Add(4, 2)"
"""
operator = Multiply(4, 2)
assert repr(operator) == "Multiply(4, 2)"
def test_str():
    """
    Test that the informal string representation of the operator is
    what is expected.
    Given an instance of the Multiply operator on operands 4 and 2
    When the method __str__ is called
    Then the result should be "4*2"
"""
operator = Multiply(4, 2)
assert str(operator) == "4*2"
def test_evaluate():
"""
Test that the evaluation of the operator is correct.
Given an instance of the Multiply operator on operands 4 and 2
When the operator is evaluated
    Then the result should be 8
"""
operator = Multiply(4, 2)
actual = operator.evaluate()
assert actual == 8
def test_evaluate_invalid():
"""
Test that the evaluation of the operator raises a ValueError
when an invalid term is supplied.
    Given an instance of the Multiply operator on operands 4 and
"invalid"
When the operator is evaluated
Then a ValueError should be raised.
"""
operator = Multiply(4, "invalid")
with pytest.raises(ValueError):
operator.evaluate()
def test_evaluate_operand_as_integral_string():
"""
Test that the evaluation of the operator is correct on all
    numeric operands, even if one of those operands is represented
    as a string.
    Given an instance of the Multiply operator on operands 4 and "3"
    When the operator is evaluated
    Then the result should be 12.
"""
operator = Multiply(4, "3")
actual = operator.evaluate()
assert actual == 12
def test_evaluate_object():
pass
def test_function():
# operator = Multiply()
# operator.function()
pass
| {
"content_hash": "caf107be745256c305701fd951e3a298",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 68,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6607642124883504,
"repo_name": "extesla/dice-python",
"id": "972a6b72fa269306d04188ecd23c1f88c2e22b6f",
"size": "3261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/dice/operators/test_multiply_operator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88431"
},
{
"name": "Shell",
"bytes": "669"
}
],
"symlink_target": ""
} |
import os
from unittest import TestCase
from requirements_detector.detect import from_requirements_txt, from_requirements_dir, \
from_requirements_blob, from_setup_py, CouldNotParseRequirements
from requirements_detector.requirement import DetectedRequirement
class DependencyDetectionTest(TestCase):
def _expected(self, *requirements):
return [DetectedRequirement.parse(req) for req in requirements]
def test_requirements_txt_parsing(self):
filepath = os.path.join(os.path.dirname(__file__), 'detection/test1/requirements.txt')
dependencies = from_requirements_txt(filepath)
expected = self._expected(
'amqp!=1.0.13',
'Django>=1.5.0',
'six<1.4,>=1.3.0',
'South==0.8.2',
)
self.assertEqual(expected, sorted(dependencies))
def test_requirements_dir_parsing(self):
filepath = os.path.join(os.path.dirname(__file__), 'detection/test2/requirements')
dependencies = from_requirements_dir(filepath)
expected = self._expected(
'amqp==1.0.13',
'anyjson==0.3.3',
'Django==1.5.2',
'South==0.8.2',
)
self.assertEqual(expected, sorted(dependencies))
def test_requirements_blob_parsing(self):
filepath = os.path.join(os.path.dirname(__file__), 'detection/test3')
dependencies = from_requirements_blob(filepath)
expected = self._expected(
'amqp==1.0.13',
'anyjson==0.3.3',
'django-gubbins==1.1.2',
)
self.assertEqual(expected, sorted(dependencies))
def test_invalid_requirements_txt(self):
filepath = os.path.join(os.path.dirname(__file__), 'detection/test5/invalid_requirements.txt')
dependencies = from_requirements_txt(filepath)
expected = self._expected('django<1.6', 'django')
self.assertEqual(expected, sorted(dependencies))
    def test_invalid_requirements_txt_does_not_raise(self):
filepath = os.path.join(os.path.dirname(__file__), 'detection/test6/requirements.txt')
from_requirements_txt(filepath)
def _test_setup_py(self, setup_py_file, *expected):
filepath = os.path.join(os.path.dirname(__file__), 'detection/test4', setup_py_file)
dependencies = from_setup_py(filepath)
expected = self._expected(*expected)
self.assertEqual(expected, sorted(dependencies))
def _test_setup_py_not_parseable(self, setup_py_file):
filepath = os.path.join(os.path.dirname(__file__), 'detection/test4', setup_py_file)
self.assertRaises(CouldNotParseRequirements, from_setup_py, filepath)
def test_simple_setup_py_parsing(self):
self._test_setup_py('simple.py', 'Django==1.5.0', 'django-gubbins==1.1.2')
def test_setup_py_reqs_defined_in_file_parsing(self):
self._test_setup_py('in_file.py', 'Django==1.5.0', 'django-gubbins==1.1.2')
def test_setup_py_tuple(self):
self._test_setup_py('tuple.py', 'Django==1.5.0', 'django-gubbins==1.1.2')
def test_subscript_assign(self):
self._test_setup_py('subscript_assign.py', 'Django==1.5.0', 'django-gubbins==1.1.2')
def test_utf8_setup_py(self):
self._test_setup_py('utf8.py', 'Django==1.5.0', 'django-gubbins==1.1.2')
def test_requires_setup_py(self):
self._test_setup_py('uses_requires.py', 'Django==1.5.0', 'django-gubbins==1.1.2')
def test_requires_and_install_requires_setup_py(self):
self._test_setup_py('uses_requires_and_install_requires.py', 'Django==1.5.0', 'django-gubbins==1.1.2')
def test_callable_install_requires(self):
self._test_setup_py_not_parseable('callable.py') | {
"content_hash": "295b314c83ec36cfa3f1b07f0a81e58b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 110,
"avg_line_length": 39.74193548387097,
"alnum_prop": 0.6423160173160173,
"repo_name": "pombredanne/requirements-detector",
"id": "58d088c941e632b22d8a4c20eb9001ef8fdc42ed",
"size": "3696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_detection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26655"
}
],
"symlink_target": ""
} |
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
import copy
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowdsource these experiments
def crowdsource(i):
"""
See in module "experiment.tune.compiler.flags"
"""
i['module_uoa']='experiment.tune.compiler.flags'
i['module_cfg']=copy.deepcopy(cfg)
i['module_work']=copy.deepcopy(work)
return ck.access(i)
##############################################################################
# view solutions in html
def html_viewer(i):
"""
See in module "experiment.tune.compiler.flags"
"""
i['module_uoa']='experiment.tune.compiler.flags'
i['module_cfg']=copy.deepcopy(cfg)
i['module_work']=copy.deepcopy(work)
return ck.access(i)
##############################################################################
# replay optimization
def replay(i):
"""
See in module "program.optimization"
"""
i['module_uoa']=cfg['module_deps']['program.optimization']
i['module_ref_uoa']=work['self_module_uid']
i['module_cfg']=copy.deepcopy(cfg)
i['module_work']=copy.deepcopy(work)
return ck.access(i)
##############################################################################
# prune compiler flags to find minimal set of choices
def prune(i):
"""
See in module "program.optimization"
"""
i['module_uoa']=cfg['module_deps']['program.optimization']
i['module_ref_uoa']=work['self_module_uid']
i['module_cfg']=copy.deepcopy(cfg)
i['module_work']=copy.deepcopy(work)
return ck.access(i)
| {
"content_hash": "1b6f7b6dfae7357e27c1296b64af7052",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 27.194805194805195,
"alnum_prop": 0.4918815663801337,
"repo_name": "ctuning/ck",
"id": "716b45152795966b66a3b76da657008c322ab841",
"size": "2386",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ck/repo/module/experiment.tune.compiler.flags.llvm.e.x/module.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "557"
},
{
"name": "HTML",
"bytes": "319"
},
{
"name": "PHP",
"bytes": "12320"
},
{
"name": "Python",
"bytes": "530259"
},
{
"name": "Shell",
"bytes": "2931"
}
],
"symlink_target": ""
} |
import unittest
import subprocess32 as subprocess
import extraction.utils as utils
import os
class TestUtils(unittest.TestCase):
def setUp(self):
pass
def test_external_process_works(self):
(status, out, err) = utils.external_process(['grep', '3'], input_data='Line 1\nLine 2\nLine 3\n')
self.assertEqual(status, 0)
self.assertEqual(out, 'Line 3\n')
self.assertEqual(err, '')
def test_external_process_returns_status_code(self):
(status, out, err) = utils.external_process(['true'])
self.assertEqual(status, 0)
(status, out, err) = utils.external_process(['false'])
self.assertEqual(status, 1)
def test_external_process_timesout(self):
self.assertRaises(subprocess.TimeoutExpired, utils.external_process, ['sleep', '3'], timeout=2)
# This shouldn't timeout and thus shouldn't raise an error
utils.external_process(['sleep', '3'])
def test_temp_file(self):
data = 'test'
file_path = utils.temp_file(data, suffix='.food')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(os.path.splitext(file_path)[1], '.food')
self.assertEqual(open(file_path, 'r').read(), 'test')
os.remove(file_path)
self.assertFalse(os.path.isfile(file_path))
| {
"content_hash": "9c3ce8bc3af65d491a017c8d0c88e3ee",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 103,
"avg_line_length": 34.513513513513516,
"alnum_prop": 0.6656225528582616,
"repo_name": "Tiger66639/extractor-framework",
"id": "5c047145584f07a876f9ba4081abcd38c7395381",
"size": "1277",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "extraction/test/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41131"
}
],
"symlink_target": ""
} |
import uuid
import ldappool
from oslo_config import cfg
from keystone.common.ldap import core as ldap_core
from keystone.identity.backends import ldap
from keystone.tests import unit as tests
from keystone.tests.unit import fakeldap
from keystone.tests.unit import test_backend_ldap_pool
from keystone.tests.unit import test_ldap_livetest
CONF = cfg.CONF
class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
test_ldap_livetest.LiveLDAPIdentity):
"""Executes existing LDAP live test with pooled LDAP handler.
Also executes common pool specific tests via Mixin class.
"""
def setUp(self):
super(LiveLDAPPoolIdentity, self).setUp()
self.addCleanup(self.cleanup_pools)
# storing to local variable to avoid long references
self.conn_pools = ldap_core.PooledLDAPHandler.connection_pools
def config_files(self):
config_files = super(LiveLDAPPoolIdentity, self).config_files()
config_files.append(tests.dirs.
tests_conf('backend_pool_liveldap.conf'))
return config_files
def test_assert_connector_used_not_fake_ldap_pool(self):
handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True)
self.assertNotEqual(type(handler.Connector),
type(fakeldap.FakeLdapPool))
self.assertEqual(type(ldappool.StateConnector),
type(handler.Connector))
def test_async_search_and_result3(self):
self.config_fixture.config(group='ldap', page_size=1)
self.test_user_enable_attribute_mask()
def test_pool_size_expands_correctly(self):
who = CONF.ldap.user
cred = CONF.ldap.password
# get related connection manager instance
ldappool_cm = self.conn_pools[CONF.ldap.url]
def _get_conn():
return ldappool_cm.connection(who, cred)
with _get_conn() as c1: # 1
self.assertEqual(1, len(ldappool_cm))
self.assertTrue(c1.connected, True)
self.assertTrue(c1.active, True)
with _get_conn() as c2: # conn2
self.assertEqual(2, len(ldappool_cm))
self.assertTrue(c2.connected)
self.assertTrue(c2.active)
self.assertEqual(2, len(ldappool_cm))
# c2 went out of context, its connected but not active
self.assertTrue(c2.connected)
self.assertFalse(c2.active)
with _get_conn() as c3: # conn3
self.assertEqual(2, len(ldappool_cm))
self.assertTrue(c3.connected)
self.assertTrue(c3.active)
self.assertTrue(c3 is c2) # same connection is reused
self.assertTrue(c2.active)
with _get_conn() as c4: # conn4
self.assertEqual(3, len(ldappool_cm))
self.assertTrue(c4.connected)
self.assertTrue(c4.active)
def test_password_change_with_auth_pool_disabled(self):
self.config_fixture.config(group='ldap', use_auth_pool=False)
old_password = self.user_sna['password']
self.test_password_change_with_pool()
self.assertRaises(AssertionError,
self.identity_api.authenticate,
context={},
user_id=self.user_sna['id'],
password=old_password)
def _create_user_and_authenticate(self, password):
user_dict = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'password': password}
user = self.identity_api.create_user(user_dict)
self.identity_api.authenticate(
context={},
user_id=user['id'],
password=password)
return self.identity_api.get_user(user['id'])
def _get_auth_conn_pool_cm(self):
pool_url = ldap_core.PooledLDAPHandler.auth_pool_prefix + CONF.ldap.url
return self.conn_pools[pool_url]
def _do_password_change_for_one_user(self, password, new_password):
self.config_fixture.config(group='ldap', use_auth_pool=True)
self.cleanup_pools()
self.load_backends()
user1 = self._create_user_and_authenticate(password)
auth_cm = self._get_auth_conn_pool_cm()
self.assertEqual(1, len(auth_cm))
user2 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user3 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user4 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
user5 = self._create_user_and_authenticate(password)
self.assertEqual(1, len(auth_cm))
# connection pool size remains 1 even for different user ldap bind
# as there is only one active connection at a time
user_api = ldap.UserApi(CONF)
u1_dn = user_api._id_to_dn_string(user1['id'])
u2_dn = user_api._id_to_dn_string(user2['id'])
u3_dn = user_api._id_to_dn_string(user3['id'])
u4_dn = user_api._id_to_dn_string(user4['id'])
u5_dn = user_api._id_to_dn_string(user5['id'])
# now create multiple active connections for end user auth case which
# will force to keep them in pool. After that, modify one of user
# password. Need to make sure that user connection is in middle
# of pool list.
auth_cm = self._get_auth_conn_pool_cm()
with auth_cm.connection(u1_dn, password) as _:
with auth_cm.connection(u2_dn, password) as _:
with auth_cm.connection(u3_dn, password) as _:
with auth_cm.connection(u4_dn, password) as _:
with auth_cm.connection(u5_dn, password) as _:
self.assertEqual(5, len(auth_cm))
_.unbind_s()
user3['password'] = new_password
self.identity_api.update_user(user3['id'], user3)
return user3
def test_password_change_with_auth_pool_enabled_long_lifetime(self):
self.config_fixture.config(group='ldap',
auth_pool_connection_lifetime=600)
old_password = 'my_password'
new_password = 'new_password'
user = self._do_password_change_for_one_user(old_password,
new_password)
user.pop('password')
# with long connection lifetime auth_pool can bind to old password
# successfully which is not desired if password change is frequent
# use case in a deployment.
# This can happen in multiple concurrent connections case only.
user_ref = self.identity_api.authenticate(
context={}, user_id=user['id'], password=old_password)
self.assertDictEqual(user_ref, user)
def test_password_change_with_auth_pool_enabled_no_lifetime(self):
self.config_fixture.config(group='ldap',
auth_pool_connection_lifetime=0)
old_password = 'my_password'
new_password = 'new_password'
user = self._do_password_change_for_one_user(old_password,
new_password)
# now as connection lifetime is zero, so authentication
# with old password will always fail.
self.assertRaises(AssertionError,
self.identity_api.authenticate,
context={}, user_id=user['id'],
password=old_password)
| {
"content_hash": "e2f3526e460e78586aa3202a183b0a16",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 79,
"avg_line_length": 40.95744680851064,
"alnum_prop": 0.5990909090909091,
"repo_name": "jamielennox/keystone",
"id": "a8776e5b05cc19c69d14f1bb07d1580539f89e9d",
"size": "8286",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/test_ldap_pool_livetest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3897073"
}
],
"symlink_target": ""
} |
"""
Coderbits2PDF - Convert your coderbits profile to pdf.
Added option of adding your github repos.
Usage -
python coderbits2pdf.py --make username # create resume
python coderbits2pdf.py --add username # add user
python coderbits2pdf.py --del username # delete user
python coderbits2pdf.py --add-repo username # add more repositories
python coderbits2pdf.py --del-repo username # delete repositories
"""
__author__= 'elssar <[email protected]>'
__license__= 'MIT'
from requests import post, get
from json import loads
from weasyprint import HTML, CSS
from jinja2 import Template
from os import path
from sys import argv
from yaml import dump, safe_load as load
from logging import getLogger
logger= getLogger('weasyprint')
logger.handlers= [] # Shut up weesyprints noisy logger
base= path.dirname(path.abspath(__file__))
coderbits= 'https://coderbits.com/{0}.json'
github= 'https://api.github.com/users/{0}/repos'
charts= 'http://{0}.chart.apis.google.com/chart?'
header= {'user-agent': 'coderbits2pdf'}
def get_coderbits(username):
profile= get(coderbits.format(username))
if profile.status_code!=200 or profile.headers['content-length']=='2':
return None
print 'fetched coderbits profile'
return loads(profile.content)
def get_repos(username, selected_repos=None):
req= get(github.format(username), headers= header)
repos= loads(req.content)
if selected_repos is None:
print 'fetched github repositories'
return repos
contents= []
for repo in repos:
if repo['name'] in selected_repos:
contents.append(repo)
selected_repos.remove(repo['name'])
if selected_repos:
for repo in selected_repos:
print 'Warning! Repository {0} not found in github.'.format(repo)
print 'fetched github repositories'
return contents
def get_chart_url(data, name, labels):
payload= {'cht': 'p3',
'chs': '300x150',
'chco': '2F69BF|A2BF2F|BF5A2F|BFA22F|772FBF',
'chtt': name,
'chd': 't:'+','.join(data),
'chdl': '|'.join(labels)
}
query_string= ''
for key in payload:
query_string+= '{0}={1}&'.format(key, payload[key])
return query_string[:-1]
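# Illustrative sketch of the query string built above (hypothetical input values,
# not taken from a real profile; dict key order in Python 2 is arbitrary, so the
# parameter order may differ):
#   get_chart_url(['60.0', '40.0'], 'Top Languages', ['Python 60%', 'C 40%'])
#   -> 'cht=p3&chs=300x150&chtt=Top Languages&chd=t:60.0,40.0&chdl=Python 60%|C 40%&chco=...'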
def save_pdf(html, output, css):
HTML(string=html).write_pdf(output, stylesheets=[CSS(css), CSS(string='@page {width: 960px}')])
print 'pdf created'
def create_resume(username):
try:
with open(path.join(base, 'config.yaml'), 'r') as con_file:
config= load(con_file)
except IOError:
print 'Error opening config.yaml'
return
if username not in config:
print 'Error! User does not exist'
return
coderbits= get_coderbits(username)
img_urls= []
i= 0
for entry in coderbits:
if 'top_' in entry:
data= []
labels= []
for value in coderbits[entry]:
data.append(float(value['count']))
labels.append(value['name'])
total= sum(data)
data= map(lambda x: str((x/total)*100), data)
labels= ['{0} {1}%'.format(x, y[:y.find('.')+3]) for x, y in zip(labels, data)]
normalized_labels= [label+' '*(23-len(label)) for label in labels] #to keep charts the same size
heading= entry.replace('_', ' ')
heading= heading.title()
query_string= get_chart_url(data, heading, normalized_labels)
img_urls.append(charts.format(i)+query_string)
i+= 1
args= []
args.append(config[username]['github'])
args.append(config[username]['repositories'] if len(config[username]['repositories'])>0 else None)
github= get_repos(*args)
try:
with open(path.join(base, 'layout.html'), 'r') as f:
layout= f.read()
except IOError:
print 'Template not found!'
return
template= Template(layout)
html= template.render(username=username, coderbits=coderbits, github=github, img_urls=img_urls, email=config[username]['email'])
print 'creating pdf'
save_pdf(html, path.join(base, 'resume.pdf'), path.join(base, 'resume.css'))
def add_user(username):
try:
with open(path.join(base, 'config.yaml'), 'r') as con_file:
config= load(con_file.read())
except IOError:
config= {}
email= raw_input("Enter email: ")
github= raw_input("Enter github username: ")
config[username]= {'email': email, 'github': github, 'repositories': []}
print 'Do you want to specify which github repos to get?'
print 'Enter the repository name, one per line. Enter blank line when done.'
print 'Or leave blank if you want all github repos to be included.'
while True:
repo= raw_input()
if repo=='':
break
        config[username]['repositories'].append(repo)
with open(path.join(base, 'config.yaml'), 'w') as con_file:
dump(config, con_file)
def del_user(username):
try:
with open(path.join(base, 'config.yaml'), 'r') as con_file:
config= load(con_file.read())
except IOError:
print 'No config file.'
return
if username not in config:
print 'User {0} does not exist.'.format(username)
return
del config[username]
    # persist the updated config
    with open(path.join(base, 'config.yaml'), 'w') as con_file:
        dump(config, con_file)
    print 'User {0} deleted.'.format(username)
def del_repos(username):
try:
with open(path.join(base, 'config.yaml'), 'r') as con_file:
config= load(con_file)
except IOError:
print 'Config file does not exist.'
return
if username not in config:
print 'User does not exist.'
return
print 'Which repositories do you want to remove from the list?'
print 'Enter the names one per line, and leave line blank when done.'
while True:
repo= raw_input()
if repo=='':
break
if repo in config[username]['repositories']:
config[username]['repositories'].remove(repo)
print 'Repository {0} deleted.'.format(repo)
else:
            print 'Error! Repository not in list.'
    # persist the updated config
    with open(path.join(base, 'config.yaml'), 'w') as con_file:
        dump(config, con_file)
def add_repos(username):
try:
with open(path.join(base, 'config.yaml'), 'r') as con_file:
config= load(con_file)
except IOError:
print 'Config file does not exist.'
return
if username not in config:
print 'User does not exist.'
return
print 'Which repositories do you want to add to the list?'
print 'Enter the names one per line, and leave line blank when done.'
while True:
repo= raw_input()
if repo=='':
break
if repo not in config[username]['repositories']:
config[username]['repositories'].append(repo)
print 'Repository {0} added'.format(repo)
else:
            print 'Error! Repository already in list.'
    # persist the updated config
    with open(path.join(base, 'config.yaml'), 'w') as con_file:
        dump(config, con_file)
def main():
args= {'--add': add_user,
'--del': del_user,
'--add-repo': add_repos,
'--del-repo': del_repos,
'--make': create_resume
}
if len(argv)!=3 or argv[1] not in args:
print 'Invalid arguments!'
print __doc__
return
args[argv[1]](argv[2])
if __name__=='__main__':
main()
| {
"content_hash": "82dee0f2885a277718e14d0ef2b2dd3e",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 132,
"avg_line_length": 34.52857142857143,
"alnum_prop": 0.6068128533995311,
"repo_name": "elssar/coderbits2pdf",
"id": "5fe0e1667aa7126dce770c7693586e1a6e3e88c8",
"size": "7274",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "coderbits2pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7274"
}
],
"symlink_target": ""
} |
import glob
import time
import os
import numpy as np
import hickle as hkl
from proc_load import crop_and_mirror
def proc_configs(config):
if not os.path.exists(config['weights_dir']):
os.makedirs(config['weights_dir'])
print "Creat folder: " + config['weights_dir']
return config
def unpack_configs(config, ext_data='.hkl', ext_label='.npy'):
flag_para_load = config['para_load']
# Load Training/Validation Filenames and Labels
train_folder = config['train_folder']
val_folder = config['val_folder']
label_folder = config['label_folder']
train_filenames = sorted(glob.glob(train_folder + '/*' + ext_data))
val_filenames = sorted(glob.glob(val_folder + '/*' + ext_data))
train_labels = np.load(label_folder + 'train_labels' + ext_label)
val_labels = np.load(label_folder + 'val_labels' + ext_label)
img_mean = np.load(config['mean_file'])
img_mean = img_mean[:, :, :, np.newaxis].astype('float32')
return (flag_para_load,
train_filenames, val_filenames, train_labels, val_labels, img_mean)
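# Illustrative sketch of the config keys that proc_configs() and unpack_configs()
# rely on (the paths below are assumptions, not values from a real config file):
#
# config = {
#     'weights_dir':  '/data/weights/',      # created by proc_configs() if missing
#     'para_load':    True,                  # use the parallel data-loading process
#     'train_folder': '/data/train_hkl/',    # *.hkl batches of training images
#     'val_folder':   '/data/val_hkl/',      # *.hkl batches of validation images
#     'label_folder': '/data/labels/',       # holds train_labels.npy / val_labels.npy
#     'mean_file':    '/data/img_mean.npy',  # per-pixel image mean, loaded above
# }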
def adjust_learning_rate(config, epoch, step_idx, val_record, learning_rate):
# Adapt Learning Rate
if config['lr_policy'] == 'step':
if epoch == config['lr_step'][step_idx]:
learning_rate.set_value(
np.float32(learning_rate.get_value() / 10))
step_idx += 1
if step_idx >= len(config['lr_step']):
step_idx = 0 # prevent index out of range error
print 'Learning rate changed to:', learning_rate.get_value()
if config['lr_policy'] == 'auto':
if (epoch > 5) and (val_record[-3] - val_record[-1] <
config['lr_adapt_threshold']):
learning_rate.set_value(
np.float32(learning_rate.get_value() / 10))
            print 'Learning rate changed to:', learning_rate.get_value()
return step_idx
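# Illustrative walk-through of the 'step' policy above (assumed values): with
# config['lr_policy'] == 'step', config['lr_step'] == [20, 40] and an initial
# learning rate of 0.01, the rate is divided by 10 at epoch 20 (-> 0.001) and
# again at epoch 40 (-> 0.0001); step_idx advances past each boundary and wraps
# back to 0 once the list is exhausted.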
def get_val_error_loss(rand_arr, shared_x, shared_y,
val_filenames, val_labels,
flag_para_load, img_mean,
batch_size, validate_model,
send_queue=None, recv_queue=None,
flag_top_5=False):
validation_losses = []
validation_errors = []
if flag_top_5:
validation_errors_top_5 = []
n_val_batches = len(val_filenames)
if flag_para_load:
# send the initial message to load data, before each epoch
send_queue.put(str(val_filenames[0]))
send_queue.put(np.float32([0.5, 0.5, 0]))
send_queue.put('calc_finished')
for val_index in range(n_val_batches):
if flag_para_load:
# load by self or the other process
# wait for the copying to finish
msg = recv_queue.get()
assert msg == 'copy_finished'
if val_index + 1 < n_val_batches:
name_to_read = str(val_filenames[val_index + 1])
send_queue.put(name_to_read)
send_queue.put(np.float32([0.5, 0.5, 0]))
else:
val_img = hkl.load(str(val_filenames[val_index])) - img_mean
param_rand = [0.5,0.5,0]
val_img = crop_and_mirror(val_img, param_rand, flag_batch=True)
shared_x.set_value(val_img)
shared_y.set_value(val_labels[val_index * batch_size:
(val_index + 1) * batch_size])
if flag_top_5:
loss, error, error_top_5 = validate_model()
else:
loss, error = validate_model()
if flag_para_load and (val_index + 1 < n_val_batches):
send_queue.put('calc_finished')
# print loss, error
validation_losses.append(loss)
validation_errors.append(error)
if flag_top_5:
validation_errors_top_5.append(error_top_5)
this_validation_loss = np.mean(validation_losses)
this_validation_error = np.mean(validation_errors)
if flag_top_5:
this_validation_error_top_5 = np.mean(validation_errors_top_5)
return this_validation_error, this_validation_error_top_5, this_validation_loss
else:
return this_validation_error, this_validation_loss
def get_rand3d():
tmp_rand = np.float32(np.random.rand(3))
tmp_rand[2] = round(tmp_rand[2])
return tmp_rand
def train_model_wrap(train_model, shared_x, shared_y, rand_arr, img_mean,
count, minibatch_index, minibatch_range, batch_size,
train_filenames, train_labels,
flag_para_load,
flag_batch,
send_queue=None, recv_queue=None):
if flag_para_load:
# load by self or the other process
# wait for the copying to finish
msg = recv_queue.get()
assert msg == 'copy_finished'
if count < len(minibatch_range):
ind_to_read = minibatch_range[count]
name_to_read = str(train_filenames[ind_to_read])
send_queue.put(name_to_read)
send_queue.put(get_rand3d())
else:
batch_img = hkl.load(str(train_filenames[minibatch_index])) - img_mean
param_rand = get_rand3d()
batch_img = crop_and_mirror(batch_img, param_rand, flag_batch=flag_batch)
shared_x.set_value(batch_img)
batch_label = train_labels[minibatch_index * batch_size:
(minibatch_index + 1) * batch_size]
shared_y.set_value(batch_label)
cost_ij = train_model()
return cost_ij | {
"content_hash": "5ee8a56b39efe7c78314b65f37976830",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 90,
"avg_line_length": 34.03030303030303,
"alnum_prop": 0.5732858414959929,
"repo_name": "myt00seven/svrg",
"id": "0f314f5be169c293ad1e6b34edd5d24e8a7037d8",
"size": "5615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "para_gpu/train_funcs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "69093"
},
{
"name": "Python",
"bytes": "1064699"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
import os
import sys
import re
__all__ = ['WrapperBase','WrapperCPPMacro','WrapperCCode']
class WrapperBase:
def __init__(self):
self.srcdir = os.path.join(os.path.dirname(__file__),'src')
return
def warning(self, message):
print >> sys.stderr, message
def info(self, message):
print >> sys.stderr, message
def get_resource_content(self, name, ext):
if name.startswith('pyobj_to_'):
try:
return self.generate_pyobj_to_ctype_c(name[9:])
except NotImplementedError:
pass
elif name.startswith('pyobj_from_'):
try:
return self.generate_pyobj_from_ctype_c(name[11:])
except NotImplementedError:
pass
generator_mth_name = 'generate_' + name + ext.replace('.','_')
generator_mth = getattr(self, generator_mth_name, lambda : None)
body = generator_mth()
if body is not None:
return body
fn = os.path.join(self.srcdir,name+ext)
if os.path.isfile(fn):
f = open(fn,'r')
body = f.read()
f.close()
return body
self.warning('No such file: %r' % (fn))
return
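    # Illustrative example of the lookup order implemented above (hypothetical
    # resource name): get_resource_content('typedefs', '.cpp') first tries a
    # method self.generate_typedefs_cpp(); if that returns None, it falls back
    # to reading the file <srcdir>/typedefs.cpp.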
def get_dependencies(self, code):
l = []
for uses in re.findall(r'(?<=depends:)([,\w\s.]+)', code, re.I):
for use in uses.split(','):
use = use.strip()
if not use: continue
l.append(use)
return l
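    # Illustrative example of the scanning above (hypothetical body): a resource
    # containing the line "/* depends: pyobj_to_npy_int32.c, CHECKSCALAR.cpp */"
    # yields ['pyobj_to_npy_int32.c', 'CHECKSCALAR.cpp'], which
    # resolve_dependencies() below dispatches to WrapperCCode and WrapperCPPMacro.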
def resolve_dependencies(self, parent, body):
assert isinstance(body, str),type(body)
for d in self.get_dependencies(body):
if d.endswith('.cpp'):
WrapperCPPMacro(parent, d[:-4])
elif d.endswith('.c'):
WrapperCCode(parent, d[:-2])
else:
self.warning('Unknown dependence: %r.' % (d))
return
def apply_attributes(self, template):
"""
Apply instance attributes to template string.
Replace rules for attributes:
_list - will be joined with newline
_clist - _list will be joined with comma
_elist - _list will be joined
..+.. - attributes will be added
[..] - will be evaluated
"""
replace_names = set(re.findall(r'[ ]*%\(.*?\)s', template))
d = {}
for name in replace_names:
tab = ' ' * (len(name)-len(name.lstrip()))
name = name.lstrip()[2:-2]
names = name.split('+')
joinsymbol = '\n'
attrs = None
for n in names:
realname = n.strip()
if n.endswith('_clist'):
joinsymbol = ', '
realname = realname[:-6] + '_list'
elif n.endswith('_elist'):
joinsymbol = ''
realname = realname[:-6] + '_list'
realname_lower = realname.lower()
parent = getattr(self,'parent',None)
if hasattr(self, realname):
attr = getattr(self, realname)
elif hasattr(self, realname_lower):
attr = getattr(self, realname_lower).upper()
elif hasattr(parent, realname):
attr = getattr(parent, realname)
elif hasattr(parent, realname_lower):
attr = getattr(parent, realname_lower).upper()
elif realname.startswith('['):
attr = eval(realname)
else:
self.warning('Undefined %r attribute: %r' % (self.__class__.__name__, realname))
continue
if attrs is None:
attrs = attr
else:
attrs += attr
if isinstance(attrs, list):
attrs = joinsymbol.join(attrs)
d[name] = str(attrs).replace('\n','\n'+tab)
return template % d
def apply_templates(self, child):
for n in self.list_names:
l = getattr(self,n + '_list')
c = child.apply_attributes(getattr(child, n+'_template',''))
if c:
l.append(c)
return
class WrapperCPPMacro(WrapperBase):
"""
CPP macros
"""
def __init__(self, parent, name):
WrapperBase.__init__(self)
defined = parent.defined_cpp_code
if name in defined:
return
defined.append(name)
body = self.get_resource_content(name,'.cpp')
if body is None:
self.warning('Failed to get CPP macro %r content.' % (name))
return
self.resolve_dependencies(parent, body)
parent.header_list.append(body)
return
class WrapperCCode(WrapperBase):
"""
C code
"""
def __init__(self, parent, name):
WrapperBase.__init__(self)
defined = parent.defined_c_code
if name in defined:
return
defined.append(name)
body = self.get_resource_content(name,'.c')
if body is None:
self.warning('Failed to get C code %r content.' % (name))
return
if isinstance(body, dict):
for k,v in body.items():
self.resolve_dependencies(parent, v)
for k,v in body.items():
l = getattr(parent,k+'_list')
l.append(v)
else:
self.resolve_dependencies(parent, body)
parent.c_code_list.append(body)
return
def generate_pyobj_to_ctype_c(self, ctype):
from generate_pyobj_tofrom_funcs import pyobj_to_npy_scalar, pyobj_to_f2py_string
if ctype.startswith('npy_'):
return pyobj_to_npy_scalar(ctype)
elif ctype.startswith('f2py_string'):
return pyobj_to_f2py_string(ctype)
raise NotImplementedError,`ctype`
def generate_pyobj_from_ctype_c(self, ctype):
from generate_pyobj_tofrom_funcs import pyobj_from_npy_scalar
if ctype.startswith('npy_'):
return pyobj_from_npy_scalar(ctype)
raise NotImplementedError,`ctype`
| {
"content_hash": "f38493972fa0f60c7e4e7b27fc7502ac",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 100,
"avg_line_length": 34.70224719101124,
"alnum_prop": 0.5152986886838271,
"repo_name": "santisiri/popego",
"id": "3164e817f055fe42de516d23ca4f7386d0958117",
"size": "6177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/f2py/lib/wrapper_base.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userena', '0002_auto_20170214_1529'),
]
operations = [
migrations.AddField(
model_name='userenasignup',
name='invitation_key',
field=models.CharField(blank=True, max_length=40, verbose_name='invitation key'),
),
migrations.AddField(
model_name='userenasignup',
name='invitation_status',
field=models.CharField(choices=[('INV', 'Invitation Mail was sent'), ('PSWRST', 'Password was reset by user'), ('PRFEDIT', 'Profile was edited by user')], default='INV', max_length=7),
),
]
| {
"content_hash": "06b5b79885dafcd16668724edb82ae32",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 196,
"avg_line_length": 32.56521739130435,
"alnum_prop": 0.6088117489986649,
"repo_name": "RDXT/django-userena",
"id": "85f15ec497bd1a2e8e57edc86b20edb45a185937",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userena/migrations/0003_auto_20170217_1640.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "23087"
},
{
"name": "Python",
"bytes": "255073"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
} |
from pkgutil import iter_modules
import os
import os.path
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else []
del globals()[name]
for k in lst:
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
_SKIP = []
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.startswith('_'):
continue
if module_name not in _SKIP:
global_import(module_name)
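# Illustrative example (hypothetical module name): if this directory contained
# trainer.py with __all__ = ['Trainer'], the loop above would call
# global_import('trainer'), re-export Trainer at package level and append
# 'Trainer' to this package's __all__.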
| {
"content_hash": "09814754f292a851a977da70cb10a38f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 58,
"avg_line_length": 24.25925925925926,
"alnum_prop": 0.566412213740458,
"repo_name": "haamoon/tensorpack",
"id": "b76820cf99b2d2023add5065cdce6084c129588a",
"size": "743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorpack/train/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "4039"
},
{
"name": "Makefile",
"bytes": "1566"
},
{
"name": "Python",
"bytes": "574087"
},
{
"name": "Shell",
"bytes": "2281"
}
],
"symlink_target": ""
} |
"""Tests for workers __init__."""
from __future__ import unicode_literals
import json
import os
import tempfile
import unittest
import mock
from turbinia import evidence
from turbinia import TurbiniaException
from turbinia.workers import TurbiniaTask
from turbinia.workers import TurbiniaTaskResult
from turbinia.workers.plaso import PlasoParserTask
from prometheus_client import REGISTRY
class TestTurbiniaTaskBase(unittest.TestCase):
"""Test TurbiniaTask class.
Attributes:
class_task(TurbiniaTask): The class the test should instantiated
remove_file(list(str)): Files that will be removed after the test run
remove_dirs(list(str)): Dirs that will be removed after the test run
base_output_dir(str): The base output directory used by the Task
task(TurbiniaTask): The instantiated Task under test
test_stdout_path(str): A path we can use to send temporary stdout too
evidence(Evidence): The test evidence object used by the Task
result(TurbiniaResult): The result object used by the Task
"""
def setUp(self, task_class=TurbiniaTask, evidence_class=evidence.RawDisk):
self.task_class = task_class
self.evidence_class = evidence_class
self.remove_files = []
self.remove_dirs = []
# Set up Tasks under test
self.base_output_dir = tempfile.mkdtemp()
self.plaso_task = PlasoParserTask(base_output_dir=self.base_output_dir)
self.plaso_task.output_manager = mock.MagicMock()
self.plaso_task.output_manager.get_local_output_dirs.return_value = (
None, None)
self.task = self.task_class(base_output_dir=self.base_output_dir)
self.task.job_name = 'PlasoJob'
self.task.output_manager = mock.MagicMock()
self.task.output_manager.get_local_output_dirs.return_value = (None, None)
self.task.get_metrics = mock.MagicMock()
# Set up RawDisk Evidence
test_disk_path = tempfile.mkstemp(dir=self.base_output_dir)[1]
self.remove_files.append(test_disk_path)
self.test_stdout_path = tempfile.mkstemp(dir=self.base_output_dir)[1]
self.remove_files.append(self.test_stdout_path)
self.evidence = evidence.RawDisk(source_path=test_disk_path)
self.evidence.config['abort'] = False
self.evidence.config['globals'] = {}
self.evidence.preprocess = mock.MagicMock()
# Set up TurbiniaTaskResult
self.result = TurbiniaTaskResult(base_output_dir=self.base_output_dir)
self.result.output_dir = self.base_output_dir
def tearDown(self):
for remove_file in self.remove_files:
if os.path.exists(remove_file):
os.remove(remove_file)
for directory in self.remove_dirs:
if os.path.exists(directory):
os.rmdir(directory)
os.rmdir(self.base_output_dir)
def setResults(
self, setup=None, run=None, validate_result=None, mock_run=True):
"""Set up mock returns in TurbiniaTaskResult object.
Args:
setup: What value to return from setup()
run: What value to return from run()
validate_result: What value to return from validate_result()
mock_run(bool): Whether to mock out the run method
"""
if setup is None:
setup = self.result
if run is None:
run = self.result
if validate_result is None:
validate_result = self.result
self.result.input_evidence = evidence.RawDisk()
self.result.status = 'TestStatus'
self.result.update_task_status = mock.MagicMock()
self.result.close = mock.MagicMock()
self.task.setup = mock.MagicMock(return_value=setup)
self.result.worker_name = 'worker1'
self.result.state_manager = None
if mock_run:
self.task.run = mock.MagicMock(return_value=run)
self.task.validate_result = mock.MagicMock(return_value=validate_result)
def unregisterMetrics(self):
"""Unset all the metrics to avoid duplicated timeseries error."""
for collector, names in tuple(REGISTRY._collector_to_names.items()):
REGISTRY.unregister(collector)
class TestTurbiniaTask(TestTurbiniaTaskBase):
"""Test TurbiniaTask class."""
def testTurbiniaTaskCloseTruncate(self):
"""Tests that the close method will truncate large report output."""
evidence_ = evidence.ReportText(source_path='/no/path')
max_size = 2**20
evidence_.text_data = 'A' * max_size
self.result.add_evidence(evidence_, self.task._evidence_config)
self.result.close(self.task, success=True)
self.remove_files.append(
os.path.join(self.task.base_output_dir, 'worker-log.txt'))
self.assertIn('truncating', evidence_.text_data[-100:])
self.assertTrue(len(evidence_.text_data) <= (max_size * 0.8))
def testTurbiniaTaskSerialize(self):
"""Test that we can properly serialize/deserialize tasks."""
out_dict = self.plaso_task.serialize()
out_obj = TurbiniaTask.deserialize(out_dict)
self.assertIsInstance(out_obj, PlasoParserTask)
# Nuke output_manager so we don't deal with class equality
self.plaso_task.output_manager = None
out_obj.output_manager = None
self.assertEqual(out_obj.__dict__, self.plaso_task.__dict__)
def testTurbiniaTaskRunWrapper(self):
"""Test that the run wrapper executes task run."""
self.unregisterMetrics()
self.setResults()
self.result.closed = True
new_result = self.task.run_wrapper(self.evidence.__dict__)
new_result = TurbiniaTaskResult.deserialize(new_result)
self.assertEqual(new_result.status, 'TestStatus')
self.result.close.assert_not_called()
def testTurbiniaTaskRunWrapperAutoClose(self):
"""Test that the run wrapper closes the task."""
self.unregisterMetrics()
self.setResults()
new_result = self.task.run_wrapper(self.evidence.__dict__)
new_result = TurbiniaTaskResult.deserialize(new_result)
self.assertEqual(new_result.status, 'TestStatus')
self.result.close.assert_called()
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaTaskRunWrapperBadResult(self, _):
"""Test that the run wrapper recovers from run returning bad result."""
self.unregisterMetrics()
bad_result = 'Not a TurbiniaTaskResult'
checked_result = TurbiniaTaskResult(base_output_dir=self.base_output_dir)
checked_result.setup(self.task)
checked_result.status = 'CheckedResult'
self.setResults(run=bad_result, validate_result=checked_result)
new_result = self.task.run_wrapper(self.evidence.__dict__)
new_result = TurbiniaTaskResult.deserialize(new_result)
self.task.validate_result.assert_any_call(bad_result)
self.assertEqual(type(new_result), TurbiniaTaskResult)
self.assertIn('CheckedResult', new_result.status)
def testTurbiniaTaskJobUnavailable(self):
"""Test that the run wrapper can fail if the job doesn't exist."""
self.unregisterMetrics()
self.setResults()
self.task.job_name = 'non_exist'
canary_status = (
'Task will not run due to the job: '
'non_exist being disabled on the worker.')
new_result = self.task.run_wrapper(self.evidence.__dict__)
new_result = TurbiniaTaskResult.deserialize(new_result)
self.assertEqual(new_result.status, canary_status)
def testTurbiniaTaskRunWrapperExceptionThrown(self):
"""Test that the run wrapper recovers from run throwing an exception."""
self.unregisterMetrics()
self.setResults()
self.task.run = mock.MagicMock(side_effect=TurbiniaException)
new_result = self.task.run_wrapper(self.evidence.__dict__)
new_result = TurbiniaTaskResult.deserialize(new_result)
self.assertEqual(type(new_result), TurbiniaTaskResult)
self.assertIn('failed', new_result.status)
@mock.patch('turbinia.workers.TurbiniaTask.create_result')
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaTaskRunWrapperSetupFail(self, _, mock_create_result):
"""Test that the run wrapper recovers from setup failing."""
self.task.result = None
canary_status = 'exception_message'
self.task.setup = mock.MagicMock(
side_effect=TurbiniaException('exception_message'))
self.result.no_output_manager = True
mock_create_result.return_value = self.result
self.remove_files.append(
os.path.join(self.task.base_output_dir, 'worker-log.txt'))
new_result = self.task.run_wrapper(self.evidence.__dict__)
new_result = TurbiniaTaskResult.deserialize(new_result)
self.assertEqual(type(new_result), TurbiniaTaskResult)
# Checking specifically for `False` value and not whether this evaluates to
# `False` because we don't want the `None` case to pass.
self.assertEqual(new_result.successful, False)
create_results_args = mock_create_result.call_args.kwargs
self.assertIn(canary_status, create_results_args['message'])
def testTurbiniaTaskValidateResultGoodResult(self):
"""Tests validate_result with good result."""
self.result.status = 'GoodStatus'
self.result.state_manager = None
new_result = self.task.validate_result(self.result)
self.assertEqual(new_result.status, 'GoodStatus')
self.assertDictEqual(new_result.error, {})
@mock.patch('turbinia.workers.TurbiniaTaskResult.close')
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaTaskValidateResultBadResult(self, _, __):
"""Tests validate_result with bad result."""
# Passing in an unpickleable object (json module) and getting back a
# TurbiniaTaskResult
new_result = self.task.validate_result(json)
self.assertEqual(type(new_result), TurbiniaTaskResult)
self.assertNotEqual(new_result.error, {})
@mock.patch('turbinia.workers.evidence_decode')
def testTurbiniaTaskEvidenceValidationFailure(self, evidence_decode_mock):
"""Tests Task fails when evidence validation fails."""
self.setResults()
test_evidence = evidence.RawDisk()
test_evidence.REQUIRED_ATTRIBUTES = ['doesnotexist']
evidence_decode_mock.return_value = test_evidence
test_result = self.task.run_wrapper(test_evidence.__dict__)
test_result = TurbiniaTaskResult.deserialize(test_result)
self.assertFalse(test_result.successful)
self.assertIn('validation failed', test_result.status)
@mock.patch('turbinia.workers.subprocess.Popen')
def testTurbiniaTaskExecute(self, popen_mock):
"""Test execution with success case."""
cmd = 'test cmd'
output = ('test stdout', 'test stderr')
self.result.close = mock.MagicMock()
proc_mock = mock.MagicMock()
proc_mock.communicate.return_value = output
proc_mock.returncode = 0
popen_mock.return_value = proc_mock
self.task.execute(
cmd, self.result, stdout_file=self.test_stdout_path, close=True)
with open(self.test_stdout_path, 'r') as stdout_path:
stdout_data = stdout_path.read()
# Command was executed, has the correct output saved and
# TurbiniaTaskResult.close() was called with successful status.
popen_mock.assert_called_with(
cmd, stdout=-1, stderr=-1, cwd=None, env=None, text=True,
encoding='utf-8')
self.assertEqual(self.result.error['stderr'], str(output[1]))
self.assertEqual(stdout_data, output[0])
self.result.close.assert_called_with(self.task, success=True)
@mock.patch('turbinia.workers.subprocess.Popen')
def testTurbiniaTaskExecuteFailure(self, popen_mock):
"""Test execution with failure case."""
cmd = 'test cmd'
output = ('test stdout', 'test stderr')
self.result.close = mock.MagicMock()
proc_mock = mock.MagicMock()
proc_mock.communicate.return_value = output
proc_mock.returncode = 1
popen_mock.return_value = proc_mock
self.task.execute(cmd, self.result, close=True)
# Command was executed and TurbiniaTaskResult.close() was called with
# unsuccessful status.
popen_mock.assert_called_with(
cmd, stdout=-1, stderr=-1, cwd=None, env=None, text=True,
encoding='utf-8')
self.result.close.assert_called_with(
self.task, success=False, status=mock.ANY)
def testTurbiniaTaskExecuteTimeout(self):
"""Test execution with subprocess timeout case."""
cmd = 'sleep 3'
self.result.close = mock.MagicMock()
ret, result = self.task.execute(cmd, self.result, shell=True, timeout=1)
# Command was executed and TurbiniaTaskResult.close() was called with
# unsuccessful status.
self.result.close.assert_called_with(
self.task, success=False, status=mock.ANY)
result_call_args = self.result.close.call_args.kwargs
# 'timeout' string shows up in status message
self.assertIn('timeout', result_call_args['status'])
# Return value shows job was killed
self.assertEqual(ret, -9)
@mock.patch('turbinia.workers.subprocess.Popen')
def testTurbiniaTaskExecuteEvidenceExists(self, popen_mock):
"""Test execution with new evidence that has valid a source_path."""
cmd = 'test cmd'
output = ('test stdout', 'test stderr')
self.result.close = mock.MagicMock()
proc_mock = mock.MagicMock()
proc_mock.communicate.return_value = output
proc_mock.returncode = 0
popen_mock.return_value = proc_mock
# Create our evidence local path file
with open(self.evidence.source_path, 'w') as evidence_path:
evidence_path.write('test')
self.task.execute(
cmd, self.result, new_evidence=[self.evidence], close=True)
self.assertIn(self.evidence, self.result.evidence)
@mock.patch('turbinia.workers.subprocess.Popen')
def testTurbiniaTaskExecuteEvidenceDoesNotExist(self, popen_mock):
"""Test execution with new evidence that does not have a source_path."""
cmd = 'test cmd'
output = ('test stdout', 'test stderr')
self.result.close = mock.MagicMock()
proc_mock = mock.MagicMock()
proc_mock.communicate.return_value = output
proc_mock.returncode = 0
popen_mock.return_value = proc_mock
self.task.execute(
cmd, self.result, new_evidence=[self.evidence], close=True)
self.assertNotIn(self.evidence, self.result.evidence)
@mock.patch('turbinia.workers.subprocess.Popen')
def testTurbiniaTaskExecuteEvidenceExistsButEmpty(self, popen_mock):
"""Test execution with new evidence source_path that exists but is empty."""
cmd = 'test cmd'
output = ('test stdout', 'test stderr')
self.result.close = mock.MagicMock()
proc_mock = mock.MagicMock()
proc_mock.communicate.return_value = output
proc_mock.returncode = 0
popen_mock.return_value = proc_mock
# Exists and is empty
self.assertTrue(os.path.exists(self.evidence.source_path))
self.assertEqual(os.path.getsize(self.evidence.source_path), 0)
self.task.execute(
cmd, self.result, new_evidence=[self.evidence], close=True)
self.assertNotIn(self.evidence, self.result.evidence)
@mock.patch('turbinia.workers.Histogram')
def testTurbiniaSetupMetrics(self, mock_histogram):
"""Tests that metrics are set up correctly."""
mock_task_list = {'TestTask1', 'TestTask2'}
mock_histogram.return_value = "test_metrics"
metrics = self.task.setup_metrics(task_list=mock_task_list)
self.assertEqual(len(metrics), len(mock_task_list))
self.assertEqual(metrics['testtask1'], 'test_metrics')
self.assertIn('testtask1', metrics)
def testEvidenceSetup(self):
"""Tests basic run of evidence_setup."""
self.evidence.preprocess = mock.MagicMock()
self.task.evidence_setup(self.evidence)
self.evidence.preprocess.assert_called_with(
self.task.id, tmp_dir=self.task.tmp_dir,
required_states=self.task.REQUIRED_STATES)
def testEvidenceSetupStateNotFulfilled(self):
"""Test that evidence setup throws exception when states don't match."""
self.evidence.preprocess = mock.MagicMock()
self.evidence.POSSIBLE_STATES = [evidence.EvidenceState.ATTACHED]
self.task.REQUIRED_STATES = [evidence.EvidenceState.ATTACHED]
    # The current state of the evidence, as shown in evidence.state[ATTACHED],
    # is not True, so this should throw an exception
self.assertRaises(
TurbiniaException, self.task.evidence_setup, self.evidence)
# Runs fine after setting the state
self.evidence.state[evidence.EvidenceState.ATTACHED] = True
self.task.evidence_setup(self.evidence)
def testAddEvidence(self):
"""Test that add_evidence adds evidence when appropriate."""
# Test that evidence gets added in the base case (source_path points to file
# with contents)
self.evidence.name = 'AddEvidenceTest'
with open(self.evidence.source_path, 'w') as source_path:
source_path.write('test')
self.result.add_evidence(self.evidence, 'EmptyConfig')
self.assertEqual(len(self.result.evidence), 1)
self.assertEqual(self.result.evidence[0].name, 'AddEvidenceTest')
self.assertEqual(self.result.evidence[0].config, 'EmptyConfig')
# Test that evidence is *not* added when source_path points to file with no
# contents.
self.result.evidence = []
empty_file = tempfile.mkstemp(dir=self.base_output_dir)[1]
self.remove_files.append(empty_file)
self.evidence.source_path = empty_file
self.result.add_evidence(self.evidence, 'EmptyConfig')
# Evidence with empty path was not in evidence list
self.assertEqual(len(self.result.evidence), 0)
# Test that evidence with source_path=None gets added
self.result.evidence = []
self.evidence.source_path = None
self.result.add_evidence(self.evidence, 'EmptyConfig')
self.assertEqual(self.result.evidence[0].name, 'AddEvidenceTest')
| {
"content_hash": "a3eee448817fdcdefdd1db547264aa0e",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 80,
"avg_line_length": 40.91803278688525,
"alnum_prop": 0.7128548534798534,
"repo_name": "google/turbinia",
"id": "39cefd97dd1d78637ce61497f67042ed8b760d89",
"size": "18072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbinia/workers/workers_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13864"
},
{
"name": "Dockerfile",
"bytes": "11147"
},
{
"name": "Go",
"bytes": "16353"
},
{
"name": "HTML",
"bytes": "466"
},
{
"name": "JavaScript",
"bytes": "11640"
},
{
"name": "Python",
"bytes": "871891"
},
{
"name": "Shell",
"bytes": "99905"
},
{
"name": "Vue",
"bytes": "4863"
},
{
"name": "YARA",
"bytes": "2306"
},
{
"name": "sed",
"bytes": "1724"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import datetime
import logging
import urllib
from xml.sax.saxutils import escape
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models import Max, Q
from django.http import HttpResponse, HttpResponseNotFound
from django.http.response import Http404, JsonResponse
from django.shortcuts import get_list_or_404, get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView
from django.views.generic.edit import FormView
from lti import InvalidLTIConfigError, OutcomeRequest, OutcomeResponse
from lti.outcome_response import CODE_MAJOR_CODES, SEVERITY_CODES
from api.backends.api_client import get_active_content_sources, get_available_courses
from bridge_lti.models import LtiLmsPlatform, LtiUser
from common.utils import get_engine_and_collection_order, stub_page
from module import tasks, utils
from module.base_views import BaseCollectionOrderView, BaseCollectionView, BaseModuleGroupView
from module.consumers import CallbackSequenceConsumer
from module.forms import (
ActivityForm, BaseCollectionForm, BaseGradingPolicyForm, CollectionOrderForm, ContributorPermissionForm,
ModuleGroupForm
)
from module.mixins.views import (
BackURLMixin, CollectionOrderEditFormMixin, CollectionSlugToContextMixin, GroupEditFormMixin,
JsonResponseMixin, LinkObjectsMixin, LtiSessionMixin, ModalFormMixin, SetUserInFormMixin
)
from module.models import (
Activity, Collection, CollectionOrder, ContributorPermission, GRADING_POLICY_NAME_TO_CLS, Log, ModuleGroup,
Sequence, SequenceItem
)
log = logging.getLogger(__name__)
DEMO_USER = 'demo_lti_user'
@method_decorator(login_required, name='dispatch')
class ModuleGroupList(BaseModuleGroupView, ListView):
context_object_name = 'groups'
ordering = ['id']
filter = 'group_slug'
enable_sharing = True
@method_decorator(login_required, name='dispatch')
class GetCollectionForm(FormView):
form_class = BaseCollectionForm
template_name = 'module/collection_form.html'
prefix = 'collection'
def get_form_kwargs(self):
form_kw = dict(prefix=self.prefix)
return form_kw
def get_form(self, form_class=None):
form = super().get_form()
form.fields['owner'].initial = self.request.user.id
form.fields['name'].help_text = (
"Name of the new Collection. You can choose available collections under the Collection settings block"
)
collection_id = self.request.GET.get('collection_id')
if collection_id and Collection.objects.filter(id=collection_id).first():
form.fields.clear()
return form
@method_decorator(login_required, name='dispatch')
class GetGradingPolicyForm(FormView):
form_class = BaseGradingPolicyForm
template_name = 'module/gradingpolicy_form.html'
prefix = 'grading'
def get_form_class(self):
policy_cls = GRADING_POLICY_NAME_TO_CLS.get(self.request.GET.get('grading_policy'), None)
if policy_cls is None:
raise Http404("No such grading policy")
return policy_cls.get_form_class()
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if self.kwargs.get('collection_order_slug'):
collection_order_query = CollectionOrder.objects.filter(
slug=self.kwargs.get('collection_order_slug'),
)
if self.request.GET.get('grading_policy'):
collection_order_query = collection_order_query.filter(
grading_policy__name=self.request.GET.get('grading_policy')
)
collection_order = collection_order_query.first()
if collection_order:
kwargs['instance'] = collection_order.grading_policy
return kwargs
def get_form(self, form_class=None):
self.form_class = self.get_form_class()
form = super().get_form()
gp = self.request.GET.get('grading_policy')
if gp in GRADING_POLICY_NAME_TO_CLS:
form.fields['name'].initial = self.request.GET.get('grading_policy')
return form
@method_decorator(login_required, name='dispatch')
class ModuleGroupCreate(BaseModuleGroupView, SetUserInFormMixin, GroupEditFormMixin, ModalFormMixin, CreateView):
pass
@method_decorator(login_required, name='dispatch')
class ModuleGroupDetail(CollectionOrderEditFormMixin, LinkObjectsMixin, BaseModuleGroupView, DetailView):
context_object_name = 'group'
link_form_class = CollectionOrderForm
link_object_name = 'collection'
filter = 'group_slug'
enable_sharing = True
def get_link_form_kwargs(self):
return dict(user=self.request.user, group=self.object)
def get_link_action_url(self):
return reverse('module:collection-add')
def get_has_available_objects(self, form):
return form.fields['collection'].queryset.exists()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'bridge_host': settings.BRIDGE_HOST,
})
return context
class AddCollectionInGroup(CollectionOrderEditFormMixin, JsonResponseMixin, FormView):
template_name = 'module/modals/course_add_group.html'
form_class = CollectionOrderForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['group'] = get_object_or_404(ModuleGroup, slug=self.kwargs.get('group_slug'))
return kwargs
def get_success_url(self):
return reverse('module:group-detail', kwargs=self.kwargs)
@method_decorator(login_required, name='dispatch')
class ModuleGroupUpdate(BaseModuleGroupView, SetUserInFormMixin, ModalFormMixin, UpdateView):
form_class = ModuleGroupForm
context_object_name = 'group'
def get(self, request, *args, **kwargs):
        # ToDo(AndreyLykhoman): test this ordering method
if kwargs.get('order'):
collection_order = CollectionOrder.objects.get(
slug=kwargs.get('collection_order_slug')
)
try:
getattr(collection_order, 'to')(int(kwargs['order']))
except AttributeError:
log.exception("Unknown ordering method!")
return HttpResponse(status=201)
return super().get(request, *args, **kwargs)
@method_decorator(login_required, name='dispatch')
class ModuleGroupShare(BaseModuleGroupView, SetUserInFormMixin, ModalFormMixin, UpdateView):
template_name = 'module/modals/share_module_group.html'
form_class = ContributorPermissionForm
def get_success_url(self):
return self.request.GET.get('return_url', reverse('module:group-detail', kwargs=self.kwargs))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
model = context.get('object')
if model:
context['contributors'] = model.contributors.all()
return context
def form_valid(self, form):
new_consumer_obj = form.cleaned_data.get('new_consumer_obj')
if new_consumer_obj and form.instance.owner != new_consumer_obj:
super().form_valid(form)
form.fields['contributor_username'].help_text = "The new contributor added. You can add another one."
form.fields['contributor_username'].initial = None
return self.render_to_response(self.get_context_data(form=form, object=form.instance))
else:
self.form_invalid(form)
@method_decorator(login_required, name='dispatch')
class ContributorPermissionDelete(DeleteView):
model = ContributorPermission
def get_success_url(self):
return self.request.GET.get(
'return_url',
reverse('module:group-detail', kwargs={"group_slug": self.kwargs.get("group_slug")})
)
def get_object(self):
return self.model.objects.get(
group__slug=self.kwargs.get("group_slug"), user__username=self.kwargs.get("username")
)
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
@method_decorator(login_required, name='dispatch')
class ModuleGroupDelete(BaseModuleGroupView, DeleteView):
def get_success_url(self):
return self.request.GET.get('return_url') or reverse('module:group-list')
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
@method_decorator(login_required, name='dispatch')
class CollectionList(BaseCollectionView, ListView):
context_object_name = 'collections'
filter = 'collection_slug'
    # Note(AndreyLikhoman): django.views.generic.ListView doesn't define slug_url_kwarg and slug_field by default,
    # so these fields were added.
slug_url_kwarg = 'slug'
slug_field = 'slug'
def get_context_data(self):
        # Note(AndreyLykhoman): This implementation must be rewritten after changing the ContributorPermission model.
        # You should remove the 'owner' field, because the 'contributors' field already contains the owner as well as
        # the additional contributors. Also, you should update the logic of the forms and views that work with Module Group.
context = super().get_context_data()
# Get Module Groups where collections are used.
mg = ModuleGroup.objects.filter(collections__in=list(context['object_list'])).distinct()
# The "name" and "slug" are ModuleGroup's fields
res = mg.values('name', 'slug', 'collections__slug').filter(
Q(owner=self.request.user) | Q(contributors=self.request.user)
)
list_mg = list(res)
result_dict = defaultdict(list)
# make a dictionary like: "{..."collection_slug": [{"name": "Name", "slug": "Slug"},..], ...}"
for mg_item in list_mg:
result_dict[mg_item.get("collections__slug")].append(mg_item)
context['avaliable_groups'] = dict(result_dict)
return context
@method_decorator(login_required, name='dispatch')
class CollectionCreate(BaseCollectionView, SetUserInFormMixin, ModalFormMixin, CreateView):
def form_valid(self, form):
result = super().form_valid(form)
if self.kwargs.get('group_slug'):
group = ModuleGroup.objects.get(slug=self.kwargs['group_slug'])
CollectionOrder.objects.create(group=group, collection=self.object)
return result
@method_decorator(login_required, name='dispatch')
class CollectionUpdate(BaseCollectionView, SetUserInFormMixin, ModalFormMixin, UpdateView):
def form_valid(self, form):
"""
Return status code as Accepted and JSON {'status': 'ok', 'collection_slug': new_slug}.
"""
super().form_valid(form)
return JsonResponse(status=202, data={'status': 'ok', 'collection_slug': form.cleaned_data.get("slug")})
@method_decorator(login_required, name='dispatch')
class CollectionOrderUpdate(
BaseCollectionOrderView,
SetUserInFormMixin,
CollectionOrderEditFormMixin,
ModalFormMixin,
UpdateView,
):
def get_object(self):
collection_order = CollectionOrder.objects.get(slug=self.kwargs.get("collection_order_slug"))
self.collection_order_group_slug = collection_order.group.slug
return collection_order
def get_success_url(self):
return reverse("module:group-detail", kwargs={'group_slug': self.collection_order_group_slug})
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['group'] = self.object.group
kwargs['read_only'] = self._set_read_only_collection()
return kwargs
def _set_read_only_collection(self):
return bool(self.kwargs.get('collection_order_slug'))
class CollectionOrderAdd(
BaseCollectionOrderView,
SetUserInFormMixin,
CollectionOrderEditFormMixin,
ModalFormMixin,
CreateView
):
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['group'] = get_object_or_404(ModuleGroup, slug=self.kwargs.get('group_slug'))
kwargs['read_only'] = self._set_read_only_collection()
return kwargs
def get(self, request, *args, **kwargs):
"""
Handle GET requests: instantiate a blank version of the form.
"""
result = super().get(request, *args, **kwargs)
result.context_data["group"] = get_object_or_404(ModuleGroup, slug=self.kwargs.get('group_slug'))
result.context_data['form'].fields['collection'].required = False
result.context_data['form'].fields['collection'].empty_label = "--- Create a new Collection ---"
result.context_data['collection_form'].fields['owner'].initial = self.request.user.id
result.context_data['collection_form'].fields['name'].help_text = (
"Name of the new Collection. You can choose available collections under the Collection settings block"
)
return result
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.POST.get('collection_group-collection'):
context['collection_form'] = None
return context
def get_success_url(self):
return reverse("module:group-detail", kwargs={'group_slug': self.kwargs.get("group_slug")})
def _set_read_only_collection(self):
        # NOTE(AndreyLykhoman): Return 'False' because we should be able to choose a new collection to add.
return False
@method_decorator(login_required, name='dispatch')
class CollectionDetail(BaseCollectionView, DetailView):
context_object_name = 'collection'
filter = 'collection_slug'
def get(self, request, *args, **kwargs):
try:
self.get_object()
except Http404:
collection_slug = request.GET.get('collection_slug')
if collection_slug:
return redirect(reverse("module:collection-detail", kwargs={'slug': collection_slug}))
return redirect(reverse('module:collection-list'))
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
selected_content_sources = list(map(int, self.request.GET.getlist('content_source', [])))
activities = Activity.objects.filter(collection=self.object).select_related('lti_content_source')
context = super().get_context_data(**kwargs)
context['render_fields'] = ['name', 'tags', 'difficulty', 'points', 'source']
context['activities'] = activities
context['not_active_content_source'] = activities.filter(lti_content_source__is_active=False).order_by(
"lti_content_source"
).distinct("lti_content_source").values_list('lti_content_source__name', flat=True)
context['content_sources'] = self.get_content_source_list(selected_content_sources)
context['source_courses'], context['errors_content_source'] = get_available_courses(
self.request, selected_content_sources
)
context['activity_form'] = ActivityForm(initial={'collection': self.object})
context['sync_available'] = self.object.collection_groups.exists()
engine_failure = self.request.GET.get('engine')
if engine_failure:
context['engine'] = engine_failure
return context
def get_content_source_list(self, selected_content_sources):
return [
{
'name': source.name,
'id': source.id,
'checked': 'checked' if not selected_content_sources or source.id in selected_content_sources else ''
}
for source in get_active_content_sources(request=self.request, not_allow_empty_source_id=False)
]
@method_decorator(login_required, name='dispatch')
class CollectionDelete(DeleteView):
model = Collection
def get_success_url(self):
return self.request.GET.get('return_url') or reverse('module:collection-list')
def get_queryset(self):
return super().get_queryset().filter(owner=self.request.user)
# TODO check it
def get(self, *args, **kwargs):
return self.post(*args, **kwargs)
@method_decorator(login_required, name='dispatch')
class CollectionGroupDelete(DeleteView):
model = CollectionOrder
def get_success_url(self):
return (
self.request.GET.get('return_url') or
reverse('module:group-detail', kwargs={'group_slug': self.object.group.slug})
)
def get(self, request, *args, **kwargs):
return self.post(request=request, *args, **kwargs)
def get_queryset(self):
        return self.model.objects.all()
def get_object(self, queryset=None):
return self.model.objects.get(slug=self.kwargs['collection_order_slug'])
@method_decorator(login_required, name='dispatch')
class ActivityCreate(BackURLMixin, CollectionSlugToContextMixin, ModalFormMixin, CreateView):
model = Activity
form_class = ActivityForm
def get_initial(self):
result = super().get_initial()
if self.request.method == 'GET':
result.update({
'name': self.request.GET.get('name'),
'source_name': self.request.GET.get('source_name'),
'source_launch_url': self.request.GET.get('source_launch_url', '').replace(' ', '+'),
'lti_content_source': self.request.GET.get('lti_content_source'),
'stype': self.request.GET.get('stype'),
})
return result
def form_valid(self, form):
form.instance.collection = Collection.objects.get(slug=self.kwargs.get('collection_slug'))
result = super().form_valid(form)
return result
@method_decorator(login_required, name='dispatch')
class ActivityUpdate(CollectionSlugToContextMixin, ModalFormMixin, UpdateView):
model = Activity
form_class = ActivityForm
context_object_name = 'activity'
def get(self, request, *args, **kwargs):
"""
        Update the activity via a GET request.
        Updates the activity's order and runs the update method in the superclass. The drag-and-drop feature uses this view.
"""
activity = self.get_object()
# NOTE(AndreyLykhoman): Changing activity's order if kwargs contains the 'order' param.
if kwargs.get('order'):
try:
getattr(activity, 'to')(int(kwargs['order']))
except AttributeError:
log.exception("Unknown ordering method!")
return HttpResponse(status=201)
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
"""
        Update the activity via a POST request.
        Updates the activity and changes the activity's order if the activity type changes.
"""
activity = self.get_object()
if request.POST.get("atype") != activity.atype:
            # NOTE(AndreyLykhoman): Exclude the activity from its atype group and reorder the remaining activities.
            # Autocommit is disabled in this part of the code in order to send a single query to the DB.
ordering_queryset = activity.get_ordering_queryset().exclude(pk=activity.pk)
if ordering_queryset.exists():
transaction.set_autocommit(False)
try:
for index, element in enumerate(ordering_queryset):
element.order = index
element.save()
except Exception:
transaction.rollback()
raise
else:
transaction.commit()
finally:
transaction.set_autocommit(True)
# NOTE(AndreyLykhoman): Calculate a new activity's order
new_order = 0
tmp_activity = Activity.objects.filter(
collection=activity.collection,
atype=request.POST.get("atype")
).first()
if tmp_activity:
new_order = 1 + tmp_activity.get_ordering_queryset().latest('order').order
activity.atype, activity.order = request.POST.get("atype"), new_order
activity.save()
result = super().post(request, *args, **kwargs)
return result
@method_decorator(login_required, name='dispatch')
class ActivityDelete(DeleteView):
model = Activity
def get_success_url(self):
return self.request.GET.get('return_url') or reverse(
'module:collection-detail', kwargs={'slug': self.object.collection.slug}
)
def delete(self, request, *args, **kwargs):
try:
return super().delete(request, *args, **kwargs)
except (ValidationError, TypeError):
return redirect("{}?engine=failure".format(self.get_success_url()))
class SequenceItemDetail(LtiSessionMixin, DetailView):
model = SequenceItem
context_object_name = 'sequence_item'
template_name = 'module/sequence_item.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['forbidden'] = True
if not self.request.GET.get('forbidden'):
context['forbidden'], _, _ = _check_next_forbidden(self.object.pk)
sequence_items = SequenceItem.objects.filter(sequence=self.object.sequence)
last_item = sequence_items.last()
if (
self.request.session.get('Lti_update_activity') and
len(sequence_items) > 1 and
last_item.is_problem and
not last_item.score
):
sequence_items = sequence_items[:len(sequence_items) - 1]
context['sequence_items'] = sequence_items
log.debug("Sequence Items on the page: {}".format(len(sequence_items)))
Log.objects.create(
sequence_item=self.object,
log_type=Log.OPENED
)
return context
@method_decorator(login_required, name='dispatch')
class SequenceDelete(DeleteView):
model = Sequence
def get_success_url(self):
return self.request.GET.get('return_url') or reverse(
'module:group-detail', kwargs={'group_slug': self.object.group.slug}
)
def delete(self, request, *args, **kwargs):
# NOTE(idegtiarov) ensure that sequence corresponds to the demo_user before deleting
# if self.get_object().lti_user.user_id == DEMO_USER:
if Sequence.objects.filter(id=self.kwargs.get('pk'), lti_user__user_id=DEMO_USER).exists():
cache.delete(settings.TEST_SEQUENCE_SUFFIX)
return super().delete(request, *args, **kwargs)
else:
return redirect(self.get_success_url())
def get(self, request, *args, **kwargs):
return self.post(request=request, *args, **kwargs)
def _check_next_forbidden(pk):
"""
Check if next sequence item is forbidden to be shown to the student.
    :param pk: pk of the currently opened SequenceItem
    :return: tuple of the parameters next_forbidden, last_item, sequence_item,
    where next_forbidden is a boolean flag that forbids showing the next sequence item to the student,
    last_item (integer) is the position of the last SequenceItem,
    sequence_item is the SequenceItem instance that is currently open
"""
sequence_item = SequenceItem.objects.get(pk=pk)
last_item = SequenceItem.objects.filter(
sequence=sequence_item.sequence
).aggregate(last_item=Max('position'))['last_item']
next_forbidden = (
sequence_item.is_problem and
sequence_item.position == last_item and
sequence_item.sequence.collection_order.strict_forward and
sequence_item.score is None
)
log.debug(f"Next item forbidden: {next_forbidden}, last_item: {last_item}, sequence_item_id: {sequence_item.id}")
return next_forbidden, last_item, sequence_item
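# NOTE (added explanatory comment, not part of the original module): the boolean above is a
# "strict forward" guard. It is True only when the currently opened item is a problem, sits in
# the last position of the sequence, the collection order enforces strict_forward, and no score
# has been submitted yet; in that case the student cannot move on until a score is recorded
# (e.g. via the LTI grade callback further below).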
def sequence_item_next(request, pk):
try:
next_forbidden, last_item, sequence_item = _check_next_forbidden(pk)
except SequenceItem.DoesNotExist:
log.exception("SequenceItem which supposed to exist can't be found!")
return render(
request,
template_name="bridge_lti/announcement.html",
context={
'title': 'Something went wrong...',
                'message': 'An internal problem occurred; please inform the course personnel about your experience.',
'tip': "ERROR: next sequence item can't be proposed",
}
)
if next_forbidden:
return redirect("{}?forbidden=true".format(
reverse('module:sequence-item', kwargs={'pk': sequence_item.id}))
)
next_sequence_item = SequenceItem.objects.filter(
sequence=sequence_item.sequence,
position=sequence_item.position + 1
).first()
log.debug("Picked next sequence item is: {}".format(next_sequence_item))
if not next_sequence_item or next_sequence_item.position == last_item:
activity = utils.choose_activity(sequence_item)
update_activity = request.session.pop('Lti_update_activity', None)
if next_sequence_item is None:
sequence = sequence_item.sequence
policy = sequence.collection_order.grading_policy.policy_instance(
sequence=sequence
)
policy.send_grade()
if not activity:
if sequence.completed:
return redirect(reverse('module:sequence-complete', kwargs={'pk': sequence_item.sequence_id}))
return stub_page(
request,
title="Warning",
message="Cannot get next activity from the engine.",
tip="Try again later or connect with the instructor."
)
next_sequence_item = SequenceItem.objects.create(
sequence=sequence_item.sequence,
activity=activity,
position=sequence_item.position + 1
)
elif update_activity:
log.debug('Bridge updates activity in the un-submitted SequenceItem')
if activity:
next_sequence_item.activity = activity
next_sequence_item.save()
return redirect(
reverse('module:sequence-item', kwargs={'pk': next_sequence_item.id})
)
class SequenceComplete(LtiSessionMixin, DetailView):
model = Sequence
template_name = 'module/sequence_complete.html'
def _check_and_build_web_socket_message(sequence):
"""
    Build a dictionary with data for the Web Socket callback.
    Check the ui_option and congratulation_message flags and validate the score.
"""
web_socket_message_dict = {"is_button_enable": True}
grade = sequence.collection_order.grading_policy.calculate_grade(sequence)
if sequence.collection_order.congratulation_message and grade >= settings.CONGRATULATION_SCORE_LEVEL:
web_socket_message_dict["is_show_pop_up"] = True
if sequence.collection_order.ui_option:
web_socket_message_dict["ui_details"] = sequence.sequence_ui_details()
return web_socket_message_dict
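# NOTE (illustrative, not part of the original module): the payload pushed over the Web Socket
# has roughly the following shape, assuming both the congratulation pop-up and the UI details
# are enabled for the collection order:
#
#     {
#         "is_button_enable": True,
#         "is_show_pop_up": True,   # only when grade >= CONGRATULATION_SCORE_LEVEL
#         "ui_details": {...},      # whatever Sequence.sequence_ui_details() returns
#     }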
@csrf_exempt
def callback_sequence_item_grade(request):
outcome_response = OutcomeResponse(
message_identifier='unknown', code_major=CODE_MAJOR_CODES[2], severity=SEVERITY_CODES[0]
)
try:
outcome_request = OutcomeRequest().from_post_request(request)
score = float(outcome_request.score)
if not 0.0 <= score <= 1.0:
raise InvalidLTIConfigError('[LTI] score value is outside the permitted range of 0.0-1.0')
operation = outcome_request.operation
        if operation != 'replaceResult':
            raise InvalidLTIConfigError('[LTI] request operation {} cannot be processed'.format(operation))
except (InvalidLTIConfigError, ValueError) as err:
body = escape(request.body) if request.body else ''
error_message = "Request body XML parsing error: {} {}".format(err.message, body)
log.debug("Failure to archive grade from the source: %s" + error_message)
outcome_response.description = escape(error_message)
return HttpResponse(outcome_response.generate_response_xml(), content_type='application/xml')
sequence_item_id, user_id, _activity, _suffix = outcome_request.lis_result_sourcedid.text.split(':')
outcome_response.code_major = CODE_MAJOR_CODES[0]
outcome_response.description = 'Score for {sourced_id} is now {score}'.format(
sourced_id=outcome_request.lis_result_sourcedid, score=score
)
outcome_response.message_identifier = outcome_request.message_identifier
outcome_response.operation = operation
xml = outcome_response.generate_response_xml()
log.debug("Received CallBack with the submitted answer for sequence item {}.".format(sequence_item_id))
try:
sequence_item = SequenceItem.objects.get(id=sequence_item_id)
except SequenceItem.DoesNotExist:
error_message = "Sequence Item with the ID={} was not found".format(sequence_item_id)
outcome_response.description = escape(error_message)
log.debug("[LTI] {}".format(error_message))
return HttpResponseNotFound(outcome_response.generate_response_xml(), content_type='application/xml')
sequence_item.score = score
sequence_item.save()
log.debug("[LTI] Sequence item {} grade is updated".format(sequence_item))
last_log_submit = Log.objects.filter(sequence_item=sequence_item, log_type='S').last()
attempt = (last_log_submit.attempt if last_log_submit else 0) + 1
correct = bool(score)
Log.objects.create(
sequence_item=sequence_item,
log_type=Log.SUBMITTED,
answer=correct,
attempt=attempt,
)
log.debug("New Log is created log_type: 'Submitted', attempt: {}, correct: {}, sequence is completed: {}".format(
attempt, correct, sequence_item.sequence.completed
))
sequence = sequence_item.sequence
web_socket_message_dict = _check_and_build_web_socket_message(sequence)
CallbackSequenceConsumer.send_message_to_channel(
f'{sequence_item.id}_{sequence_item.position}', web_socket_message_dict
)
if sequence.lis_result_sourcedid:
policy = sequence.collection_order.grading_policy.policy_instance(
sequence=sequence, request=request, user_id=user_id
)
policy.send_grade()
return HttpResponse(xml, content_type="application/xml")
def sync_collection(request, slug, api_request=None):
"""
Synchronize collection immediately.
"""
back_url = request.GET.get('back_url')
collection = get_object_or_404(Collection, slug=slug)
collection.save()
log.debug("Immediate sync task is created, time: {}".format(collection.updated_at))
task = tasks.sync_collection_engines.delay(
collection_slug=slug, created_at=collection.updated_at
)
if api_request:
return task.collect(timeout=settings.CELERY_RESULT_TIMEOUT)
return redirect(
reverse('module:collection-detail', kwargs={'slug': collection.slug}) + '?back_url={}'.format(back_url)
)
def update_students_grades(request, collection_order_slug):
"""
    Force an update of students' grades related to the collection-group.
"""
back_url = request.GET.get('back_url')
    collection_order = get_object_or_404(CollectionOrder, slug=collection_order_slug)
    tasks.update_students_grades.delay(collection_order_slug=collection_order_slug)
    log.debug(
        f"Task updating students' grades related to the collection_order with id {collection_order.id} has started."
    )
    return redirect(reverse(
        'module:group-detail', kwargs={'group_slug': collection_order.group.slug}
    ) + '?back_url={}'.format(back_url))
def preview_collection(request, slug):
    activities = [
{
'url': (
f'{reverse("lti:source-preview")}?source_id={a.id}&source_name={urllib.parse.quote_plus(a.name)}'
f'&source_lti_url={a.source_launch_url}&content_source_id={a.lti_content_source_id}'
),
'pos': pos,
}
for pos, a in enumerate(get_list_or_404(Activity, collection__slug=slug), start=1)
]
return render(
request,
template_name="module/sequence_preview.html",
context={
            'activities': activities,
'back_url': (
f"{reverse('module:collection-detail', kwargs={'slug': slug})}"
f"?back_url={request.GET.get('back_url')}"
)
}
)
def demo_collection(request, collection_order_slug):
"""
View for the demonstration and testing of the adaptivity behaviour.
"""
__, collection_order = get_engine_and_collection_order(collection_order_slug)
lti_lms_platform = LtiLmsPlatform.objects.first()
test_lti_user, created = LtiUser.objects.get_or_create(
user_id=DEMO_USER,
lti_lms_platform=lti_lms_platform,
)
test_sequence, created = Sequence.objects.get_or_create(
lti_user=test_lti_user,
collection_order=collection_order
)
strict_forward = collection_order.strict_forward
request.session['Lti_sequence'] = test_sequence.id
request.session['Lti_strict_forward'] = strict_forward
back_url = request.GET.get('back_url', '')
context = {
'sequence_pk': test_sequence.id,
'back_url': back_url,
'forbidden': request.GET.get('forbidden', ''),
}
if created or not test_sequence.items.exists():
suffix = int(datetime.datetime.now().timestamp())
cache.set(settings.TEST_SEQUENCE_SUFFIX, suffix)
test_sequence.suffix = suffix
test_sequence.save()
log.debug("Sequence {} was created".format(test_sequence))
start_activity = utils.choose_activity(sequence=test_sequence)
if not start_activity:
            log.warning('Instructor configured an empty Collection.')
return stub_page(
request,
title="Warning",
message="Cannot get the first question to start.",
tip="Please try again later",
demo=True,
sequence=test_sequence,
back_url=back_url,
)
sequence_item = SequenceItem.objects.create(
sequence=test_sequence,
activity=start_activity,
position=1
)
next_forbidden, _, _ = _check_next_forbidden(sequence_item.id)
context.update({"forbidden": next_forbidden})
else:
s_item_id = request.GET.get('sequence_item_id') or test_sequence.items.last().id
        log.debug(f'SequenceItem id: {s_item_id}')
next_forbidden, last_item, sequence_item = _check_next_forbidden(s_item_id)
position = int(request.GET.get('position') or 1)
if not (next_forbidden and position > sequence_item.position):
update_activity = request.session.pop('Lti_update_activity', None)
sequence_item, sequence_complete, stub = utils.select_next_sequence_item(
sequence_item, update_activity, last_item, position,
)
next_forbidden, _, _ = _check_next_forbidden(sequence_item.id)
context.update({"forbidden": next_forbidden})
if sequence_complete:
context.update(
{'sequence_items': test_sequence.items.all(), 'demo': True, 'sequence_item': sequence_item}
)
return render(
request,
template_name='module/sequence_complete.html',
context=context)
elif stub:
return stub_page(
request,
title="Warning",
message="Cannot get next activity from the engine.",
tip="Try again later or connect with the instructor.",
demo=True,
sequence=test_sequence,
back_url=back_url,
)
context.update({
'sequence_item': sequence_item,
'sequence_items': test_sequence.items.all(),
'demo': True,
'position': sequence_item.position + 1
})
return render(
request,
template_name="module/sequence_item.html",
context=context
)
| {
"content_hash": "b3dd7286a0da97b81ea2295031434cf7",
"timestamp": "",
"source": "github",
"line_count": 916,
"max_line_length": 120,
"avg_line_length": 40.180131004366814,
"alnum_prop": 0.6482814834940904,
"repo_name": "harvard-vpal/bridge-adaptivity",
"id": "ac5ae33cf4505f57400d9f8df9d46024c127fba1",
"size": "36805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bridge_adaptivity/module/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "Dockerfile",
"bytes": "1586"
},
{
"name": "HTML",
"bytes": "70921"
},
{
"name": "JavaScript",
"bytes": "29636"
},
{
"name": "Makefile",
"bytes": "1614"
},
{
"name": "Python",
"bytes": "315506"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
} |
import random
class Node:
    def __init__(self, data=None):
        self.data = data
        self.left = None
        self.right = None
def generateTreeFromArray(data):
return generateTreeHelper(data, 0, len(data))
def generateTreeHelper(data, root, right):
if root < right:
r = Node(data[root])
r.left = generateTreeHelper(data, 2 * root + 1, right)
r.right = generateTreeHelper(data, 2 * root + 2, right)
return r
else:
return None
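# NOTE (added explanatory comment, not in the original script): generateTreeHelper builds a
# complete binary tree from the array in level order, using the usual heap indexing: the
# children of the node stored at index i live at indices 2*i + 1 and 2*i + 2. For example,
# generateTreeFromArray([0, 1, 2, 3]) produces a root 0 with children 1 and 2, and 3 as the
# left child of 1.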
def findNode(root, x):
    if root is None:
        return None
    if root.data == x:
        return root
    left = findNode(root.left, x)
    if left is not None:
        return left
    right = findNode(root.right, x)
    if right is not None:
        return right
    return None
def LCA(root, p, q):
    if root is None:
        return None
    if root is p or root is q:
        return root
    left = LCA(root.left, p, q)
    right = LCA(root.right, p, q)
    if left is not None and right is not None:
        return root
    if left is not None:
        return left
    return right
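# NOTE (added explanatory comment, not in the original script): the LCA recursion visits every
# node at most once, so it runs in O(n) time and O(h) extra space for the recursion stack,
# where n is the number of nodes and h is the height of the tree. It assumes both p and q are
# present in the tree; if only one of them is present, that node itself is returned.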
if __name__ == '__main__':
size = 15
data = [x for x in range(size)]
root = generateTreeFromArray(data)
p = random.randint(1, size - 1)
q = random.randint(1, size - 1)
n1 = findNode(root, p)
n2 = findNode(root, q)
n3 = LCA(root, n1, n2)
for i in [n1, n2, n3]:
print(i.data)
| {
"content_hash": "0c1c2e10917f6b282883a41be156d617",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 63,
"avg_line_length": 19.92753623188406,
"alnum_prop": 0.5534545454545454,
"repo_name": "baiyubin/python_practice",
"id": "5ef83838c593b63092bc29d480e4cdb7a9819d6a",
"size": "1375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lca.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7209"
},
{
"name": "Python",
"bytes": "94379"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('jobs', '0034_auto_20160209_1821'),
]
operations = [
migrations.AddField(
model_name='company',
name='creator',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='companies', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='job',
name='creator',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AlterField(
model_name='invitationcode',
name='is_issued',
field=models.BooleanField(default=True, verbose_name='Issued?'),
),
migrations.AlterField(
model_name='invitationcode',
name='issued_date',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Issued on'),
),
]
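# NOTE (added explanatory comment, not part of the original migration): default=1 assumes a
# user with pk=1 exists and is used only to populate the new non-nullable 'creator' columns on
# existing rows; preserve_default=False tells Django to drop that default from the field
# definition after the migration, so new rows must always provide a creator explicitly.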
| {
"content_hash": "8fce716e397c8750e66b8283738e98dd",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 147,
"avg_line_length": 34.69230769230769,
"alnum_prop": 0.6274944567627494,
"repo_name": "Santiago-vdk/jabbs",
"id": "0ce9573c34e42ed6ffe9d199ec49a3cac92701d9",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jobs/migrations/0035_auto_20160210_1341.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "114734"
},
{
"name": "HTML",
"bytes": "20903"
},
{
"name": "JavaScript",
"bytes": "428326"
},
{
"name": "Python",
"bytes": "45843"
}
],
"symlink_target": ""
} |
""" Functions useful to directives and roles contained in
the ``bokeh.sphinxext`` package.
"""
import os
from os.path import exists
def out_of_date(original, derived):
""" Test whether a derived file is newer than its original.
Args:
original (str) : full path to original file
derived (str) : full path to derived file
Returns:
bool :
True if original is newer or derived does not
exist, False otherwise
Raises:
        RuntimeError : if original does not exist
"""
if not exists(original):
        raise RuntimeError("%r does not exist" % original)
if not exists(derived):
return True
return os.stat(derived).st_mtime < os.stat(original).st_mtime
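# A minimal usage sketch (illustrative only; the paths and the helper below are made up):
#
#     if out_of_date("docs/source/example.py", "docs/_build/example.png"):
#         regenerate_plot()  # hypothetical helper that rebuilds the derived file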
| {
"content_hash": "50b6b2238e578ff4b743180ab5c5fc9f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 65,
"avg_line_length": 23.866666666666667,
"alnum_prop": 0.6410614525139665,
"repo_name": "almarklein/bokeh",
"id": "9b5cb05001dcafb1d1a7a379f1523fa8c167aa7f",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/sphinxext/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "410607"
},
{
"name": "CoffeeScript",
"bytes": "2138603"
},
{
"name": "JavaScript",
"bytes": "349966"
},
{
"name": "Makefile",
"bytes": "6253"
},
{
"name": "Python",
"bytes": "1543731"
},
{
"name": "Scala",
"bytes": "28963"
},
{
"name": "Shell",
"bytes": "20366"
}
],
"symlink_target": ""
} |
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1],
fetched_vals[2]
if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(ops.SparseTensor,
lambda fetch: (
[fetch.indices, fetch.values, fetch.shape],
lambda fetched_vals: ops.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(zip(
[feed.indices, feed.values, feed.shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: (
[fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape],
_get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None
else [feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object,
lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)],
lambda feed: [feed])]
# pylint: enable=g-long-lambda
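# NOTE (illustrative, not part of the original module): a hypothetical entry in
# _REGISTERED_EXPANSIONS for some custom type `MyPair(first, second)` would follow the
# (Type, fetch_fn, feed_fn1, feed_fn2) contract described above, e.g.:
#
#     (MyPair,
#      lambda fetch: ([fetch.first, fetch.second],
#                     lambda fetched_vals: MyPair(*fetched_vals)),
#      lambda feed, feed_val: list(zip([feed.first, feed.second], feed_val)),
#      lambda feed: [feed.first, feed.second])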
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond
exactly to the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, dict):
return _DictFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined
in _REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)'
% (fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
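# NOTE (illustrative, not part of the original module): given two mappers whose
# unique_fetches() return [a, b] and [b, c] respectively, _uniquify_fetches returns
# unique_fetches == [a, b, c] and value_indices == [[0, 1], [1, 2]], so the shared fetch `b`
# is only run once.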
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if self._fetch_type == list:
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._keys = fetches.keys()
self._mappers = [_FetchMapper.for_fetch(fetch)
for fetch in fetches.values()]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = {}
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability
and to convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
feeds: A feed dict where keys are fully resolved tensor names.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
fetch_name = compat.as_bytes(fetch.name)
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch_name)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch_name)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if isinstance(fetch, ops.Tensor) and fetch.op.type == 'GetSessionHandle':
self._fetch_handles[fetch_name] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise ValueError(
'Operation %r has been marked as not fetchable.' % op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned
by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
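# NOTE (illustrative, not part of the original module): for a nested fetch such as
# {'loss': loss_t, 'ops': [train_op, summary_t]} (hypothetical names), the handler above
# collects the tensor names for `loss_t` and `summary_t` into fetches(), the op name for
# `train_op` into targets(), and build_results() later reassembles
# {'loss': <ndarray>, 'ops': [None, <ndarray>]} in the original shape.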
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None,
the default graph will be used.
config: (Optional) ConfigProto proto used to configure the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._opened = False
self._closed = False
self._current_version = 0
self._extend_lock = threading.Lock()
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is not None:
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s'
% type(config))
self._config = config
self._add_shapes = config.graph_options.infer_shapes
else:
self._config = None
self._add_shapes = False
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
with errors.raise_exception_on_not_ok_status() as status:
self._session = tf_session.TF_NewSession(opts, status)
finally:
tf_session.TF_DeleteSessionOptions(opts)
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
with self._extend_lock:
if self._opened and not self._closed:
self._closed = True
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_CloseSession(self._session, status)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
status = tf_session.TF_NewStatus()
tf_session.TF_DeleteSession(self._session, status)
finally:
tf_session.TF_DeleteStatus(status)
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
[`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or
[`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval) should be
executed in this session.
```python
c = tf.constant(..)
sess = tf.Session()
with sess.as_default():
assert tf.get_default_session() is sess
print(c.eval())
```
To get the current default session, use
[`tf.get_default_session()`](#get_default_session).
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default graph is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, or dict containing graph elements at its
leaves. A graph element can be one of the following types:
* An [`Operation`](../../api_docs/python/framework.md#Operation).
The corresponding fetched value will be `None`.
* A [`Tensor`](../../api_docs/python/framework.md#Tensor).
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A [`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor).
The corresponding fetched value will be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue)
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
    # v is a Python list with 2 numpy arrays: the numpy array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' the numpy array [10, 20] and
# 'b' the numpy array [1.0, 2.0]
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
[placeholder](../../api_docs/python/io_ops.md#placeholder), the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the value should be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue).
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (described above).
feed_dict: A dictionary that maps graph elements to values
(described above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
run_metadata_ptr = tf_session.TF_NewBuffer()
if options:
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString()))
else:
options_ptr = None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (see documentation for `run`).
feed_dict: A dictionary that maps graph elements to values
(described above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
feed_list.append(compat.as_bytes(subfeed_t.name))
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: '
+ e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_PRunSetup(session, feed_list, fetch_list,
target_list, status)
return self._do_call(_setup_fn, self._session, feed_list,
fetch_handler.fetches(), fetch_handler.targets())
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_string = {}
feed_map = {}
# Validate and process feed_dict.
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: '
+ e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, or numpy ndarrays.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val,
int) and subfeed_dtype(subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' is not'
' compatible with Tensor type ' + str(subfeed_dtype) + '.'
' Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r'
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
subfeed_name = compat.as_bytes(subfeed_t.name)
feed_dict_string[subfeed_name] = np_val
feed_map[subfeed_name] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, feed_dict_string)
# Run request and get response.
# We need to keep the movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
movers = self._update_with_movers(feed_dict_string, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
if final_fetches or final_targets:
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_string, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
# Captures the name of a node in an error status.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')
def _do_run(self, handle, target_list, fetch_list, feed_dict,
options, run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of byte arrays corresponding to names of tensors
or operations to be run to, but not fetched.
fetch_list: A list of byte arrays corresponding to names of tensors to
be fetched and operations to be run.
feed_dict: A dictionary that maps tensor names (as byte arrays) to
numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
def _run_fn(session, feed_dict, fetch_list, target_list, options,
run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_Run(session, options,
feed_dict, fetch_list, target_list,
status, run_metadata)
def _prun_fn(session, handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_PRun(session, handle, feed_dict, fetch_list,
status)
if handle is None:
return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
target_list, options, run_metadata)
else:
return self._do_call(_prun_fn, self._session, handle, feed_dict,
fetch_list)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(1)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
raise type(e)(node_def, op, message)
def _extend_graph(self):
# Ensure any changes to the graph are reflected in the runtime.
with self._extend_lock:
if self._graph.version > self._current_version:
# pylint: disable=protected-access
graph_def, self._current_version = self._graph._as_graph_def(
from_version=self._current_version,
add_shapes=self._add_shapes)
# pylint: enable=protected-access
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_ExtendGraph(
self._session, graph_def.SerializeToString(), status)
self._opened = True
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
    # the number of dead tensors exceeds a certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
# TODO(yuanbyu): For now we use a sequence of runs to minimize the graph
# size and the overhead of graph construction/partitioning.
if tensors_to_delete:
for tensor_handle in tensors_to_delete:
feeds = {}
fetches = []
holder, deleter = session_ops._get_handle_deleter(self.graph,
tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
    # If a tensor handle is fed to a device-incompatible placeholder, we move
    # the tensor to the right device, generate a new tensor handle, and update
    # `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=np.object)
feed_dict[handle_mover[0]] = np_val
return handles
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
[variables](../../api_docs/python/state_ops.md#Variable), [queues](../../api_docs/python/io_ops.md#QueueBase),
and [readers](../../api_docs/python/io_ops.md#ReaderBase). It is important to release
these resources when they are no longer required. To do this, either
invoke the [`close()`](#Session.close) method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.Session() as sess:
sess.run(...)
```
The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
```
@@__init__
@@run
@@close
@@graph
@@as_default
@@reset
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. See
[Distributed Tensorflow](https://www.tensorflow.org/how_tos/distributed/index.html)
for more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
self._default_session_context_manager.__exit__(
exec_type, exec_value, exec_tb)
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if all
        the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
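As a quick illustration of the semantics described in the `reset` docstring above, the sketch below clears a named resource container on a remote master. This is a minimal sketch, assuming a TF 1.x distributed session; the `grpc://` address and container name are placeholders.
```python
import tensorflow as tf

# Hypothetical sketch: clear the "experiment0" container on a remote master.
# The address and container name are placeholders; reset() only works for
# distributed sessions.
tf.Session.reset("grpc://master.example.com:2222", containers=["experiment0"])

# Passing no containers resets every container on that target.
tf.Session.reset("grpc://master.example.com:2222")
```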
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
  The only difference from a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval)
and [`Operation.run()`](../../api_docs/python/framework.md#Operation.run)
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
@@__init__
@@close
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
config = config_pb2.ConfigProto()
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_session.__exit__(None, None, None)
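To make the feed validation at the top of this file concrete, here is a minimal sketch, assuming a TF 1.x runtime: feeding a value whose shape is incompatible with the placeholder raises `ValueError` on the client side, as in the shape check in `_run` above. The placeholder and tensor names are arbitrary.
```python
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2, 3], name="x")
y = tf.reduce_sum(x)

with tf.Session() as sess:
    # A compatible feed runs normally.
    print(sess.run(y, feed_dict={x: np.ones((2, 3), dtype=np.float32)}))
    # An incompatible shape is rejected before reaching the runtime.
    try:
        sess.run(y, feed_dict={x: np.ones((4, 4), dtype=np.float32)})
    except ValueError as e:
        print("rejected:", e)
```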
| {
"content_hash": "824f2ea6ecc99020285da8be6ff074d9",
"timestamp": "",
"source": "github",
"line_count": 1265,
"max_line_length": 114,
"avg_line_length": 37.33280632411067,
"alnum_prop": 0.6554863846186423,
"repo_name": "juharris/tensorflow",
"id": "c139f87c323dab7049e92da562deed15f68adc91",
"size": "47916",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/client/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156005"
},
{
"name": "C++",
"bytes": "9229239"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "783708"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1773496"
},
{
"name": "Protocol Buffer",
"bytes": "112087"
},
{
"name": "Python",
"bytes": "6699482"
},
{
"name": "Shell",
"bytes": "185658"
},
{
"name": "TypeScript",
"bytes": "410434"
}
],
"symlink_target": ""
} |
import json
import time, datetime, os, sys
from pyvcloud.vcloudair import VCA
from pyvcloud.cluster import Cluster
from pyvcloud.helper.CommonUtils import convertPythonObjToStr
def print_vca(vca):
if vca:
print 'vca token: ', vca.token
if vca.vcloud_session:
print 'vcloud session token: ', vca.vcloud_session.token
print 'org name: ', vca.vcloud_session.org
print 'org url: ', vca.vcloud_session.org_url
print 'organization: ', vca.vcloud_session.organization
else:
print 'vca vcloud session: ', vca.vcloud_session
else:
print 'vca: ', vca
host='vcd.cpsbu.eng.vmware.com'
username = 'administrator'
password = os.environ['PASSWORD']
org = 'System'
org_url = 'https://%s/cloud' % host
verify = False
log = True
version = '27.0'
vca = VCA(host=host, username=username, service_type='standalone', version=version, verify=verify, log=log)
result = vca.login(password=password, org=org, org_url=org_url)
print_vca(vca)
cse = Cluster(session=vca.vcloud_session, verify=verify, log=log)
clusters = cse.get_clusters()
print('clusters found: %s' % len(clusters))
for cluster in clusters:
print('cluster %s' % cluster['name'])
| {
"content_hash": "9e3fe89befbfffd8a63a3b89e9591d41",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 107,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.6554160125588697,
"repo_name": "h-medjahed/pyvcloud",
"id": "df1e70add294121dc8b8c60f39917c2d84a65a90",
"size": "1298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/list_clusters.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17068794"
}
],
"symlink_target": ""
} |
from django.apps import apps
from wagtail.contrib.frontend_cache.utils import purge_page_from_cache
from wagtail.core.signals import page_published, page_unpublished
def page_published_signal_handler(instance, **kwargs):
purge_page_from_cache(instance)
def page_unpublished_signal_handler(instance, **kwargs):
purge_page_from_cache(instance)
def register_signal_handlers():
# Get list of models that are page types
Page = apps.get_model('wagtailcore', 'Page')
indexed_models = [model for model in apps.get_models() if issubclass(model, Page)]
# Loop through list and register signal handlers for each one
for model in indexed_models:
page_published.connect(page_published_signal_handler, sender=model)
page_unpublished.connect(page_unpublished_signal_handler, sender=model)
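For context, these handlers are typically connected once the Django app registry is ready. The sketch below is a hypothetical wiring example; the app name, label, and module path are placeholders, not part of this module.
```python
# Hypothetical AppConfig that connects the handlers above at startup.
from django.apps import AppConfig


class FrontendCacheConfig(AppConfig):
    name = "myproject.frontendcache"    # placeholder module path
    label = "myproject_frontendcache"   # placeholder label

    def ready(self):
        # Import late so all page models are registered before connecting.
        from .signal_handlers import register_signal_handlers
        register_signal_handlers()
```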
| {
"content_hash": "e9eeb059382eee02dab9b49a0dae820b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 86,
"avg_line_length": 35.91304347826087,
"alnum_prop": 0.7506053268765133,
"repo_name": "kaedroho/wagtail",
"id": "b537bf6e0dfa422c85f0f4508e635a2ee647a425",
"size": "826",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "wagtail/contrib/frontend_cache/signal_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3323"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "505436"
},
{
"name": "JavaScript",
"bytes": "279901"
},
{
"name": "Makefile",
"bytes": "977"
},
{
"name": "Python",
"bytes": "4671883"
},
{
"name": "SCSS",
"bytes": "201389"
},
{
"name": "Shell",
"bytes": "7662"
},
{
"name": "TypeScript",
"bytes": "30266"
}
],
"symlink_target": ""
} |
import os
from tornado.web import authenticated, removeslash
from handlers.basehandlers import BaseHandler
import logging
import json
class StatisticsHandler(BaseHandler):
role = "admin"
@authenticated
@removeslash
def get(self):
self.render("admin/statistics.html", title="Cloud Setuper", username=self.current_user)
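A minimal sketch of how this handler could be routed in a Tornado application. The URL pattern, cookie secret, login URL, and template path are placeholders, and the sketch assumes the surrounding project's `BaseHandler` and role check, which are not shown here.
```python
# Hypothetical application wiring for StatisticsHandler; all settings below
# are placeholders for illustration.
import tornado.ioloop
import tornado.web

application = tornado.web.Application(
    [(r"/admin/statistics/?", StatisticsHandler)],
    cookie_secret="change-me",
    login_url="/login",           # @authenticated redirects here when logged out
    template_path="templates",    # must contain admin/statistics.html
)

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()
```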
| {
"content_hash": "a7bdd003ff138f9eea6a2a3ade6702c4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 95,
"avg_line_length": 23.066666666666666,
"alnum_prop": 0.7543352601156069,
"repo_name": "dragondjf/CloudSetuper",
"id": "2be9965972a2c83b13b087113d11a4bbafabf179",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setuper web app/handlers/admin/statisticshandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1806"
},
{
"name": "C++",
"bytes": "3059"
},
{
"name": "CSS",
"bytes": "171046"
},
{
"name": "JavaScript",
"bytes": "801718"
},
{
"name": "Lua",
"bytes": "72652"
},
{
"name": "Objective-C",
"bytes": "342"
},
{
"name": "Python",
"bytes": "8361927"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
} |
from .http_client import HttpClient
# Assign all the api classes
from .api.result import Result
from .api.variables import Variables
from .api.experiment import Experiment
from .api.settings import Settings
from .api.users import Users
from .api.results import Results
from .api.suggest import Suggest
from .api.experiments import Experiments
from .api.setting import Setting
class Client():
def __init__(self, auth = {}, options = {}):
self.http_client = HttpClient(auth, options)
# Manipulate a result set indexed by its id
#
# id - Identifier of a result
def result(self, id):
return Result(id, self.http_client)
# Returns the variables set for a user
#
def variables(self):
return Variables(self.http_client)
# Manipulate the experiment indexed by id.
#
# id - Identifier of corresponding experiment
def experiment(self, id):
return Experiment(id, self.http_client)
# Returns the settings config for an experiment
#
def settings(self):
return Settings(self.http_client)
# Return user list
#
def users(self):
return Users(self.http_client)
# Manipulate the results set for an experiment given filters
#
def results(self):
return Results(self.http_client)
# Ask the server to propose a new set of parameters to run the next experiment
#
# exptid - Identifier of corresponding experiment
def suggest(self, exptid):
return Suggest(exptid, self.http_client)
# Returns the experiments set for a user
#
def experiments(self):
return Experiments(self.http_client)
# Manipulate an experimental settings object
#
def setting(self):
return Setting(self.http_client)
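A brief usage sketch of the client above. The auth keys, token, and ids are placeholders; the exact keys accepted depend on `HttpClient`, which is defined elsewhere in this package.
```python
# Hypothetical usage; the token and ids below are placeholders.
client = Client(auth={"access_token": "YOUR-TOKEN"}, options={})

settings = client.settings()        # experiment settings wrapper
experiments = client.experiments()  # experiments for the current user
suggestion = client.suggest(42)     # propose parameters for experiment 42
result = client.result(7)           # manipulate result 7
```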
| {
"content_hash": "be829e361001d7499918254c5ea85b57",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 24.80597014925373,
"alnum_prop": 0.7478941034897714,
"repo_name": "schevalier/Whetlab-Python-Client",
"id": "73cdb4e9f3dc313bbe1c3e1509183a1e195bd4fe",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whetlab/server/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16530"
},
{
"name": "JavaScript",
"bytes": "54587"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "128645"
}
],
"symlink_target": ""
} |
from functools import partial
import logging
import math
from numbers import Number
from operator import add
import os
from bokeh.layouts import column, row
from bokeh.models import (
ColumnDataSource,
DataRange1d,
HoverTool,
ResetTool,
PanTool,
WheelZoomTool,
TapTool,
OpenURL,
Range1d,
Plot,
Quad,
Span,
value,
LinearAxis,
NumeralTickFormatter,
BasicTicker,
NumberFormatter,
BoxSelectTool,
GroupFilter,
CDSView,
)
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.plotting import figure
from bokeh.palettes import Viridis11
from bokeh.themes import Theme
from bokeh.transform import factor_cmap
from bokeh.io import curdoc
import dask
from dask.utils import format_bytes
from toolz import pipe, merge
from tornado import escape
try:
import numpy as np
except ImportError:
np = False
from . import components
from .components import (
DashboardComponent,
ProfileTimePlot,
ProfileServer,
add_periodic_callback,
)
from .core import BokehServer
from .worker import SystemMonitor, counters_doc
from .utils import transpose, BOKEH_VERSION, without_property_validation
from ..metrics import time
from ..utils import log_errors, format_time
from ..diagnostics.progress_stream import color_of, progress_quads, nbytes_bar
from ..diagnostics.progress import AllProgress
from ..diagnostics.graph_layout import GraphLayout
from ..diagnostics.task_stream import TaskStreamPlugin
try:
from cytoolz.curried import map, concat, groupby, valmap, first
except ImportError:
from toolz.curried import map, concat, groupby, valmap, first
logger = logging.getLogger(__name__)
PROFILING = False
from jinja2 import Environment, FileSystemLoader
env = Environment(
loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates"))
)
template_variables = {
"pages": ["status", "workers", "tasks", "system", "profile", "graph", "info"]
}
BOKEH_THEME = Theme(os.path.join(os.path.dirname(__file__), "theme.yaml"))
nan = float("nan")
inf = float("inf")
@without_property_validation
def update(source, data):
""" Update source with data
    This checks a few things first:
1. If the data is the same, then don't update
2. If numpy is available and the data is numeric, then convert to numpy
arrays
3. If profiling then perform the update in another callback
"""
if not np or not any(isinstance(v, np.ndarray) for v in source.data.values()):
if source.data == data:
return
if np and len(data[first(data)]) > 10:
d = {}
for k, v in data.items():
if type(v) is not np.ndarray and isinstance(v[0], Number):
d[k] = np.array(v)
else:
d[k] = v
else:
d = data
if PROFILING:
curdoc().add_next_tick_callback(lambda: source.data.update(d))
else:
source.data.update(d)
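For reference, here is a tiny self-contained example of the helper above applied to a Bokeh `ColumnDataSource`. The column names and values are arbitrary; it only assumes bokeh and numpy are importable, as in this module.
```python
from bokeh.models import ColumnDataSource

source = ColumnDataSource({"x": [1, 2, 3], "y": [10, 20, 30]})

# Identical payload: the helper detects equality and skips the assignment.
update(source, {"x": [1, 2, 3], "y": [10, 20, 30]})

# Longer numeric columns are converted to numpy arrays before assignment.
update(source, {"x": list(range(20)), "y": list(range(20))})
print(type(source.data["x"]))  # <class 'numpy.ndarray'>
```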
class Occupancy(DashboardComponent):
""" Occupancy (in time) per worker """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"occupancy": [0, 0],
"worker": ["a", "b"],
"x": [0.0, 0.1],
"y": [1, 2],
"ms": [1, 2],
"color": ["red", "blue"],
"escaped_worker": ["a", "b"],
}
)
fig = figure(
title="Occupancy",
tools="",
id="bk-occupancy-plot",
x_axis_type="datetime",
**kwargs
)
rect = fig.rect(
source=self.source, x="x", width="ms", y="y", height=1, color="color"
)
rect.nonselection_glyph = None
fig.xaxis.minor_tick_line_alpha = 0
fig.yaxis.visible = False
fig.ygrid.visible = False
# fig.xaxis[0].formatter = NumeralTickFormatter(format='0.0s')
fig.x_range.start = 0
tap = TapTool(callback=OpenURL(url="./info/worker/@escaped_worker.html"))
hover = HoverTool()
hover.tooltips = "@worker : @occupancy s."
hover.point_policy = "follow_mouse"
fig.add_tools(hover, tap)
self.root = fig
@without_property_validation
def update(self):
with log_errors():
workers = list(self.scheduler.workers.values())
y = list(range(len(workers)))
occupancy = [ws.occupancy for ws in workers]
ms = [occ * 1000 for occ in occupancy]
x = [occ / 500 for occ in occupancy]
total = sum(occupancy)
color = []
for ws in workers:
if ws in self.scheduler.idle:
color.append("red")
elif ws in self.scheduler.saturated:
color.append("green")
else:
color.append("blue")
if total:
self.root.title.text = "Occupancy -- total time: %s wall time: %s" % (
format_time(total),
format_time(total / self.scheduler.total_nthreads),
)
else:
self.root.title.text = "Occupancy"
if occupancy:
result = {
"occupancy": occupancy,
"worker": [ws.address for ws in workers],
"ms": ms,
"color": color,
"escaped_worker": [escape.url_escape(ws.address) for ws in workers],
"x": x,
"y": y,
}
update(self.source, result)
class ProcessingHistogram(DashboardComponent):
""" How many tasks are on each worker """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{"left": [1, 2], "right": [10, 10], "top": [0, 0]}
)
self.root = figure(
title="Tasks Processing (Histogram)",
id="bk-nprocessing-histogram-plot",
name="processing_hist",
y_axis_label="frequency",
tools="",
**kwargs
)
self.root.xaxis.minor_tick_line_alpha = 0
self.root.ygrid.visible = False
self.root.toolbar.logo = None
self.root.toolbar_location = None
self.root.quad(
source=self.source,
left="left",
right="right",
bottom=0,
top="top",
color="deepskyblue",
fill_alpha=0.5,
)
@without_property_validation
def update(self):
L = [len(ws.processing) for ws in self.scheduler.workers.values()]
counts, x = np.histogram(L, bins=40)
self.source.data.update({"left": x[:-1], "right": x[1:], "top": counts})
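The update above is just `np.histogram` reshaped into quad coordinates. The standalone restatement below uses made-up task counts to show how the bin edges become the quads' left/right and the counts become their tops.
```python
import numpy as np

task_counts_per_worker = [0, 3, 5, 5, 8, 12, 2, 7]   # made-up numbers
counts, edges = np.histogram(task_counts_per_worker, bins=40)

# len(edges) == bins + 1, so pairing edges[:-1] with edges[1:] yields one quad
# per bin, matching the ColumnDataSource layout used by the histogram plots.
quad_data = {"left": edges[:-1], "right": edges[1:], "top": counts}
print(len(quad_data["left"]), len(quad_data["top"]))  # 40 40
```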
class NBytesHistogram(DashboardComponent):
""" How many tasks are on each worker """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{"left": [1, 2], "right": [10, 10], "top": [0, 0]}
)
self.root = figure(
title="Bytes Stored (Histogram)",
name="nbytes_hist",
id="bk-nbytes-histogram-plot",
y_axis_label="frequency",
tools="",
**kwargs
)
self.root.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
self.root.xaxis.major_label_orientation = -math.pi / 12
self.root.xaxis.minor_tick_line_alpha = 0
self.root.ygrid.visible = False
self.root.toolbar.logo = None
self.root.toolbar_location = None
self.root.quad(
source=self.source,
left="left",
right="right",
bottom=0,
top="top",
color="deepskyblue",
fill_alpha=0.5,
)
@without_property_validation
def update(self):
nbytes = np.asarray([ws.nbytes for ws in self.scheduler.workers.values()])
counts, x = np.histogram(nbytes, bins=40)
d = {"left": x[:-1], "right": x[1:], "top": counts}
self.source.data.update(d)
self.root.title.text = "Bytes stored (Histogram): " + format_bytes(nbytes.sum())
class CurrentLoad(DashboardComponent):
""" How many tasks are on each worker """
def __init__(self, scheduler, width=600, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"nprocessing": [1, 2],
"nprocessing-half": [0.5, 1],
"nprocessing-color": ["red", "blue"],
"nbytes": [1, 2],
"nbytes-half": [0.5, 1],
"nbytes_text": ["1B", "2B"],
"cpu": [1, 2],
"cpu-half": [0.5, 1],
"worker": ["a", "b"],
"y": [1, 2],
"nbytes-color": ["blue", "blue"],
"escaped_worker": ["a", "b"],
}
)
processing = figure(
title="Tasks Processing",
tools="",
id="bk-nprocessing-plot",
name="processing_hist",
width=int(width / 2),
**kwargs
)
rect = processing.rect(
source=self.source,
x="nprocessing-half",
y="y",
width="nprocessing",
height=1,
color="nprocessing-color",
)
processing.x_range.start = 0
rect.nonselection_glyph = None
nbytes = figure(
title="Bytes stored",
tools="",
id="bk-nbytes-worker-plot",
width=int(width / 2),
name="nbytes_hist",
**kwargs
)
rect = nbytes.rect(
source=self.source,
x="nbytes-half",
y="y",
width="nbytes",
height=1,
color="nbytes-color",
)
rect.nonselection_glyph = None
cpu = figure(
title="CPU Utilization",
tools="",
id="bk-cpu-worker-plot",
width=int(width / 2),
name="cpu_hist",
**kwargs
)
rect = cpu.rect(
source=self.source,
x="cpu-half",
y="y",
width="cpu",
height=1,
color="blue",
)
rect.nonselection_glyph = None
hundred_span = Span(
location=100,
dimension="height",
line_color="gray",
line_dash="dashed",
line_width=3,
)
cpu.add_layout(hundred_span)
nbytes.axis[0].ticker = BasicTicker(mantissas=[1, 256, 512], base=1024)
nbytes.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
nbytes.xaxis.major_label_orientation = -math.pi / 12
nbytes.x_range.start = 0
for fig in [processing, nbytes]:
fig.xaxis.minor_tick_line_alpha = 0
fig.yaxis.visible = False
fig.ygrid.visible = False
tap = TapTool(
callback=OpenURL(url="./info/worker/@escaped_worker.html")
)
fig.add_tools(tap)
fig.toolbar.logo = None
fig.toolbar_location = None
fig.yaxis.visible = False
hover = HoverTool()
hover.tooltips = "@worker : @nprocessing tasks"
hover.point_policy = "follow_mouse"
processing.add_tools(hover)
hover = HoverTool()
hover.tooltips = "@worker : @nbytes_text"
hover.point_policy = "follow_mouse"
nbytes.add_tools(hover)
hover = HoverTool()
hover.tooltips = "@worker : @cpu %"
hover.point_policy = "follow_mouse"
cpu.add_tools(hover)
self.processing_figure = processing
self.nbytes_figure = nbytes
self.cpu_figure = cpu
processing.y_range = nbytes.y_range
cpu.y_range = nbytes.y_range
@without_property_validation
def update(self):
with log_errors():
workers = list(self.scheduler.workers.values())
y = list(range(len(workers)))
cpu = [int(ws.metrics["cpu"]) for ws in workers]
nprocessing = [len(ws.processing) for ws in workers]
processing_color = []
for ws in workers:
if ws in self.scheduler.idle:
processing_color.append("red")
elif ws in self.scheduler.saturated:
processing_color.append("green")
else:
processing_color.append("blue")
nbytes = [ws.metrics["memory"] for ws in workers]
nbytes_text = [format_bytes(nb) for nb in nbytes]
nbytes_color = []
max_limit = 0
for ws, nb in zip(workers, nbytes):
limit = (
getattr(self.scheduler.workers[ws.address], "memory_limit", inf)
or inf
)
if limit > max_limit:
max_limit = limit
if nb > limit:
nbytes_color.append("red")
elif nb > limit / 2:
nbytes_color.append("orange")
else:
nbytes_color.append("blue")
now = time()
if any(nprocessing) or self.last + 1 < now:
self.last = now
result = {
"cpu": cpu,
"cpu-half": [c / 2 for c in cpu],
"nprocessing": nprocessing,
"nprocessing-half": [np / 2 for np in nprocessing],
"nprocessing-color": processing_color,
"nbytes": nbytes,
"nbytes-half": [nb / 2 for nb in nbytes],
"nbytes-color": nbytes_color,
"nbytes_text": nbytes_text,
"worker": [ws.address for ws in workers],
"escaped_worker": [escape.url_escape(ws.address) for ws in workers],
"y": y,
}
self.nbytes_figure.title.text = "Bytes stored: " + format_bytes(
sum(nbytes)
)
update(self.source, result)
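The per-worker memory colouring used above follows a simple threshold rule: red when stored bytes exceed the worker's memory limit, orange when they exceed half of it, and blue otherwise (including when no limit is configured). A standalone restatement, with a helper name that is ours rather than part of the dashboard code:
```python
def memory_color(nbytes, limit):
    """Colour rule for the 'Bytes stored' bars (function name is illustrative)."""
    limit = limit or float("inf")   # a missing/zero limit never triggers a warning colour
    if nbytes > limit:
        return "red"
    elif nbytes > limit / 2:
        return "orange"
    return "blue"


assert memory_color(5e9, 4e9) == "red"
assert memory_color(3e9, 4e9) == "orange"
assert memory_color(1e9, 0) == "blue"
```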
class StealingTimeSeries(DashboardComponent):
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.source = ColumnDataSource(
{"time": [time(), time() + 1], "idle": [0, 0.1], "saturated": [0, 0.1]}
)
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
fig = figure(
title="Idle and Saturated Workers Over Time",
x_axis_type="datetime",
y_range=[-0.1, len(scheduler.workers) + 0.1],
height=150,
tools="",
x_range=x_range,
**kwargs
)
fig.line(source=self.source, x="time", y="idle", color="red")
fig.line(source=self.source, x="time", y="saturated", color="green")
fig.yaxis.minor_tick_line_color = None
fig.add_tools(
ResetTool(), PanTool(dimensions="width"), WheelZoomTool(dimensions="width")
)
self.root = fig
@without_property_validation
def update(self):
with log_errors():
result = {
"time": [time() * 1000],
"idle": [len(self.scheduler.idle)],
"saturated": [len(self.scheduler.saturated)],
}
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(result, 10000)
)
else:
self.source.stream(result, 10000)
class StealingEvents(DashboardComponent):
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.steal = scheduler.extensions["stealing"]
self.last = 0
self.source = ColumnDataSource(
{
"time": [time() - 20, time()],
"level": [0, 15],
"color": ["white", "white"],
"duration": [0, 0],
"radius": [1, 1],
"cost_factor": [0, 10],
"count": [1, 1],
}
)
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
fig = figure(
title="Stealing Events",
x_axis_type="datetime",
y_axis_type="log",
height=250,
tools="",
x_range=x_range,
**kwargs
)
fig.circle(
source=self.source,
x="time",
y="cost_factor",
color="color",
size="radius",
alpha=0.5,
)
fig.yaxis.axis_label = "Cost Multiplier"
hover = HoverTool()
hover.tooltips = "Level: @level, Duration: @duration, Count: @count, Cost factor: @cost_factor"
hover.point_policy = "follow_mouse"
fig.add_tools(
hover,
ResetTool(),
PanTool(dimensions="width"),
WheelZoomTool(dimensions="width"),
)
self.root = fig
def convert(self, msgs):
""" Convert a log message to a glyph """
total_duration = 0
for msg in msgs:
time, level, key, duration, sat, occ_sat, idl, occ_idl = msg
total_duration += duration
try:
color = Viridis11[level]
except (KeyError, IndexError):
color = "black"
radius = math.sqrt(min(total_duration, 10)) * 30 + 2
d = {
"time": time * 1000,
"level": level,
"count": len(msgs),
"color": color,
"duration": total_duration,
"radius": radius,
"cost_factor": min(10, self.steal.cost_multipliers[level]),
}
return d
@without_property_validation
def update(self):
with log_errors():
log = self.steal.log
n = self.steal.count - self.last
log = [log[-i] for i in range(1, n + 1) if isinstance(log[-i], list)]
self.last = self.steal.count
if log:
new = pipe(
log,
map(groupby(1)),
map(dict.values),
concat,
map(self.convert),
list,
transpose,
)
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(new, 10000)
)
else:
self.source.stream(new, 10000)
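The `pipe` expression above groups each batch of stealing log entries by their second field (the stealing level) before converting every group into a glyph. The toy example below shows just that grouping step with made-up tuples, far simpler than the real eight-element log records.
```python
from toolz import pipe
from toolz.curried import groupby, map

batches = [
    [("t0", 1), ("t1", 1), ("t2", 3)],   # one batch of (key, level) pairs
    [("t3", 2)],
]

# Group every batch by index 1 (the "level" field).
grouped = pipe(batches, map(groupby(1)), list)
print(grouped)
# [{1: [('t0', 1), ('t1', 1)], 3: [('t2', 3)]}, {2: [('t3', 2)]}]
```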
class Events(DashboardComponent):
def __init__(self, scheduler, name, height=150, **kwargs):
self.scheduler = scheduler
self.action_ys = dict()
self.last = 0
self.name = name
self.source = ColumnDataSource(
{"time": [], "action": [], "hover": [], "y": [], "color": []}
)
x_range = DataRange1d(follow="end", follow_interval=200000)
fig = figure(
title=name,
x_axis_type="datetime",
height=height,
tools="",
x_range=x_range,
**kwargs
)
fig.circle(
source=self.source,
x="time",
y="y",
color="color",
size=50,
alpha=0.5,
legend="action",
)
fig.yaxis.axis_label = "Action"
fig.legend.location = "top_left"
hover = HoverTool()
hover.tooltips = "@action<br>@hover"
hover.point_policy = "follow_mouse"
fig.add_tools(
hover,
ResetTool(),
PanTool(dimensions="width"),
WheelZoomTool(dimensions="width"),
)
self.root = fig
@without_property_validation
def update(self):
with log_errors():
log = self.scheduler.events[self.name]
n = self.scheduler.event_counts[self.name] - self.last
if log:
log = [log[-i] for i in range(1, n + 1)]
self.last = self.scheduler.event_counts[self.name]
if log:
actions = []
times = []
hovers = []
ys = []
colors = []
for msg in log:
times.append(msg["time"] * 1000)
action = msg["action"]
actions.append(action)
try:
ys.append(self.action_ys[action])
except KeyError:
self.action_ys[action] = len(self.action_ys)
ys.append(self.action_ys[action])
colors.append(color_of(action))
hovers.append("TODO")
new = {
"time": times,
"action": actions,
"hover": hovers,
"y": ys,
"color": colors,
}
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(new, 10000)
)
else:
self.source.stream(new, 10000)
class TaskStream(components.TaskStream):
def __init__(self, scheduler, n_rectangles=1000, clear_interval="20s", **kwargs):
self.scheduler = scheduler
self.offset = 0
es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]
if not es:
self.plugin = TaskStreamPlugin(self.scheduler)
else:
self.plugin = es[0]
self.index = max(0, self.plugin.index - n_rectangles)
self.workers = dict()
components.TaskStream.__init__(
self, n_rectangles=n_rectangles, clear_interval=clear_interval, **kwargs
)
@without_property_validation
def update(self):
if self.index == self.plugin.index:
return
with log_errors():
if self.index and len(self.source.data["start"]):
start = min(self.source.data["start"])
duration = max(self.source.data["duration"])
boundary = (self.offset + start - duration) / 1000
else:
boundary = self.offset
rectangles = self.plugin.rectangles(
istart=self.index, workers=self.workers, start_boundary=boundary
)
n = len(rectangles["name"])
self.index = self.plugin.index
if not rectangles["start"]:
return
# If there has been a significant delay then clear old rectangles
first_end = min(map(add, rectangles["start"], rectangles["duration"]))
if first_end > self.last:
last = self.last
self.last = first_end
if first_end > last + self.clear_interval * 1000:
self.offset = min(rectangles["start"])
self.source.data.update({k: [] for k in rectangles})
rectangles["start"] = [x - self.offset for x in rectangles["start"]]
# Convert to numpy for serialization speed
if n >= 10 and np:
for k, v in rectangles.items():
if isinstance(v[0], Number):
rectangles[k] = np.array(v)
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(rectangles, self.n_rectangles)
)
else:
self.source.stream(rectangles, self.n_rectangles)
class GraphPlot(DashboardComponent):
"""
A dynamic node-link diagram for the task graph on the scheduler
See also the GraphLayout diagnostic at
distributed/diagnostics/graph_layout.py
"""
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.layout = GraphLayout(scheduler)
self.invisible_count = 0 # number of invisible nodes
self.node_source = ColumnDataSource(
{"x": [], "y": [], "name": [], "state": [], "visible": [], "key": []}
)
self.edge_source = ColumnDataSource({"x": [], "y": [], "visible": []})
node_view = CDSView(
source=self.node_source,
filters=[GroupFilter(column_name="visible", group="True")],
)
edge_view = CDSView(
source=self.edge_source,
filters=[GroupFilter(column_name="visible", group="True")],
)
node_colors = factor_cmap(
"state",
factors=["waiting", "processing", "memory", "released", "erred"],
palette=["gray", "green", "red", "blue", "black"],
)
self.root = figure(title="Task Graph", **kwargs)
self.root.multi_line(
xs="x",
ys="y",
source=self.edge_source,
line_width=1,
view=edge_view,
color="black",
alpha=0.3,
)
rect = self.root.square(
x="x",
y="y",
size=10,
color=node_colors,
source=self.node_source,
view=node_view,
legend="state",
)
self.root.xgrid.grid_line_color = None
self.root.ygrid.grid_line_color = None
hover = HoverTool(
point_policy="follow_mouse",
tooltips="<b>@name</b>: @state",
renderers=[rect],
)
tap = TapTool(callback=OpenURL(url="info/task/@key.html"), renderers=[rect])
rect.nonselection_glyph = None
self.root.add_tools(hover, tap)
@without_property_validation
def update(self):
with log_errors():
# occasionally reset the column data source to remove old nodes
if self.invisible_count > len(self.node_source.data["x"]) / 2:
self.layout.reset_index()
self.invisible_count = 0
update = True
else:
update = False
new, self.layout.new = self.layout.new, []
new_edges = self.layout.new_edges
self.layout.new_edges = []
self.add_new_nodes_edges(new, new_edges, update=update)
self.patch_updates()
@without_property_validation
def add_new_nodes_edges(self, new, new_edges, update=False):
if new or update:
node_key = []
node_x = []
node_y = []
node_state = []
node_name = []
edge_x = []
edge_y = []
x = self.layout.x
y = self.layout.y
tasks = self.scheduler.tasks
for key in new:
try:
task = tasks[key]
except KeyError:
continue
xx = x[key]
yy = y[key]
node_key.append(escape.url_escape(key))
node_x.append(xx)
node_y.append(yy)
node_state.append(task.state)
node_name.append(task.prefix)
for a, b in new_edges:
try:
edge_x.append([x[a], x[b]])
edge_y.append([y[a], y[b]])
except KeyError:
pass
node = {
"x": node_x,
"y": node_y,
"state": node_state,
"name": node_name,
"key": node_key,
"visible": ["True"] * len(node_x),
}
edge = {"x": edge_x, "y": edge_y, "visible": ["True"] * len(edge_x)}
if update or not len(self.node_source.data["x"]):
# see https://github.com/bokeh/bokeh/issues/7523
self.node_source.data.update(node)
self.edge_source.data.update(edge)
else:
self.node_source.stream(node)
self.edge_source.stream(edge)
@without_property_validation
def patch_updates(self):
"""
Small updates like color changes or lost nodes from task transitions
"""
n = len(self.node_source.data["x"])
m = len(self.edge_source.data["x"])
if self.layout.state_updates:
state_updates = self.layout.state_updates
self.layout.state_updates = []
updates = [(i, c) for i, c in state_updates if i < n]
self.node_source.patch({"state": updates})
if self.layout.visible_updates:
updates = self.layout.visible_updates
updates = [(i, c) for i, c in updates if i < n]
            self.layout.visible_updates = []
self.node_source.patch({"visible": updates})
self.invisible_count += len(updates)
if self.layout.visible_edge_updates:
updates = self.layout.visible_edge_updates
updates = [(i, c) for i, c in updates if i < m]
            self.layout.visible_edge_updates = []
self.edge_source.patch({"visible": updates})
def __del__(self):
self.scheduler.remove_plugin(self.layout)
class TaskProgress(DashboardComponent):
""" Progress bars per task type """
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
ps = [p for p in scheduler.plugins if isinstance(p, AllProgress)]
if ps:
self.plugin = ps[0]
else:
self.plugin = AllProgress(scheduler)
data = progress_quads(
dict(all={}, memory={}, erred={}, released={}, processing={})
)
self.source = ColumnDataSource(data=data)
x_range = DataRange1d(range_padding=0)
y_range = Range1d(-8, 0)
self.root = figure(
id="bk-task-progress-plot",
title="Progress",
name="task_progress",
x_range=x_range,
y_range=y_range,
toolbar_location=None,
tools="",
**kwargs
)
self.root.line( # just to define early ranges
x=[0, 0.9], y=[-1, 0], line_color="#FFFFFF", alpha=0.0
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="left",
right="right",
fill_color="#aaaaaa",
line_color="#aaaaaa",
fill_alpha=0.1,
line_alpha=0.3,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="left",
right="released-loc",
fill_color="color",
line_color="color",
fill_alpha=0.6,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="released-loc",
right="memory-loc",
fill_color="color",
line_color="color",
fill_alpha=1.0,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="memory-loc",
right="erred-loc",
fill_color="black",
fill_alpha=0.5,
line_alpha=0,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="erred-loc",
right="processing-loc",
fill_color="gray",
fill_alpha=0.35,
line_alpha=0,
)
self.root.text(
source=self.source,
text="show-name",
y="bottom",
x="left",
x_offset=5,
text_font_size=value("10pt"),
)
self.root.text(
source=self.source,
text="done",
y="bottom",
x="right",
x_offset=-5,
text_align="right",
text_font_size=value("10pt"),
)
self.root.ygrid.visible = False
self.root.yaxis.minor_tick_line_alpha = 0
self.root.yaxis.visible = False
self.root.xgrid.visible = False
self.root.xaxis.minor_tick_line_alpha = 0
self.root.xaxis.visible = False
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 14px; font-weight: bold;">Name:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@name</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">All:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@all</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Memory:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@memory</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Erred:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@erred</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Ready:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@processing</span>
</div>
""",
)
self.root.add_tools(hover)
@without_property_validation
def update(self):
with log_errors():
state = {"all": valmap(len, self.plugin.all), "nbytes": self.plugin.nbytes}
for k in ["memory", "erred", "released", "processing", "waiting"]:
state[k] = valmap(len, self.plugin.state[k])
if not state["all"] and not len(self.source.data["all"]):
return
d = progress_quads(state)
update(self.source, d)
totals = {
k: sum(state[k].values())
for k in ["all", "memory", "erred", "released", "waiting"]
}
totals["processing"] = totals["all"] - sum(
v for k, v in totals.items() if k != "all"
)
self.root.title.text = (
"Progress -- total: %(all)s, "
"in-memory: %(memory)s, processing: %(processing)s, "
"waiting: %(waiting)s, "
"erred: %(erred)s" % totals
)
class MemoryUse(DashboardComponent):
""" The memory usage across the cluster, grouped by task type """
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
ps = [p for p in scheduler.plugins if isinstance(p, AllProgress)]
if ps:
self.plugin = ps[0]
else:
self.plugin = AllProgress(scheduler)
self.source = ColumnDataSource(
data=dict(
name=[],
left=[],
right=[],
center=[],
color=[],
percent=[],
MB=[],
text=[],
)
)
self.root = Plot(
id="bk-nbytes-plot",
x_range=DataRange1d(),
y_range=DataRange1d(),
toolbar_location=None,
outline_line_color=None,
**kwargs
)
self.root.add_glyph(
self.source,
Quad(
top=1,
bottom=0,
left="left",
right="right",
fill_color="color",
fill_alpha=1,
),
)
self.root.add_layout(LinearAxis(), "left")
self.root.add_layout(LinearAxis(), "below")
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 14px; font-weight: bold;">Name:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@name</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Percent:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@percent</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">MB:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@MB</span>
</div>
""",
)
self.root.add_tools(hover)
@without_property_validation
def update(self):
with log_errors():
nb = nbytes_bar(self.plugin.nbytes)
update(self.source, nb)
self.root.title.text = "Memory Use: %0.2f MB" % (
sum(self.plugin.nbytes.values()) / 1e6
)
class WorkerTable(DashboardComponent):
""" Status of the current workers
    This combines a text-based table of per-worker metrics with thin horizontal
    plots laying out workers by their current memory and CPU use.
"""
excluded_names = {"executing", "in_flight", "in_memory", "ready", "time"}
def __init__(self, scheduler, width=800, **kwargs):
self.scheduler = scheduler
self.names = [
"name",
"address",
"nthreads",
"cpu",
"memory",
"memory_limit",
"memory_percent",
"num_fds",
"read_bytes",
"write_bytes",
"cpu_fraction",
]
workers = self.scheduler.workers.values()
self.extra_names = sorted(
{m for ws in workers for m in ws.metrics if m not in self.names}
- self.excluded_names
)
table_names = [
"name",
"address",
"nthreads",
"cpu",
"memory",
"memory_limit",
"memory_percent",
"num_fds",
"read_bytes",
"write_bytes",
]
self.source = ColumnDataSource({k: [] for k in self.names})
columns = {
name: TableColumn(field=name, title=name.replace("_percent", " %"))
for name in table_names
}
formatters = {
"cpu": NumberFormatter(format="0.0 %"),
"memory_percent": NumberFormatter(format="0.0 %"),
"memory": NumberFormatter(format="0 b"),
"memory_limit": NumberFormatter(format="0 b"),
"read_bytes": NumberFormatter(format="0 b"),
"write_bytes": NumberFormatter(format="0 b"),
"num_fds": NumberFormatter(format="0"),
"nthreads": NumberFormatter(format="0"),
}
if BOKEH_VERSION < "0.12.15":
dt_kwargs = {"row_headers": False}
else:
dt_kwargs = {"index_position": None}
table = DataTable(
source=self.source,
columns=[columns[n] for n in table_names],
reorderable=True,
sortable=True,
width=width,
**dt_kwargs
)
for name in table_names:
if name in formatters:
table.columns[table_names.index(name)].formatter = formatters[name]
extra_names = ["name", "address"] + self.extra_names
extra_columns = {
name: TableColumn(field=name, title=name.replace("_percent", "%"))
for name in extra_names
}
extra_table = DataTable(
source=self.source,
columns=[extra_columns[n] for n in extra_names],
reorderable=True,
sortable=True,
width=width,
**dt_kwargs
)
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 10px; font-family: Monaco, monospace;">@worker: </span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@memory_percent</span>
</div>
""",
)
mem_plot = figure(
title="Memory Use (%)",
toolbar_location=None,
x_range=(0, 1),
y_range=(-0.1, 0.1),
height=60,
width=width,
tools="",
**kwargs
)
mem_plot.circle(
source=self.source, x="memory_percent", y=0, size=10, fill_alpha=0.5
)
mem_plot.ygrid.visible = False
mem_plot.yaxis.minor_tick_line_alpha = 0
mem_plot.xaxis.visible = False
mem_plot.yaxis.visible = False
mem_plot.add_tools(hover, BoxSelectTool())
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 10px; font-family: Monaco, monospace;">@worker: </span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@cpu</span>
</div>
""",
)
cpu_plot = figure(
title="CPU Use (%)",
toolbar_location=None,
x_range=(0, 1),
y_range=(-0.1, 0.1),
height=60,
width=width,
tools="",
**kwargs
)
cpu_plot.circle(
source=self.source, x="cpu_fraction", y=0, size=10, fill_alpha=0.5
)
cpu_plot.ygrid.visible = False
cpu_plot.yaxis.minor_tick_line_alpha = 0
cpu_plot.xaxis.visible = False
cpu_plot.yaxis.visible = False
cpu_plot.add_tools(hover, BoxSelectTool())
self.cpu_plot = cpu_plot
if "sizing_mode" in kwargs:
sizing_mode = {"sizing_mode": kwargs["sizing_mode"]}
else:
sizing_mode = {}
components = [cpu_plot, mem_plot, table]
if self.extra_names:
components.append(extra_table)
self.root = column(*components, id="bk-worker-table", **sizing_mode)
@without_property_validation
def update(self):
data = {name: [] for name in self.names + self.extra_names}
for i, (addr, ws) in enumerate(
sorted(self.scheduler.workers.items(), key=lambda kv: kv[1].name)
):
for name in self.names + self.extra_names:
data[name].append(ws.metrics.get(name, None))
data["name"][-1] = ws.name if ws.name is not None else i
data["address"][-1] = ws.address
if ws.memory_limit:
data["memory_percent"][-1] = ws.metrics["memory"] / ws.memory_limit
else:
data["memory_percent"][-1] = ""
data["memory_limit"][-1] = ws.memory_limit
data["cpu"][-1] = ws.metrics["cpu"] / 100.0
data["cpu_fraction"][-1] = ws.metrics["cpu"] / 100.0 / ws.nthreads
data["nthreads"][-1] = ws.nthreads
self.source.data.update(data)
def systemmonitor_doc(scheduler, extra, doc):
with log_errors():
sysmon = SystemMonitor(scheduler, sizing_mode="stretch_both")
doc.title = "Dask: Scheduler System Monitor"
add_periodic_callback(doc, sysmon, 500)
for subdoc in sysmon.root.children:
doc.add_root(subdoc)
doc.template = env.get_template("system.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def stealing_doc(scheduler, extra, doc):
with log_errors():
occupancy = Occupancy(scheduler, height=200, sizing_mode="scale_width")
stealing_ts = StealingTimeSeries(scheduler, sizing_mode="scale_width")
stealing_events = StealingEvents(scheduler, sizing_mode="scale_width")
stealing_events.root.x_range = stealing_ts.root.x_range
doc.title = "Dask: Work Stealing"
add_periodic_callback(doc, occupancy, 500)
add_periodic_callback(doc, stealing_ts, 500)
add_periodic_callback(doc, stealing_events, 500)
doc.add_root(
column(
occupancy.root,
stealing_ts.root,
stealing_events.root,
sizing_mode="scale_width",
)
)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def events_doc(scheduler, extra, doc):
with log_errors():
events = Events(scheduler, "all", height=250)
events.update()
add_periodic_callback(doc, events, 500)
doc.title = "Dask: Scheduler Events"
doc.add_root(column(events.root, sizing_mode="scale_width"))
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def workers_doc(scheduler, extra, doc):
with log_errors():
table = WorkerTable(scheduler)
table.update()
add_periodic_callback(doc, table, 500)
doc.title = "Dask: Workers"
doc.add_root(table.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def tasks_doc(scheduler, extra, doc):
with log_errors():
ts = TaskStream(
scheduler,
n_rectangles=dask.config.get(
"distributed.scheduler.dashboard.tasks.task-stream-length"
),
clear_interval="60s",
sizing_mode="stretch_both",
)
ts.update()
add_periodic_callback(doc, ts, 5000)
doc.title = "Dask: Task Stream"
doc.add_root(ts.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def graph_doc(scheduler, extra, doc):
with log_errors():
graph = GraphPlot(scheduler, sizing_mode="stretch_both")
doc.title = "Dask: Task Graph"
graph.update()
add_periodic_callback(doc, graph, 200)
doc.add_root(graph.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def status_doc(scheduler, extra, doc):
with log_errors():
task_stream = TaskStream(
scheduler,
n_rectangles=dask.config.get(
"distributed.scheduler.dashboard.status.task-stream-length"
),
clear_interval="10s",
sizing_mode="stretch_both",
)
task_stream.update()
add_periodic_callback(doc, task_stream, 100)
task_progress = TaskProgress(scheduler, sizing_mode="stretch_both")
task_progress.update()
add_periodic_callback(doc, task_progress, 100)
if len(scheduler.workers) < 50:
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.nbytes_figure)
doc.add_root(current_load.processing_figure)
else:
nbytes_hist = NBytesHistogram(scheduler, sizing_mode="stretch_both")
nbytes_hist.update()
processing_hist = ProcessingHistogram(scheduler, sizing_mode="stretch_both")
processing_hist.update()
add_periodic_callback(doc, nbytes_hist, 100)
add_periodic_callback(doc, processing_hist, 100)
current_load_fig = row(
nbytes_hist.root, processing_hist.root, sizing_mode="stretch_both"
)
doc.add_root(nbytes_hist.root)
doc.add_root(processing_hist.root)
doc.title = "Dask: Status"
doc.add_root(task_progress.root)
doc.add_root(task_stream.root)
doc.theme = BOKEH_THEME
doc.template = env.get_template("status.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def individual_task_stream_doc(scheduler, extra, doc):
task_stream = TaskStream(
scheduler, n_rectangles=1000, clear_interval="10s", sizing_mode="stretch_both"
)
task_stream.update()
add_periodic_callback(doc, task_stream, 100)
doc.add_root(task_stream.root)
doc.theme = BOKEH_THEME
def individual_nbytes_doc(scheduler, extra, doc):
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.nbytes_figure)
doc.theme = BOKEH_THEME
def individual_cpu_doc(scheduler, extra, doc):
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.cpu_figure)
doc.theme = BOKEH_THEME
def individual_nprocessing_doc(scheduler, extra, doc):
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.processing_figure)
doc.theme = BOKEH_THEME
def individual_progress_doc(scheduler, extra, doc):
task_progress = TaskProgress(scheduler, height=160, sizing_mode="stretch_both")
task_progress.update()
add_periodic_callback(doc, task_progress, 100)
doc.add_root(task_progress.root)
doc.theme = BOKEH_THEME
def individual_graph_doc(scheduler, extra, doc):
with log_errors():
graph = GraphPlot(scheduler, sizing_mode="stretch_both")
graph.update()
add_periodic_callback(doc, graph, 200)
doc.add_root(graph.root)
doc.theme = BOKEH_THEME
def individual_profile_doc(scheduler, extra, doc):
with log_errors():
prof = ProfileTimePlot(scheduler, sizing_mode="scale_width", doc=doc)
doc.add_root(prof.root)
prof.trigger_update()
doc.theme = BOKEH_THEME
def individual_profile_server_doc(scheduler, extra, doc):
with log_errors():
prof = ProfileServer(scheduler, sizing_mode="scale_width", doc=doc)
doc.add_root(prof.root)
prof.trigger_update()
doc.theme = BOKEH_THEME
def individual_workers_doc(scheduler, extra, doc):
with log_errors():
table = WorkerTable(scheduler)
table.update()
add_periodic_callback(doc, table, 500)
doc.add_root(table.root)
doc.theme = BOKEH_THEME
def profile_doc(scheduler, extra, doc):
with log_errors():
doc.title = "Dask: Profile"
prof = ProfileTimePlot(scheduler, sizing_mode="scale_width", doc=doc)
doc.add_root(prof.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
prof.trigger_update()
def profile_server_doc(scheduler, extra, doc):
with log_errors():
doc.title = "Dask: Profile of Event Loop"
prof = ProfileServer(scheduler, sizing_mode="scale_width", doc=doc)
doc.add_root(prof.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
prof.trigger_update()
class BokehScheduler(BokehServer):
def __init__(self, scheduler, io_loop=None, prefix="", **kwargs):
self.scheduler = scheduler
prefix = prefix or ""
prefix = prefix.rstrip("/")
if prefix and not prefix.startswith("/"):
prefix = "/" + prefix
self.prefix = prefix
self.server_kwargs = kwargs
# TLS configuration
http_server_kwargs = kwargs.setdefault("http_server_kwargs", {})
tls_key = dask.config.get("distributed.scheduler.dashboard.tls.key")
tls_cert = dask.config.get("distributed.scheduler.dashboard.tls.cert")
tls_ca_file = dask.config.get("distributed.scheduler.dashboard.tls.ca-file")
if tls_cert and "ssl_options" not in http_server_kwargs:
import ssl
ctx = ssl.create_default_context(
cafile=tls_ca_file, purpose=ssl.Purpose.SERVER_AUTH
)
ctx.load_cert_chain(tls_cert, keyfile=tls_key)
# Unlike the client/scheduler/worker TLS handling, we don't care
            # about authenticating the user's web client; TLS here is just for
# encryption. Disable these checks.
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
http_server_kwargs["ssl_options"] = ctx
self.server_kwargs["prefix"] = prefix or None
self.apps = {
"/system": systemmonitor_doc,
"/stealing": stealing_doc,
"/workers": workers_doc,
"/events": events_doc,
"/counters": counters_doc,
"/tasks": tasks_doc,
"/status": status_doc,
"/profile": profile_doc,
"/profile-server": profile_server_doc,
"/graph": graph_doc,
"/individual-task-stream": individual_task_stream_doc,
"/individual-progress": individual_progress_doc,
"/individual-graph": individual_graph_doc,
"/individual-profile": individual_profile_doc,
"/individual-profile-server": individual_profile_server_doc,
"/individual-nbytes": individual_nbytes_doc,
"/individual-cpu": individual_cpu_doc,
"/individual-nprocessing": individual_nprocessing_doc,
"/individual-workers": individual_workers_doc,
}
self.apps = {k: partial(v, scheduler, self.extra) for k, v in self.apps.items()}
self.loop = io_loop or scheduler.loop
self.server = None
@property
def extra(self):
return merge({"prefix": self.prefix}, template_variables)
@property
def my_server(self):
return self.scheduler
def listen(self, *args, **kwargs):
super(BokehScheduler, self).listen(*args, **kwargs)
from .scheduler_html import routes
handlers = [
(
self.prefix + "/" + url,
cls,
{"server": self.my_server, "extra": self.extra},
)
for url, cls in routes
]
self.server._tornado.add_handlers(r".*", handlers)
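For clarity, the prefix handling in `BokehScheduler.__init__` reduces to a small normalisation step: drop a trailing slash and ensure a single leading one. The function below restates it in isolation; the name is ours, for illustration only.
```python
def normalize_prefix(prefix):
    prefix = prefix or ""
    prefix = prefix.rstrip("/")
    if prefix and not prefix.startswith("/"):
        prefix = "/" + prefix
    return prefix


assert normalize_prefix("") == ""
assert normalize_prefix("status/") == "/status"
assert normalize_prefix("/dask/dashboard/") == "/dask/dashboard"
```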
"""
Test running processes.
"""
import gzip
import os
import sys
import signal
import StringIO
import errno
import gc
import stat
import operator
try:
import fcntl
except ImportError:
fcntl = process = None
else:
from twisted.internet import process
from zope.interface.verify import verifyObject
from twisted.python.log import msg
from twisted.internet import reactor, protocol, error, interfaces, defer
from twisted.trial import unittest
from twisted.python import util, runtime, procutils
class StubProcessProtocol(protocol.ProcessProtocol):
"""
ProcessProtocol counter-implementation: all methods on this class raise an
exception, so instances of this may be used to verify that only certain
methods are called.
"""
def outReceived(self, data):
raise NotImplementedError()
def errReceived(self, data):
raise NotImplementedError()
def inConnectionLost(self):
raise NotImplementedError()
def outConnectionLost(self):
raise NotImplementedError()
def errConnectionLost(self):
raise NotImplementedError()
class ProcessProtocolTests(unittest.TestCase):
"""
Tests for behavior provided by the process protocol base class,
L{protocol.ProcessProtocol}.
"""
def test_interface(self):
"""
L{ProcessProtocol} implements L{IProcessProtocol}.
"""
verifyObject(interfaces.IProcessProtocol, protocol.ProcessProtocol())
def test_outReceived(self):
"""
Verify that when stdout is delivered to
L{ProcessProtocol.childDataReceived}, it is forwarded to
L{ProcessProtocol.outReceived}.
"""
received = []
class OutProtocol(StubProcessProtocol):
def outReceived(self, data):
received.append(data)
bytes = "bytes"
p = OutProtocol()
p.childDataReceived(1, bytes)
self.assertEqual(received, [bytes])
def test_errReceived(self):
"""
Similar to L{test_outReceived}, but for stderr.
"""
received = []
class ErrProtocol(StubProcessProtocol):
def errReceived(self, data):
received.append(data)
bytes = "bytes"
p = ErrProtocol()
p.childDataReceived(2, bytes)
self.assertEqual(received, [bytes])
def test_inConnectionLost(self):
"""
Verify that when stdin close notification is delivered to
L{ProcessProtocol.childConnectionLost}, it is forwarded to
L{ProcessProtocol.inConnectionLost}.
"""
lost = []
class InLostProtocol(StubProcessProtocol):
def inConnectionLost(self):
lost.append(None)
p = InLostProtocol()
p.childConnectionLost(0)
self.assertEqual(lost, [None])
def test_outConnectionLost(self):
"""
Similar to L{test_inConnectionLost}, but for stdout.
"""
lost = []
class OutLostProtocol(StubProcessProtocol):
def outConnectionLost(self):
lost.append(None)
p = OutLostProtocol()
p.childConnectionLost(1)
self.assertEqual(lost, [None])
def test_errConnectionLost(self):
"""
Similar to L{test_inConnectionLost}, but for stderr.
"""
lost = []
class ErrLostProtocol(StubProcessProtocol):
def errConnectionLost(self):
lost.append(None)
p = ErrLostProtocol()
p.childConnectionLost(2)
self.assertEqual(lost, [None])
class TrivialProcessProtocol(protocol.ProcessProtocol):
"""
    Simple process protocol for test purposes.
    @ivar outData: data received from the child's stdout
    @ivar errData: data received from the child's stderr
"""
def __init__(self, d):
"""
Create the deferred that will be fired at the end, and initialize
data structures.
"""
self.deferred = d
self.outData = []
self.errData = []
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
def outReceived(self, data):
self.outData.append(data)
def errReceived(self, data):
self.errData.append(data)
class TestProcessProtocol(protocol.ProcessProtocol):
def connectionMade(self):
self.stages = [1]
self.data = ''
self.err = ''
self.transport.write("abcd")
def childDataReceived(self, childFD, data):
"""
Override and disable the dispatch provided by the base class to ensure
that it is really this method which is being called, and the transport
is not going directly to L{outReceived} or L{errReceived}.
"""
if childFD == 1:
self.data += data
elif childFD == 2:
self.err += data
def childConnectionLost(self, childFD):
"""
Similarly to L{childDataReceived}, disable the automatic dispatch
provided by the base implementation to verify that the transport is
calling this method directly.
"""
if childFD == 1:
self.stages.append(2)
if self.data != "abcd":
raise RuntimeError(
"Data was %r instead of 'abcd'" % (self.data,))
self.transport.write("1234")
elif childFD == 2:
self.stages.append(3)
if self.err != "1234":
raise RuntimeError(
"Err was %r instead of '1234'" % (self.err,))
self.transport.write("abcd")
self.stages.append(4)
elif childFD == 0:
self.stages.append(5)
def processEnded(self, reason):
self.reason = reason
self.deferred.callback(None)
class EchoProtocol(protocol.ProcessProtocol):
s = "1234567" * 1001
n = 10
finished = 0
failure = None
def __init__(self, onEnded):
self.onEnded = onEnded
self.count = 0
def connectionMade(self):
assert self.n > 2
for i in range(self.n - 2):
self.transport.write(self.s)
# test writeSequence
self.transport.writeSequence([self.s, self.s])
self.buffer = self.s * self.n
def outReceived(self, data):
if buffer(self.buffer, self.count, len(data)) != buffer(data):
self.failure = ("wrong bytes received", data, self.count)
self.transport.closeStdin()
else:
self.count += len(data)
if self.count == len(self.buffer):
self.transport.closeStdin()
def processEnded(self, reason):
self.finished = 1
if not reason.check(error.ProcessDone):
self.failure = "process didn't terminate normally: " + str(reason)
self.onEnded.callback(self)
class SignalProtocol(protocol.ProcessProtocol):
"""
A process protocol that sends a signal when data is first received.
@ivar deferred: deferred firing on C{processEnded}.
@type deferred: L{defer.Deferred}
@ivar signal: the signal to send to the process.
@type signal: C{str}
@ivar signaled: A flag tracking whether the signal has been sent to the
child or not yet. C{False} until it is sent, then C{True}.
@type signaled: C{bool}
"""
def __init__(self, deferred, sig):
self.deferred = deferred
self.signal = sig
self.signaled = False
def outReceived(self, data):
"""
Handle the first output from the child process (which indicates it
is set up and ready to receive the signal) by sending the signal to
it. Also log all output to help with debugging.
"""
msg("Received %r from child stdout" % (data,))
if not self.signaled:
self.signaled = True
self.transport.signalProcess(self.signal)
def errReceived(self, data):
"""
Log all data received from the child's stderr to help with
debugging.
"""
msg("Received %r from child stderr" % (data,))
def processEnded(self, reason):
"""
Callback C{self.deferred} with C{None} if C{reason} is a
L{error.ProcessTerminated} failure with C{exitCode} set to C{None},
C{signal} set to C{self.signal}, and C{status} holding the status code
of the exited process. Otherwise, errback with a C{ValueError}
describing the problem.
"""
msg("Child exited: %r" % (reason.getTraceback(),))
if not reason.check(error.ProcessTerminated):
return self.deferred.errback(
ValueError("wrong termination: %s" % (reason,)))
v = reason.value
if isinstance(self.signal, str):
signalValue = getattr(signal, 'SIG' + self.signal)
else:
signalValue = self.signal
if v.exitCode is not None:
return self.deferred.errback(
ValueError("SIG%s: exitCode is %s, not None" %
(self.signal, v.exitCode)))
if v.signal != signalValue:
return self.deferred.errback(
ValueError("SIG%s: .signal was %s, wanted %s" %
(self.signal, v.signal, signalValue)))
if os.WTERMSIG(v.status) != signalValue:
return self.deferred.errback(
ValueError('SIG%s: %s' % (self.signal, os.WTERMSIG(v.status))))
self.deferred.callback(None)
class TestManyProcessProtocol(TestProcessProtocol):
def __init__(self):
self.deferred = defer.Deferred()
def processEnded(self, reason):
self.reason = reason
if reason.check(error.ProcessDone):
self.deferred.callback(None)
else:
self.deferred.errback(reason)
class UtilityProcessProtocol(protocol.ProcessProtocol):
"""
Helper class for launching a Python process and getting a result from it.
@ivar program: A string giving a Python program for the child process to
run.
"""
program = None
def run(cls, reactor, argv, env):
"""
Run a Python process connected to a new instance of this protocol
class. Return the protocol instance.
The Python process is given C{self.program} on the command line to
execute, in addition to anything specified by C{argv}. C{env} is
the complete environment.
"""
exe = sys.executable
self = cls()
reactor.spawnProcess(
self, exe, [exe, "-c", self.program] + argv, env=env)
return self
run = classmethod(run)
def __init__(self):
self.bytes = []
self.requests = []
def parseChunks(self, bytes):
"""
Called with all bytes received on stdout when the process exits.
"""
raise NotImplementedError()
def getResult(self):
"""
Return a Deferred which will fire with the result of L{parseChunks}
when the child process exits.
"""
d = defer.Deferred()
self.requests.append(d)
return d
def _fireResultDeferreds(self, result):
"""
Callback all Deferreds returned up until now by L{getResult}
with the given result object.
"""
requests = self.requests
self.requests = None
for d in requests:
d.callback(result)
def outReceived(self, bytes):
"""
Accumulate output from the child process in a list.
"""
self.bytes.append(bytes)
def processEnded(self, reason):
"""
Handle process termination by parsing all received output and firing
any waiting Deferreds.
"""
self._fireResultDeferreds(self.parseChunks(self.bytes))
class GetArgumentVector(UtilityProcessProtocol):
"""
Protocol which will read a serialized argv from a process and
expose it to interested parties.
"""
program = (
"from sys import stdout, argv\n"
"stdout.write(chr(0).join(argv))\n"
"stdout.flush()\n")
def parseChunks(self, chunks):
"""
Parse the output from the process to which this protocol was
connected, which is a single unterminated line of \\0-separated
strings giving the argv of that process. Return this as a list of
str objects.
"""
return ''.join(chunks).split('\0')
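    # For example, if the child's sys.argv is ['-c', 'extra'], the single
    # chunk '-c\x00extra' parses back into that same list.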
class GetEnvironmentDictionary(UtilityProcessProtocol):
"""
Protocol which will read a serialized environment dict from a process
and expose it to interested parties.
"""
program = (
"from sys import stdout\n"
"from os import environ\n"
"items = environ.iteritems()\n"
"stdout.write(chr(0).join([k + chr(0) + v for k, v in items]))\n"
"stdout.flush()\n")
def parseChunks(self, chunks):
"""
Parse the output from the process to which this protocol was
connected, which is a single unterminated line of \\0-separated
strings giving key value pairs of the environment from that process.
Return this as a dictionary.
"""
environString = ''.join(chunks)
if not environString:
return {}
environ = iter(environString.split('\0'))
d = {}
while 1:
try:
k = environ.next()
except StopIteration:
break
else:
v = environ.next()
d[k] = v
return d
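    # For example, a child environment of {'PATH': '/usr/bin', 'HOME': '/root'}
    # is serialized as 'PATH\x00/usr/bin\x00HOME\x00/root' by the program above
    # and parsed back into the same dictionary here.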
class ProcessTestCase(unittest.TestCase):
"""Test running a process."""
usePTY = False
def testStdio(self):
"""twisted.internet.stdio test."""
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_twisted.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
env = {"PYTHONPATH": os.pathsep.join(sys.path)}
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=env,
path=None, usePTY=self.usePTY)
p.transport.write("hello, world")
p.transport.write("abc")
p.transport.write("123")
p.transport.closeStdin()
def processEnded(ign):
self.assertEqual(p.outF.getvalue(), "hello, worldabc123",
"Output follows:\n"
"%s\n"
"Error message from process_twisted follows:\n"
"%s\n" % (p.outF.getvalue(), p.errF.getvalue()))
return d.addCallback(processEnded)
def test_unsetPid(self):
"""
Test if pid is None/non-None before/after process termination. This
reuses process_echoer.py to get a process that blocks on stdin.
"""
finished = defer.Deferred()
p = TrivialProcessProtocol(finished)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_echoer.py")
procTrans = reactor.spawnProcess(p, exe,
[exe, scriptPath], env=None)
self.failUnless(procTrans.pid)
def afterProcessEnd(ignored):
self.assertEqual(procTrans.pid, None)
p.transport.closeStdin()
return finished.addCallback(afterProcessEnd)
def test_process(self):
"""
        Test running a process: check its output, its exitCode, and some behavior of
signalProcess.
"""
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tester.py")
d = defer.Deferred()
p = TestProcessProtocol()
p.deferred = d
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
def check(ignored):
self.assertEqual(p.stages, [1, 2, 3, 4, 5])
f = p.reason
f.trap(error.ProcessTerminated)
self.assertEqual(f.value.exitCode, 23)
# would .signal be available on non-posix?
# self.assertEqual(f.value.signal, None)
self.assertRaises(
error.ProcessExitedAlready, p.transport.signalProcess, 'INT')
try:
import process_tester, glob
for f in glob.glob(process_tester.test_file_match):
os.remove(f)
except:
pass
d.addCallback(check)
return d
def testManyProcesses(self):
def _check(results, protocols):
for p in protocols:
self.assertEqual(p.stages, [1, 2, 3, 4, 5], "[%d] stages = %s" % (id(p.transport), str(p.stages)))
# test status code
f = p.reason
f.trap(error.ProcessTerminated)
self.assertEqual(f.value.exitCode, 23)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tester.py")
args = [exe, "-u", scriptPath]
protocols = []
deferreds = []
for i in xrange(50):
p = TestManyProcessProtocol()
protocols.append(p)
reactor.spawnProcess(p, exe, args, env=None)
deferreds.append(p.deferred)
deferredList = defer.DeferredList(deferreds, consumeErrors=True)
deferredList.addCallback(_check, protocols)
return deferredList
def test_echo(self):
"""
        Spawning a subprocess which echoes its stdin to its stdout via
C{reactor.spawnProcess} will result in that echoed output being
delivered to outReceived.
"""
finished = defer.Deferred()
p = EchoProtocol(finished)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_echoer.py")
reactor.spawnProcess(p, exe, [exe, scriptPath], env=None)
def asserts(ignored):
self.failIf(p.failure, p.failure)
self.failUnless(hasattr(p, 'buffer'))
self.assertEqual(len(''.join(p.buffer)), len(p.s * p.n))
def takedownProcess(err):
p.transport.closeStdin()
return err
return finished.addCallback(asserts).addErrback(takedownProcess)
def testCommandLine(self):
args = [r'a\"b ', r'a\b ', r' a\\"b', r' a\\b', r'"foo bar" "', '\tab', '"\\', 'a"b', "a'b"]
pyExe = sys.executable
scriptPath = util.sibpath(__file__, "process_cmdline.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath]+args, env=None,
path=None)
def processEnded(ign):
self.assertEqual(p.errF.getvalue(), "")
recvdArgs = p.outF.getvalue().splitlines()
self.assertEqual(recvdArgs, args)
return d.addCallback(processEnded)
def test_wrongArguments(self):
"""
Test invalid arguments to spawnProcess: arguments and environment
        must only contain str or unicode, and must not contain null bytes.
"""
exe = sys.executable
p = protocol.ProcessProtocol()
badEnvs = [
{"foo": 2},
{"foo": "egg\0a"},
{3: "bar"},
{"bar\0foo": "bar"}]
badArgs = [
[exe, 2],
"spam",
[exe, "foo\0bar"]]
# Sanity check - this will fail for people who have mucked with
# their site configuration in a stupid way, but there's nothing we
# can do about that.
badUnicode = u'\N{SNOWMAN}'
try:
badUnicode.encode(sys.getdefaultencoding())
except UnicodeEncodeError:
# Okay, that unicode doesn't encode, put it in as a bad environment
# key.
badEnvs.append({badUnicode: 'value for bad unicode key'})
badEnvs.append({'key for bad unicode value': badUnicode})
badArgs.append([exe, badUnicode])
else:
# It _did_ encode. Most likely, Gtk2 is being used and the
# default system encoding is UTF-8, which can encode anything.
# In any case, if implicit unicode -> str conversion works for
# that string, we can't test that TypeError gets raised instead,
# so just leave it off.
pass
for env in badEnvs:
self.assertRaises(
TypeError,
reactor.spawnProcess, p, exe, [exe, "-c", ""], env=env)
for args in badArgs:
self.assertRaises(
TypeError,
reactor.spawnProcess, p, exe, args, env=None)
# Use upper-case so that the environment key test uses an upper case
# name: some versions of Windows only support upper case environment
# variable names, and I think Python (as of 2.5) doesn't use the right
# syscall for lowercase or mixed case names to work anyway.
okayUnicode = u"UNICODE"
encodedValue = "UNICODE"
def _deprecatedUnicodeSupportTest(self, processProtocolClass, argv=[], env={}):
"""
Check that a deprecation warning is emitted when passing unicode to
spawnProcess for an argv value or an environment key or value.
Check that the warning is of the right type, has the right message,
and refers to the correct file. Unfortunately, don't check that the
line number is correct, because that is too hard for me to figure
out.
@param processProtocolClass: A L{UtilityProcessProtocol} subclass
which will be instantiated to communicate with the child process.
@param argv: The argv argument to spawnProcess.
@param env: The env argument to spawnProcess.
@return: A Deferred which fires when the test is complete.
"""
        # Sanity check to make sure we can actually encode this unicode
# with the default system encoding. This may be excessively
# paranoid. -exarkun
self.assertEqual(
self.okayUnicode.encode(sys.getdefaultencoding()),
self.encodedValue)
p = self.assertWarns(DeprecationWarning,
"Argument strings and environment keys/values passed to "
"reactor.spawnProcess should be str, not unicode.", __file__,
processProtocolClass.run, reactor, argv, env)
return p.getResult()
def test_deprecatedUnicodeArgvSupport(self):
"""
Test that a unicode string passed for an argument value is allowed
if it can be encoded with the default system encoding, but that a
deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(GetArgumentVector, argv=[self.okayUnicode])
def gotArgVector(argv):
self.assertEqual(argv, ['-c', self.encodedValue])
d.addCallback(gotArgVector)
return d
def test_deprecatedUnicodeEnvKeySupport(self):
"""
Test that a unicode string passed for the key of the environment
dictionary is allowed if it can be encoded with the default system
encoding, but that a deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(
GetEnvironmentDictionary, env={self.okayUnicode: self.encodedValue})
def gotEnvironment(environ):
self.assertEqual(environ[self.encodedValue], self.encodedValue)
d.addCallback(gotEnvironment)
return d
def test_deprecatedUnicodeEnvValueSupport(self):
"""
Test that a unicode string passed for the value of the environment
dictionary is allowed if it can be encoded with the default system
encoding, but that a deprecation warning is emitted.
"""
d = self._deprecatedUnicodeSupportTest(
GetEnvironmentDictionary, env={self.encodedValue: self.okayUnicode})
def gotEnvironment(environ):
# On Windows, the environment contains more things than we
# specified, so only make sure that at least the key we wanted
# is there, rather than testing the dictionary for exact
# equality.
self.assertEqual(environ[self.encodedValue], self.encodedValue)
d.addCallback(gotEnvironment)
return d
class TwoProcessProtocol(protocol.ProcessProtocol):
num = -1
finished = 0
def __init__(self):
self.deferred = defer.Deferred()
def outReceived(self, data):
pass
def processEnded(self, reason):
self.finished = 1
self.deferred.callback(None)
class TestTwoProcessesBase:
def setUp(self):
self.processes = [None, None]
self.pp = [None, None]
self.done = 0
self.verbose = 0
def createProcesses(self, usePTY=0):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_reader.py")
for num in (0,1):
self.pp[num] = TwoProcessProtocol()
self.pp[num].num = num
p = reactor.spawnProcess(self.pp[num],
exe, [exe, "-u", scriptPath], env=None,
usePTY=usePTY)
self.processes[num] = p
def close(self, num):
if self.verbose: print "closing stdin [%d]" % num
p = self.processes[num]
pp = self.pp[num]
self.failIf(pp.finished, "Process finished too early")
p.loseConnection()
if self.verbose: print self.pp[0].finished, self.pp[1].finished
def _onClose(self):
return defer.gatherResults([ p.deferred for p in self.pp ])
def testClose(self):
if self.verbose: print "starting processes"
self.createProcesses()
reactor.callLater(1, self.close, 0)
reactor.callLater(2, self.close, 1)
return self._onClose()
class TestTwoProcessesNonPosix(TestTwoProcessesBase, unittest.TestCase):
pass
class TestTwoProcessesPosix(TestTwoProcessesBase, unittest.TestCase):
def tearDown(self):
for pp, pr in zip(self.pp, self.processes):
if not pp.finished:
try:
os.kill(pr.pid, signal.SIGTERM)
except OSError:
# If the test failed the process may already be dead
# The error here is only noise
pass
return self._onClose()
def kill(self, num):
if self.verbose: print "kill [%d] with SIGTERM" % num
p = self.processes[num]
pp = self.pp[num]
self.failIf(pp.finished, "Process finished too early")
os.kill(p.pid, signal.SIGTERM)
if self.verbose: print self.pp[0].finished, self.pp[1].finished
def testKill(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=0)
reactor.callLater(1, self.kill, 0)
reactor.callLater(2, self.kill, 1)
return self._onClose()
def testClosePty(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=1)
reactor.callLater(1, self.close, 0)
reactor.callLater(2, self.close, 1)
return self._onClose()
def testKillPty(self):
if self.verbose: print "starting processes"
self.createProcesses(usePTY=1)
reactor.callLater(1, self.kill, 0)
reactor.callLater(2, self.kill, 1)
return self._onClose()
class FDChecker(protocol.ProcessProtocol):
state = 0
data = ""
failed = None
def __init__(self, d):
self.deferred = d
def fail(self, why):
self.failed = why
self.deferred.callback(None)
def connectionMade(self):
self.transport.writeToChild(0, "abcd")
self.state = 1
def childDataReceived(self, childFD, data):
if self.state == 1:
if childFD != 1:
self.fail("read '%s' on fd %d (not 1) during state 1" \
% (childFD, data))
return
self.data += data
#print "len", len(self.data)
if len(self.data) == 6:
if self.data != "righto":
self.fail("got '%s' on fd1, expected 'righto'" \
% self.data)
return
self.data = ""
self.state = 2
#print "state2", self.state
self.transport.writeToChild(3, "efgh")
return
if self.state == 2:
self.fail("read '%s' on fd %s during state 2" % (childFD, data))
return
if self.state == 3:
if childFD != 1:
self.fail("read '%s' on fd %s (not 1) during state 3" \
% (childFD, data))
return
self.data += data
if len(self.data) == 6:
if self.data != "closed":
self.fail("got '%s' on fd1, expected 'closed'" \
% self.data)
return
self.state = 4
return
if self.state == 4:
self.fail("read '%s' on fd %s during state 4" % (childFD, data))
return
def childConnectionLost(self, childFD):
if self.state == 1:
self.fail("got connectionLost(%d) during state 1" % childFD)
return
if self.state == 2:
if childFD != 4:
self.fail("got connectionLost(%d) (not 4) during state 2" \
% childFD)
return
self.state = 3
self.transport.closeChildFD(5)
return
def processEnded(self, status):
rc = status.value.exitCode
if self.state != 4:
self.fail("processEnded early, rc %d" % rc)
return
if status.value.signal != None:
self.fail("processEnded with signal %s" % status.value.signal)
return
if rc != 0:
self.fail("processEnded with rc %d" % rc)
return
self.deferred.callback(None)
class FDTest(unittest.TestCase):
def testFD(self):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_fds.py")
d = defer.Deferred()
p = FDChecker(d)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None,
childFDs={0:"w", 1:"r", 2:2,
3:"w", 4:"r", 5:"w"})
d.addCallback(lambda x : self.failIf(p.failed, p.failed))
return d
def testLinger(self):
# See what happens when all the pipes close before the process
# actually stops. This test *requires* SIGCHLD catching to work,
# as there is no other way to find out the process is done.
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_linger.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None,
childFDs={1:"r", 2:2},
)
def processEnded(ign):
self.assertEqual(p.outF.getvalue(),
"here is some text\ngoodbye\n")
return d.addCallback(processEnded)
class Accumulator(protocol.ProcessProtocol):
"""Accumulate data from a process."""
closed = 0
endedDeferred = None
def connectionMade(self):
self.outF = StringIO.StringIO()
self.errF = StringIO.StringIO()
def outReceived(self, d):
self.outF.write(d)
def errReceived(self, d):
self.errF.write(d)
def outConnectionLost(self):
pass
def errConnectionLost(self):
pass
def processEnded(self, reason):
self.closed = 1
if self.endedDeferred is not None:
d, self.endedDeferred = self.endedDeferred, None
d.callback(None)
class PosixProcessBase:
"""
Test running processes.
"""
usePTY = False
def getCommand(self, commandName):
"""
Return the path of the shell command named C{commandName}, looking at
common locations.
"""
if os.path.exists('/bin/%s' % (commandName,)):
cmd = '/bin/%s' % (commandName,)
elif os.path.exists('/usr/bin/%s' % (commandName,)):
cmd = '/usr/bin/%s' % (commandName,)
else:
raise RuntimeError(
"%s not found in /bin or /usr/bin" % (commandName,))
return cmd
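    # For example, getCommand('true') resolves to '/bin/true' or
    # '/usr/bin/true' depending on the platform, and raises RuntimeError if
    # the command exists in neither location.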
def testNormalTermination(self):
cmd = self.getCommand('true')
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['true'], env=None,
usePTY=self.usePTY)
def check(ignored):
p.reason.trap(error.ProcessDone)
self.assertEqual(p.reason.value.exitCode, 0)
self.assertEqual(p.reason.value.signal, None)
d.addCallback(check)
return d
def test_abnormalTermination(self):
"""
When a process terminates with a system exit code set to 1,
C{processEnded} is called with a L{error.ProcessTerminated} error,
the C{exitCode} attribute reflecting the system exit code.
"""
exe = sys.executable
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, exe, [exe, '-c', 'import sys; sys.exit(1)'],
env=None, usePTY=self.usePTY)
def check(ignored):
p.reason.trap(error.ProcessTerminated)
self.assertEqual(p.reason.value.exitCode, 1)
self.assertEqual(p.reason.value.signal, None)
d.addCallback(check)
return d
def _testSignal(self, sig):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_signal.py")
d = defer.Deferred()
p = SignalProtocol(d, sig)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
usePTY=self.usePTY)
return d
def test_signalHUP(self):
"""
Sending the SIGHUP signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGHUP}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('HUP')
def test_signalINT(self):
"""
Sending the SIGINT signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGINT}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('INT')
def test_signalKILL(self):
"""
Sending the SIGKILL signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGKILL}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('KILL')
def test_signalTERM(self):
"""
Sending the SIGTERM signal to a running process interrupts it, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} set to C{None} and the C{signal} attribute set to
C{signal.SIGTERM}. C{os.WTERMSIG} can also be used on the C{status}
attribute to extract the signal value.
"""
return self._testSignal('TERM')
def test_childSignalHandling(self):
"""
The disposition of signals which are ignored in the parent
process is reset to the default behavior for the child
process.
"""
# Somewhat arbitrarily select SIGUSR1 here. It satisfies our
# requirements that:
# - The interpreter not fiddle around with the handler
# behind our backs at startup time (this disqualifies
# signals like SIGINT and SIGPIPE).
# - The default behavior is to exit.
#
# This lets us send the signal to the child and then verify
# that it exits with a status code indicating that it was
# indeed the signal which caused it to exit.
which = signal.SIGUSR1
# Ignore the signal in the parent (and make sure we clean it
# up).
handler = signal.signal(which, signal.SIG_IGN)
self.addCleanup(signal.signal, signal.SIGUSR1, handler)
# Now do the test.
return self._testSignal(signal.SIGUSR1)
def test_executionError(self):
"""
Raise an error during execvpe to check error management.
"""
cmd = self.getCommand('false')
d = defer.Deferred()
p = TrivialProcessProtocol(d)
def buggyexecvpe(command, args, environment):
raise RuntimeError("Ouch")
oldexecvpe = os.execvpe
os.execvpe = buggyexecvpe
try:
reactor.spawnProcess(p, cmd, ['false'], env=None,
usePTY=self.usePTY)
def check(ignored):
errData = "".join(p.errData + p.outData)
self.assertIn("Upon execvpe", errData)
self.assertIn("Ouch", errData)
d.addCallback(check)
finally:
os.execvpe = oldexecvpe
return d
def test_errorInProcessEnded(self):
"""
The handler which reaps a process is removed when the process is
reaped, even if the protocol's C{processEnded} method raises an
exception.
"""
connected = defer.Deferred()
ended = defer.Deferred()
# This script runs until we disconnect its transport.
pythonExecutable = sys.executable
scriptPath = util.sibpath(__file__, "process_echoer.py")
class ErrorInProcessEnded(protocol.ProcessProtocol):
"""
A protocol that raises an error in C{processEnded}.
"""
def makeConnection(self, transport):
connected.callback(transport)
def processEnded(self, reason):
reactor.callLater(0, ended.callback, None)
raise RuntimeError("Deliberate error")
# Launch the process.
reactor.spawnProcess(
ErrorInProcessEnded(), pythonExecutable,
[pythonExecutable, scriptPath],
env=None, path=None)
pid = []
def cbConnected(transport):
pid.append(transport.pid)
# There's now a reap process handler registered.
self.assertIn(transport.pid, process.reapProcessHandlers)
# Kill the process cleanly, triggering an error in the protocol.
transport.loseConnection()
connected.addCallback(cbConnected)
def checkTerminated(ignored):
# The exception was logged.
excs = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(excs), 1)
# The process is no longer scheduled for reaping.
self.assertNotIn(pid[0], process.reapProcessHandlers)
ended.addCallback(checkTerminated)
return ended
class MockSignal(object):
"""
Neuter L{signal.signal}, but pass other attributes unscathed
"""
def signal(self, sig, action):
return signal.getsignal(sig)
def __getattr__(self, attr):
return getattr(signal, attr)
class MockOS(object):
"""
The mock OS: overwrite L{os}, L{fcntl} and {sys} functions with fake ones.
@ivar exited: set to True when C{_exit} is called.
@type exited: C{bool}
@ivar O_RDWR: dumb value faking C{os.O_RDWR}.
@type O_RDWR: C{int}
@ivar O_NOCTTY: dumb value faking C{os.O_NOCTTY}.
@type O_NOCTTY: C{int}
@ivar WNOHANG: dumb value faking C{os.WNOHANG}.
@type WNOHANG: C{int}
@ivar raiseFork: if not C{None}, subsequent calls to fork will raise this
object.
@type raiseFork: C{NoneType} or C{Exception}
@ivar raiseExec: if set, subsequent calls to execvpe will raise an error.
@type raiseExec: C{bool}
@ivar fdio: fake file object returned by calls to fdopen.
@type fdio: C{StringIO.StringIO}
@ivar actions: hold names of some actions executed by the object, in order
of execution.
@type actions: C{list} of C{str}
    @ivar closed: keep track of the file descriptors closed.
    @type closed: C{list} of C{int}
    @ivar child: whether fork returns as the child or the parent.
@type child: C{bool}
    @ivar pipeCount: count the number of times that C{os.pipe} has been called.
@type pipeCount: C{int}
@ivar raiseWaitPid: if set, subsequent calls to waitpid will raise
the error specified.
@type raiseWaitPid: C{None} or a class
@ivar waitChild: if set, subsequent calls to waitpid will return it.
@type waitChild: C{None} or a tuple
@ivar euid: the uid returned by the fake C{os.geteuid}
@type euid: C{int}
@ivar egid: the gid returned by the fake C{os.getegid}
@type egid: C{int}
@ivar seteuidCalls: stored results of C{os.seteuid} calls.
@type seteuidCalls: C{list}
@ivar setegidCalls: stored results of C{os.setegid} calls.
@type setegidCalls: C{list}
@ivar path: the path returned by C{os.path.expanduser}.
@type path: C{str}
@ivar raiseKill: if set, subsequent call to kill will raise the error
specified.
@type raiseKill: C{None} or an exception instance.
"""
exited = False
raiseExec = False
fdio = None
child = True
raiseWaitPid = None
raiseFork = None
waitChild = None
euid = 0
egid = 0
path = None
raiseKill = None
def __init__(self):
"""
Initialize data structures.
"""
self.actions = []
self.closed = []
self.pipeCount = 0
self.O_RDWR = -1
self.O_NOCTTY = -2
self.WNOHANG = -4
self.WEXITSTATUS = lambda x: 0
self.WIFEXITED = lambda x: 1
self.seteuidCalls = []
self.setegidCalls = []
def open(self, dev, flags):
"""
Fake C{os.open}. Return a non fd number to be sure it's not used
elsewhere.
"""
return -3
def fstat(self, fd):
"""
Fake C{os.fstat}. Return a C{os.stat_result} filled with garbage.
"""
return os.stat_result((0,) * 10)
def fdopen(self, fd, flag):
"""
Fake C{os.fdopen}. Return a StringIO object whose content can be tested
later via C{self.fdio}.
"""
self.fdio = StringIO.StringIO()
return self.fdio
def setsid(self):
"""
Fake C{os.setsid}. Do nothing.
"""
def fork(self):
"""
Fake C{os.fork}. Save the action in C{self.actions}, and return 0 if
C{self.child} is set, or a dumb number.
"""
self.actions.append(('fork', gc.isenabled()))
if self.raiseFork is not None:
raise self.raiseFork
elif self.child:
# Child result is 0
return 0
else:
return 21
def close(self, fd):
"""
Fake C{os.close}, saving the closed fd in C{self.closed}.
"""
self.closed.append(fd)
def dup2(self, fd1, fd2):
"""
Fake C{os.dup2}. Do nothing.
"""
def write(self, fd, data):
"""
Fake C{os.write}. Do nothing.
"""
def execvpe(self, command, args, env):
"""
Fake C{os.execvpe}. Save the action, and raise an error if
C{self.raiseExec} is set.
"""
self.actions.append('exec')
if self.raiseExec:
raise RuntimeError("Bar")
def pipe(self):
"""
Fake C{os.pipe}. Return non fd numbers to be sure it's not used
elsewhere, and increment C{self.pipeCount}. This is used to uniquify
the result.
"""
self.pipeCount += 1
        return -2 * self.pipeCount + 1, -2 * self.pipeCount
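        # The first call returns (-1, -2), the second (-3, -4), and so on, so
        # each fake pipe gets a distinct pair of negative "descriptors".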
def ttyname(self, fd):
"""
Fake C{os.ttyname}. Return a dumb string.
"""
return "foo"
def _exit(self, code):
"""
Fake C{os._exit}. Save the action, set the C{self.exited} flag, and
raise C{SystemError}.
"""
self.actions.append('exit')
self.exited = True
# Don't forget to raise an error, or you'll end up in parent
# code path.
raise SystemError()
def ioctl(self, fd, flags, arg):
"""
Override C{fcntl.ioctl}. Do nothing.
"""
def setNonBlocking(self, fd):
"""
Override C{fdesc.setNonBlocking}. Do nothing.
"""
def waitpid(self, pid, options):
"""
Override C{os.waitpid}. Return values meaning that the child process
has exited, save executed action.
"""
self.actions.append('waitpid')
if self.raiseWaitPid is not None:
raise self.raiseWaitPid
if self.waitChild is not None:
return self.waitChild
return 1, 0
def settrace(self, arg):
"""
Override C{sys.settrace} to keep coverage working.
"""
def getgid(self):
"""
Override C{os.getgid}. Return a dumb number.
"""
return 1235
def getuid(self):
"""
Override C{os.getuid}. Return a dumb number.
"""
return 1237
def setuid(self, val):
"""
Override C{os.setuid}. Do nothing.
"""
self.actions.append(('setuid', val))
def setgid(self, val):
"""
Override C{os.setgid}. Do nothing.
"""
self.actions.append(('setgid', val))
def setregid(self, val1, val2):
"""
Override C{os.setregid}. Do nothing.
"""
self.actions.append(('setregid', val1, val2))
def setreuid(self, val1, val2):
"""
Override C{os.setreuid}. Save the action.
"""
self.actions.append(('setreuid', val1, val2))
def switchUID(self, uid, gid):
"""
Override C{util.switchuid}. Save the action.
"""
self.actions.append(('switchuid', uid, gid))
def openpty(self):
"""
Override C{pty.openpty}, returning fake file descriptors.
"""
return -12, -13
def geteuid(self):
"""
Mock C{os.geteuid}, returning C{self.euid} instead.
"""
return self.euid
def getegid(self):
"""
Mock C{os.getegid}, returning C{self.egid} instead.
"""
return self.egid
def seteuid(self, egid):
"""
Mock C{os.seteuid}, store result.
"""
self.seteuidCalls.append(egid)
def setegid(self, egid):
"""
Mock C{os.setegid}, store result.
"""
self.setegidCalls.append(egid)
def expanduser(self, path):
"""
Mock C{os.path.expanduser}.
"""
return self.path
def getpwnam(self, user):
"""
Mock C{pwd.getpwnam}.
"""
return 0, 0, 1, 2
def listdir(self, path):
"""
Override C{os.listdir}, returning fake contents of '/dev/fd'
"""
return "-1", "-2"
def kill(self, pid, signalID):
"""
Override C{os.kill}: save the action and raise C{self.raiseKill} if
specified.
"""
self.actions.append(('kill', pid, signalID))
if self.raiseKill is not None:
raise self.raiseKill
if process is not None:
class DumbProcessWriter(process.ProcessWriter):
"""
A fake L{process.ProcessWriter} used for tests.
"""
def startReading(self):
"""
Here's the faking: don't do anything here.
"""
class DumbProcessReader(process.ProcessReader):
"""
A fake L{process.ProcessReader} used for tests.
"""
def startReading(self):
"""
Here's the faking: don't do anything here.
"""
class DumbPTYProcess(process.PTYProcess):
"""
A fake L{process.PTYProcess} used for tests.
"""
def startReading(self):
"""
Here's the faking: don't do anything here.
"""
class MockProcessTestCase(unittest.TestCase):
"""
Mock a process runner to test forked child code path.
"""
if process is None:
skip = "twisted.internet.process is never used on Windows"
def setUp(self):
"""
Replace L{process} os, fcntl, sys, switchUID, fdesc and pty modules
with the mock class L{MockOS}.
"""
if gc.isenabled():
self.addCleanup(gc.enable)
else:
self.addCleanup(gc.disable)
self.mockos = MockOS()
self.mockos.euid = 1236
self.mockos.egid = 1234
self.patch(process, "os", self.mockos)
self.patch(process, "fcntl", self.mockos)
self.patch(process, "sys", self.mockos)
self.patch(process, "switchUID", self.mockos.switchUID)
self.patch(process, "fdesc", self.mockos)
self.patch(process.Process, "processReaderFactory", DumbProcessReader)
self.patch(process.Process, "processWriterFactory", DumbProcessWriter)
self.patch(process, "pty", self.mockos)
self.mocksig = MockSignal()
self.patch(process, "signal", self.mocksig)
def tearDown(self):
"""
Reset processes registered for reap.
"""
process.reapProcessHandlers = {}
def test_mockFork(self):
"""
        Test a classic spawnProcess. Check the path of the child code:
fork, exec, exit.
"""
gc.enable()
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
except SystemError:
self.assert_(self.mockos.exited)
self.assertEqual(
self.mockos.actions, [("fork", False), "exec", "exit"])
else:
self.fail("Should not be here")
# It should leave the garbage collector disabled.
self.assertFalse(gc.isenabled())
def _mockForkInParentTest(self):
"""
Assert that in the main process, spawnProcess disables the garbage
collector, calls fork, closes the pipe file descriptors it created for
the child process, and calls waitpid.
"""
self.mockos.child = False
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
        # It should close the first read pipe, and the last two write pipes
self.assertEqual(set(self.mockos.closed), set([-1, -4, -6]))
self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
def test_mockForkInParentGarbageCollectorEnabled(self):
"""
The garbage collector should be enabled when L{reactor.spawnProcess}
returns if it was initially enabled.
@see L{_mockForkInParentTest}
"""
gc.enable()
self._mockForkInParentTest()
self.assertTrue(gc.isenabled())
def test_mockForkInParentGarbageCollectorDisabled(self):
"""
The garbage collector should be disabled when L{reactor.spawnProcess}
returns if it was initially disabled.
@see L{_mockForkInParentTest}
"""
gc.disable()
self._mockForkInParentTest()
self.assertFalse(gc.isenabled())
def test_mockForkTTY(self):
"""
        Test a TTY spawnProcess: check the path of the child code:
fork, exec, exit.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=True)
except SystemError:
self.assert_(self.mockos.exited)
self.assertEqual(
self.mockos.actions, [("fork", False), "exec", "exit"])
else:
self.fail("Should not be here")
def _mockWithForkError(self):
"""
Assert that if the fork call fails, no other process setup calls are
made and that spawnProcess raises the exception fork raised.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None)
self.assertEqual(self.mockos.actions, [("fork", False)])
def test_mockWithForkErrorGarbageCollectorEnabled(self):
"""
The garbage collector should be enabled when L{reactor.spawnProcess}
raises because L{os.fork} raised, if it was initially enabled.
"""
gc.enable()
self._mockWithForkError()
self.assertTrue(gc.isenabled())
def test_mockWithForkErrorGarbageCollectorDisabled(self):
"""
The garbage collector should be disabled when
L{reactor.spawnProcess} raises because L{os.fork} raised, if it was
initially disabled.
"""
gc.disable()
self._mockWithForkError()
self.assertFalse(gc.isenabled())
def test_mockForkErrorCloseFDs(self):
"""
When C{os.fork} raises an exception, the file descriptors created
before are closed and don't leak.
"""
self._mockWithForkError()
self.assertEqual(set(self.mockos.closed), set([-1, -4, -6, -2, -3, -5]))
def test_mockForkErrorGivenFDs(self):
"""
        When C{os.fork} raises an exception and file descriptors have
        been specified with the C{childFDs} argument of
L{reactor.spawnProcess}, they are not closed.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
childFDs={0: -10, 1: -11, 2: -13})
self.assertEqual(self.mockos.actions, [("fork", False)])
self.assertEqual(self.mockos.closed, [])
# We can also put "r" or "w" to let twisted create the pipes
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
childFDs={0: "r", 1: -11, 2: -13})
self.assertEqual(set(self.mockos.closed), set([-1, -2]))
def test_mockForkErrorClosePTY(self):
"""
When C{os.fork} raises an exception, the file descriptors created by
C{pty.openpty} are closed and don't leak, when C{usePTY} is set to
C{True}.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
usePTY=True)
self.assertEqual(self.mockos.actions, [("fork", False)])
self.assertEqual(set(self.mockos.closed), set([-12, -13]))
def test_mockForkErrorPTYGivenFDs(self):
"""
If a tuple is passed to C{usePTY} to specify slave and master file
descriptors and that C{os.fork} raises an exception, these file
descriptors aren't closed.
"""
self.mockos.raiseFork = OSError(errno.EAGAIN, None)
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None,
usePTY=(-20, -21, 'foo'))
self.assertEqual(self.mockos.actions, [("fork", False)])
self.assertEqual(self.mockos.closed, [])
def test_mockWithExecError(self):
"""
        Spawn a process but simulate an error during execution in the child
path: C{os.execvpe} raises an error. It should close all the standard
fds, try to print the error encountered, and exit cleanly.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
self.mockos.raiseExec = True
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
except SystemError:
self.assert_(self.mockos.exited)
self.assertEqual(
self.mockos.actions, [("fork", False), "exec", "exit"])
# Check that fd have been closed
self.assertIn(0, self.mockos.closed)
self.assertIn(1, self.mockos.closed)
self.assertIn(2, self.mockos.closed)
# Check content of traceback
self.assertIn("RuntimeError: Bar", self.mockos.fdio.getvalue())
else:
self.fail("Should not be here")
def test_mockSetUid(self):
"""
Try creating a process with setting its uid: it's almost the same path
as the standard path, but with a C{switchUID} call before the exec.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False, uid=8080)
except SystemError:
self.assert_(self.mockos.exited)
self.assertEqual(
self.mockos.actions,
[('fork', False), ('setuid', 0), ('setgid', 0),
('switchuid', 8080, 1234), 'exec', 'exit'])
else:
self.fail("Should not be here")
def test_mockSetUidInParent(self):
"""
When spawning a child process with a UID different from the UID of the
current process, the current process does not have its UID changed.
"""
self.mockos.child = False
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False, uid=8080)
self.assertEqual(self.mockos.actions, [('fork', False), 'waitpid'])
def test_mockPTYSetUid(self):
"""
Try creating a PTY process with setting its uid: it's almost the same
path as the standard path, but with a C{switchUID} call before the
exec.
"""
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
try:
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=True, uid=8081)
except SystemError:
self.assertTrue(self.mockos.exited)
self.assertEqual(
self.mockos.actions,
[('fork', False), ('setuid', 0), ('setgid', 0),
('switchuid', 8081, 1234), 'exec', 'exit'])
else:
self.fail("Should not be here")
def test_mockPTYSetUidInParent(self):
"""
When spawning a child process with PTY and a UID different from the UID
of the current process, the current process does not have its UID
changed.
"""
self.mockos.child = False
cmd = '/mock/ouch'
d = defer.Deferred()
p = TrivialProcessProtocol(d)
oldPTYProcess = process.PTYProcess
try:
process.PTYProcess = DumbPTYProcess
reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=True, uid=8080)
finally:
process.PTYProcess = oldPTYProcess
self.assertEqual(self.mockos.actions, [('fork', False), 'waitpid'])
def test_mockWithWaitError(self):
"""
Test that reapProcess logs errors raised.
"""
self.mockos.child = False
cmd = '/mock/ouch'
self.mockos.waitChild = (0, 0)
d = defer.Deferred()
p = TrivialProcessProtocol(d)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
self.mockos.raiseWaitPid = OSError()
proc.reapProcess()
errors = self.flushLoggedErrors()
self.assertEqual(len(errors), 1)
errors[0].trap(OSError)
def test_mockErrorECHILDInReapProcess(self):
"""
Test that reapProcess doesn't log anything when waitpid raises a
C{OSError} with errno C{ECHILD}.
"""
self.mockos.child = False
cmd = '/mock/ouch'
self.mockos.waitChild = (0, 0)
d = defer.Deferred()
p = TrivialProcessProtocol(d)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None,
usePTY=False)
self.assertEqual(self.mockos.actions, [("fork", False), "waitpid"])
self.mockos.raiseWaitPid = OSError()
self.mockos.raiseWaitPid.errno = errno.ECHILD
# This should not produce any errors
proc.reapProcess()
def test_mockErrorInPipe(self):
"""
        If C{os.pipe} raises an exception after some pipes were created, the
created pipes are closed and don't leak.
"""
pipes = [-1, -2, -3, -4]
def pipe():
try:
return pipes.pop(0), pipes.pop(0)
except IndexError:
raise OSError()
self.mockos.pipe = pipe
protocol = TrivialProcessProtocol(None)
self.assertRaises(OSError, reactor.spawnProcess, protocol, None)
self.assertEqual(self.mockos.actions, [])
self.assertEqual(set(self.mockos.closed), set([-4, -3, -2, -1]))
def test_kill(self):
"""
L{process.Process.signalProcess} calls C{os.kill} translating the given
signal string to the PID.
"""
self.mockos.child = False
self.mockos.waitChild = (0, 0)
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
proc.signalProcess("KILL")
self.assertEqual(self.mockos.actions,
[('fork', False), 'waitpid', ('kill', 21, signal.SIGKILL)])
def test_killExited(self):
"""
L{process.Process.signalProcess} raises L{error.ProcessExitedAlready}
if the process has exited.
"""
self.mockos.child = False
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
# We didn't specify a waitpid value, so the waitpid call in
# registerReapProcessHandler has already reaped the process
self.assertRaises(error.ProcessExitedAlready,
proc.signalProcess, "KILL")
def test_killExitedButNotDetected(self):
"""
L{process.Process.signalProcess} raises L{error.ProcessExitedAlready}
        if the process has exited but Twisted hasn't seen it (for example,
        if the process has been waited on outside of Twisted): C{os.kill} then
        raises C{OSError} with C{errno.ESRCH} as errno.
"""
self.mockos.child = False
self.mockos.waitChild = (0, 0)
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
self.mockos.raiseKill = OSError(errno.ESRCH, "Not found")
self.assertRaises(error.ProcessExitedAlready,
proc.signalProcess, "KILL")
def test_killErrorInKill(self):
"""
L{process.Process.signalProcess} doesn't mask C{OSError} exceptions if
the errno is different from C{errno.ESRCH}.
"""
self.mockos.child = False
self.mockos.waitChild = (0, 0)
cmd = '/mock/ouch'
p = TrivialProcessProtocol(None)
proc = reactor.spawnProcess(p, cmd, ['ouch'], env=None, usePTY=False)
self.mockos.raiseKill = OSError(errno.EINVAL, "Invalid signal")
err = self.assertRaises(OSError,
proc.signalProcess, "KILL")
self.assertEquals(err.errno, errno.EINVAL)
class PosixProcessTestCase(unittest.TestCase, PosixProcessBase):
# add two non-pty test cases
def test_stderr(self):
"""
Bytes written to stderr by the spawned process are passed to the
C{errReceived} callback on the C{ProcessProtocol} passed to
C{spawnProcess}.
"""
cmd = sys.executable
value = "42"
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, cmd,
[cmd, "-c",
"import sys; sys.stderr.write('%s')" % (value,)],
env=None, path="/tmp",
usePTY=self.usePTY)
def processEnded(ign):
self.assertEqual(value, p.errF.getvalue())
return d.addCallback(processEnded)
def testProcess(self):
cmd = self.getCommand('gzip')
s = "there's no place like home!\n" * 3
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, cmd, [cmd, "-c"], env=None, path="/tmp",
usePTY=self.usePTY)
p.transport.write(s)
p.transport.closeStdin()
def processEnded(ign):
f = p.outF
f.seek(0, 0)
gf = gzip.GzipFile(fileobj=f)
self.assertEqual(gf.read(), s)
return d.addCallback(processEnded)
class PosixProcessTestCasePTY(unittest.TestCase, PosixProcessBase):
"""
Just like PosixProcessTestCase, but use ptys instead of pipes.
"""
usePTY = True
# PTYs only offer one input and one output. What still makes sense?
# testNormalTermination
# test_abnormalTermination
# testSignal
# testProcess, but not without p.transport.closeStdin
    # might be solvable: TODO: add a test if so
def testOpeningTTY(self):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_tty.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None,
path=None, usePTY=self.usePTY)
p.transport.write("hello world!\n")
def processEnded(ign):
self.assertRaises(
error.ProcessExitedAlready, p.transport.signalProcess, 'HUP')
self.assertEqual(
p.outF.getvalue(),
"hello world!\r\nhello world!\r\n",
"Error message from process_tty follows:\n\n%s\n\n" % p.outF.getvalue())
return d.addCallback(processEnded)
def testBadArgs(self):
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
p = Accumulator()
self.assertRaises(ValueError, reactor.spawnProcess, p, pyExe, pyArgs,
usePTY=1, childFDs={1:'r'})
class Win32SignalProtocol(SignalProtocol):
"""
A win32-specific process protocol that handles C{processEnded}
differently: processes should exit with exit code 1.
"""
def processEnded(self, reason):
"""
Callback C{self.deferred} with C{None} if C{reason} is a
L{error.ProcessTerminated} failure with C{exitCode} set to 1.
Otherwise, errback with a C{ValueError} describing the problem.
"""
if not reason.check(error.ProcessTerminated):
return self.deferred.errback(
ValueError("wrong termination: %s" % (reason,)))
v = reason.value
if v.exitCode != 1:
return self.deferred.errback(
ValueError("Wrong exit code: %s" % (reason.exitCode,)))
self.deferred.callback(None)
class Win32ProcessTestCase(unittest.TestCase):
"""
Test process programs that are packaged with twisted.
"""
def testStdinReader(self):
pyExe = sys.executable
scriptPath = util.sibpath(__file__, "process_stdinreader.py")
p = Accumulator()
d = p.endedDeferred = defer.Deferred()
reactor.spawnProcess(p, pyExe, [pyExe, "-u", scriptPath], env=None,
path=None)
p.transport.write("hello, world")
p.transport.closeStdin()
def processEnded(ign):
self.assertEqual(p.errF.getvalue(), "err\nerr\n")
self.assertEqual(p.outF.getvalue(), "out\nhello, world\nout\n")
return d.addCallback(processEnded)
def testBadArgs(self):
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
p = Accumulator()
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, uid=1)
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, gid=1)
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, usePTY=1)
self.assertRaises(ValueError,
reactor.spawnProcess, p, pyExe, pyArgs, childFDs={1:'r'})
def _testSignal(self, sig):
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_signal.py")
d = defer.Deferred()
p = Win32SignalProtocol(d, sig)
reactor.spawnProcess(p, exe, [exe, "-u", scriptPath], env=None)
return d
def test_signalTERM(self):
"""
Sending the SIGTERM signal terminates a created process, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} attribute set to 1.
"""
return self._testSignal('TERM')
def test_signalINT(self):
"""
Sending the SIGINT signal terminates a created process, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} attribute set to 1.
"""
return self._testSignal('INT')
def test_signalKILL(self):
"""
Sending the SIGKILL signal terminates a created process, and
C{processEnded} is called with a L{error.ProcessTerminated} instance
with the C{exitCode} attribute set to 1.
"""
return self._testSignal('KILL')
def test_closeHandles(self):
"""
The win32 handles should be properly closed when the process exits.
"""
import win32api
connected = defer.Deferred()
ended = defer.Deferred()
class SimpleProtocol(protocol.ProcessProtocol):
"""
A protocol that fires deferreds when connected and disconnected.
"""
def makeConnection(self, transport):
connected.callback(transport)
def processEnded(self, reason):
ended.callback(None)
p = SimpleProtocol()
pyExe = sys.executable
pyArgs = [pyExe, "-u", "-c", "print 'hello'"]
proc = reactor.spawnProcess(p, pyExe, pyArgs)
def cbConnected(transport):
self.assertIdentical(transport, proc)
# perform a basic validity test on the handles
win32api.GetHandleInformation(proc.hProcess)
win32api.GetHandleInformation(proc.hThread)
# And save their values for later
self.hProcess = proc.hProcess
self.hThread = proc.hThread
connected.addCallback(cbConnected)
def checkTerminated(ignored):
# The attributes on the process object must be reset...
self.assertIdentical(proc.pid, None)
self.assertIdentical(proc.hProcess, None)
self.assertIdentical(proc.hThread, None)
# ...and the handles must be closed.
self.assertRaises(win32api.error,
win32api.GetHandleInformation, self.hProcess)
self.assertRaises(win32api.error,
win32api.GetHandleInformation, self.hThread)
ended.addCallback(checkTerminated)
return defer.gatherResults([connected, ended])
class Win32UnicodeEnvironmentTest(unittest.TestCase):
"""
Tests for Unicode environment on Windows
"""
goodKey = u'UNICODE'
goodValue = u'UNICODE'
def test_encodableUnicodeEnvironment(self):
"""
Test C{os.environ} (inherited by every subprocess on Windows) that
contains an ascii-encodable Unicode string. This is different from
passing Unicode environment explicitly to spawnProcess (which is not
supported).
"""
os.environ[self.goodKey] = self.goodValue
self.addCleanup(operator.delitem, os.environ, self.goodKey)
p = GetEnvironmentDictionary.run(reactor, [], {})
def gotEnvironment(environ):
self.assertEqual(
environ[self.goodKey.encode('ascii')],
self.goodValue.encode('ascii'))
return p.getResult().addCallback(gotEnvironment)
class Dumbwin32procPidTest(unittest.TestCase):
"""
Simple test for the pid attribute of Process on win32.
"""
def test_pid(self):
"""
Launch process with mock win32process. The only mock aspect of this
module is that the pid of the process created will always be 42.
"""
from twisted.internet import _dumbwin32proc
from twisted.test import mock_win32process
self.patch(_dumbwin32proc, "win32process", mock_win32process)
exe = sys.executable
scriptPath = util.sibpath(__file__, "process_cmdline.py")
d = defer.Deferred()
processProto = TrivialProcessProtocol(d)
comspec = str(os.environ["COMSPEC"])
cmd = [comspec, "/c", exe, scriptPath]
p = _dumbwin32proc.Process(reactor,
processProto,
None,
cmd,
{},
None)
self.assertEqual(42, p.pid)
self.assertEqual("<Process pid=42>", repr(p))
def pidCompleteCb(result):
self.assertEqual(None, p.pid)
return d.addCallback(pidCompleteCb)
class UtilTestCase(unittest.TestCase):
"""
Tests for process-related helper functions (currently only
L{procutils.which}).
"""
def setUp(self):
"""
Create several directories and files, some of which are executable
and some of which are not. Save the current PATH setting.
"""
j = os.path.join
base = self.mktemp()
self.foo = j(base, "foo")
self.baz = j(base, "baz")
self.foobar = j(self.foo, "bar")
self.foobaz = j(self.foo, "baz")
self.bazfoo = j(self.baz, "foo")
self.bazbar = j(self.baz, "bar")
for d in self.foobar, self.foobaz, self.bazfoo, self.bazbar:
os.makedirs(d)
for name, mode in [(j(self.foobaz, "executable"), 0700),
(j(self.foo, "executable"), 0700),
(j(self.bazfoo, "executable"), 0700),
(j(self.bazfoo, "executable.bin"), 0700),
(j(self.bazbar, "executable"), 0)]:
f = file(name, "w")
f.close()
os.chmod(name, mode)
self.oldPath = os.environ.get('PATH', None)
os.environ['PATH'] = os.pathsep.join((
self.foobar, self.foobaz, self.bazfoo, self.bazbar))
def tearDown(self):
"""
Restore the saved PATH setting, and set all created files readable
again so that they can be deleted easily.
"""
os.chmod(os.path.join(self.bazbar, "executable"), stat.S_IWUSR)
if self.oldPath is None:
try:
del os.environ['PATH']
except KeyError:
pass
else:
os.environ['PATH'] = self.oldPath
def test_whichWithoutPATH(self):
"""
Test that if C{os.environ} does not have a C{'PATH'} key,
L{procutils.which} returns an empty list.
"""
del os.environ['PATH']
self.assertEqual(procutils.which("executable"), [])
def testWhich(self):
j = os.path.join
paths = procutils.which("executable")
expectedPaths = [j(self.foobaz, "executable"),
j(self.bazfoo, "executable")]
if runtime.platform.isWindows():
expectedPaths.append(j(self.bazbar, "executable"))
self.assertEqual(paths, expectedPaths)
def testWhichPathExt(self):
j = os.path.join
old = os.environ.get('PATHEXT', None)
os.environ['PATHEXT'] = os.pathsep.join(('.bin', '.exe', '.sh'))
try:
paths = procutils.which("executable")
finally:
if old is None:
del os.environ['PATHEXT']
else:
os.environ['PATHEXT'] = old
expectedPaths = [j(self.foobaz, "executable"),
j(self.bazfoo, "executable"),
j(self.bazfoo, "executable.bin")]
if runtime.platform.isWindows():
expectedPaths.append(j(self.bazbar, "executable"))
self.assertEqual(paths, expectedPaths)
class ClosingPipesProcessProtocol(protocol.ProcessProtocol):
output = ''
errput = ''
def __init__(self, outOrErr):
self.deferred = defer.Deferred()
self.outOrErr = outOrErr
def processEnded(self, reason):
self.deferred.callback(reason)
def outReceived(self, data):
self.output += data
def errReceived(self, data):
self.errput += data
class ClosingPipes(unittest.TestCase):
def doit(self, fd):
"""
Create a child process and close one of its output descriptors using
L{IProcessTransport.closeStdout} or L{IProcessTransport.closeStderr}.
Return a L{Deferred} which fires after verifying that the descriptor was
really closed.
"""
p = ClosingPipesProcessProtocol(True)
self.assertFailure(p.deferred, error.ProcessTerminated)
p.deferred.addCallback(self._endProcess, p)
reactor.spawnProcess(
p, sys.executable, [
sys.executable, '-u', '-c',
'raw_input()\n'
'import sys, os, time\n'
# Give the system a bit of time to notice the closed
# descriptor. Another option would be to poll() for HUP
# instead of relying on an os.write to fail with SIGPIPE.
# However, that wouldn't work on OS X (or Windows?).
'for i in range(1000):\n'
' os.write(%d, "foo\\n")\n'
' time.sleep(0.01)\n'
'sys.exit(42)\n' % (fd,)
],
env=None)
if fd == 1:
p.transport.closeStdout()
elif fd == 2:
p.transport.closeStderr()
else:
raise RuntimeError
# Give the close time to propagate
p.transport.write('go\n')
# make the buggy case not hang
p.transport.closeStdin()
return p.deferred
def _endProcess(self, reason, p):
"""
Check that a failed write prevented the process from getting to its
custom exit code.
"""
# child must not get past that write without raising
self.assertNotEquals(
reason.exitCode, 42, 'process reason was %r' % reason)
self.assertEqual(p.output, '')
return p.errput
def test_stdout(self):
"""
ProcessProtocol.transport.closeStdout actually closes the pipe.
"""
d = self.doit(1)
def _check(errput):
self.assertIn('OSError', errput)
if runtime.platform.getType() != 'win32':
self.assertIn('Broken pipe', errput)
d.addCallback(_check)
return d
def test_stderr(self):
"""
ProcessProtocol.transport.closeStderr actually closes the pipe.
"""
d = self.doit(2)
def _check(errput):
# there should be no stderr open, so nothing for it to
# write the error to.
self.assertEqual(errput, '')
d.addCallback(_check)
return d
skipMessage = "wrong platform or reactor doesn't support IReactorProcess"
if (runtime.platform.getType() != 'posix') or (not interfaces.IReactorProcess(reactor, None)):
PosixProcessTestCase.skip = skipMessage
PosixProcessTestCasePTY.skip = skipMessage
TestTwoProcessesPosix.skip = skipMessage
FDTest.skip = skipMessage
if (runtime.platform.getType() != 'win32') or (not interfaces.IReactorProcess(reactor, None)):
Win32ProcessTestCase.skip = skipMessage
TestTwoProcessesNonPosix.skip = skipMessage
Dumbwin32procPidTest.skip = skipMessage
Win32UnicodeEnvironmentTest.skip = skipMessage
if not interfaces.IReactorProcess(reactor, None):
ProcessTestCase.skip = skipMessage
ClosingPipes.skip = skipMessage
| {
"content_hash": "c5ad13acef65c69af1f9b44934aaa5af",
"timestamp": "",
"source": "github",
"line_count": 2542,
"max_line_length": 114,
"avg_line_length": 32.14791502753737,
"alnum_prop": 0.5793930494371023,
"repo_name": "normanmaurer/autobahntestsuite-maven-plugin",
"id": "acfa108cb092c44857a2246c013091c57b53b4cc",
"size": "81793",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/main/resources/twisted/test/test_process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "70690"
},
{
"name": "C++",
"bytes": "1291"
},
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Gherkin",
"bytes": "2218"
},
{
"name": "HTML",
"bytes": "56655"
},
{
"name": "Java",
"bytes": "24931"
},
{
"name": "JavaScript",
"bytes": "9151"
},
{
"name": "Python",
"bytes": "13888733"
},
{
"name": "Shell",
"bytes": "1406"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.signal import pool
from theano.tensor.nnet import conv3d2d
from theano.printing import Print
try:
from theano.tensor.nnet.nnet import softsign as T_softsign
except ImportError:
from theano.sandbox.softsign import softsign as T_softsign
import inspect
import numpy as np
from .common import _FLOATX, _EPSILON, _IMAGE_DIM_ORDERING
# INTERNAL UTILS
theano.config.floatX = _FLOATX
_LEARNING_PHASE = T.scalar(dtype='uint8', name='keras_learning_phase') # 0 = test, 1 = train
def learning_phase():
# False = test, True = train
return _LEARNING_PHASE
def set_learning_phase(value):
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('Expected learning phase to be '
'0 or 1.')
_LEARNING_PHASE = value
# VARIABLE MANIPULATION
def variable(value, dtype=_FLOATX, name=None):
'''Instantiate a tensor variable.
'''
value = np.asarray(value, dtype=dtype)
return theano.shared(value=value, name=name, strict=False)
def placeholder(shape=None, ndim=None, dtype=_FLOATX, name=None):
'''Instantiate an input data placeholder variable.
'''
if shape is None and ndim is None:
raise Exception('Specify either a shape or ndim value.')
if shape is not None:
ndim = len(shape)
else:
shape = tuple([None for _ in range(ndim)])
broadcast = (False,) * ndim
x = T.TensorType(dtype, broadcast)(name)
x._keras_shape = shape
x._uses_learning_phase = False
return x
def shape(x):
'''Return the shape of a tensor.
Warning: type returned will be different for
Theano backend (Theano tensor type) and TF backend (TF TensorShape).
'''
return x.shape
def ndim(x):
return x.ndim
def dtype(x):
return x.dtype
def eval(x):
'''Run a graph.
'''
return x.eval()
def zeros(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-zeros variable.
'''
return variable(np.zeros(shape), dtype, name)
def ones(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-ones variable.
'''
return variable(np.ones(shape), dtype, name)
def eye(size, dtype=_FLOATX, name=None):
'''Instantiate an identity matrix.
'''
return variable(np.eye(size), dtype, name)
def ones_like(x):
return T.ones_like(x)
def zeros_like(x):
return T.zeros_like(x)
def random_uniform_variable(shape, low, high, dtype=_FLOATX, name=None):
return variable(np.random.uniform(low=low, high=high, size=shape),
dtype=dtype, name=name)
def random_normal_variable(shape, mean, scale, dtype=_FLOATX, name=None):
return variable(np.random.normal(loc=0.0, scale=scale, size=shape),
dtype=dtype, name=name)
def count_params(x):
'''Return number of scalars in a tensor.
Return: numpy integer.
'''
return np.prod(x.shape.eval())
def cast(x, dtype):
return T.cast(x, dtype)
# UPDATES OPS
def update(x, new_x):
return (x, new_x)
def update_add(x, increment):
return (x, x + increment)
def update_sub(x, decrement):
return (x, x - decrement)
def moving_average_update(variable, value, momentum):
return (variable, variable * momentum + value * (1. - momentum))
# LINEAR ALGEBRA
'''
Assumed overridden:
+, -, /, *, +=, -=, *=, /=
'''
def dot(x, y):
return T.dot(x, y)
def batch_dot(x, y, axes=None):
'''Batchwise dot product.
batch_dot results in a tensor with less dimensions than the input.
If the number of dimensions is reduced to 1, we use `expand_dims` to
make sure that ndim is at least 2.
# Arguments
x, y: tensors with ndim >= 2
axes: list (or single) int with target dimensions
# Returns
A tensor with shape equal to the concatenation of x's shape
(less the dimension that was summed over) and y's shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to (batch_size, 1).
# Examples
Assume x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]]
batch_dot(x, y, axes=1) = [[17, 53]] which is the main diagonal
of x.dot(y.T), although we never have to calculate the off-diagonal
elements.
Shape inference:
Let x's shape be (100, 20) and y's shape be (100, 30, 20).
If dot_axes is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in x's shape and y's shape:
x.shape[0] : 100 : append to output shape
x.shape[1] : 20 : do not append to output shape,
dimension 1 of x has been summed over. (dot_axes[0] = 1)
y.shape[0] : 100 : do not append to output shape,
always ignore first dimension of y
y.shape[1] : 30 : append to output shape
y.shape[2] : 20 : do not append to output shape,
dimension 2 of y has been summed over. (dot_axes[1] = 2)
output_shape = (100, 30)
'''
if type(axes) == int:
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [x.ndim - 1, y.ndim - 2]
out = T.batched_tensordot(x, y, axes=axes)
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
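# A minimal usage sketch for batch_dot (not part of the original file; assumes
# this backend is imported as `K` and NumPy as `np`):
#     x = K.variable(np.random.rand(100, 20))
#     y = K.variable(np.random.rand(100, 30, 20))
#     out = K.batch_dot(x, y, axes=(1, 2))
#     K.eval(out).shape   # (100, 30), matching the shape inference above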
def transpose(x):
return T.transpose(x)
def gather(reference, indices):
'''reference: a tensor.
indices: an int tensor of indices.
Return: a tensor of same type as reference.
'''
return reference[indices]
# ELEMENT-WISE OPERATIONS
def max(x, axis=None, keepdims=False):
return T.max(x, axis=axis, keepdims=keepdims)
def min(x, axis=None, keepdims=False):
return T.min(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
'''Sum of the values in a tensor, alongside the specified axis.
'''
return T.sum(x, axis=axis, keepdims=keepdims)
def prod(x, axis=None, keepdims=False):
'''Multiply the values in a tensor, alongside the specified axis.
'''
return T.prod(x, axis=axis, keepdims=keepdims)
def mean(x, axis=None, keepdims=False):
dtype = None
if 'int' in x.dtype:
dtype = _FLOATX
return T.mean(x, axis=axis, keepdims=keepdims, dtype=dtype)
def std(x, axis=None, keepdims=False):
return T.std(x, axis=axis, keepdims=keepdims)
def var(x, axis=None, keepdims=False):
return T.var(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
'''Bitwise reduction (logical OR).
'''
return T.any(x, axis=axis, keepdims=keepdims)
def all(x, axis=None, keepdims=False):
'''Bitwise reduction (logical AND).
'''
return T.all(x, axis=axis, keepdims=keepdims)
def argmax(x, axis=-1):
return T.argmax(x, axis=axis, keepdims=False)
def argmin(x, axis=-1):
return T.argmin(x, axis=axis, keepdims=False)
def square(x):
return T.sqr(x)
def abs(x):
return T.abs_(x)
def sqrt(x):
x = T.clip(x, 0., np.inf)
return T.sqrt(x)
def exp(x):
return T.exp(x)
def log(x):
return T.log(x)
def round(x):
return T.round(x)
def sign(x):
return T.sgn(x)
def pow(x, a):
return T.pow(x, a)
def clip(x, min_value, max_value):
if max_value < min_value:
max_value = min_value
return T.clip(x, min_value, max_value)
def equal(x, y):
return T.eq(x, y)
def not_equal(x, y):
return T.neq(x, y)
def greater(x, y):
return T.gt(x, y)
def greater_equal(x, y):
return T.ge(x, y)
def lesser(x, y):
return T.lt(x, y)
def lesser_equal(x, y):
return T.le(x, y)
def maximum(x, y):
return T.maximum(x, y)
def minimum(x, y):
return T.minimum(x, y)
def sin(x):
return T.sin(x)
def cos(x):
return T.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=0.0001):
'''Compute mean and std for batch then apply batch_normalization on batch.
'''
var = x.var(reduction_axes)
mean = x.mean(reduction_axes)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(x.shape[axis])
target_shape = T.stack(*target_shape)
broadcast_mean = T.reshape(mean, target_shape)
broadcast_var = T.reshape(var, target_shape)
broadcast_beta = T.reshape(beta, target_shape)
broadcast_gamma = T.reshape(gamma, target_shape)
normed = batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma,
epsilon)
return normed, mean, var
def batch_normalization(x, mean, var, beta, gamma, epsilon=0.0001):
'''Apply batch normalization on x given mean, var, beta and gamma.
'''
normed = T.nnet.bn.batch_normalization(x, gamma, beta, mean,
sqrt(var) + epsilon,
mode='high_mem')
return normed
# SHAPE OPERATIONS
def concatenate(tensors, axis=-1):
return T.concatenate(tensors, axis=axis)
def reshape(x, shape):
return T.reshape(x, shape)
def permute_dimensions(x, pattern):
'''Transpose dimensions.
pattern should be a tuple or list of
dimension indices, e.g. [0, 2, 1].
'''
pattern = tuple(pattern)
return x.dimshuffle(pattern)
def repeat_elements(x, rep, axis):
'''Repeat the elements of a tensor along an axis, like np.repeat.
If x has shape (s1, s2, s3) and axis=1, the output
will have shape (s1, s2 * rep, s3).
'''
return T.repeat(x, rep, axis=axis)
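# A minimal usage sketch for repeat_elements (assumes `K` is this backend and
# `np` is NumPy):
#     x = K.variable(np.zeros((2, 3, 4)))
#     K.eval(K.repeat_elements(x, 2, axis=1)).shape   # (2, 6, 4)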
def resize_images(X, height_factor, width_factor, dim_ordering):
'''Resize the images contained in a 4D tensor of shape
- [batch, channels, height, width] (for 'th' dim_ordering)
- [batch, height, width, channels] (for 'tf' dim_ordering)
by a factor of (height_factor, width_factor). Both factors should be
positive integers.
'''
if dim_ordering == 'th':
output = repeat_elements(X, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif dim_ordering == 'tf':
output = repeat_elements(X, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
def resize_volumes(X, depth_factor, height_factor, width_factor, dim_ordering):
'''Resize the volume contained in a 5D tensor of shape
- [batch, channels, depth, height, width] (for 'th' dim_ordering)
- [batch, depth, height, width, channels] (for 'tf' dim_ordering)
by a factor of (depth_factor, height_factor, width_factor).
Both factors should be positive integers.
'''
if dim_ordering == 'th':
output = repeat_elements(X, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif dim_ordering == 'tf':
output = repeat_elements(X, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
def repeat(x, n):
'''Repeat a 2D tensor.
If x has shape (samples, dim) and n=2,
the output will have shape (samples, 2, dim).
'''
assert x.ndim == 2
x = x.dimshuffle((0, 'x', 1))
return T.extra_ops.repeat(x, n, axis=1)
def tile(x, n):
return T.tile(x, n)
def flatten(x):
return T.flatten(x)
def batch_flatten(x):
'''Turn an n-D tensor into a 2D tensor where
the first dimension is conserved.
'''
x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0]))
return x
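# A minimal usage sketch for batch_flatten (assumes `K` is this backend and
# `np` is NumPy):
#     x = K.variable(np.zeros((32, 4, 4, 3)))
#     K.eval(K.batch_flatten(x)).shape   # (32, 48): non-batch axes collapsed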
def expand_dims(x, dim=-1):
'''Add a 1-sized dimension at index "dim".
'''
pattern = [i for i in range(x.type.ndim)]
if dim < 0:
if x.type.ndim == 0:
dim = 0
else:
dim = dim % x.type.ndim + 1
pattern.insert(dim, 'x')
return x.dimshuffle(pattern)
def squeeze(x, axis):
'''Remove a 1-dimension from the tensor at index "axis".
'''
broadcastable = x.broadcastable[:axis] + x.broadcastable[axis+1:]
x = T.patternbroadcast(x, [i == axis for i in range(x.type.ndim)])
x = T.squeeze(x)
x = T.patternbroadcast(x, broadcastable)
return x
def temporal_padding(x, padding=1):
'''Pad the middle dimension of a 3D tensor
with "padding" zeros left and right.
Apologies for the inane API, but Theano makes this
really hard.
'''
input_shape = x.shape
output_shape = (input_shape[0],
input_shape[1] + 2 * padding,
input_shape[2])
output = T.zeros(output_shape)
return T.set_subtensor(output[:, padding:x.shape[1] + padding, :], x)
def spatial_2d_padding(x, padding=(1, 1), dim_ordering='th'):
'''Pad the 2nd and 3rd dimensions of a 4D tensor
with "padding[0]" and "padding[1]" (resp.) zeros left and right.
'''
input_shape = x.shape
if dim_ordering == 'th':
output_shape = (input_shape[0],
input_shape[1],
input_shape[2] + 2 * padding[0],
input_shape[3] + 2 * padding[1])
output = T.zeros(output_shape)
indices = (slice(None),
slice(None),
slice(padding[0], input_shape[2] + padding[0]),
slice(padding[1], input_shape[3] + padding[1]))
elif dim_ordering == 'tf':
output_shape = (input_shape[0],
input_shape[1] + 2 * padding[0],
input_shape[2] + 2 * padding[1],
input_shape[3])
output = T.zeros(output_shape)
indices = (slice(None),
slice(padding[0], input_shape[1] + padding[0]),
slice(padding[1], input_shape[2] + padding[1]),
slice(None))
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
return T.set_subtensor(output[indices], x)
def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='th'):
'''Pad the 2nd, 3rd and 4th dimensions of a 5D tensor
with "padding[0]", "padding[1]" and "padding[2]" (resp.) zeros left and right.
'''
input_shape = x.shape
if dim_ordering == 'th':
output_shape = (input_shape[0],
input_shape[1],
input_shape[2] + 2 * padding[0],
input_shape[3] + 2 * padding[1],
input_shape[4] + 2 * padding[2])
output = T.zeros(output_shape)
indices = (slice(None),
slice(None),
slice(padding[0], input_shape[2] + padding[0]),
slice(padding[1], input_shape[3] + padding[1]),
slice(padding[2], input_shape[4] + padding[2]))
elif dim_ordering == 'tf':
output_shape = (input_shape[0],
input_shape[1] + 2 * padding[0],
input_shape[2] + 2 * padding[1],
input_shape[3] + 2 * padding[2],
input_shape[4])
output = T.zeros(output_shape)
indices = (slice(None),
slice(padding[0], input_shape[1] + padding[0]),
slice(padding[1], input_shape[2] + padding[1]),
slice(padding[2], input_shape[3] + padding[2]),
slice(None))
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
return T.set_subtensor(output[indices], x)
def pack(x):
return T.stack(*x)
def one_hot(indices, nb_classes):
'''
Input: nD integer tensor of shape (batch_size, dim1, dim2, ... dim(n-1))
Output: (n + 1)D one hot representation of the input with shape (batch_size, dim1, dim2, ... dim(n-1), nb_classes)
'''
input_shape = tuple((indices.shape[i] for i in range(indices.ndim)))
indices = T.flatten(indices)
oh = T.extra_ops.to_one_hot(indices, nb_classes)
oh = T.reshape(oh, input_shape + (nb_classes,))
return oh
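# A minimal usage sketch for one_hot (assumes `K` is this backend and `np` is
# NumPy):
#     idx = K.variable(np.array([[0, 2], [1, 3]]), dtype='int32')
#     K.eval(K.one_hot(idx, 4)).shape   # (2, 2, 4): one class axis appended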
# VALUE MANIPULATION
def get_value(x):
if not hasattr(x, 'get_value'):
raise Exception("'get_value() can only be called on a variable. " +
"If you have an expression instead, use eval().")
return x.get_value()
def batch_get_value(xs):
'''Returns the value of more than one tensor variable,
as a list of Numpy arrays.
'''
return [get_value(x) for x in xs]
def set_value(x, value):
x.set_value(np.asarray(value, dtype=x.dtype))
def batch_set_value(tuples):
for x, value in tuples:
x.set_value(np.asarray(value, dtype=x.dtype))
def print_tensor(x, message=''):
'''Print the message and the tensor when evaluated and return the same
tensor.
'''
p_op = Print(message)
return p_op(x)
# GRAPH MANIPULATION
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
self.function = theano.function(inputs, outputs, updates=updates,
allow_input_downcast=True,
on_unused_input='ignore',
**kwargs)
def __call__(self, inputs):
assert type(inputs) in {list, tuple}
return self.function(*inputs)
def function(inputs, outputs, updates=[], **kwargs):
if len(kwargs) > 0:
function_args = inspect.getargspec(theano.function)[0]
for key in kwargs.keys():
if key not in function_args:
msg = "Invalid argument '%s' passed to K.function" % key
raise ValueError(msg)
return Function(inputs, outputs, updates=updates, **kwargs)
def gradients(loss, variables):
return T.grad(loss, variables)
def stop_gradient(variables):
'''Returns `variables` but with zero gradient with respect to every other
variable.
'''
return theano.gradient.disconnected_grad(variables)
# CONTROL FLOW
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
'''Iterates over the time dimension of a tensor.
# Arguments
inputs: tensor of temporal data of shape (samples, time, ...)
(at least 3D).
step_function:
Parameters:
input: tensor with shape (samples, ...) (no time dimension),
representing input for the batch of samples at a certain
time step.
states: list of tensors.
Returns:
output: tensor with shape (samples, ...) (no time dimension),
new_states: list of tensors, same length and shapes
as 'states'.
initial_states: tensor with shape (samples, ...) (no time dimension),
containing the initial values for the states used in
the step function.
go_backwards: boolean. If True, do the iteration over
the time dimension in reverse order.
mask: binary tensor with shape (samples, time),
with a zero for every element that is masked.
constants: a list of constant values passed at each step.
unroll: whether to unroll the RNN or to use a symbolic loop (`scan`).
input_length: must be specified if using `unroll`.
# Returns
A tuple (last_output, outputs, new_states).
last_output: the latest output of the rnn, of shape (samples, ...)
outputs: tensor with shape (samples, time, ...) where each
entry outputs[s, t] is the output of the step function
at time t for sample s.
new_states: list of tensors, latest states returned by
the step function, of shape (samples, ...).
'''
ndim = inputs.ndim
assert ndim >= 3, 'Input should be at least 3D.'
if unroll:
if input_length is None:
raise Exception('When specifying `unroll=True`, an `input_length` '
'must be provided to `rnn`.')
axes = [1, 0] + list(range(2, ndim))
inputs = inputs.dimshuffle(axes)
if constants is None:
constants = []
if mask is not None:
if mask.ndim == ndim-1:
mask = expand_dims(mask)
assert mask.ndim == ndim
mask = mask.dimshuffle(axes)
if unroll:
indices = list(range(input_length))
if go_backwards:
indices = indices[::-1]
successive_outputs = []
successive_states = []
states = initial_states
for i in indices:
output, new_states = step_function(inputs[i], states + constants)
if len(successive_outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = T.switch(mask[i], output, prev_output)
kept_states = []
for state, new_state in zip(states, new_states):
kept_states.append(T.switch(mask[i], new_state, state))
states = kept_states
successive_outputs.append(output)
successive_states.append(states)
outputs = T.stack(*successive_outputs)
states = []
for i in range(len(successive_states[-1])):
states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states]))
else:
# build an all-zero tensor of shape (samples, output_dim)
initial_output = step_function(inputs[0], initial_states + constants)[0] * 0
# Theano gets confused by broadcasting patterns in the scan op
initial_output = T.unbroadcast(initial_output, 0, 1)
def _step(input, mask, output_tm1, *states):
output, new_states = step_function(input, states)
# output previous output if masked.
output = T.switch(mask, output, output_tm1)
return_states = []
for state, new_state in zip(states, new_states):
return_states.append(T.switch(mask, new_state, state))
return [output] + return_states
results, _ = theano.scan(
_step,
sequences=[inputs, mask],
outputs_info=[initial_output] + initial_states,
non_sequences=constants,
go_backwards=go_backwards)
# deal with Theano API inconsistency
if type(results) is list:
outputs = results[0]
states = results[1:]
else:
outputs = results
states = []
else:
if unroll:
indices = list(range(input_length))
if go_backwards:
indices = indices[::-1]
successive_outputs = []
successive_states = []
states = initial_states
for i in indices:
output, states = step_function(inputs[i], states + constants)
successive_outputs.append(output)
successive_states.append(states)
outputs = T.stack(*successive_outputs)
states = []
for i in range(len(successive_states[-1])):
states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states]))
else:
def _step(input, *states):
output, new_states = step_function(input, states)
return [output] + new_states
results, _ = theano.scan(
_step,
sequences=inputs,
outputs_info=[None] + initial_states,
non_sequences=constants,
go_backwards=go_backwards)
# deal with Theano API inconsistency
if type(results) is list:
outputs = results[0]
states = results[1:]
else:
outputs = results
states = []
outputs = T.squeeze(outputs)
last_output = outputs[-1]
axes = [1, 0] + list(range(2, outputs.ndim))
outputs = outputs.dimshuffle(axes)
states = [T.squeeze(state[-1]) for state in states]
return last_output, outputs, states
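# A minimal sketch of a step function for rnn (assumes `K` is this backend and
# `np` is NumPy; the cumulative-sum recurrence is illustrative only):
#     def _sum_step(x_t, states):
#         acc = states[0] + x_t
#         return acc, [acc]
#     inputs = K.variable(np.random.rand(8, 5, 10))   # (samples, time, dim)
#     last, outputs, new_states = K.rnn(_sum_step, inputs, [K.zeros((8, 10))])
#     K.eval(outputs).shape   # (8, 5, 10)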
def switch(condition, then_expression, else_expression):
'''condition: scalar tensor.
'''
return T.switch(condition, then_expression, else_expression)
def in_train_phase(x, alt):
if _LEARNING_PHASE is 1:
return x
elif _LEARNING_PHASE is 0:
return alt
x = T.switch(_LEARNING_PHASE, x, alt)
x._uses_learning_phase = True
return x
def in_test_phase(x, alt):
if _LEARNING_PHASE is 1:
return alt
elif _LEARNING_PHASE is 0:
return x
x = T.switch(_LEARNING_PHASE, alt, x)
x._uses_learning_phase = True
return x
# NN OPERATIONS
def relu(x, alpha=0., max_value=None):
assert hasattr(T.nnet, 'relu'), ('It looks like your version of '
'Theano is out of date. '
'Install the latest version with:\n'
'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
x = T.nnet.relu(x, alpha)
if max_value is not None:
x = T.minimum(x, max_value)
return x
def softmax(x):
return T.nnet.softmax(x)
def softplus(x):
return T.nnet.softplus(x)
def softsign(x):
return T_softsign(x)
def categorical_crossentropy(output, target, from_logits=False):
if from_logits:
output = T.nnet.softmax(output)
else:
# scale preds so that the class probas of each sample sum to 1
output /= output.sum(axis=-1, keepdims=True)
# avoid numerical instability with _EPSILON clipping
output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
return T.nnet.categorical_crossentropy(output, target)
def sparse_categorical_crossentropy(output, target, from_logits=False):
target = T.cast(T.flatten(target), 'int32')
target = T.extra_ops.to_one_hot(target, nb_class=output.shape[-1])
target = reshape(target, shape(output))
return categorical_crossentropy(output, target, from_logits)
def binary_crossentropy(output, target, from_logits=False):
if from_logits:
output = T.nnet.sigmoid(output)
# avoid numerical instability with _EPSILON clipping
output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
return T.nnet.binary_crossentropy(output, target)
def sigmoid(x):
return T.nnet.sigmoid(x)
def hard_sigmoid(x):
return T.nnet.hard_sigmoid(x)
def tanh(x):
return T.tanh(x)
def dropout(x, level, seed=None):
if level < 0. or level >= 1:
raise Exception('Dropout level must be in interval [0, 1[.')
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
retain_prob = 1. - level
x *= rng.binomial(x.shape, p=retain_prob, dtype=x.dtype)
x /= retain_prob
return x
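# Note that this is "inverted" dropout: surviving units are scaled by
# 1 / retain_prob at training time, so no rescaling is needed at test time.
# A minimal sketch (assumes `K` is this backend and `np` is NumPy):
#     x = K.variable(np.ones((4, 4)))
#     K.eval(K.dropout(x, 0.5))   # roughly half the entries are 0., the rest 2.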
def l2_normalize(x, axis):
norm = T.sqrt(T.sum(T.square(x), axis=axis, keepdims=True))
return x / norm
# CONVOLUTIONS
def _preprocess_conv2d_input(x, dim_ordering):
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = x.dimshuffle((0, 3, 1, 2))
return x
def _preprocess_conv2d_kernel(kernel, dim_ordering):
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH kernel shape: (depth, input_depth, rows, cols)
# TF kernel shape: (rows, cols, input_depth, depth)
kernel = kernel.dimshuffle((3, 2, 0, 1))
return kernel
def _preprocess_border_mode(border_mode):
if border_mode == 'same':
th_border_mode = 'half'
elif border_mode == 'valid':
th_border_mode = 'valid'
else:
raise Exception('Border mode not supported: ' + str(border_mode))
return th_border_mode
def _preprocess_image_shape(dim_ordering, image_shape):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if dim_ordering == 'tf':
if image_shape:
image_shape = (image_shape[0], image_shape[3],
image_shape[1], image_shape[2])
if image_shape is not None:
image_shape = tuple(int_or_none(v) for v in image_shape)
return image_shape
def _preprocess_filter_shape(dim_ordering, filter_shape):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if dim_ordering == 'tf':
if filter_shape:
filter_shape = (filter_shape[3], filter_shape[2],
filter_shape[0], filter_shape[1])
if filter_shape is not None:
filter_shape = tuple(int_or_none(v) for v in filter_shape)
return filter_shape
def _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering):
if border_mode == 'same':
if np_kernel.shape[2] % 2 == 0:
conv_out = conv_out[:, :, :(x.shape[2] + strides[0] - 1) // strides[0], :]
if np_kernel.shape[3] % 2 == 0:
conv_out = conv_out[:, :, :, :(x.shape[3] + strides[1] - 1) // strides[1]]
if dim_ordering == 'tf':
conv_out = conv_out.dimshuffle((0, 2, 3, 1))
return conv_out
def conv2d(x, kernel, strides=(1, 1), border_mode='valid',
dim_ordering=_IMAGE_DIM_ORDERING, image_shape=None,
filter_shape=None, filter_dilation=(1, 1)):
'''2D convolution.
# Arguments
kernel: kernel tensor.
strides: strides tuple.
border_mode: string, "same" or "valid".
dim_ordering: "tf" or "th".
Whether to use Theano or TensorFlow dimension ordering
in inputs/kernels/outputs.
'''
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
x = _preprocess_conv2d_input(x, dim_ordering)
kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
th_border_mode = _preprocess_border_mode(border_mode)
np_kernel = kernel.eval()
image_shape = _preprocess_image_shape(dim_ordering, image_shape)
filter_shape = _preprocess_filter_shape(dim_ordering, filter_shape)
# TODO: remove the if statement when theano with no filter dilation is deprecated.
if filter_dilation == (1, 1):
conv_out = T.nnet.conv2d(x, kernel,
border_mode=th_border_mode,
subsample=strides,
input_shape=image_shape,
filter_shape=filter_shape)
else:
conv_out = T.nnet.conv2d(x, kernel,
border_mode=th_border_mode,
subsample=strides,
input_shape=image_shape,
filter_shape=filter_shape,
filter_dilation=filter_dilation)
conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
strides, dim_ordering)
return conv_out
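# A minimal usage sketch for conv2d with 'th' dim ordering (assumes `K` is this
# backend and `np` is NumPy; shapes are illustrative only):
#     x = K.variable(np.random.rand(1, 3, 8, 8))   # (samples, stack, rows, cols)
#     w = K.variable(np.random.rand(4, 3, 3, 3))   # (nb_filter, stack, rows, cols)
#     out = K.conv2d(x, w, border_mode='same', dim_ordering='th')
#     K.eval(out).shape   # (1, 4, 8, 8): 'same' keeps the spatial size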
def deconv2d(x, kernel, output_shape, strides=(1, 1),
border_mode='valid',
dim_ordering=_IMAGE_DIM_ORDERING,
image_shape=None, filter_shape=None):
'''2D deconvolution (transposed convolution).
# Arguments
kernel: kernel tensor.
output_shape: desired dimensions of output.
strides: strides tuple.
border_mode: string, "same" or "valid".
dim_ordering: "tf" or "th".
Whether to use Theano or TensorFlow dimension ordering
in inputs/kernels/outputs.
'''
flip_filters = False
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
x = _preprocess_conv2d_input(x, dim_ordering)
kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
kernel = kernel.dimshuffle((1, 0, 2, 3))
th_border_mode = _preprocess_border_mode(border_mode)
np_kernel = kernel.eval()
filter_shape = _preprocess_filter_shape(dim_ordering, filter_shape)
op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=output_shape,
kshp=filter_shape,
subsample=strides,
border_mode=th_border_mode,
filter_flip=not flip_filters)
conv_out = op(kernel, x, output_shape[2:])
conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
strides, dim_ordering)
return conv_out
def atrous_conv2d(x, kernel, rate=1,
border_mode='valid',
dim_ordering=_IMAGE_DIM_ORDERING,
image_shape=None, filter_shape=None):
raise NotImplementedError
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
border_mode='valid', dim_ordering=_IMAGE_DIM_ORDERING):
raise NotImplementedError
def conv3d(x, kernel, strides=(1, 1, 1),
border_mode='valid', dim_ordering='th',
volume_shape=None, filter_shape=None):
'''
Run on cuDNN if available.
border_mode: string, "same" or "valid".
'''
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
if border_mode not in {'same', 'valid'}:
raise Exception('Invalid border mode: ' + str(border_mode))
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
# TH kernel shape: (out_depth, input_depth, kernel_dim1, kernel_dim2, kernel_dim3)
# TF kernel shape: (kernel_dim1, kernel_dim2, kernel_dim3, input_depth, out_depth)
x = x.dimshuffle((0, 4, 1, 2, 3))
kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
if volume_shape:
volume_shape = (volume_shape[0], volume_shape[4],
volume_shape[1], volume_shape[2], volume_shape[3])
if filter_shape:
filter_shape = (filter_shape[4], filter_shape[3],
filter_shape[0], filter_shape[1], filter_shape[2])
if border_mode == 'same':
assert(strides == (1, 1, 1))
pad_dim1 = (kernel.shape[2] - 1)
pad_dim2 = (kernel.shape[3] - 1)
pad_dim3 = (kernel.shape[4] - 1)
output_shape = (x.shape[0], x.shape[1],
x.shape[2] + pad_dim1,
x.shape[3] + pad_dim2,
x.shape[4] + pad_dim3)
output = T.zeros(output_shape)
indices = (slice(None), slice(None),
slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2),
slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2),
slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2))
x = T.set_subtensor(output[indices], x)
border_mode = 'valid'
border_mode_3d = (border_mode, border_mode, border_mode)
conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
filters=kernel.dimshuffle(0, 2, 1, 3, 4),
border_mode=border_mode_3d)
conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
# support strides by manually slicing the output
if strides != (1, 1, 1):
conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]
if dim_ordering == 'tf':
conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
return conv_out
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
dim_ordering='th', pool_mode='max'):
if border_mode == 'same':
w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
padding = (w_pad, h_pad)
elif border_mode == 'valid':
padding = (0, 0)
else:
raise Exception('Invalid border mode: ' + str(border_mode))
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
if dim_ordering == 'tf':
x = x.dimshuffle((0, 3, 1, 2))
if pool_mode == 'max':
pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
ignore_border=True,
padding=padding,
mode='max')
elif pool_mode == 'avg':
pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
ignore_border=True,
padding=padding,
mode='average_exc_pad')
else:
raise Exception('Invalid pooling mode: ' + str(pool_mode))
if border_mode == 'same':
expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
pool_out = pool_out[:, :,
: expected_width,
: expected_height]
if dim_ordering == 'tf':
pool_out = pool_out.dimshuffle((0, 2, 3, 1))
return pool_out
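# A minimal usage sketch for pool2d (assumes `K` is this backend and `np` is
# NumPy):
#     x = K.variable(np.random.rand(1, 3, 8, 8))
#     out = K.pool2d(x, pool_size=(2, 2), strides=(2, 2), border_mode='valid',
#                    dim_ordering='th', pool_mode='max')
#     K.eval(out).shape   # (1, 3, 4, 4)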
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
dim_ordering='th', pool_mode='max'):
if border_mode == 'same':
# TODO: add implementation for border_mode="same"
raise Exception('border_mode="same" not supported with Theano.')
elif border_mode == 'valid':
ignore_border = True
padding = (0, 0)
else:
raise Exception('Invalid border mode: ' + str(border_mode))
if dim_ordering not in {'th', 'tf'}:
raise Exception('Unknown dim_ordering ' + str(dim_ordering))
if dim_ordering == 'tf':
x = x.dimshuffle((0, 4, 1, 2, 3))
if pool_mode == 'max':
# pooling over conv_dim2, conv_dim1 (last two channels)
output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
ds=(pool_size[1], pool_size[0]),
st=(strides[1], strides[0]),
ignore_border=ignore_border,
padding=padding,
mode='max')
# pooling over conv_dim3
pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
ds=(1, pool_size[2]),
st=(1, strides[2]),
ignore_border=ignore_border,
padding=padding,
mode='max')
elif pool_mode == 'avg':
# pooling over conv_dim2, conv_dim1 (last two channels)
output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
ds=(pool_size[1], pool_size[0]),
st=(strides[1], strides[0]),
ignore_border=ignore_border,
padding=padding,
mode='average_exc_pad')
# pooling over conv_dim3
pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
ds=(1, pool_size[2]),
st=(1, strides[2]),
ignore_border=ignore_border,
padding=padding,
mode='average_exc_pad')
else:
raise Exception('Invalid pooling mode: ' + str(pool_mode))
if dim_ordering == 'tf':
pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
return pool_out
# RANDOMNESS
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
def random_uniform(shape, low=0.0, high=1.0, dtype=_FLOATX, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
return rng.uniform(shape, low=low, high=high, dtype=dtype)
def random_binomial(shape, p=0.0, dtype=_FLOATX, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
rng = RandomStreams(seed=seed)
return rng.binomial(shape, p=p, dtype=dtype)
| {
"content_hash": "6abc24b9a94f06c15ebd40ac8e9e100d",
"timestamp": "",
"source": "github",
"line_count": 1304,
"max_line_length": 118,
"avg_line_length": 31.73926380368098,
"alnum_prop": 0.5698511645887697,
"repo_name": "relh/keras",
"id": "f1383eab250fb27a6e5e10c92a51025fc3649189",
"size": "41388",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keras/backend/theano_backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "697"
},
{
"name": "Python",
"bytes": "905485"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.urls import reverse
from core.models import Transformation, OAIEndpoint, ValidationScenario, \
RecordIdentifierTransformation, DPLABulkDataDownload, FieldMapper
from tests.utils import TestConfiguration
class ConfigurationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.transformation = Transformation(name='Class Test Transformation')
cls.transformation.save()
cls.oai_endpoint = OAIEndpoint(name='Class Test OAI endpoint')
cls.oai_endpoint.save()
cls.validation_scenario = ValidationScenario(name='Class Test Validation Scenario',
validation_type='python')
cls.validation_scenario.save()
cls.rits = RecordIdentifierTransformation(name='Class Test RITS')
cls.rits.save()
cls.dpla_download = DPLABulkDataDownload()
cls.dpla_download.save()
cls.field_mapper = FieldMapper()
cls.field_mapper.save()
def setUp(self):
self.config = TestConfiguration()
self.client.force_login(self.config.user)
def test_configuration(self):
response = self.client.get(reverse('configuration'))
self.assertEqual(list(response.context['transformations']),
[ConfigurationTestCase.transformation])
self.assertEqual(list(response.context['oai_endpoints']),
[ConfigurationTestCase.oai_endpoint])
self.assertEqual(list(response.context['validation_scenarios']),
[ConfigurationTestCase.validation_scenario])
self.assertEqual(list(response.context['rits']),
[ConfigurationTestCase.rits])
self.assertEqual(list(response.context['field_mappers']),
[ConfigurationTestCase.field_mapper])
self.assertEqual(list(response.context['bulk_downloads']),
[ConfigurationTestCase.dpla_download])
def test_get_dpla_bulk_data_download(self):
response = self.client.get(reverse('dpla_bulk_data_download'))
# TODO: this entire path is kind of out of date and disused
self.assertFalse(response.context['bulk_data_keys'])
| {
"content_hash": "e1e85b41b4aba1b5fddaa0b8c10cf0d7",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 91,
"avg_line_length": 46.979166666666664,
"alnum_prop": 0.6585365853658537,
"repo_name": "WSULib/combine",
"id": "0130c20d9d94b04696cd1f695e26e982f9fd284b",
"size": "2255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_views/test_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "250903"
},
{
"name": "HTML",
"bytes": "372068"
},
{
"name": "JavaScript",
"bytes": "1926326"
},
{
"name": "Python",
"bytes": "639194"
},
{
"name": "Shell",
"bytes": "435"
},
{
"name": "XSLT",
"bytes": "38438"
}
],
"symlink_target": ""
} |
''' Fvtk module implements simple visualization functions using VTK.
The main idea is the following:
A window can have one or more renderers. A renderer can have zero, one or more actors. Examples of actors are a sphere, line, point etc.
You basically add actors to a renderer and in that way you can visualize the aforementioned objects, e.g. sphere, line ...
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #fvtk.show(r)
For more information on VTK there are many neat examples at
http://www.vtk.org/Wiki/VTK/Tutorials/External_Tutorials
'''
from __future__ import division, print_function, absolute_import
from dipy.utils.six.moves import xrange
import types
import numpy as np
from dipy.core.ndindex import ndindex
# Conditional import machinery for vtk
from ..utils.optpkg import optional_package
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')
if have_matplotlib:
get_cmap = cm.get_cmap
else:
from dipy.data import get_cmap
# a track buffer used only with picking tracks
track_buffer = []
# indices buffer for the tracks
ind_buffer = []
# temporary renderer used only with picking tracks
tmp_ren = None
if have_vtk:
major_version = vtk.vtkVersion.GetVTKMajorVersion()
# Create a text mapper and actor to display the results of picking.
textMapper = vtk.vtkTextMapper()
tprop = textMapper.GetTextProperty()
tprop.SetFontFamilyToArial()
tprop.SetFontSize(10)
# tprop.BoldOn()
# tprop.ShadowOn()
tprop.SetColor(1, 0, 0)
textActor = vtk.vtkActor2D()
textActor.VisibilityOff()
textActor.SetMapper(textMapper)
# Create a cell picker.
picker = vtk.vtkCellPicker()
def ren():
'''Create a renderer.
Returns
-------
v : vtkRenderer() object
Renderer.
Examples
--------
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3)]
>>> c=fvtk.line(lines, fvtk.colors.red)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
'''
return vtk.vtkRenderer()
def add(ren, a):
''' Add a specific actor
'''
if isinstance(a, vtk.vtkVolume):
ren.AddVolume(a)
else:
ren.AddActor(a)
def rm(ren, a):
''' Remove a specific actor
'''
ren.RemoveActor(a)
def clear(ren):
''' Remove all actors from the renderer
'''
ren.RemoveAllViewProps()
def rm_all(ren):
''' Remove all actors from the renderer
'''
clear(ren)
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
''' Internal function for generating arrow actors.
'''
arrow = vtk.vtkArrowSource()
# arrow.SetTipLength(length)
arrowm = vtk.vtkPolyDataMapper()
if major_version <= 5:
arrowm.SetInput(arrow.GetOutput())
else:
arrowm.SetInputData(arrow.GetOutput())
arrowa = vtk.vtkActor()
arrowa.SetMapper(arrowm)
arrowa.GetProperty().SetColor(color)
arrowa.GetProperty().SetOpacity(opacity)
arrowa.SetScale(scale)
return arrowa
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1),
opacity=1):
""" Create an actor with the coordinate's system axes where
red = x, green = y, blue =z.
Parameters
----------
scale : tuple (3,)
axes size e.g. (100, 100, 100)
colorx : tuple (3,)
x-axis color. Default red.
colory : tuple (3,)
y-axis color. Default green.
colorz : tuple (3,)
z-axis color. Default blue.
Returns
-------
vtkAssembly
"""
arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
arrowy.RotateZ(90)
arrowz.RotateY(-90)
ass = vtk.vtkAssembly()
ass.AddPart(arrowx)
ass.AddPart(arrowy)
ass.AddPart(arrowz)
return ass
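# A minimal usage sketch for axes (not in the docstring above; assumes the usual
# ``from dipy.viz import fvtk`` import):
#     r = fvtk.ren()
#     fvtk.add(r, fvtk.axes(scale=(10, 10, 10)))
#     # fvtk.show(r)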
def _lookup(colors):
''' Internal function
Creates a lookup table with given colors.
Parameters
------------
colors : array, shape (N,3)
Colormap where every triplet is encoding red, green and blue e.g.
::
r1,g1,b1
r2,g2,b2
...
rN,gN,bN
where
::
0=<r<=1,
0=<g<=1,
0=<b<=1,
Returns
----------
vtkLookupTable
'''
colors = np.asarray(colors, dtype=np.float32)
if colors.ndim > 2:
raise ValueError('Incorrect shape of array in colors')
if colors.ndim == 1:
N = 1
if colors.ndim == 2:
N = colors.shape[0]
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(N)
lut.Build()
if colors.ndim == 2:
scalar = 0
for (r, g, b) in colors:
lut.SetTableValue(scalar, r, g, b, 1.0)
scalar += 1
if colors.ndim == 1:
lut.SetTableValue(0, colors[0], colors[1], colors[2], 1.0)
return lut
def streamtube(lines, colors, opacity=1, linewidth=0.15, tube_sides=8,
lod=True, lod_points=10 ** 4, lod_points_size=5):
""" Uses streamtubes to visualize polylines
Parameters
----------
lines : list
list of N curves represented as 2D ndarrays
colors : array (N, 3) or tuple (3,)
opacity : float
linewidth : float
tube_sides : int
lod : bool
use vtkLODActor rather than vtkActor
lod_points : int
number of points to be used when LOD is in effect
lod_points_size : int
size of points when lod is in effect
Examples
--------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors=np.random.rand(2, 3)
>>> c=fvtk.streamtube(lines, colors)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
Notes
-----
Streamtubes can be heavy on the GPU when rendering many streamlines, so you
may experience slow rendering times depending on your GPU. A solution to this
problem is to reduce the number of points in each streamline. In Dipy we
provide an algorithm that reduces the number of points on the straighter
parts of the streamline but keeps more points on the curvier parts. It can
be used in the following way:
from dipy.tracking.distances import approx_polygon_track
lines = [approx_polygon_track(line, 0.2) for line in lines]
"""
points = vtk.vtkPoints()
colors = np.asarray(colors)
if colors.ndim == 1:
colors = np.tile(colors, (len(lines), 1))
# Create the polyline.
streamlines = vtk.vtkCellArray()
cols = vtk.vtkUnsignedCharArray()
cols.SetName("Cols")
cols.SetNumberOfComponents(3)
len_lines = len(lines)
prior_line_shape = 0
for i in range(len_lines):
line = lines[i]
streamlines.InsertNextCell(line.shape[0])
for j in range(line.shape[0]):
points.InsertNextPoint(*line[j])
streamlines.InsertCellPoint(j + prior_line_shape)
color = (255 * colors[i]).astype('ubyte')
cols.InsertNextTuple3(*color)
prior_line_shape += line.shape[0]
profileData = vtk.vtkPolyData()
profileData.SetPoints(points)
profileData.SetLines(streamlines)
profileData.GetPointData().AddArray(cols)
# Add thickness to the resulting line.
profileTubes = vtk.vtkTubeFilter()
profileTubes.SetNumberOfSides(tube_sides)
if major_version <= 5:
profileTubes.SetInput(profileData)
else:
profileTubes.SetInputData(profileData)
#profileTubes.SetInput(profileData)
profileTubes.SetRadius(linewidth)
profileMapper = vtk.vtkPolyDataMapper()
profileMapper.SetInputConnection(profileTubes.GetOutputPort())
profileMapper.ScalarVisibilityOn()
profileMapper.SetScalarModeToUsePointFieldData()
profileMapper.SelectColorArray("Cols")
profileMapper.GlobalImmediateModeRenderingOn()
if lod:
profile = vtk.vtkLODActor()
profile.SetNumberOfCloudPoints(lod_points)
profile.GetProperty().SetPointSize(lod_points_size)
else:
profile = vtk.vtkActor()
profile.SetMapper(profileMapper)
profile.GetProperty().SetAmbient(0) # .3
profile.GetProperty().SetSpecular(0) # .3
profile.GetProperty().SetSpecularPower(10)
profile.GetProperty().SetInterpolationToGouraud()
profile.GetProperty().BackfaceCullingOn()
profile.GetProperty().SetOpacity(opacity)
return profile
def line(lines, colors, opacity=1, linewidth=1):
''' Create an actor for one or more lines.
Parameters
------------
lines : list of arrays representing lines as 3d points, for example
lines=[np.random.rand(10,3), np.random.rand(20,3)]
represents 2 lines, the first with 10 points and the second with 20 points, in x,y,z coordinates.
colors : array, shape (N,3)
Colormap where every triplet is encoding red, green and blue e.g.
::
r1,g1,b1
r2,g2,b2
...
rN,gN,bN
where
::
0=<r<=1,
0=<g<=1,
0=<b<=1
opacity : float, optional
``0 <= transparency <= 1``
linewidth : float, optional
Line thickness.
Returns
----------
v : vtkActor object
Line.
Examples
----------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3), np.random.rand(20,3)]
>>> colors=np.random.rand(2,3)
>>> c=fvtk.line(lines, colors)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
'''
if not isinstance(lines, list):
lines = [lines]
points = vtk.vtkPoints()
lines_ = vtk.vtkCellArray()
linescalars = vtk.vtkFloatArray()
# lookuptable=vtk.vtkLookupTable()
lookuptable = _lookup(colors)
scalarmin = 0
colors = np.asarray(colors)
if colors.ndim == 2:
scalarmax = colors.shape[0] - 1
if colors.ndim == 1:
scalarmax = 0
curPointID = 0
m = (0.0, 0.0, 0.0)
n = (1.0, 0.0, 0.0)
scalar = 0
# many colors
if colors.ndim == 2:
for Line in lines:
inw = True
mit = iter(Line)
nit = iter(Line)
next(nit)
while(inw):
try:
m = next(mit)
n = next(nit)
# scalar=sp.rand(1)
linescalars.SetNumberOfComponents(1)
points.InsertNextPoint(m)
linescalars.InsertNextTuple1(scalar)
points.InsertNextPoint(n)
linescalars.InsertNextTuple1(scalar)
lines_.InsertNextCell(2)
lines_.InsertCellPoint(curPointID)
lines_.InsertCellPoint(curPointID + 1)
curPointID += 2
except StopIteration:
break
scalar += 1
# one color only
if colors.ndim == 1:
for Line in lines:
inw = True
mit = iter(Line)
nit = iter(Line)
next(nit)
while(inw):
try:
m = next(mit)
n = next(nit)
# scalar=sp.rand(1)
linescalars.SetNumberOfComponents(1)
points.InsertNextPoint(m)
linescalars.InsertNextTuple1(scalar)
points.InsertNextPoint(n)
linescalars.InsertNextTuple1(scalar)
lines_.InsertNextCell(2)
lines_.InsertCellPoint(curPointID)
lines_.InsertCellPoint(curPointID + 1)
curPointID += 2
except StopIteration:
break
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines_)
polydata.GetPointData().SetScalars(linescalars)
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetLookupTable(lookuptable)
mapper.SetColorModeToMapScalars()
mapper.SetScalarRange(scalarmin, scalarmax)
mapper.SetScalarModeToUsePointData()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(linewidth)
actor.GetProperty().SetOpacity(opacity)
return actor
def dots(points, color=(1, 0, 0), opacity=1, dot_size=5):
""" Create one or more 3d points
Parameters
----------
points : ndarray, (N, 3)
color : tuple (3,)
opacity : float
dot_size : int
Returns
--------
vtkActor
See Also
---------
dipy.viz.fvtk.point
"""
if points.ndim == 2:
points_no = points.shape[0]
else:
points_no = 1
polyVertexPoints = vtk.vtkPoints()
polyVertexPoints.SetNumberOfPoints(points_no)
aPolyVertex = vtk.vtkPolyVertex()
aPolyVertex.GetPointIds().SetNumberOfIds(points_no)
cnt = 0
if points.ndim > 1:
for point in points:
polyVertexPoints.InsertPoint(cnt, point[0], point[1], point[2])
aPolyVertex.GetPointIds().SetId(cnt, cnt)
cnt += 1
else:
polyVertexPoints.InsertPoint(cnt, points[0], points[1], points[2])
aPolyVertex.GetPointIds().SetId(cnt, cnt)
cnt += 1
aPolyVertexGrid = vtk.vtkUnstructuredGrid()
aPolyVertexGrid.Allocate(1, 1)
aPolyVertexGrid.InsertNextCell(aPolyVertex.GetCellType(),
aPolyVertex.GetPointIds())
aPolyVertexGrid.SetPoints(polyVertexPoints)
aPolyVertexMapper = vtk.vtkDataSetMapper()
if major_version <= 5:
aPolyVertexMapper.SetInput(aPolyVertexGrid)
else:
aPolyVertexMapper.SetInputData(aPolyVertexGrid)
aPolyVertexActor = vtk.vtkActor()
aPolyVertexActor.SetMapper(aPolyVertexMapper)
aPolyVertexActor.GetProperty().SetColor(color)
aPolyVertexActor.GetProperty().SetOpacity(opacity)
aPolyVertexActor.GetProperty().SetPointSize(dot_size)
return aPolyVertexActor
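# A minimal usage sketch for dots (the docstring above has no Examples section;
# assumes ``from dipy.viz import fvtk`` and NumPy as ``np``):
#     r = fvtk.ren()
#     pts = np.random.rand(20, 3)
#     fvtk.add(r, fvtk.dots(pts, color=(0, 1, 0), dot_size=3))
#     # fvtk.show(r)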
def point(points, colors, opacity=1, point_radius=0.1, theta=8, phi=8):
""" Visualize points as sphere glyphs
Parameters
----------
points : ndarray, shape (N, 3)
colors : ndarray (N,3) or tuple (3,)
point_radius : float
theta : int
phi : int
Returns
-------
vtkActor
Examples
--------
>>> from dipy.viz import fvtk
>>> ren = fvtk.ren()
>>> pts = np.random.rand(5, 3)
>>> point_actor = fvtk.point(pts, fvtk.colors.coral)
>>> fvtk.add(ren, point_actor)
>>> #fvtk.show(ren)
"""
if np.array(colors).ndim == 1:
# return dots(points,colors,opacity)
colors = np.tile(colors, (len(points), 1))
scalars = vtk.vtkUnsignedCharArray()
scalars.SetNumberOfComponents(3)
pts = vtk.vtkPoints()
cnt_colors = 0
for p in points:
pts.InsertNextPoint(p[0], p[1], p[2])
scalars.InsertNextTuple3(
round(255 * colors[cnt_colors][0]), round(255 * colors[cnt_colors][1]), round(255 * colors[cnt_colors][2]))
cnt_colors += 1
src = vtk.vtkSphereSource()
src.SetRadius(point_radius)
src.SetThetaResolution(theta)
src.SetPhiResolution(phi)
polyData = vtk.vtkPolyData()
polyData.SetPoints(pts)
polyData.GetPointData().SetScalars(scalars)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(src.GetOutputPort())
if major_version <= 5:
glyph.SetInput(polyData)
else:
glyph.SetInputData(polyData)
glyph.SetColorModeToColorByScalar()
glyph.SetScaleModeToDataScalingOff()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(glyph.GetOutput())
else:
mapper.SetInputData(glyph.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(opacity)
return actor
def label(ren, text='Origin', pos=(0, 0, 0), scale=(0.2, 0.2, 0.2),
color=(1, 1, 1)):
''' Create a label actor.
This actor will always face the camera
Parameters
----------
ren : vtkRenderer() object
Renderer as returned by ``ren()``.
text : str
Text for the label.
pos : (3,) array_like, optional
        Lower-left position of the label.
scale : (3,) array_like
Changes the size of the label.
color : (3,) array_like
Label color as ``(r,g,b)`` tuple.
Returns
-------
l : vtkActor object
Label.
Examples
--------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> l=fvtk.label(r)
>>> fvtk.add(r,l)
>>> #fvtk.show(r)
'''
atext = vtk.vtkVectorText()
atext.SetText(text)
textm = vtk.vtkPolyDataMapper()
if major_version <= 5:
textm.SetInput(atext.GetOutput())
else:
textm.SetInputData(atext.GetOutput())
texta = vtk.vtkFollower()
texta.SetMapper(textm)
texta.SetScale(scale)
texta.GetProperty().SetColor(color)
texta.SetPosition(pos)
ren.AddActor(texta)
texta.SetCamera(ren.GetActiveCamera())
return texta
def volume(vol, voxsz=(1.0, 1.0, 1.0), affine=None, center_origin=1,
info=0, maptype=0, trilinear=1, iso=0, iso_thr=100,
opacitymap=None, colormap=None):
''' Create a volume and return a volumetric actor using volumetric
rendering.
This function has many different interesting capabilities. The maptype,
opacitymap and colormap are the most crucial parameters here.
Parameters
----------
vol : array, shape (N, M, K), dtype uint8
An array representing the volumetric dataset that we want to visualize
using volumetric rendering.
voxsz : (3,) array_like
Voxel size.
affine : (4, 4) ndarray
As given by volumeimages.
center_origin : int {0,1}
        If 1, the center of the volume is considered to be at the
        point ``(-vol.shape[0]/2.0+0.5,-vol.shape[1]/2.0+0.5,-vol.shape[2]/2.0+0.5)``.
info : int {0,1}
If 1 it prints out some info about the volume, the method and the
dataset.
trilinear : int {0,1}
Use trilinear interpolation, default 1, gives smoother rendering. If
you want faster interpolation use 0 (Nearest).
    maptype : int {0,1}
        Selects the volume mapper used for the rendering.
        The options are:
If 0 then vtkVolumeTextureMapper2D is used.
If 1 then vtkVolumeRayCastFunction is used.
iso : int {0,1}
If iso is 1 and maptype is 1 then we use
``vtkVolumeRayCastIsosurfaceFunction`` which generates an isosurface at
the predefined iso_thr value. If iso is 0 and maptype is 1
``vtkVolumeRayCastCompositeFunction`` is used.
iso_thr : int
        If iso is 1 then this threshold in the volume defines the value
which will be used to create the isosurface.
    opacitymap : (N, 2) ndarray
        The opacity map assigns a transparency coefficient to every point in
        the volume. The default value uses the histogram of the volume to
        calculate the opacitymap.
    colormap : (N, 4) ndarray
        The color map assigns a color value to every point in the volume.
        When None, a red-blue colormap derived from the volume histogram is used.
Returns
-------
v : vtkVolume
Volume.
Notes
--------
    What is the difference between TextureMapper2D and RayCastFunction? Coming
    soon... See the VTK user's guide [book], The Visualization Toolkit [book]
    and VTK's online documentation.
    What is the difference between RayCastIsosurfaceFunction and
    RayCastCompositeFunction? Coming soon... See the VTK user's guide [book],
    The Visualization Toolkit [book] and VTK's online documentation.
What about trilinear interpolation?
Coming soon... well when time permits really ... :-)
Examples
--------
First example random points.
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> vol=100*np.random.rand(100,100,100)
>>> vol=vol.astype('uint8')
>>> vol.min(), vol.max()
(0, 99)
>>> r = fvtk.ren()
>>> v = fvtk.volume(vol)
>>> fvtk.add(r,v)
>>> #fvtk.show(r)
Second example with a more complicated function
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
>>> s = np.sin(x*y*z)/(x*y*z)
>>> r = fvtk.ren()
>>> v = fvtk.volume(s)
>>> fvtk.add(r,v)
>>> #fvtk.show(r)
If you find this function too complicated you can always use mayavi.
Please do not forget to use the -wthread switch in ipython if you are
running mayavi.
from enthought.mayavi import mlab
import numpy as np
x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
s = np.sin(x*y*z)/(x*y*z)
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.show()
More mayavi demos are available here:
http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/mlab.html
'''
if vol.ndim != 3:
raise ValueError('3d numpy arrays only please')
if info:
print('Datatype', vol.dtype, 'converted to uint8')
vol = np.interp(vol, [vol.min(), vol.max()], [0, 255])
vol = vol.astype('uint8')
if opacitymap is None:
bin, res = np.histogram(vol.ravel())
res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
opacitymap = np.vstack((res, res2)).T
opacitymap = opacitymap.astype('float32')
'''
opacitymap=np.array([[ 0.0, 0.0],
[50.0, 0.9]])
'''
if info:
print('opacitymap', opacitymap)
if colormap is None:
bin, res = np.histogram(vol.ravel())
res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
zer = np.zeros(res2.shape)
colormap = np.vstack((res, res2, zer, res2[::-1])).T
colormap = colormap.astype('float32')
'''
colormap=np.array([[0.0, 0.5, 0.0, 0.0],
[64.0, 1.0, 0.5, 0.5],
[128.0, 0.9, 0.2, 0.3],
[196.0, 0.81, 0.27, 0.1],
[255.0, 0.5, 0.5, 0.5]])
'''
if info:
print('colormap', colormap)
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
# im.SetOrigin(0,0,0)
# im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
if affine is not None:
aff = vtk.vtkMatrix4x4()
aff.DeepCopy((affine[0, 0], affine[0, 1], affine[0, 2], affine[0, 3], affine[1, 0], affine[1, 1], affine[1, 2], affine[1, 3], affine[2, 0], affine[
2, 1], affine[2, 2], affine[2, 3], affine[3, 0], affine[3, 1], affine[3, 2], affine[3, 3]))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],0,affine[1,0],affine[1,1],affine[1,2],0,affine[2,0],affine[2,1],affine[2,2],0,affine[3,0],affine[3,1],affine[3,2],1))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],127.5,affine[1,0],affine[1,1],affine[1,2],-127.5,affine[2,0],affine[2,1],affine[2,2],-127.5,affine[3,0],affine[3,1],affine[3,2],1))
reslice = vtk.vtkImageReslice()
if major_version <= 5:
reslice.SetInput(im)
else:
reslice.SetInputData(im)
# reslice.SetOutputDimensionality(2)
# reslice.SetOutputOrigin(127,-145,147)
reslice.SetResliceAxes(aff)
# reslice.SetOutputOrigin(-127,-127,-127)
# reslice.SetOutputExtent(-127,128,-127,128,-127,128)
# reslice.SetResliceAxesOrigin(0,0,0)
# print 'Get Reslice Axes Origin ', reslice.GetResliceAxesOrigin()
# reslice.SetOutputSpacing(1.0,1.0,1.0)
reslice.SetInterpolationModeToLinear()
# reslice.UpdateWholeExtent()
# print 'reslice GetOutputOrigin', reslice.GetOutputOrigin()
# print 'reslice GetOutputExtent',reslice.GetOutputExtent()
# print 'reslice GetOutputSpacing',reslice.GetOutputSpacing()
changeFilter = vtk.vtkImageChangeInformation()
if major_version <= 5:
changeFilter.SetInput(reslice.GetOutput())
else:
changeFilter.SetInputData(reslice.GetOutput())
# changeFilter.SetInput(im)
if center_origin:
changeFilter.SetOutputOrigin(
-vol.shape[0] / 2.0 + 0.5, -vol.shape[1] / 2.0 + 0.5, -vol.shape[2] / 2.0 + 0.5)
print('ChangeFilter ', changeFilter.GetOutputOrigin())
opacity = vtk.vtkPiecewiseFunction()
for i in range(opacitymap.shape[0]):
opacity.AddPoint(opacitymap[i, 0], opacitymap[i, 1])
color = vtk.vtkColorTransferFunction()
for i in range(colormap.shape[0]):
color.AddRGBPoint(
colormap[i, 0], colormap[i, 1], colormap[i, 2], colormap[i, 3])
if(maptype == 0):
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if info:
print('mapper VolumeTextureMapper2D')
mapper = vtk.vtkVolumeTextureMapper2D()
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
if (maptype == 1):
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
property.ShadeOn()
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if iso:
isofunc = vtk.vtkVolumeRayCastIsosurfaceFunction()
isofunc.SetIsoValue(iso_thr)
else:
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
if info:
print('mapper VolumeRayCastMapper')
mapper = vtk.vtkVolumeRayCastMapper()
if iso:
mapper.SetVolumeRayCastFunction(isofunc)
if info:
print('Isosurface')
else:
mapper.SetVolumeRayCastFunction(compositeFunction)
# mapper.SetMinimumImageSampleDistance(0.2)
if info:
print('Composite')
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
# mapper.SetInput(reslice.GetOutput())
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
# Return mid position in world space
# im2=reslice.GetOutput()
# index=im2.FindPoint(vol.shape[0]/2.0,vol.shape[1]/2.0,vol.shape[2]/2.0)
# print 'Image Getpoint ' , im2.GetPoint(index)
volum = vtk.vtkVolume()
volum.SetMapper(mapper)
volum.SetProperty(property)
if info:
print('Origin', volum.GetOrigin())
print('Orientation', volum.GetOrientation())
print('OrientationW', volum.GetOrientationWXYZ())
print('Position', volum.GetPosition())
print('Center', volum.GetCenter())
print('Get XRange', volum.GetXRange())
print('Get YRange', volum.GetYRange())
print('Get ZRange', volum.GetZRange())
print('Volume data type', vol.dtype)
return volum
def contour(vol, voxsz=(1.0, 1.0, 1.0), affine=None, levels=[50],
colors=[np.array([1.0, 0.0, 0.0])], opacities=[0.5]):
""" Take a volume and draw surface contours for any any number of
thresholds (levels) where every contour has its own color and opacity
Parameters
----------
vol : (N, M, K) ndarray
An array representing the volumetric dataset for which we will draw
        some beautiful contours.
voxsz : (3,) array_like
Voxel size.
affine : None
Not used.
    levels : array_like
        Sequence of thresholds for the contours, taken from image values; these
        need to be of the same datatype as `vol`.
colors : (N, 3) ndarray
RGB values in [0,1].
opacities : array_like
Opacities of contours.
Returns
-------
vtkAssembly
Examples
--------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> A=np.zeros((10,10,10))
>>> A[3:-3,3:-3,3:-3]=1
>>> r=fvtk.ren()
>>> fvtk.add(r,fvtk.contour(A,levels=[1]))
>>> #fvtk.show(r)
"""
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
# im.SetOrigin(0,0,0)
# im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
ass = vtk.vtkAssembly()
# ass=[]
for (i, l) in enumerate(levels):
# print levels
skinExtractor = vtk.vtkContourFilter()
if major_version <= 5:
skinExtractor.SetInput(im)
else:
skinExtractor.SetInputData(im)
skinExtractor.SetValue(0, l)
skinNormals = vtk.vtkPolyDataNormals()
skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
skinNormals.SetFeatureAngle(60.0)
skinMapper = vtk.vtkPolyDataMapper()
skinMapper.SetInputConnection(skinNormals.GetOutputPort())
skinMapper.ScalarVisibilityOff()
skin = vtk.vtkActor()
skin.SetMapper(skinMapper)
skin.GetProperty().SetOpacity(opacities[i])
# print colors[i]
skin.GetProperty().SetColor(colors[i][0], colors[i][1], colors[i][2])
# skin.Update()
ass.AddPart(skin)
del skin
del skinMapper
del skinExtractor
return ass
lowercase_cm_name = {'blues':'Blues', 'accent':'Accent'}
def create_colormap(v, name='jet', auto=True):
"""Create colors from a specific colormap and return it
as an array of shape (N,3) where every row gives the corresponding
    r,g,b value. The colormaps we use are similar to those of pylab.
Parameters
----------
v : (N,) array
vector of values to be mapped in RGB colors according to colormap
name : str.
Name of the colormap. Currently implemented: 'jet', 'blues',
'accent', 'bone' and matplotlib colormaps if you have matplotlib
installed.
auto : bool,
        if auto is True then v is interpolated to [0, 1] from v.min()
        to v.max()
Notes
-----
Dipy supports a few colormaps for those who do not use Matplotlib, for
more colormaps consider downloading Matplotlib.
"""
if v.ndim > 1:
msg = 'This function works only with 1d arrays. Use ravel()'
raise ValueError(msg)
if auto:
v = np.interp(v, [v.min(), v.max()], [0, 1])
else:
v = np.clip(v, 0, 1)
# For backwards compatibility with lowercase names
newname = lowercase_cm_name.get(name) or name
colormap = get_cmap(newname)
if colormap is None:
e_s = "Colormap '%s' is not yet implemented " % name
raise ValueError(e_s)
rgba = colormap(v)
rgb = rgba[:, :3].copy()
return rgb
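# Editor's illustrative sketch: mapping a 1D array of values to RGB rows with
# `create_colormap` above. With auto=True the values are first rescaled to [0, 1],
# so any numeric range works. Never called; `_example_create_colormap` is a
# hypothetical name.
def _example_create_colormap():
    v = np.linspace(0., 10., 50)          # arbitrary scalar values
    rgb = create_colormap(v, name='jet')  # shape (50, 3), entries in [0, 1]
    return rgb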
def _makeNd(array, ndim):
"""Pads as many 1s at the beginning of array's shape as are need to give
array ndim dimensions."""
new_shape = (1,) * (ndim - array.ndim) + array.shape
return array.reshape(new_shape)
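# Editor's illustrative sketch: `_makeNd` only prepends singleton axes, so a (5, 3)
# array padded to 4 dimensions becomes (1, 1, 5, 3). Never called; the name
# `_example_makeNd` is hypothetical.
def _example_makeNd():
    a = np.zeros((5, 3))
    assert _makeNd(a, 4).shape == (1, 1, 5, 3)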
def sphere_funcs(sphere_values, sphere, image=None, colormap='jet',
scale=2.2, norm=True, radial_scale=True):
"""Plot many morphed spherical functions simultaneously.
Parameters
----------
sphere_values : (M,) or (X, M) or (X, Y, M) or (X, Y, Z, M) ndarray
Values on the sphere.
sphere : Sphere
image : None,
Not yet supported.
colormap : None or 'jet'
If None then no color is used.
scale : float,
Distance between spheres.
norm : bool,
Normalize `sphere_values`.
radial_scale : bool,
Scale sphere points according to odf values.
Returns
-------
actor : vtkActor
Spheres.
Examples
--------
>>> from dipy.viz import fvtk
>>> r = fvtk.ren()
>>> odfs = np.ones((5, 5, 724))
>>> odfs[..., 0] = 2.
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> fvtk.add(r, fvtk.sphere_funcs(odfs, sphere))
>>> #fvtk.show(r)
"""
sphere_values = np.asarray(sphere_values)
if sphere_values.ndim > 4:
raise ValueError("Wrong shape")
sphere_values = _makeNd(sphere_values, 4)
grid_shape = np.array(sphere_values.shape[:3])
faces = np.asarray(sphere.faces, dtype=int)
vertices = sphere.vertices
if sphere_values.shape[-1] != sphere.vertices.shape[0]:
msg = 'Sphere.vertices.shape[0] should be the same as the '
        msg += 'last dimension of sphere_values, i.e. sphere_values.shape[-1]'
raise ValueError(msg)
list_sq = []
list_cols = []
for ijk in np.ndindex(*grid_shape):
m = sphere_values[ijk].copy()
if norm:
m /= abs(m).max()
if radial_scale:
xyz = vertices.T * m
else:
xyz = vertices.T.copy()
xyz += scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
list_sq.append(xyz)
if colormap is not None:
cols = create_colormap(m, colormap)
cols = np.interp(cols, [0, 1], [0, 255]).astype('ubyte')
list_cols.append(cols)
points = vtk.vtkPoints()
triangles = vtk.vtkCellArray()
if colormap is not None:
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
for k in xrange(len(list_sq)):
xyz = list_sq[k]
if colormap is not None:
cols = list_cols[k]
for i in xrange(xyz.shape[0]):
points.InsertNextPoint(*xyz[i])
if colormap is not None:
colors.InsertNextTuple3(*cols[i])
for j in xrange(faces.shape[0]):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
triangles.InsertNextCell(triangle)
del triangle
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(triangles)
if colormap is not None:
polydata.GetPointData().SetScalars(colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def peaks(peaks_dirs, peaks_values=None, scale=2.2, colors=(1, 0, 0)):
""" Visualize peak directions as given from ``peaks_from_model``
Parameters
----------
peaks_dirs : ndarray
Peak directions. The shape of the array can be (M, 3) or (X, M, 3) or
(X, Y, M, 3) or (X, Y, Z, M, 3)
peaks_values : ndarray
Peak values. The shape of the array can be (M, ) or (X, M) or
(X, Y, M) or (X, Y, Z, M)
scale : float
Distance between spheres
colors : ndarray or tuple
Peak colors
Returns
-------
vtkActor
See Also
--------
dipy.viz.fvtk.sphere_funcs
"""
peaks_dirs = np.asarray(peaks_dirs)
if peaks_dirs.ndim > 5:
raise ValueError("Wrong shape")
peaks_dirs = _makeNd(peaks_dirs, 5)
if peaks_values is not None:
peaks_values = _makeNd(peaks_values, 4)
grid_shape = np.array(peaks_dirs.shape[:3])
list_dirs = []
for ijk in np.ndindex(*grid_shape):
xyz = scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
for i in range(peaks_dirs.shape[-2]):
if peaks_values is not None:
pv = peaks_values[ijk][i]
else:
pv = 1.
symm = np.vstack((-peaks_dirs[ijk][i] * pv + xyz,
peaks_dirs[ijk][i] * pv + xyz))
list_dirs.append(symm)
return line(list_dirs, colors)
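# Editor's illustrative sketch: visualizing the two peaks of a single voxel with
# `peaks` above. Shapes follow the docstring, (M, 3) directions and (M,) values.
# Never called; `_example_peaks` is a hypothetical name.
def _example_peaks():
    dirs = np.array([[1., 0., 0.],
                     [0., 1., 0.]])   # two peak directions in one voxel
    vals = np.array([1., 0.5])        # relative peak sizes
    actor = peaks(dirs, vals, colors=(0, 0, 1))
    r = ren()
    add(r, actor)
    # show(r)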
def tensor(evals, evecs, scalar_colors=None, sphere=None, scale=2.2, norm=True):
"""Plot many tensors as ellipsoids simultaneously.
Parameters
----------
evals : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
eigenvalues
evecs : (3, 3) or (X, 3, 3) or (X, Y, 3, 3) or (X, Y, Z, 3, 3) ndarray
eigenvectors
scalar_colors : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
RGB colors used to show the tensors
Default None, color the ellipsoids using ``color_fa``
sphere : Sphere,
this sphere will be transformed to the tensor ellipsoid
Default is None which uses a symmetric sphere with 724 points.
scale : float,
distance between ellipsoids.
norm : boolean,
Normalize `evals`.
Returns
-------
actor : vtkActor
Ellipsoids
Examples
--------
>>> from dipy.viz import fvtk
>>> r = fvtk.ren()
>>> evals = np.array([1.4, .35, .35]) * 10 ** (-3)
>>> evecs = np.eye(3)
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> fvtk.add(r, fvtk.tensor(evals, evecs, sphere=sphere))
>>> #fvtk.show(r)
"""
evals = np.asarray(evals)
if evals.ndim > 4:
raise ValueError("Wrong shape")
evals = _makeNd(evals, 4)
evecs = _makeNd(evecs, 5)
grid_shape = np.array(evals.shape[:3])
if sphere is None:
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
faces = np.asarray(sphere.faces, dtype=int)
vertices = sphere.vertices
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
if scalar_colors is None:
from dipy.reconst.dti import color_fa, fractional_anisotropy
cfa = color_fa(fractional_anisotropy(evals), evecs)
else:
cfa = _makeNd(scalar_colors, 4)
list_sq = []
list_cols = []
for ijk in ndindex(grid_shape):
ea = evals[ijk]
if norm:
ea /= ea.max()
ea = np.diag(ea.copy())
ev = evecs[ijk].copy()
xyz = np.dot(ev, np.dot(ea, vertices.T))
xyz += scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
list_sq.append(xyz)
acolor = np.zeros(xyz.shape)
acolor[:, :] = np.interp(cfa[ijk], [0, 1], [0, 255])
list_cols.append(acolor.astype('ubyte'))
points = vtk.vtkPoints()
triangles = vtk.vtkCellArray()
for k in xrange(len(list_sq)):
xyz = list_sq[k]
cols = list_cols[k]
for i in xrange(xyz.shape[0]):
points.InsertNextPoint(*xyz[i])
colors.InsertNextTuple3(*cols[i])
for j in xrange(faces.shape[0]):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
triangles.InsertNextCell(triangle)
del triangle
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(triangles)
polydata.GetPointData().SetScalars(colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def slicer(vol, voxsz=(1.0, 1.0, 1.0), plane_i=[0], plane_j=None,
plane_k=None, outline=True):
""" Slice a 3D volume
Parameters
----------
vol : array, shape (N, M, K)
An array representing the volumetric dataset that we want to slice
voxsz : sequence of 3 floats
Voxel size.
plane_i : sequence of ints
show plane or planes along the first dimension
plane_j : sequence of ints
show plane or planes along the second dimension
plane_k : sequence of ints
        show plane or planes along the third (last) dimension
outline : bool
if True (default) a small outline is drawn around the slices
Examples
--------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> x, y, z = np.ogrid[-10:10:80j, -10:10:80j, -10:10:80j]
>>> s = np.sin(x * y * z) / (x * y * z)
>>> r = fvtk.ren()
>>> fvtk.add(r, fvtk.slicer(s, plane_i=[0, 5]))
>>> #fvtk.show(r)
"""
if plane_i is None:
plane_i = []
if plane_j is None:
plane_j = []
if plane_k is None:
plane_k = []
if vol.ndim != 3:
raise ValueError("vol has to be a 3d array")
vol = np.interp(vol, xp=[vol.min(), vol.max()], fp=[0, 255])
vol = vol.astype('uint8')
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
I, J, K = vol.shape[:3]
im.SetDimensions(I, J, K)
# im.SetOrigin(0,0,0)
im.SetSpacing(voxsz[2], voxsz[0], voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
# copy data
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
# An outline provides context around the data.
outlineData = vtk.vtkOutlineFilter()
if major_version <= 5:
outlineData.SetInput(im)
else:
outlineData.SetInputData(im)
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outlineData.GetOutputPort())
outline_ = vtk.vtkActor()
outline_.SetMapper(mapOutline)
outline_.GetProperty().SetColor(1, 0, 0)
# Now we are creating three orthogonal planes passing through the
# volume. Each plane uses a different texture map and therefore has
    # different coloration.
    # Start by creating a black/white lookup table.
lut = vtk.vtkLookupTable()
lut.SetTableRange(vol.min(), vol.max())
lut.SetSaturationRange(0, 0)
lut.SetHueRange(0, 0)
lut.SetValueRange(0, 1)
lut.SetRampToLinear()
lut.Build()
x1, x2, y1, y2, z1, z2 = im.GetExtent()
# print x1,x2,y1,y2,z1,z2
# Create the first of the three planes. The filter vtkImageMapToColors
# maps the data through the corresponding lookup table created above.
# The vtkImageActor is a type of vtkProp and conveniently displays an
# image on a single quadrilateral plane. It does this using texture
# mapping and as a result is quite fast. (Note: the input image has to
# be unsigned char values, which the vtkImageMapToColors produces.)
# Note also that by specifying the DisplayExtent, the pipeline
# requests data of this extent and the vtkImageMapToColors only
# processes a slice of data.
planeColors = vtk.vtkImageMapToColors()
# saggitalColors.SetInputConnection(im.GetOutputPort())
if major_version <= 5:
planeColors.SetInput(im)
else:
planeColors.SetInputData(im)
planeColors.SetLookupTable(lut)
planeColors.Update()
saggitals = []
for x in plane_i:
saggital = vtk.vtkImageActor()
if major_version <= 5:
saggital.SetInput(planeColors.GetOutput())
else:
saggital.SetInputData(planeColors.GetOutput())
saggital.SetDisplayExtent(x, x, y1, y2, z1, z2)
saggitals.append(saggital)
axials = []
for z in plane_k:
axial = vtk.vtkImageActor()
if major_version <= 5:
axial.SetInput(planeColors.GetOutput())
else:
axial.SetInputData(planeColors.GetOutput())
axial.SetDisplayExtent(x1, x2, y1, y2, z, z)
axials.append(axial)
coronals = []
for y in plane_j:
coronal = vtk.vtkImageActor()
if major_version <= 5:
coronal.SetInput(planeColors.GetOutput())
else:
coronal.SetInputData(planeColors.GetOutput())
coronal.SetDisplayExtent(x1, x2, y, y, z1, z2)
coronals.append(coronal)
assem = vtk.vtkAssembly()
for sag in saggitals:
assem.AddPart(sag)
for ax in axials:
assem.AddPart(ax)
for cor in coronals:
assem.AddPart(cor)
if outline:
assem.AddPart(outline_)
return assem
def camera(ren, pos=None, focal=None, viewup=None, verbose=True):
""" Change the active camera
Parameters
----------
ren : vtkRenderer
pos : tuple
(x, y, z) position of the camera
focal : tuple
(x, y, z) focal point
viewup : tuple
(x, y, z) viewup vector
verbose : bool
show information about the camera
Returns
-------
vtkCamera
"""
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f,%.2f,%.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
if pos is not None:
cam = ren.GetActiveCamera().SetPosition(*pos)
if focal is not None:
ren.GetActiveCamera().SetFocalPoint(*focal)
if viewup is not None:
ren.GetActiveCamera().SetViewUp(*viewup)
cam = ren.GetActiveCamera()
if pos is not None or focal is not None or viewup is not None:
if verbose:
print('-------------------------------------')
print('Camera New Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera New Focal Point (%.2f,%.2f,%.2f)' %
cam.GetFocalPoint())
print('Camera New View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
return cam
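# Editor's illustrative sketch: repositioning the active camera with `camera` above,
# assuming a renderer from this module's `ren()`. Never called; `_example_camera` is
# a hypothetical name.
def _example_camera():
    r = ren()
    cam = camera(r, pos=(0, 0, 10), focal=(0, 0, 0), viewup=(0, 1, 0),
                 verbose=False)
    return cam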
def show(ren, title='Dipy', size=(300, 300), png_magnify=1):
""" Show window
Notes
-----
    To save a screenshot press 's' and check your current directory
    for ``fvtk.png``.
Parameters
------------
ren : vtkRenderer() object
As returned from function ``ren()``.
title : string
A string for the window title bar.
size : (int, int)
``(width, height)`` of the window
png_magnify : int
Number of times to magnify the screenshot.
Notes
-----
If you want to:
    * navigate in the 3d world using the left, middle and right mouse buttons
* reset the screen press 'r'
* save a screenshot press 's'
* quit press 'q'
See also
---------
dipy.viz.fvtk.record
Examples
----------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3),np.random.rand(20,3)]
>>> colors=np.array([[0.2,0.2,0.2],[0.8,0.8,0.8]])
>>> c=fvtk.line(lines,colors)
>>> fvtk.add(r,c)
>>> l=fvtk.label(r)
>>> fvtk.add(r,l)
>>> #fvtk.show(r)
"""
ren.ResetCamera()
window = vtk.vtkRenderWindow()
window.AddRenderer(ren)
# window.SetAAFrames(6)
window.SetWindowName(title)
window.SetSize(size[0], size[1])
style = vtk.vtkInteractorStyleTrackballCamera()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(window)
iren.SetPicker(picker)
def key_press(obj, event):
key = obj.GetKeySym()
if key == 's' or key == 'S':
print('Saving image...')
renderLarge = vtk.vtkRenderLargeImage()
            # vtkRenderLargeImage takes the renderer directly via SetInput()
            # regardless of VTK version (as in record() below)
            renderLarge.SetInput(ren)
renderLarge.SetMagnification(png_magnify)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(renderLarge.GetOutputPort())
writer.SetFileName('fvtk.png')
writer.Write()
print('Look for fvtk.png in your current working directory.')
iren.AddObserver('KeyPressEvent', key_press)
iren.SetInteractorStyle(style)
iren.Initialize()
picker.Pick(85, 126, 0, ren)
window.Render()
iren.Start()
# window.RemoveAllObservers()
# ren.SetRenderWindow(None)
window.RemoveRenderer(ren)
ren.SetRenderWindow(None)
def record(ren=None, cam_pos=None, cam_focal=None, cam_view=None,
out_path=None, path_numbering=False, n_frames=1, az_ang=10,
magnification=1, size=(300, 300), verbose=False):
''' This will record a video of your scene
Records a video as a series of ``.png`` files of your scene by rotating
    the azimuth angle `az_ang` in every frame.
Parameters
-----------
ren : vtkRenderer() object
As returned from :func:`ren`.
cam_pos : None or sequence (3,), optional
Camera position.
cam_focal : None or sequence (3,), optional
Camera focal point.
cam_view : None or sequence (3,), optional
Camera view up.
out_path : str, optional
Output directory for the frames
    path_numbering : bool, optional
        When recording, the frame number is appended to out_path for each frame,
        i.e. out_path + str(frame number). If n_frames is larger than 1, frames
        are numbered regardless of this flag.
n_frames : int, optional
number of frames to save. Default: 1
az_ang : float, optional
Azimuthal angle of camera rotation (degrees). Default: 10.
magnification : int, optional
How much to magnify the saved frame. Default: 1 (no magnification).
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #uncomment below to record
>>> #fvtk.record(r)
>>> #check for new images in current directory
'''
if ren is None:
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(size[0], size[1])
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# ren.GetActiveCamera().Azimuth(180)
ren.ResetCamera()
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
ang = 0
if cam_pos is not None:
cx, cy, cz = cam_pos
ren.GetActiveCamera().SetPosition(cx, cy, cz)
if cam_focal is not None:
fx, fy, fz = cam_focal
ren.GetActiveCamera().SetFocalPoint(fx, fy, fz)
if cam_view is not None:
ux, uy, uz = cam_view
ren.GetActiveCamera().SetViewUp(ux, uy, uz)
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f,%.2f,%.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
for i in range(n_frames):
ren.GetActiveCamera().Azimuth(ang)
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer.SetInputConnection(renderLarge.GetOutputPort())
# filename='/tmp/'+str(3000000+i)+'.png'
if n_frames > 1 or path_numbering:
if out_path is None:
filename = str(1000000 + i) + '.png'
else:
filename = out_path + str(1000000 + i) + '.png'
else:
filename = out_path
writer.SetFileName(filename)
writer.Write()
ang = +az_ang
if __name__ == "__main__":
pass
| {
"content_hash": "dc37232d370f8e9897aeca36e8bf4a98",
"timestamp": "",
"source": "github",
"line_count": 1849,
"max_line_length": 191,
"avg_line_length": 28.758788534342887,
"alnum_prop": 0.592872590503056,
"repo_name": "demianw/dipy",
"id": "dd34ee8ba2acede5e2ceabc968a4e01fcdb59e87",
"size": "53175",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dipy/viz/fvtk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2694"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2551559"
}
],
"symlink_target": ""
} |
from pyhmsa.type.xrayline import xrayline, NOTATION_SIEGBAHN
line = xrayline('Ka1', NOTATION_SIEGBAHN, 'K-L3')
from pyhmsa.spec.condition.elementalid import ElementalIDXray
condition = ElementalIDXray(29, line, (8047.82, 'eV'))
print(condition) # Returns: <ElementalIDXray(atomic_number=29, energy=8047.82 eV, line=Ka1)> | {
"content_hash": "eb9eeaf40cf6e373629acc3877f84a04",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 92,
"avg_line_length": 53.5,
"alnum_prop": 0.778816199376947,
"repo_name": "pyhmsa/pyhmsa",
"id": "b1747fc053fc9529e8cf6a77f8c024f4424e94c1",
"size": "344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/06_xrayline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "569193"
}
],
"symlink_target": ""
} |
import frappe
import json
def get_email_accounts(user=None):
if not user:
user = frappe.session.user
email_accounts = []
accounts = frappe.get_all("User Email", filters={ "parent": user },
fields=["email_account", "email_id", "enable_outgoing"],
distinct=True, order_by="idx")
if not accounts:
return {
"email_accounts": [],
"all_accounts": ""
}
all_accounts = ",".join([ account.get("email_account") for account in accounts ])
if len(accounts) > 1:
email_accounts.append({
"email_account": all_accounts,
"email_id": "All Accounts"
})
email_accounts.extend(accounts)
email_accounts.extend([
{
"email_account": "Sent",
"email_id": "Sent Mail"
},
{
"email_account": "Spam",
"email_id": "Spam"
},
{
"email_account": "Trash",
"email_id": "Trash"
}
])
return {
"email_accounts": email_accounts,
"all_accounts": all_accounts
}
@frappe.whitelist()
def create_email_flag_queue(names, action):
""" create email flag queue to mark email either as read or unread """
def mark_as_seen_unseen(name, action):
doc = frappe.get_doc("Communication", name)
if action == "Read":
doc.add_seen()
else:
_seen = json.loads(doc._seen or '[]')
_seen = [user for user in _seen if frappe.session.user != user]
doc.db_set('_seen', json.dumps(_seen), update_modified=False)
if not all([names, action]):
return
for name in json.loads(names or []):
uid, seen_status, email_account = frappe.db.get_value("Communication", name,
["ifnull(uid, -1)", "ifnull(seen, 0)", "email_account"])
# can not mark email SEEN or UNSEEN without uid
if not uid or uid == -1:
continue
seen = 1 if action == "Read" else 0
# check if states are correct
if (action =='Read' and seen_status == 0) or (action =='Unread' and seen_status == 1):
create_new = True
email_flag_queue = frappe.db.sql("""select name, action from `tabEmail Flag Queue`
where communication = %(name)s and is_completed=0""", {"name":name}, as_dict=True)
for queue in email_flag_queue:
if queue.action != action:
frappe.delete_doc("Email Flag Queue", queue.name, ignore_permissions=True)
elif queue.action == action:
# Read or Unread request for email is already available
create_new = False
if create_new:
flag_queue = frappe.get_doc({
"uid": uid,
"action": action,
"communication": name,
"doctype": "Email Flag Queue",
"email_account": email_account
})
flag_queue.save(ignore_permissions=True)
frappe.db.set_value("Communication", name, "seen", seen,
update_modified=False)
mark_as_seen_unseen(name, action)
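# Editor's illustrative sketch: how the whitelisted method above might be called to
# mark two Communication records as read. The docnames are hypothetical; in practice
# `names` arrives as a JSON-encoded list from the desk client. Never called.
def _example_mark_read():
	names = json.dumps(["COMM-0001", "COMM-0002"])  # hypothetical docnames
	create_email_flag_queue(names, "Read")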
@frappe.whitelist()
def mark_as_trash(communication):
"""set email status to trash"""
frappe.db.set_value("Communication", communication, "email_status", "Trash")
@frappe.whitelist()
def mark_as_spam(communication, sender):
""" set email status to spam """
email_rule = frappe.db.get_value("Email Rule", { "email_id": sender })
if not email_rule:
frappe.get_doc({
"doctype": "Email Rule",
"email_id": sender,
"is_spam": 1
}).insert(ignore_permissions=True)
frappe.db.set_value("Communication", communication, "email_status", "Spam")
def link_communication_to_document(doc, reference_doctype, reference_name, ignore_communication_links):
if not ignore_communication_links:
doc.reference_doctype = reference_doctype
doc.reference_name = reference_name
doc.status = "Linked"
doc.save(ignore_permissions=True)
@frappe.whitelist()
def make_issue_from_communication(communication, ignore_communication_links=False):
""" raise a issue from email """
doc = frappe.get_doc("Communication", communication)
issue = frappe.get_doc({
"doctype": "Issue",
"subject": doc.subject,
"raised_by": doc.sender
}).insert(ignore_permissions=True)
link_communication_to_document(doc, "Issue", issue.name, ignore_communication_links)
return issue.name
@frappe.whitelist()
def make_lead_from_communication(communication, ignore_communication_links=False):
""" raise a issue from email """
doc = frappe.get_doc("Communication", communication)
frappe.errprint(doc.sender_full_name)
lead_name = frappe.db.get_value("Lead", {"email_id": doc.sender})
if not lead_name:
lead = frappe.get_doc({
"doctype": "Lead",
"lead_name": doc.sender_full_name,
"email_id": doc.sender
})
lead.flags.ignore_mandatory = True
lead.flags.ignore_permissions = True
lead.insert()
lead_name = lead.name
link_communication_to_document(doc, "Lead", lead_name, ignore_communication_links)
return lead_name
@frappe.whitelist()
def make_opportunity_from_communication(communication, ignore_communication_links=False):
doc = frappe.get_doc("Communication", communication)
lead = doc.reference_name if doc.reference_doctype == "Lead" else None
if not lead:
lead = make_lead_from_communication(communication, ignore_communication_links=True)
enquiry_from = "Lead"
opportunity = frappe.get_doc({
"doctype": "Opportunity",
"enquiry_from": enquiry_from,
"lead": lead
}).insert(ignore_permissions=True)
link_communication_to_document(doc, "Opportunity", opportunity.name, ignore_communication_links)
return opportunity.name
| {
"content_hash": "f2f695264508323f4d3700280034da61",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 103,
"avg_line_length": 29.418079096045197,
"alnum_prop": 0.6854234684079125,
"repo_name": "mbauskar/frappe",
"id": "a3847dc0e625d68c120e7a72b8c17749db250b0c",
"size": "5207",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "frappe/email/inbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "406441"
},
{
"name": "HTML",
"bytes": "213724"
},
{
"name": "JavaScript",
"bytes": "1742788"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "1966810"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
bl_info = {
"name": "SinGED Client",
"author": "Will Cassella",
"version": (0, 1),
"blender": (2, 78, 0),
"description": "SinGE editor client for Blender",
"warning": "",
"wiki_url": "",
"category": "Development",
}
# If we're reloading the module
if "bpy" in locals():
# Close the active session
integration.close_active_session()
# Reimport modules
import importlib
importlib.reload(types)
importlib.reload(ui)
importlib.reload(integration)
    importlib.reload(operators)
    importlib.reload(static_mesh)
else:
import bpy
from bpy.props import PointerProperty, IntProperty, StringProperty
    from . import types, ui, integration, operators, static_mesh
# Register classes in the module
def register():
print ("Registering ", __name__)
bpy.utils.register_class(types.SGETypes)
bpy.utils.register_class(types.SinGEDProps)
bpy.utils.register_class(integration.SinGEDConnect)
bpy.utils.register_class(integration.SinGEDConnectPanel)
bpy.utils.register_class(operators.SinGEDNotification)
bpy.utils.register_class(operators.SinGEDNewComponent)
bpy.utils.register_class(operators.SinGEDDestroyComponent)
bpy.utils.register_class(operators.SinGEDSaveScene)
bpy.utils.register_class(operators.SinGEDGenerateLightmaps)
bpy.utils.register_class(ui.SinGEDNodePanel)
bpy.utils.register_class(ui.SinGEDMaterialPanel)
bpy.utils.register_class(static_mesh.SGEStaticMeshExporter)
bpy.types.INFO_MT_file_export.append(static_mesh.export_menu_func)
bpy.types.Scene.singed = PointerProperty(type=types.SinGEDProps)
bpy.types.Object.sge_node_id = IntProperty(default=0)
bpy.types.Material.sge_path = StringProperty()
# Unregister classes
def unregister():
print ("Unregistering ", __name__)
# Close the active session
integration.close_active_session()
del bpy.types.Material.sge_path
del bpy.types.Object.sge_node_id
del bpy.types.Scene.singed
bpy.types.INFO_MT_file_export.remove(static_mesh.export_menu_func)
bpy.utils.unregister_class(static_mesh.SGEStaticMeshExporter)
bpy.utils.unregister_class(ui.SinGEDMaterialPanel)
bpy.utils.unregister_class(ui.SinGEDNodePanel)
bpy.utils.unregister_class(operators.SinGEDGenerateLightmaps)
bpy.utils.unregister_class(operators.SinGEDSaveScene)
bpy.utils.unregister_class(operators.SinGEDDestroyComponent)
bpy.utils.unregister_class(operators.SinGEDNewComponent)
bpy.utils.unregister_class(operators.SinGEDNotification)
bpy.utils.unregister_class(integration.SinGEDConnectPanel)
bpy.utils.unregister_class(integration.SinGEDConnect)
bpy.utils.unregister_class(types.SinGEDProps)
bpy.utils.unregister_class(types.SGETypes)
if __name__ == "__main__":
register()
| {
"content_hash": "0be144d0372d6d6698eb17092cbf8380",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 70,
"avg_line_length": 36.93333333333333,
"alnum_prop": 0.73971119133574,
"repo_name": "willcassella/SinGE",
"id": "b6323c00e5cab5ff6a1fdb887b5e5ec8790e870b",
"size": "2801",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "Tools/SinGED/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1442"
},
{
"name": "C++",
"bytes": "952159"
},
{
"name": "CMake",
"bytes": "6596"
},
{
"name": "GLSL",
"bytes": "14456"
},
{
"name": "JavaScript",
"bytes": "191"
},
{
"name": "PowerShell",
"bytes": "1879"
},
{
"name": "Python",
"bytes": "116337"
}
],
"symlink_target": ""
} |
import logging
import warnings
from typing import Dict, Set
from great_expectations.core.id_dict import BatchKwargs
logger = logging.getLogger(__name__)
class BatchKwargsGenerator:
r"""
BatchKwargsGenerators produce identifying information, called "batch_kwargs" that datasources
can use to get individual batches of data. They add flexibility in how to obtain data
such as with time-based partitioning, downsampling, or other techniques appropriate
for the datasource.
For example, a batch kwargs generator could produce a SQL query that logically represents "rows in
the Events table with a timestamp on February 7, 2012," which a SqlAlchemyDatasource
could use to materialize a SqlAlchemyDataset corresponding to that batch of data and
ready for validation.
A batch is a sample from a data asset, sliced according to a particular rule. For
example, an hourly slide of the Events table or “most recent `users` records.”
A Batch is the primary unit of validation in the Great Expectations DataContext.
Batches include metadata that identifies how they were constructed--the same “batch_kwargs”
    assembled by the batch kwargs generator. While not every datasource will enable re-fetching a
specific batch of data, GE can store snapshots of batches or store metadata from an
external data version control system.
Example Generator Configurations follow::
my_datasource_1:
class_name: PandasDatasource
batch_kwargs_generators:
# This generator will provide two data assets, corresponding to the globs defined under the "file_logs"
# and "data_asset_2" keys. The file_logs asset will be partitioned according to the match group
# defined in partition_regex
default:
class_name: GlobReaderBatchKwargsGenerator
base_directory: /var/logs
reader_options:
sep: "
globs:
file_logs:
glob: logs/*.gz
partition_regex: logs/file_(\d{0,4})_\.log\.gz
data_asset_2:
glob: data/*.csv
my_datasource_2:
class_name: PandasDatasource
batch_kwargs_generators:
# This generator will create one data asset per subdirectory in /data
# Each asset will have partitions corresponding to the filenames in that subdirectory
default:
class_name: SubdirReaderBatchKwargsGenerator
reader_options:
sep: "
base_directory: /data
my_datasource_3:
class_name: SqlalchemyDatasource
batch_kwargs_generators:
# This generator will search for a file named with the name of the requested data asset and the
# .sql suffix to open with a query to use to generate data
default:
class_name: QueryBatchKwargsGenerator
--ge-feature-maturity-info--
id: batch_kwargs_generator_manual
title: Batch Kwargs Generator - Manual
icon:
short_description: Manually configure how files on a filesystem are presented as batches of data
description: Manually configure how files on a filesystem are presented as batches of data
how_to_guide_url:
maturity: Beta
maturity_details:
api_stability: Mostly Stable (key generator functionality will remain but batch API changes still possible)
implementation_completeness: Complete
unit_test_coverage: Complete
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal
bug_risk: Moderate
id: batch_kwargs_generator_s3
title: Batch Kwargs Generator - S3
icon:
short_description: Present files on S3 as batches of data
description: Present files on S3 as batches of data for profiling and validation
how_to_guide_url:
maturity: Beta
maturity_details:
api_stability: Mostly Stable (expect changes in partitioning)
implementation_completeness: Partial
unit_test_coverage: Complete
integration_infrastructure_test_coverage: Complete
documentation_completeness: Minimal
bug_risk: Moderate
id: batch_kwargs_generator_glob_reader
title: Batch Kwargs Generator - Glob Reader
icon:
short_description: A configurable way to present files in a directory as batches of data
description: A configurable way to present files in a directory as batches of data
how_to_guide_url:
maturity: Beta
maturity_details:
api_stability: Mostly Stable (expect changes in partitioning)
implementation_completeness: Partial
unit_test_coverage: Complete
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal
bug_risk: Moderate
id: batch_kwargs_generator_table
title: Batch Kwargs Generator - Table
icon:
short_description: Present database tables as batches of data
description: Present database tables as batches of data for validation and profiling
how_to_guide_url:
maturity: Beta
maturity_details:
api_stability: Unstable (no existing native support for "partitioning")
implementation_completeness: Minimal
unit_test_coverage: Partial
integration_infrastructure_test_coverage: Minimal
documentation_completeness: Partial
bug_risk: Low
id: batch_kwargs_generator_query
title: Batch Kwargs Generator - Query
icon:
short_description: Present the result sets of SQL queries as batches of data
description: Present the result sets of SQL queries as batches of data for validation and profiling
how_to_guide_url:
maturity: Beta
maturity_details:
api_stability: Unstable (expect changes in query template configuration and query storage)
implementation_completeness: Complete
unit_test_coverage: Partial
integration_infrastructure_test_coverage: Minimal
documentation_completeness: Partial
bug_risk: Moderate
id: batch_kwargs_generator_subdir_reader
title: Batch Kwargs Generator - Subdir Reader
icon:
short_description: Present the files in a directory as batches of data
description: Present the files in a directory as batches of data for profiling and validation.
how_to_guide_url:
maturity: Beta
maturity_details:
api_stability: Mostly Stable (new configuration options likely)
implementation_completeness: Partial
unit_test_coverage: Complete
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal
bug_risk: Low
--ge-feature-maturity-info--
"""
_batch_kwargs_type = BatchKwargs
recognized_batch_parameters: Set = set()
def __init__(self, name, datasource) -> None:
self._name = name
self._generator_config = {"class_name": self.__class__.__name__}
self._data_asset_iterators: Dict = {}
if datasource is None:
raise ValueError("datasource must be provided for a BatchKwargsGenerator")
self._datasource = datasource
@property
def name(self):
return self._name
def _get_iterator(self, data_asset_name, **kwargs):
raise NotImplementedError
def get_available_data_asset_names(self) -> None:
"""Return the list of asset names known by this batch kwargs generator.
Returns:
A list of available names
"""
raise NotImplementedError
# TODO: deprecate generator_asset argument
def get_available_partition_ids(
self, generator_asset=None, data_asset_name=None
) -> None:
"""
Applies the current _partitioner to the batches available on data_asset_name and returns a list of valid
partition_id strings that can be used to identify batches of data.
Args:
data_asset_name: the data asset whose partitions should be returned.
Returns:
A list of partition_id strings
"""
raise NotImplementedError
def get_config(self):
return self._generator_config
# TODO: deprecate generator_asset argument
def reset_iterator(
self, generator_asset=None, data_asset_name=None, **kwargs
) -> None:
assert (generator_asset and not data_asset_name) or (
not generator_asset and data_asset_name
), "Please provide either generator_asset or data_asset_name."
if generator_asset:
# deprecated-v0.11.0
warnings.warn(
"The 'generator_asset' argument is deprecated as of v0.11.0 and will be removed in v0.16. "
"Please use 'data_asset_name' instead.",
DeprecationWarning,
)
data_asset_name = generator_asset
self._data_asset_iterators[data_asset_name] = (
self._get_iterator(data_asset_name=data_asset_name, **kwargs),
kwargs,
)
# TODO: deprecate generator_asset argument
def get_iterator(self, generator_asset=None, data_asset_name=None, **kwargs):
assert (generator_asset and not data_asset_name) or (
not generator_asset and data_asset_name
), "Please provide either generator_asset or data_asset_name."
if generator_asset:
# deprecated-v0.11.0
warnings.warn(
"The 'generator_asset' argument is deprecated as of v0.11.0 and will be removed in v0.16. "
"Please use 'data_asset_name' instead.",
DeprecationWarning,
)
data_asset_name = generator_asset
if data_asset_name in self._data_asset_iterators:
data_asset_iterator, passed_kwargs = self._data_asset_iterators[
data_asset_name
]
if passed_kwargs != kwargs:
logger.warning(
"Asked to yield batch_kwargs using different supplemental kwargs. Please reset iterator to "
"use different supplemental kwargs."
)
return data_asset_iterator
else:
self.reset_iterator(data_asset_name=data_asset_name, **kwargs)
return self._data_asset_iterators[data_asset_name][0]
def build_batch_kwargs(self, data_asset_name=None, partition_id=None, **kwargs):
if (not kwargs.get("name") and not data_asset_name) or (
kwargs.get("name") and data_asset_name
):
raise ValueError("Please provide either name or data_asset_name.")
if kwargs.get("name"):
# deprecated-v0.11.0
warnings.warn(
"The 'generator_asset' argument is deprecated as of v0.11.0 and will be removed in v0.16. "
"Please use 'data_asset_name' instead.",
DeprecationWarning,
)
data_asset_name = kwargs.pop("name")
"""The key workhorse. Docs forthcoming."""
if data_asset_name is not None:
batch_parameters = {"data_asset_name": data_asset_name}
else:
batch_parameters = {}
if partition_id is not None:
batch_parameters["partition_id"] = partition_id
batch_parameters.update(kwargs)
param_keys = set(batch_parameters.keys())
recognized_params = (
self.recognized_batch_parameters
| self._datasource.recognized_batch_parameters
)
if not param_keys <= recognized_params:
logger.warning(
f"Unrecognized batch_parameter(s): {str(param_keys - recognized_params)}"
)
batch_kwargs = self._build_batch_kwargs(batch_parameters)
batch_kwargs["data_asset_name"] = data_asset_name
# Track the datasource *in batch_kwargs* when building from a context so that the context can easily reuse them.
batch_kwargs["datasource"] = self._datasource.name
return batch_kwargs
def _build_batch_kwargs(self, batch_parameters) -> None:
raise NotImplementedError
# TODO: deprecate generator_asset argument
def yield_batch_kwargs(self, data_asset_name=None, generator_asset=None, **kwargs):
assert (generator_asset and not data_asset_name) or (
not generator_asset and data_asset_name
), "Please provide either generator_asset or data_asset_name."
if generator_asset:
# deprecated-v0.11.0
warnings.warn(
"The 'generator_asset' argument is deprecated as of v0.11.0 and will be removed in v0.16. "
"Please use 'data_asset_name' instead.",
DeprecationWarning,
)
data_asset_name = generator_asset
if data_asset_name not in self._data_asset_iterators:
self.reset_iterator(data_asset_name=data_asset_name, **kwargs)
data_asset_iterator, passed_kwargs = self._data_asset_iterators[data_asset_name]
if passed_kwargs != kwargs:
logger.warning(
"Asked to yield batch_kwargs using different supplemental kwargs. Resetting iterator to "
"use new supplemental kwargs."
)
self.reset_iterator(data_asset_name=data_asset_name, **kwargs)
data_asset_iterator, passed_kwargs = self._data_asset_iterators[
data_asset_name
]
try:
batch_kwargs = next(data_asset_iterator)
batch_kwargs["datasource"] = self._datasource.name
return batch_kwargs
except StopIteration:
self.reset_iterator(data_asset_name=data_asset_name, **kwargs)
data_asset_iterator, passed_kwargs = self._data_asset_iterators[
data_asset_name
]
if passed_kwargs != kwargs:
logger.warning(
"Asked to yield batch_kwargs using different batch parameters. Resetting iterator to "
"use different batch parameters."
)
self.reset_iterator(data_asset_name=data_asset_name, **kwargs)
data_asset_iterator, passed_kwargs = self._data_asset_iterators[
data_asset_name
]
try:
batch_kwargs = next(data_asset_iterator)
batch_kwargs["datasource"] = self._datasource.name
return batch_kwargs
except StopIteration:
# This is a degenerate case in which no kwargs are actually being generated
logger.warning(
f"No batch_kwargs found for data_asset_name {data_asset_name}"
)
return {}
except TypeError:
# If we don't actually have an iterator we can generate, even after resetting, just return empty
logger.warning(
f"Unable to generate batch_kwargs for data_asset_name {data_asset_name}"
)
return {}
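# Editor's illustrative sketch (not part of great_expectations): the shape of a
# minimal concrete generator. A real subclass implements `_get_iterator`,
# `get_available_data_asset_names` and `_build_batch_kwargs`; iterator caching,
# deprecation shims and datasource tagging are inherited from the base class above.
# Instantiating it still requires a name and a datasource, as enforced by __init__.
# `_ExampleBatchKwargsGenerator`, its asset name and the file paths are hypothetical.
class _ExampleBatchKwargsGenerator(BatchKwargsGenerator):
    recognized_batch_parameters = {"data_asset_name", "partition_id"}
    def get_available_data_asset_names(self):
        # A list of available names, as described by the base class docstring
        return ["example_asset"]
    def _get_iterator(self, data_asset_name, **kwargs):
        # One batch per partition id; a real generator would scan a filesystem,
        # S3 bucket or database instead of this hard-coded pair
        for partition_id in ("p0", "p1"):
            yield BatchKwargs(path=f"/tmp/{data_asset_name}/{partition_id}.csv")
    def _build_batch_kwargs(self, batch_parameters):
        partition_id = batch_parameters.get("partition_id", "p0")
        data_asset_name = batch_parameters["data_asset_name"]
        return BatchKwargs(path=f"/tmp/{data_asset_name}/{partition_id}.csv")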
| {
"content_hash": "1f570d8b3efaf3a09062f847507a6082",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 120,
"avg_line_length": 43.598885793871865,
"alnum_prop": 0.6160235113723486,
"repo_name": "great-expectations/great_expectations",
"id": "44ebf2bb006afa0ae0217e565d26af2cb6ac91cf",
"size": "15660",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "great_expectations/datasource/batch_kwargs_generator/batch_kwargs_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
} |
"""Common utilities for the various process_* implementations.
This file is only meant to be imported by the platform-specific implementations
of subprocess utilities, and it contains tools that are common to all of them.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import subprocess
import shlex
import sys
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
def read_no_interrupt(p):
"""Read from a pipe ignoring EINTR errors.
This is necessary because when reading from pipes with GUI event loops
running in the background, often interrupts are raised that stop the
command from completing."""
import errno
try:
return p.read()
except IOError as err:
if err.errno != errno.EINTR:
raise
def process_handler(cmd, callback, stderr=subprocess.PIPE):
"""Open a command in a shell subprocess and execute a callback.
This function provides common scaffolding for creating subprocess.Popen()
calls. It creates a Popen object and then calls the callback with it.
Parameters
----------
cmd : str
        A string to be executed with the underlying system shell (by calling
        :func:`Popen` with ``shell=True``).
callback : callable
A one-argument function that will be called with the Popen object.
stderr : file descriptor number, optional
By default this is set to ``subprocess.PIPE``, but you can also pass the
value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
the same file descriptor as its stdout. This is useful to read stdout
and stderr combined in the order they are generated.
Returns
-------
The return value of the provided callback is returned.
"""
sys.stdout.flush()
sys.stderr.flush()
# On win32, close_fds can't be true when using pipes for stdin/out/err
close_fds = sys.platform != 'win32'
p = subprocess.Popen(cmd, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=stderr,
close_fds=close_fds)
try:
out = callback(p)
except KeyboardInterrupt:
print('^C')
sys.stdout.flush()
sys.stderr.flush()
out = None
finally:
# Make really sure that we don't leave processes behind, in case the
# call above raises an exception
# We start by assuming the subprocess finished (to avoid NameErrors
# later depending on the path taken)
if p.returncode is None:
try:
p.terminate()
p.poll()
except OSError:
pass
# One last try on our way out
if p.returncode is None:
try:
p.kill()
except OSError:
pass
return out
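# --- Illustrative sketch (not part of the original module) ------------------
# A minimal example of the callback contract expected by process_handler():
# the callback receives the Popen object and its return value is passed
# straight through. The helper name `_example_stream_lines` is hypothetical
# and only added here for illustration; stderr is folded into stdout to
# avoid the pipe deadlock that two separate PIPEs could cause when streaming.
def _example_stream_lines(cmd):
    """Run `cmd` in a shell and return its combined output as a list of lines."""
    def callback(p):
        lines = []
        for line in iter(p.stdout.readline, b''):
            lines.append(py3compat.bytes_to_str(line).rstrip('\n'))
        p.wait()
        return lines
    return process_handler(cmd, callback, subprocess.STDOUT) or []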
def getoutput(cmd):
"""Run a command and return its stdout/stderr as a string.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
output : str
A string containing the combination of stdout and stderr from the
subprocess, in whatever order the subprocess originally wrote to its
file descriptors (so the order of the information in this string is the
correct order as would be seen if running the command in a terminal).
"""
out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
if out is None:
return ''
return py3compat.bytes_to_str(out)
def getoutputerror(cmd):
"""Return (standard output, standard error) of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
stderr : str
"""
return get_output_error_code(cmd)[:2]
def get_output_error_code(cmd):
"""Return (standard output, standard error, return code) of executing cmd
in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
stderr : str
returncode: int
"""
out_err, p = process_handler(cmd, lambda p: (p.communicate(), p))
if out_err is None:
return '', '', p.returncode
out, err = out_err
return py3compat.bytes_to_str(out), py3compat.bytes_to_str(err), p.returncode
def arg_split(s, posix=False, strict=True):
"""Split a command line's arguments in a shell-like manner.
This is a modified version of the standard library's shlex.split()
function, but with a default of posix=False for splitting, so that quotes
in inputs are respected.
    If strict=False, any error that shlex.split would raise instead results
    in the unparsed remainder being returned as the last element of the list.
This is because we sometimes use arg_split to parse things other than
command-line args.
"""
# Unfortunately, python's shlex module is buggy with unicode input:
# http://bugs.python.org/issue1170
# At least encoding the input when it's unicode seems to help, but there
# may be more problems lurking. Apparently this is fixed in python3.
is_unicode = False
if (not py3compat.PY3) and isinstance(s, unicode):
is_unicode = True
s = s.encode('utf-8')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
# Extract tokens, ensuring that things like leaving open quotes
# does not cause this to raise. This is important, because we
# sometimes pass Python source through this (e.g. %timeit f(" ")),
# and it shouldn't raise an exception.
# It may be a bad idea to parse things that are not command-line args
# through this function, but we do, so let's be safe about it.
    lex.commenters = ''  # fix for GH-1269
tokens = []
while True:
try:
tokens.append(next(lex))
except StopIteration:
break
except ValueError:
if strict:
raise
# couldn't parse, get remaining blob as last token
tokens.append(lex.token)
break
if is_unicode:
# Convert the tokens back to unicode.
tokens = [x.decode('utf-8') for x in tokens]
return tokens
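# --- Illustrative sketch (not part of the original module) ------------------
# A quick demonstration of the quoting behaviour described in the arg_split
# docstring, run only when this file is executed directly. With the default
# posix=False the quotes are preserved; with posix=True they are stripped.
if __name__ == '__main__':
    print(arg_split('a "b c" d'))              # -> ['a', '"b c"', 'd']
    print(arg_split('a "b c" d', posix=True))  # -> ['a', 'b c', 'd']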
| {
"content_hash": "88397cca909bcca0a68ea6acdbc5bef1",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 81,
"avg_line_length": 32.925233644859816,
"alnum_prop": 0.595799034913426,
"repo_name": "marcoantoniooliveira/labweb",
"id": "926721cb113bfa2a68b8e25b7e5fe7a5ffbc47ab",
"size": "7046",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/IPython/utils/_process_common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import tempfile
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('Shakespeare WordCount').getOrCreate()
table = 'bigquery-public-data.samples.shakespeare'
df = spark.read.format('bigquery').load(table)
# Only these columns will be read
df = df.select('word', 'word_count')
# The filters that are allowed will be automatically pushed down.
# Those that are not will be computed client side
df = df.where("word_count > 0 AND word NOT LIKE '%\\'%'")
# Further processing is done inside Spark
df = df.groupBy('word').sum('word_count')
df = df.orderBy(df['sum(word_count)'].desc()).cache()
print('The resulting schema is')
df.printSchema()
print('The top words in shakespeare are')
df.show()
# Use tempfile just to get random directory name. Spark will create the
# directory in the default file system anyways.
path = tempfile.mkdtemp(prefix='spark-bigquery')
print('Writing table out to {}'.format(path))
df.write.csv(path)
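# --- Illustrative addition (not part of the original example) ---------------
# As a simple sanity check, read the CSV we just wrote back into a DataFrame
# and show a few rows; this uses only standard Spark DataFrameReader options.
df_back = spark.read.csv(path, inferSchema=True)
print('Rows written back: {}'.format(df_back.count()))
df_back.show(5)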
| {
"content_hash": "3fb791cbc7a43eec973d2c6f8b8fbcc5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 35.32142857142857,
"alnum_prop": 0.7441860465116279,
"repo_name": "GoogleCloudDataproc/spark-bigquery-connector",
"id": "d75a482694f153c972ce35d8afcf11818d8aae52",
"size": "1610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/python/shakespeare.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "80"
},
{
"name": "Java",
"bytes": "1052532"
},
{
"name": "Python",
"bytes": "4333"
},
{
"name": "Scala",
"bytes": "340812"
},
{
"name": "Shell",
"bytes": "3822"
}
],
"symlink_target": ""
} |
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more vcoinds to each other
using a provided test script.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that yields TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
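# Illustrative note (not part of the original file): wait_until() polls the
# predicate roughly every 50 ms, evaluating it under mininode_lock, so a
# typical call looks like
#   wait_until(lambda: test_node.closed, timeout=10)
# which returns True as soon as the predicate holds and False on timeout.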
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == 'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == 'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
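# --- Illustrative sketch (not part of the original framework) ---------------
# A minimal shape for the get_tests() contract described in the comments
# above: yield one TestInstance per synchronization point. Building the
# CBlock objects themselves is chain-specific and omitted here; the `blocks`
# argument is a hypothetical, pre-built list of blocks.
class _ExampleTestGen(object):
    def __init__(self, blocks):
        self.blocks = blocks
    def get_tests(self):
        # Expect each block to be accepted, syncing all nodes after each one.
        for b in self.blocks:
            yield TestInstance([[b, True]])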
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print 'Block not in reject map: %064x' % (blockhash)
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print 'Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print 'Tx not in reject map: %064x' % (txhash)
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print 'Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
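# --- Illustrative sketch (not part of the original framework) ---------------
# Typical wiring of TestManager into a comparison test, assuming the caller
# already has RPC node handles (`nodes`) and a data directory, and that the
# mininode module wildcard-imported above provides NetworkThread (as it does
# in the upstream framework this file is based on).
def _example_run(test_generator, nodes, datadir):
    test = TestManager(test_generator, datadir)
    test.add_all_connections(nodes)
    NetworkThread().start()  # start the shared p2p message-handling thread
    test.run()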
| {
"content_hash": "0b4550725d13ab52e748f8066ca66e9d",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 145,
"avg_line_length": 44.37073170731707,
"alnum_prop": 0.5829485488126649,
"repo_name": "vcoin-project/v",
"id": "def4036a8597f76b412af3d371f623a9cd8a993b",
"size": "18361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/test_framework/comptool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "708021"
},
{
"name": "C++",
"bytes": "4107591"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "543539"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "M4",
"bytes": "146937"
},
{
"name": "Makefile",
"bytes": "1093629"
},
{
"name": "NSIS",
"bytes": "6503"
},
{
"name": "Objective-C",
"bytes": "2156"
},
{
"name": "Objective-C++",
"bytes": "7232"
},
{
"name": "Protocol Buffer",
"bytes": "2300"
},
{
"name": "Python",
"bytes": "613863"
},
{
"name": "Shell",
"bytes": "1762241"
}
],
"symlink_target": ""
} |
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
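# Illustrative note (not part of the original source): for example,
# _min_int(0, 3) returns int8 while _min_int(0, 10**6) returns int32,
# i.e. the smallest signed integer type whose range covers [low, high].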
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
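# Illustrative note (not part of the original source): for a 2-D array `a`,
# ``a[tril_indices_from(a, k=-1)] = 0`` zeroes every element strictly below
# the main diagonal, mirroring the tril_indices example above.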
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
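# --- Illustrative sketch (not part of the original module) ------------------
# A small self-check of the triangle helpers defined above, run only when
# this file is executed directly; it relies solely on names already imported
# at the top of the module.
if __name__ == '__main__':
    a = arange(16).reshape(4, 4)
    # tril keeps the main diagonal and below, triu(k=1) keeps strictly above,
    # so together they reconstruct the original array.
    assert (tril(a) + triu(a, k=1) == a).all()
    # *_indices_from is just *_indices with the shape taken from the array.
    assert (a[tril_indices_from(a)] == a[tril_indices(4)]).all()
    assert (a[triu_indices_from(a)] == a[triu_indices(4)]).all()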
| {
"content_hash": "3c04fe98c55dd1dc14faa83e2180cc00",
"timestamp": "",
"source": "github",
"line_count": 954,
"max_line_length": 79,
"avg_line_length": 27.370020964360588,
"alnum_prop": 0.5439852935544407,
"repo_name": "ryfeus/lambda-packs",
"id": "98efba1911e13515737e21a385eb004924c3d283",
"size": "26111",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "HDF4_H5_NETCDF/source2.7/numpy/lib/twodim_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import datetime
import copy
import random
from collections import namedtuple
import mock
import testtools
from testtools.matchers import GreaterThan
from oslo_config import cfg
from oslo_log import log as logging
from oslo_db import exception as db_exception
from oslo_messaging.notify import notifier
from designate import exceptions
from designate import objects
from designate.mdns import rpcapi as mdns_api
from designate.tests.test_central import CentralTestCase
from designate.storage.impl_sqlalchemy import tables
LOG = logging.getLogger(__name__)
class CentralServiceTest(CentralTestCase):
def test_stop(self):
# Test stopping the service
self.central_service.stop()
def test_start_with_tlds(self):
# Stop Service
self.central_service.stop()
list = objects.TldList()
list.append(objects.Tld(name='com.'))
with mock.patch.object(self.central_service.storage, 'find_tlds',
return_value=list):
self.central_service.start()
self.assertTrue(self.central_service.check_for_tlds)
def test_is_valid_domain_name(self):
self.config(max_domain_name_len=10,
group='service:central')
context = self.get_context()
self.central_service._is_valid_domain_name(context, 'valid.org.')
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'example.org.')
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'example.tld.')
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'tld.')
def test_is_valid_domain_name_with_tlds(self):
# Stop Service
self.central_service.stop()
list = objects.TldList()
list.append(objects.Tld(name='com'))
list.append(objects.Tld(name='biz'))
list.append(objects.Tld(name='z'))
with mock.patch.object(self.central_service.storage, 'find_tlds',
return_value=list):
self.central_service.start()
context = self.get_context()
with mock.patch.object(self.central_service.storage, 'find_tld',
return_value=objects.Tld(name='biz')):
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'biz.')
def test_is_valid_recordset_name(self):
self.config(max_recordset_name_len=18,
group='service:central')
context = self.get_context()
domain = self.create_domain(name='example.org.')
self.central_service._is_valid_recordset_name(
context, domain, 'valid.example.org.')
with testtools.ExpectedException(exceptions.InvalidRecordSetName):
self.central_service._is_valid_recordset_name(
context, domain, 'toolong.example.org.')
with testtools.ExpectedException(ValueError):
self.central_service._is_valid_recordset_name(
context, domain, 'invalidtld.example.org')
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service._is_valid_recordset_name(
context, domain, 'a.example.COM.')
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
# Ensure names ending in the domain name, but
# not contained in it fail
self.central_service._is_valid_recordset_name(
context, domain, 'aexample.org.')
def test_is_blacklisted_domain_name(self):
# Create blacklisted zones with specific names
self.create_blacklist(pattern='example.org.')
self.create_blacklist(pattern='example.net.')
self.create_blacklist(pattern='^blacklisted.org.$')
self.create_blacklist(pattern='com.$')
# Set the policy to reject the authz
self.policy({'use_blacklisted_domain': '!'})
context = self.get_context()
result = self.central_service._is_blacklisted_domain_name(
context, 'org.')
self.assertFalse(result)
# Subdomains should not be allowed from a blacklisted domain
result = self.central_service._is_blacklisted_domain_name(
context, 'www.example.org.')
self.assertTrue(result)
result = self.central_service._is_blacklisted_domain_name(
context, 'example.org.')
self.assertTrue(result)
# Check for blacklisted domains containing regexps
result = self.central_service._is_blacklisted_domain_name(
context, 'example.net.')
self.assertTrue(result)
result = self.central_service._is_blacklisted_domain_name(
context, 'example.com.')
self.assertTrue(result)
result = self.central_service._is_blacklisted_domain_name(
context, 'blacklisted.org.')
self.assertTrue(result)
def test_is_blacklisted_domain_name_evil(self):
evil_regex = "(([a-z])+.)+[A-Z]([a-z])+$"
evil_zone_name = ("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaa.com.")
blacklists = objects.BlacklistList(
objects=[objects.Blacklist(pattern=evil_regex)])
context = self.get_context()
with mock.patch.object(self.central_service.storage,
'find_blacklists',
return_value=blacklists):
result = self.central_service._is_blacklisted_domain_name(
context, evil_zone_name)
self.assertTrue(result)
def test_is_subdomain(self):
context = self.get_context()
# Create a domain (using the specified domain name)
domain = self.create_domain(name='example.org.')
result = self.central_service._is_subdomain(
context, 'org.', domain.pool_id)
self.assertFalse(result)
result = self.central_service._is_subdomain(
context, 'www.example.net.', domain.pool_id)
self.assertFalse(result)
result = self.central_service._is_subdomain(
context, 'example.org.', domain.pool_id)
self.assertFalse(result)
result = self.central_service._is_subdomain(
context, 'www.example.org.', domain.pool_id)
self.assertTrue(result)
def test_is_superdomain(self):
context = self.get_context()
# Create a domain (using the specified domain name)
domain = self.create_domain(name='example.org.')
LOG.debug("Testing 'org.'")
result = self.central_service._is_superdomain(
context, 'org.', domain.pool_id)
self.assertTrue(result)
LOG.debug("Testing 'www.example.net.'")
result = self.central_service._is_superdomain(
context, 'www.example.net.', domain.pool_id)
self.assertFalse(result)
LOG.debug("Testing 'www.example.org.'")
result = self.central_service._is_superdomain(
context, 'www.example.org.', domain.pool_id)
self.assertFalse(result)
def test_is_valid_recordset_placement_subdomain(self):
context = self.get_context()
# Create a domain (using the specified domain name)
domain = self.create_domain(name='example.org.')
sub_domain = self.create_domain(name='sub.example.org.')
def _fail(domain_, name):
with testtools.ExpectedException(
exceptions.InvalidRecordSetLocation):
self.central_service._is_valid_recordset_placement_subdomain(
context, domain_, name)
def _ok(domain_, name):
self.central_service._is_valid_recordset_placement_subdomain(
context, domain_, name)
_fail(domain, 'record.sub.example.org.')
_fail(domain, 'sub.example.org.')
_ok(domain, 'example.org.')
_ok(domain, 'record.example.org.')
_ok(sub_domain, 'record.example.org.')
def test_is_valid_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=100,
group='service:central')
context = self.get_context()
values = self.get_domain_fixture(fixture=1)
values['ttl'] = 0
with testtools.ExpectedException(exceptions.InvalidTTL):
self.central_service._is_valid_ttl(
context, values['ttl'])
# TLD Tests
def test_create_tld(self):
# Create a TLD with one label
tld = self.create_tld(fixture=0)
# Ensure all values have been set correctly
self.assertIsNotNone(tld['id'])
self.assertEqual(self.get_tld_fixture(fixture=0)['name'], tld['name'])
# Create a TLD with more than one label
tld = self.create_tld(fixture=1)
# Ensure all values have been set correctly
self.assertIsNotNone(tld['id'])
self.assertEqual(self.get_tld_fixture(fixture=1)['name'], tld['name'])
def test_find_tlds(self):
# Ensure we have no tlds to start with.
tlds = self.central_service.find_tlds(self.admin_context)
self.assertEqual(0, len(tlds))
# Create a single tld
self.create_tld(fixture=0)
# Ensure we can retrieve the newly created tld
tlds = self.central_service.find_tlds(self.admin_context)
self.assertEqual(1, len(tlds))
self.assertEqual(self.get_tld_fixture(fixture=0)['name'],
tlds[0]['name'])
# Create a second tld
self.create_tld(fixture=1)
# Ensure we can retrieve both tlds
tlds = self.central_service.find_tlds(self.admin_context)
self.assertEqual(2, len(tlds))
self.assertEqual(self.get_tld_fixture(fixture=0)['name'],
tlds[0]['name'])
self.assertEqual(self.get_tld_fixture(fixture=1)['name'],
tlds[1]['name'])
def test_get_tld(self):
# Create a tld
tld_name = 'ns%d.co.uk' % random.randint(10, 1000)
expected_tld = self.create_tld(name=tld_name)
# Retrieve it, and ensure it's the same
tld = self.central_service.get_tld(
self.admin_context, expected_tld['id'])
self.assertEqual(expected_tld['id'], tld['id'])
self.assertEqual(expected_tld['name'], tld['name'])
def test_update_tld(self):
# Create a tld
tld = self.create_tld(name='org.')
# Update the Object
tld.name = 'net.'
# Perform the update
self.central_service.update_tld(self.admin_context, tld)
# Fetch the tld again
tld = self.central_service.get_tld(self.admin_context, tld.id)
# Ensure the tld was updated correctly
self.assertEqual('net.', tld.name)
def test_delete_tld(self):
# Create a tld
tld = self.create_tld(fixture=0)
# Delete the tld
self.central_service.delete_tld(self.admin_context, tld['id'])
# Fetch the tld again, ensuring an exception is raised
self.assertRaises(
exceptions.TldNotFound,
self.central_service.get_tld,
self.admin_context, tld['id'])
# TsigKey Tests
def test_create_tsigkey(self):
values = self.get_tsigkey_fixture(fixture=0)
# Create a tsigkey
tsigkey = self.central_service.create_tsigkey(
self.admin_context, tsigkey=objects.TsigKey.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(tsigkey['id'])
self.assertEqual(values['name'], tsigkey['name'])
self.assertEqual(values['algorithm'], tsigkey['algorithm'])
self.assertEqual(values['secret'], tsigkey['secret'])
def test_find_tsigkeys(self):
# Ensure we have no tsigkeys to start with.
tsigkeys = self.central_service.find_tsigkeys(self.admin_context)
self.assertEqual(0, len(tsigkeys))
# Create a single tsigkey (using default values)
tsigkey_one = self.create_tsigkey()
# Ensure we can retrieve the newly created tsigkey
tsigkeys = self.central_service.find_tsigkeys(self.admin_context)
self.assertEqual(1, len(tsigkeys))
self.assertEqual(tsigkey_one['name'], tsigkeys[0]['name'])
# Create a second tsigkey
tsigkey_two = self.create_tsigkey(fixture=1)
# Ensure we can retrieve both tsigkeys
tsigkeys = self.central_service.find_tsigkeys(self.admin_context)
self.assertEqual(2, len(tsigkeys))
self.assertEqual(tsigkey_one['name'], tsigkeys[0]['name'])
self.assertEqual(tsigkey_two['name'], tsigkeys[1]['name'])
def test_get_tsigkey(self):
# Create a tsigkey
expected = self.create_tsigkey()
# Retrieve it, and ensure it's the same
tsigkey = self.central_service.get_tsigkey(
self.admin_context, expected['id'])
self.assertEqual(expected['id'], tsigkey['id'])
self.assertEqual(expected['name'], tsigkey['name'])
self.assertEqual(expected['algorithm'], tsigkey['algorithm'])
self.assertEqual(expected['secret'], tsigkey['secret'])
def test_update_tsigkey(self):
# Create a tsigkey
tsigkey = self.create_tsigkey(name='test-key')
# Update the Object
tsigkey.name = 'test-key-updated'
# Perform the update
self.central_service.update_tsigkey(self.admin_context, tsigkey)
# Fetch the tsigkey again
tsigkey = self.central_service.get_tsigkey(
self.admin_context, tsigkey.id)
# Ensure the new value took
self.assertEqual('test-key-updated', tsigkey.name)
def test_delete_tsigkey(self):
# Create a tsigkey
tsigkey = self.create_tsigkey()
# Delete the tsigkey
self.central_service.delete_tsigkey(self.admin_context, tsigkey['id'])
# Fetch the tsigkey again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.TsigKeyNotFound):
self.central_service.get_tsigkey(self.admin_context, tsigkey['id'])
# Tenant Tests
def test_count_tenants(self):
admin_context = self.get_admin_context()
admin_context.all_tenants = True
tenant_one_context = self.get_context(tenant=1)
tenant_two_context = self.get_context(tenant=2)
# in the beginning, there should be nothing
tenants = self.central_service.count_tenants(admin_context)
self.assertEqual(0, tenants)
# Explicitly set a tenant_id
self.create_domain(fixture=0, context=tenant_one_context)
self.create_domain(fixture=1, context=tenant_two_context)
tenants = self.central_service.count_tenants(admin_context)
self.assertEqual(2, tenants)
def test_count_tenants_policy_check(self):
# Set the policy to reject the authz
self.policy({'count_tenants': '!'})
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.count_tenants(self.get_context())
# Domain Tests
@mock.patch.object(notifier.Notifier, "info")
def _test_create_domain(self, values, mock_notifier):
# Reset the mock to avoid the calls from the create_nameserver() call
mock_notifier.reset_mock()
# Create a domain
domain = self.central_service.create_domain(
self.admin_context, domain=objects.Domain.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(domain['id'])
self.assertEqual(values['name'], domain['name'])
self.assertEqual(values['email'], domain['email'])
self.assertIn('status', domain)
self.assertEqual(1, mock_notifier.call_count)
# Ensure the correct NS Records are in place
pool = self.central_service.get_pool(
self.admin_context, domain.pool_id)
ns_recordset = self.central_service.find_recordset(
self.admin_context,
criterion={'domain_id': domain.id, 'type': "NS"})
self.assertIsNotNone(ns_recordset.id)
self.assertEqual('NS', ns_recordset.type)
self.assertIsNotNone(ns_recordset.records)
self.assertEqual(set([n.hostname for n in pool.ns_records]),
set([n.data for n in ns_recordset.records]))
mock_notifier.assert_called_once_with(
self.admin_context, 'dns.domain.create', domain)
return domain
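# Note: @mock.patch.object(notifier.Notifier, "info") replaces the
# notifier's info() method with a MagicMock and appends it as the last
# positional argument of the decorated test (mock_notifier above), which
# is what allows the call-count and call-args assertions.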
def test_create_domain_duplicate_different_pools(self):
fixture = self.get_domain_fixture()
# Create first domain that's placed in default pool
self.create_domain(**fixture)
# Create a secondary pool
second_pool = self.create_pool()
fixture["pool_id"] = second_pool.id
self.create_domain(**fixture)
def test_create_domain_over_tld(self):
values = dict(
name='example.com.',
email='[email protected]',
type='PRIMARY'
)
self._test_create_domain(values)
def test_idn_create_domain_over_tld(self):
values = dict(
name='xn--3e0b707e'
)
# Create the appropriate TLD
self.central_service.create_tld(
self.admin_context, objects.Tld.from_dict(values))
# Test creation of a domain in 한국 (kr)
values = dict(
name='example.xn--3e0b707e.',
email='[email protected]',
type='PRIMARY'
)
self._test_create_domain(values)
def test_create_domain_over_quota(self):
self.config(quota_domains=1)
self.create_domain()
with testtools.ExpectedException(exceptions.OverQuota):
self.create_domain()
def test_create_subdomain(self):
# Create the Parent Domain using fixture 0
parent_domain = self.create_domain(fixture=0)
# Prepare values for the subdomain using fixture 1 as a base
values = self.get_domain_fixture(fixture=1)
values['name'] = 'www.%s' % parent_domain['name']
# Create the subdomain
domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(domain['id'])
self.assertEqual(parent_domain['id'], domain['parent_domain_id'])
def test_create_subdomain_different_pools(self):
fixture = self.get_domain_fixture()
# Create first domain that's placed in default pool
self.create_domain(**fixture)
# Create a secondary pool
second_pool = self.create_pool()
fixture["pool_id"] = second_pool.id
fixture["name"] = "sub.%s" % fixture["name"]
subdomain = self.create_domain(**fixture)
self.assertIsNone(subdomain.parent_domain_id)
def test_create_superdomain(self):
# Prepare values for the domain and subdomain
# using fixture 1 as a base
domain_values = self.get_domain_fixture(fixture=1)
subdomain_values = copy.deepcopy(domain_values)
subdomain_values['name'] = 'www.%s' % domain_values['name']
subdomain_values['context'] = self.admin_context
LOG.debug("domain_values: {0}".format(domain_values))
LOG.debug("subdomain_values: {0}".format(subdomain_values))
# Create the subdomain
subdomain = self.create_domain(**subdomain_values)
# Create the Parent Domain using fixture 1
parent_domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(domain_values))
# Get updated subdomain values
subdomain = self.central_service.get_domain(self.admin_context,
subdomain.id)
# Ensure all values have been set correctly
self.assertIsNotNone(parent_domain['id'])
self.assertEqual(parent_domain['id'], subdomain['parent_domain_id'])
def test_create_subdomain_failure(self):
context = self.get_admin_context()
# Explicitly set a tenant_id
context.tenant = '1'
# Create the Parent Domain using fixture 0
parent_domain = self.create_domain(fixture=0, context=context)
context = self.get_admin_context()
# Explicitly use a different tenant_id
context.tenant = '2'
# Prepare values for the subdomain using fixture 1 as a base
values = self.get_domain_fixture(fixture=1)
values['name'] = 'www.%s' % parent_domain['name']
# Attempt to create the subdomain
with testtools.ExpectedException(exceptions.IllegalChildDomain):
self.central_service.create_domain(
context, objects.Domain.from_dict(values))
def test_create_superdomain_failure(self):
context = self.get_admin_context()
# Explicitly set a tenant_id
context.tenant = '1'
# Set up domain and subdomain values
domain_values = self.get_domain_fixture(fixture=1)
domain_name = domain_values['name']
subdomain_values = copy.deepcopy(domain_values)
subdomain_values['name'] = 'www.%s' % domain_name
subdomain_values['context'] = context
# Create sub domain
self.create_domain(**subdomain_values)
context = self.get_admin_context()
# Explicitly use a different tenant_id
context.tenant = '2'
# Attempt to create the domain
with testtools.ExpectedException(exceptions.IllegalParentDomain):
self.central_service.create_domain(
context, objects.Domain.from_dict(domain_values))
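# Cross-tenant nesting rules exercised by the two failure tests above:
# a tenant may not create a subdomain underneath another tenant's zone
# (IllegalChildDomain), and may not create a parent zone on top of a
# subdomain owned by another tenant (IllegalParentDomain).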
def test_create_blacklisted_domain_success(self):
# Create blacklisted zone using default values
self.create_blacklist()
# Set the policy to accept the authz
self.policy({'use_blacklisted_domain': '@'})
values = dict(
name='blacklisted.com.',
email='[email protected]'
)
# Create a zone that is blacklisted
domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(domain['id'])
self.assertEqual(values['name'], domain['name'])
self.assertEqual(values['email'], domain['email'])
def test_create_blacklisted_domain_fail(self):
self.create_blacklist()
# Set the policy to reject the authz
self.policy({'use_blacklisted_domain': '!'})
values = dict(
name='blacklisted.com.',
email='[email protected]'
)
with testtools.ExpectedException(exceptions.InvalidDomainName):
# Create a domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
def _test_create_domain_fail(self, values, exception):
with testtools.ExpectedException(exception):
# Create an invalid domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
def test_create_domain_invalid_tld_fail(self):
# add a tld for com
self.create_tld(fixture=0)
values = dict(
name='example.com.',
email='[email protected]'
)
# Create a valid domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
values = dict(
name='example.net.',
email='[email protected]'
)
# There is no TLD for net so it should fail
with testtools.ExpectedException(exceptions.InvalidDomainName):
# Create an invalid domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
def test_create_domain_invalid_ttl_fail(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=100,
group='service:central')
context = self.get_context()
values = self.get_domain_fixture(fixture=1)
values['ttl'] = 0
with testtools.ExpectedException(exceptions.InvalidTTL):
self.central_service.create_domain(
context, objects.Domain.from_dict(values))
def test_create_domain_no_min_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=None,
group='service:central')
values = self.get_domain_fixture(fixture=1)
values['ttl'] = -100
# Create a domain with a negative TTL
domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
# Ensure all values have been set correctly
self.assertEqual(values['ttl'], domain['ttl'])
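# Taken together with the previous test: when service:central min_ttl is
# set and the 'use_low_ttl' policy is denied, zone TTLs below the minimum
# are rejected with InvalidTTL; when min_ttl is None, no lower bound is
# enforced, so even the negative TTL above is stored as given.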
def test_find_domains(self):
# Ensure we have no domains to start with.
domains = self.central_service.find_domains(self.admin_context)
self.assertEqual(0, len(domains))
# Create a single domain (using default values)
self.create_domain()
# Ensure we can retrieve the newly created domain
domains = self.central_service.find_domains(self.admin_context)
self.assertEqual(1, len(domains))
self.assertEqual('example.com.', domains[0]['name'])
# Create a second domain
self.create_domain(name='example.net.')
# Ensure we can retrieve both domains
domains = self.central_service.find_domains(self.admin_context)
self.assertEqual(2, len(domains))
self.assertEqual('example.com.', domains[0]['name'])
self.assertEqual('example.net.', domains[1]['name'])
def test_find_domains_criteria(self):
# Create a domain
domain_name = '%d.example.com.' % random.randint(10, 1000)
expected_domain = self.create_domain(name=domain_name)
# Retrieve it, and ensure it's the same
criterion = {'name': domain_name}
domains = self.central_service.find_domains(
self.admin_context, criterion)
self.assertEqual(expected_domain['id'], domains[0]['id'])
self.assertEqual(expected_domain['name'], domains[0]['name'])
self.assertEqual(expected_domain['email'], domains[0]['email'])
def test_find_domains_tenant_restrictions(self):
admin_context = self.get_admin_context()
admin_context.all_tenants = True
tenant_one_context = self.get_context(tenant=1)
tenant_two_context = self.get_context(tenant=2)
# Ensure we have no domains to start with.
domains = self.central_service.find_domains(admin_context)
self.assertEqual(0, len(domains))
# Create a single domain (using default values)
domain = self.create_domain(context=tenant_one_context)
# Ensure admins can retrieve the newly created domain
domains = self.central_service.find_domains(admin_context)
self.assertEqual(1, len(domains))
self.assertEqual(domain['name'], domains[0]['name'])
# Ensure tenant=1 can retrieve the newly created domain
domains = self.central_service.find_domains(tenant_one_context)
self.assertEqual(1, len(domains))
self.assertEqual(domain['name'], domains[0]['name'])
# Ensure tenant=2 can NOT retrieve the newly created domain
domains = self.central_service.find_domains(tenant_two_context)
self.assertEqual(0, len(domains))
def test_get_domain(self):
# Create a domain
domain_name = '%d.example.com.' % random.randint(10, 1000)
expected_domain = self.create_domain(name=domain_name)
# Retrieve it, and ensure it's the same
domain = self.central_service.get_domain(
self.admin_context, expected_domain['id'])
self.assertEqual(expected_domain['id'], domain['id'])
self.assertEqual(expected_domain['name'], domain['name'])
self.assertEqual(expected_domain['email'], domain['email'])
def test_get_domain_servers(self):
# Create a domain
domain = self.create_domain()
# Retrieve the servers list
servers = self.central_service.get_domain_servers(
self.admin_context, domain['id'])
self.assertTrue(len(servers) > 0)
def test_find_domain(self):
# Create a domain
domain_name = '%d.example.com.' % random.randint(10, 1000)
expected_domain = self.create_domain(name=domain_name)
# Retrieve it, and ensure it's the same
criterion = {'name': domain_name}
domain = self.central_service.find_domain(
self.admin_context, criterion)
self.assertEqual(expected_domain['id'], domain['id'])
self.assertEqual(expected_domain['name'], domain['name'])
self.assertEqual(expected_domain['email'], domain['email'])
self.assertIn('status', domain)
@mock.patch.object(notifier.Notifier, "info")
def test_update_domain(self, mock_notifier):
# Create a domain
domain = self.create_domain(email='[email protected]')
original_serial = domain.serial
# Update the object
domain.email = '[email protected]'
# Reset the mock to avoid the calls from the create_domain() call
mock_notifier.reset_mock()
# Perform the update
self.central_service.update_domain(self.admin_context, domain)
# Fetch the domain again
domain = self.central_service.get_domain(
self.admin_context, domain.id)
# Ensure the domain was updated correctly
self.assertTrue(domain.serial > original_serial)
self.assertEqual('[email protected]', domain.email)
self.assertEqual(1, mock_notifier.call_count)
# Check that the object used in the notify is a Domain and the id
# matches up
notified_domain = mock_notifier.call_args[0][-1]
self.assertIsInstance(notified_domain, objects.Domain)
self.assertEqual(domain.id, notified_domain.id)
mock_notifier.assert_called_once_with(
self.admin_context, 'dns.domain.update', mock.ANY)
def test_update_domain_without_id(self):
# Create a domain
domain = self.create_domain(email='[email protected]')
# Update the object
domain.email = '[email protected]'
domain.id = None
# Perform the update
with testtools.ExpectedException(Exception):
self.central_service.update_domain(self.admin_context, domain)
def test_update_domain_without_incrementing_serial(self):
# Create a domain
domain = self.create_domain(email='[email protected]')
original_serial = domain.serial
# Update the object
domain.email = '[email protected]'
# Perform the update
self.central_service.update_domain(
self.admin_context, domain, increment_serial=False)
# Fetch the domain again
domain = self.central_service.get_domain(self.admin_context, domain.id)
# Ensure the domain was updated correctly
self.assertEqual(original_serial, domain.serial)
self.assertEqual('[email protected]', domain.email)
def test_update_domain_name_fail(self):
# Create a domain
domain = self.create_domain(name='example.org.')
# Update the Object
domain.name = 'example.net.'
# Perform the update
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_domain(self.admin_context, domain)
def test_update_domain_deadlock_retry(self):
# Create a domain
domain = self.create_domain(name='example.org.')
original_serial = domain.serial
# Update the Object
domain.email = '[email protected]'
# Due to Python's scoping of i, we need to make it a mutable type
# for the counter to work. In Py3, we can use the nonlocal keyword.
i = [False]
def fail_once_then_pass():
if i[0] is True:
return self.central_service.storage.session.commit()
else:
i[0] = True
raise db_exception.DBDeadlock()
with mock.patch.object(self.central_service.storage, 'commit',
side_effect=fail_once_then_pass):
# Perform the update
domain = self.central_service.update_domain(
self.admin_context, domain)
# Ensure i[0] is True, indicating the side_effect code above was
# triggered
self.assertTrue(i[0])
# Ensure the domain was updated correctly
self.assertTrue(domain.serial > original_serial)
self.assertEqual('[email protected]', domain.email)
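# The mutable-list counter above is a Python 2 workaround, as the inline
# comment notes. A rough Python 3 equivalent of the same side effect
# (sketch only, not part of the test suite) would be:
#
#     attempts = 0
#
#     def fail_once_then_pass():
#         nonlocal attempts
#         if attempts:
#             return self.central_service.storage.session.commit()
#         attempts += 1
#         raise db_exception.DBDeadlock()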
@mock.patch.object(notifier.Notifier, "info")
def test_delete_domain(self, mock_notifier):
# Create a domain
domain = self.create_domain()
mock_notifier.reset_mock()
# Delete the domain
self.central_service.delete_domain(self.admin_context, domain['id'])
# Fetch the domain
deleted_domain = self.central_service.get_domain(
self.admin_context, domain['id'])
# Ensure the domain is marked for deletion
self.assertEqual(domain.id, deleted_domain.id)
self.assertEqual(domain.name, deleted_domain.name)
self.assertEqual(domain.email, deleted_domain.email)
self.assertEqual('PENDING', deleted_domain.status)
self.assertEqual(domain.tenant_id, deleted_domain.tenant_id)
self.assertEqual(domain.parent_domain_id,
deleted_domain.parent_domain_id)
self.assertEqual('DELETE', deleted_domain.action)
self.assertEqual(domain.serial, deleted_domain.serial)
self.assertEqual(domain.pool_id, deleted_domain.pool_id)
self.assertEqual(1, mock_notifier.call_count)
# Check that the object used in the notify is a Domain and the id
# matches up
notified_domain = mock_notifier.call_args[0][-1]
self.assertIsInstance(notified_domain, objects.Domain)
self.assertEqual(deleted_domain.id, notified_domain.id)
mock_notifier.assert_called_once_with(
self.admin_context, 'dns.domain.delete', mock.ANY)
def test_delete_parent_domain(self):
# Create the Parent Domain using fixture 0
parent_domain = self.create_domain(fixture=0)
# Create the subdomain
self.create_domain(fixture=1, name='www.%s' % parent_domain['name'])
# Attempt to delete the parent domain
with testtools.ExpectedException(exceptions.DomainHasSubdomain):
self.central_service.delete_domain(
self.admin_context, parent_domain['id'])
def test_count_domains(self):
# in the beginning, there should be nothing
domains = self.central_service.count_domains(self.admin_context)
self.assertEqual(0, domains)
# Create a single domain
self.create_domain()
# count 'em up
domains = self.central_service.count_domains(self.admin_context)
# well, did we get 1?
self.assertEqual(1, domains)
def test_count_domains_policy_check(self):
# Set the policy to reject the authz
self.policy({'count_domains': '!'})
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.count_domains(self.get_context())
def _fetch_all_domains(self):
"""Fetch all domains including deleted ones
"""
query = tables.domains.select()
return self.central_service.storage.session.execute(query).fetchall()
def _log_all_domains(self, zones, msg=None):
"""Log out a summary of zones
"""
if msg:
LOG.debug("--- %s ---" % msg)
cols = ('name', 'status', 'action', 'deleted', 'deleted_at',
'parent_domain_id')
tpl = "%-35s | %-11s | %-11s | %-32s | %-20s | %s"
LOG.debug(tpl % cols)
for z in zones:
LOG.debug(tpl % tuple(z[k] for k in cols))
def _assert_count_all_domains(self, n):
"""Assert count ALL domains including deleted ones
"""
zones = self._fetch_all_domains()
if len(zones) == n:
return
msg = "failed: %d zones expected, %d found" % (n, len(zones))
self._log_all_domains(zones, msg=msg)
raise Exception("Unexpected number of zones")
def _create_deleted_domain(self, name, mock_deletion_time):
# Create a domain and set it as deleted
domain = self.create_domain(name=name)
self._delete_domain(domain, mock_deletion_time)
return domain
def _delete_domain(self, domain, mock_deletion_time):
# Set a domain as deleted
zid = domain.id.replace('-', '')
query = tables.domains.update().\
where(tables.domains.c.id == zid).\
values(
action='NONE',
deleted=zid,
deleted_at=mock_deletion_time,
status='DELETED',
)
pxy = self.central_service.storage.session.execute(query)
self.assertEqual(1, pxy.rowcount)
return domain
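# Soft-delete convention used by these helpers: a zone eligible for
# purging has action='NONE', status='DELETED', deleted set to its own id
# with the dashes stripped, and deleted_at set to the deletion time.
# Live zones keep a falsy deleted marker, which is presumably why the
# purge criteria below filter on deleted '!0'.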
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_nothing_to_purge(self, mock_notifier):
# Create a zone
self.create_domain()
mock_notifier.reset_mock()
self._assert_count_all_domains(1)
now = datetime.datetime(2015, 7, 31, 0, 0)
self.central_service.purge_domains(
self.admin_context,
{
'status': 'DELETED',
'deleted': '!0',
'deleted_at': "<=%s" % now
},
limit=100
)
self._assert_count_all_domains(1)
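# The criterion dicts passed to purge_domains() encode the operator in
# the value string: '!0' reads as "not equal to 0", '<=%s' % now reads as
# "deleted at or before that time", and the sharding test further below
# uses 'BETWEEN low, high' for a range. This is how the storage filter
# syntax appears to behave based on these tests.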
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_one_to_purge(self, mock_notifier):
self.create_domain()
new = datetime.datetime(2015, 7, 30, 0, 0)
now = datetime.datetime(2015, 7, 31, 0, 0)
self._create_deleted_domain('example2.org.', new)
mock_notifier.reset_mock()
self._assert_count_all_domains(2)
self.central_service.purge_domains(
self.admin_context,
{
'deleted': '!0',
'deleted_at': "<=%s" % now
},
limit=100,
)
self._assert_count_all_domains(1)
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_one_to_purge_out_of_three(self, mock_notifier):
self.create_domain()
old = datetime.datetime(2015, 7, 20, 0, 0)
time_threshold = datetime.datetime(2015, 7, 25, 0, 0)
new = datetime.datetime(2015, 7, 30, 0, 0)
self._create_deleted_domain('old.org.', old)
self._create_deleted_domain('new.org.', new)
mock_notifier.reset_mock()
self._assert_count_all_domains(3)
purge_cnt = self.central_service.purge_domains(
self.admin_context,
{
'deleted': '!0',
'deleted_at': "<=%s" % time_threshold
},
limit=100,
)
self._assert_count_all_domains(2)
self.assertEqual(1, purge_cnt)
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_without_time_threshold(self, mock_notifier):
self.create_domain()
old = datetime.datetime(2015, 7, 20, 0, 0)
new = datetime.datetime(2015, 7, 30, 0, 0)
self._create_deleted_domain('old.org.', old)
self._create_deleted_domain('new.org.', new)
mock_notifier.reset_mock()
self._assert_count_all_domains(3)
purge_cnt = self.central_service.purge_domains(
self.admin_context,
{
'deleted': '!0',
},
limit=100,
)
self._assert_count_all_domains(1)
self.assertEqual(2, purge_cnt)
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_without_deleted_criterion(self, mock_notifier):
self.create_domain()
old = datetime.datetime(2015, 7, 20, 0, 0)
time_threshold = datetime.datetime(2015, 7, 25, 0, 0)
new = datetime.datetime(2015, 7, 30, 0, 0)
self._create_deleted_domain('old.org.', old)
self._create_deleted_domain('new.org.', new)
mock_notifier.reset_mock()
self._assert_count_all_domains(3)
# Nothing should be purged
purge_cnt = self.central_service.purge_domains(
self.admin_context,
{
'deleted_at': "<=%s" % time_threshold
},
limit=100,
)
self._assert_count_all_domains(3)
self.assertEqual(None, purge_cnt)
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_by_name(self, mock_notifier):
self.create_domain()
# The domain is purged (even if it was not deleted)
purge_cnt = self.central_service.purge_domains(
self.admin_context,
{
'name': 'example.com.'
},
limit=100,
)
self._assert_count_all_domains(0)
self.assertEqual(1, purge_cnt)
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_without_any_criterion(self, mock_notifier):
with testtools.ExpectedException(TypeError):
self.central_service.purge_domains(
self.admin_context,
limit=100,
)
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_with_sharding(self, mock_notifier):
old = datetime.datetime(2015, 7, 20, 0, 0)
time_threshold = datetime.datetime(2015, 7, 25, 0, 0)
domain = self._create_deleted_domain('old.org.', old)
mock_notifier.reset_mock()
# purge domains in an empty shard
self.central_service.purge_domains(
self.admin_context,
{
'deleted': '!0',
'deleted_at': "<=%s" % time_threshold,
'shard': 'BETWEEN 99998, 99999',
},
limit=100,
)
n_zones = self.central_service.count_domains(self.admin_context)
self.assertEqual(1, n_zones)
# purge domains in a shard that contains the domain created above
self.central_service.purge_domains(
self.admin_context,
{
'deleted': '!0',
'deleted_at': "<=%s" % time_threshold,
'shard': 'BETWEEN 0, %d' % domain.shard,
},
limit=100,
)
n_zones = self.central_service.count_domains(self.admin_context)
self.assertEqual(0, n_zones)
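# Sharding note: each zone carries a numeric shard attribute
# (domain.shard above), so bulk purges can be restricted to a slice of
# the shard keyspace. The first call targets an empty shard range and
# removes nothing; the second covers the zone's shard and removes it.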
def test_purge_domains_walk_up_domains(self):
Zone = namedtuple('Zone', 'id parent_domain_id')
zones = [Zone(x + 1, x) for x in range(1234, 1237)]
zones_by_id = {z.id: z for z in zones}
sid = self.central_service.storage._walk_up_domains(
zones[0], zones_by_id)
self.assertEqual(1234, sid)
sid = self.central_service.storage._walk_up_domains(
zones[-1], zones_by_id)
self.assertEqual(1234, sid)
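# _walk_up_domains() follows parent_domain_id links until it runs off the
# top of the chain and returns that ancestor's id, while the next test
# checks that a cycle raises IllegalParentDomain instead of looping
# forever. A minimal sketch of the idea (not the actual storage
# implementation) is:
#
#     def walk_up(zone, zones_by_id):
#         seen = set()
#         while zone.parent_domain_id in zones_by_id:
#             if zone.id in seen:
#                 raise exceptions.IllegalParentDomain()
#             seen.add(zone.id)
#             zone = zones_by_id[zone.parent_domain_id]
#         return zone.parent_domain_id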
def test_purge_domains_walk_up_domains_loop(self):
Zone = namedtuple('Zone', 'id parent_domain_id')
zones = [Zone(2, 1), Zone(3, 2), Zone(1, 3)]
zones_by_id = {z.id: z for z in zones}
with testtools.ExpectedException(exceptions.IllegalParentDomain):
self.central_service.storage._walk_up_domains(
zones[0], zones_by_id)
@mock.patch.object(notifier.Notifier, "info")
def test_purge_domains_with_orphans(self, mock_notifier):
old = datetime.datetime(2015, 7, 20, 0, 0)
time_threshold = datetime.datetime(2015, 7, 25, 0, 0)
# Create a tree of alive and deleted [sub]domains
z1 = self.create_domain(name='alive.org.')
z2 = self.create_domain(name='deleted.alive.org.')
z3 = self.create_domain(name='del2.deleted.alive.org.')
z4 = self.create_domain(name='del3.del2.deleted.alive.org.')
z5 = self.create_domain(name='alive2.del3.del2.deleted.alive.org.')
self._delete_domain(z2, old)
self._delete_domain(z3, old)
self._delete_domain(z4, old)
self.assertEqual(z1.id, z2['parent_domain_id'])
self.assertEqual(z2.id, z3['parent_domain_id'])
self.assertEqual(z3.id, z4['parent_domain_id'])
self.assertEqual(z4.id, z5['parent_domain_id'])
self._assert_count_all_domains(5)
mock_notifier.reset_mock()
zones = self._fetch_all_domains()
self._log_all_domains(zones)
self.central_service.purge_domains(
self.admin_context,
{
'deleted': '!0',
'deleted_at': "<=%s" % time_threshold
},
limit=100,
)
self._assert_count_all_domains(2)
zones = self._fetch_all_domains()
self._log_all_domains(zones)
for z in zones:
if z.name == 'alive.org.':
self.assertEqual(None, z.parent_domain_id)
elif z.name == 'alive2.del3.del2.deleted.alive.org.':
# alive2.del3.del2.deleted.alive.org. is to be reparented under
# alive.org.
self.assertEqual(z1.id, z.parent_domain_id)
else:
raise Exception("Unexpected zone %r" % z)
def test_touch_domain(self):
# Create a domain
expected_domain = self.create_domain()
# Touch the domain
self.central_service.touch_domain(
self.admin_context, expected_domain['id'])
# Fetch the domain again
domain = self.central_service.get_domain(
self.admin_context, expected_domain['id'])
# Ensure the serial was incremented
self.assertTrue(domain['serial'] > expected_domain['serial'])
def test_xfr_domain(self):
# Create a domain
fixture = self.get_domain_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
fixture['attributes'] = [{"key": "master", "value": "10.0.0.10"}]
# Create a zone
secondary = self.create_domain(**fixture)
mdns = mock.Mock()
with mock.patch.object(mdns_api.MdnsAPI, 'get_instance') as get_mdns:
get_mdns.return_value = mdns
mdns.get_serial_number.return_value = ('SUCCESS', 10, 1, )
self.central_service.xfr_domain(self.admin_context, secondary.id)
self.assertTrue(mdns.perform_zone_xfr.called)
def test_xfr_domain_same_serial(self):
# Create a domain
fixture = self.get_domain_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
fixture['attributes'] = [{"key": "master", "value": "10.0.0.10"}]
# Create a zone
secondary = self.create_domain(**fixture)
mdns = mock.Mock()
with mock.patch.object(mdns_api.MdnsAPI, 'get_instance') as get_mdns:
get_mdns.return_value = mdns
mdns.get_serial_number.return_value = ('SUCCESS', 1, 1, )
self.central_service.xfr_domain(self.admin_context, secondary.id)
self.assertFalse(mdns.perform_zone_xfr.called)
def test_xfr_domain_lower_serial(self):
# Create a domain
fixture = self.get_domain_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
fixture['attributes'] = [{"key": "master", "value": "10.0.0.10"}]
fixture['serial'] = 10
# Create a zone
secondary = self.create_domain(**fixture)
secondary.serial
mdns = mock.Mock()
with mock.patch.object(mdns_api.MdnsAPI, 'get_instance') as get_mdns:
get_mdns.return_value = mdns
mdns.get_serial_number.return_value = ('SUCCESS', 0, 1, )
self.central_service.xfr_domain(self.admin_context, secondary.id)
self.assertFalse(mdns.perform_zone_xfr.called)
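# The three xfr tests above reduce to one rule: central only asks mdns to
# perform_zone_xfr() when the serial reported by the master (the second
# element of the get_serial_number() result tuple) is higher than the
# serial stored for the secondary zone; equal or lower serials are a
# no-op.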
def test_xfr_domain_invalid_type(self):
domain = self.create_domain()
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.xfr_domain(self.admin_context, domain.id)
# RecordSet Tests
def test_create_recordset(self):
domain = self.create_domain()
original_serial = domain.serial
# Create the Object
recordset = objects.RecordSet(name='www.%s' % domain.name, type='A')
# Persist the Object
recordset = self.central_service.create_recordset(
self.admin_context, domain.id, recordset=recordset)
# Get the zone again to check if serial increased
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
# Ensure all values have been set correctly
self.assertIsNotNone(recordset.id)
self.assertEqual('www.%s' % domain.name, recordset.name)
self.assertEqual('A', recordset.type)
self.assertIsNotNone(recordset.records)
# The serial number does not get updated if there are no records
# in the recordset
self.assertEqual(original_serial, new_serial)
def test_create_recordset_with_records(self):
domain = self.create_domain()
original_serial = domain.serial
# Create the Object
recordset = objects.RecordSet(
name='www.%s' % domain.name,
type='A',
records=objects.RecordList(objects=[
objects.Record(data='192.3.3.15'),
objects.Record(data='192.3.3.16'),
])
)
# Persist the Object
recordset = self.central_service.create_recordset(
self.admin_context, domain.id, recordset=recordset)
# Get updated serial number
updated_zone = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_zone.serial
# Ensure all values have been set correctly
self.assertIsNotNone(recordset.records)
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
self.assertThat(new_serial, GreaterThan(original_serial))
def test_create_recordset_over_quota(self):
# SOA, NS recordsets exist by default.
self.config(quota_domain_recordsets=3)
domain = self.create_domain()
self.create_recordset(domain)
with testtools.ExpectedException(exceptions.OverQuota):
self.create_recordset(domain)
def test_create_invalid_recordset_location_cname_at_apex(self):
domain = self.create_domain()
values = dict(
name=domain['name'],
type='CNAME'
)
# Attempt to create a CNAME record at the apex
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_invalid_recordset_location_cname_sharing(self):
domain = self.create_domain()
expected = self.create_recordset(domain)
values = dict(
name=expected['name'],
type='CNAME'
)
# Attempt to create a CNAME record alongside another record
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_invalid_recordset_location_wrong_domain(self):
domain = self.create_domain()
other_domain = self.create_domain(fixture=1)
values = dict(
name=other_domain['name'],
type='A'
)
# Attempt to create a record in the incorrect domain
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_invalid_recordset_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=100,
group='service:central')
domain = self.create_domain()
values = dict(
name='www.%s' % domain['name'],
type='A',
ttl=10
)
# Attempt to create an A record with a TTL below the minimum
with testtools.ExpectedException(exceptions.InvalidTTL):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_recordset_no_min_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=None,
group='service:central')
domain = self.create_domain()
values = dict(
name='www.%s' % domain['name'],
type='A',
ttl=10
)
recordset = self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
self.assertEqual(values['ttl'], recordset['ttl'])
def test_get_recordset(self):
domain = self.create_domain()
# Create a recordset
expected = self.create_recordset(domain)
# Retrieve it, and ensure it's the same
recordset = self.central_service.get_recordset(
self.admin_context, domain['id'], expected['id'])
self.assertEqual(expected['id'], recordset['id'])
self.assertEqual(expected['name'], recordset['name'])
self.assertEqual(expected['type'], recordset['type'])
def test_get_recordset_with_records(self):
domain = self.create_domain()
# Create a recordset and two records
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Retrieve it, and ensure it's the same
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
def test_get_recordset_incorrect_domain_id(self):
domain = self.create_domain()
other_domain = self.create_domain(fixture=1)
# Create a recordset
expected = self.create_recordset(domain)
# Ensure we get a 404 if we use the incorrect domain_id
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_recordset(
self.admin_context, other_domain['id'], expected['id'])
def test_find_recordsets(self):
domain = self.create_domain()
criterion = {'domain_id': domain['id']}
# Ensure we have two recordsets to start with as SOA & NS
# recordsets are created automatically
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual(2, len(recordsets))
# Create a single recordset (using default values)
self.create_recordset(domain, name='www.%s' % domain['name'])
# Ensure we can retrieve the newly created recordset
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual(3, len(recordsets))
self.assertEqual('www.%s' % domain['name'], recordsets[2]['name'])
# Create a second recordset
self.create_recordset(domain, name='mail.%s' % domain['name'])
# Ensure we can retrieve both recordsets
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual(4, len(recordsets))
self.assertEqual('www.%s' % domain['name'], recordsets[2]['name'])
self.assertEqual('mail.%s' % domain['name'], recordsets[3]['name'])
def test_find_recordset(self):
domain = self.create_domain()
# Create a recordset
expected = self.create_recordset(domain)
# Retrieve it, and ensure it's the same
criterion = {'domain_id': domain['id'], 'name': expected['name']}
recordset = self.central_service.find_recordset(
self.admin_context, criterion)
self.assertEqual(expected['id'], recordset['id'])
self.assertEqual(expected['name'], recordset['name'])
def test_find_recordset_with_records(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Retrieve it, and ensure it's the same
criterion = {'domain_id': domain.id, 'name': recordset.name}
recordset = self.central_service.find_recordset(
self.admin_context, criterion)
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
def test_update_recordset(self):
# Create a domain
domain = self.create_domain()
original_serial = domain.serial
# Create a recordset
recordset = self.create_recordset(domain)
# Update the recordset
recordset.ttl = 1800
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Get domain again to verify that serial number was updated
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
# Fetch the resource again
recordset = self.central_service.get_recordset(
self.admin_context, recordset.domain_id, recordset.id)
# Ensure the new value took
self.assertEqual(1800, recordset.ttl)
self.assertThat(new_serial, GreaterThan(original_serial))
def test_update_recordset_deadlock_retry(self):
# Create a domain
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Update the recordset
recordset.ttl = 1800
# Due to Python's scoping of i, we need to make it a mutable type
# for the counter to work. In Py3, we can use the nonlocal keyword.
i = [False]
def fail_once_then_pass():
if i[0] is True:
return self.central_service.storage.session.commit()
else:
i[0] = True
raise db_exception.DBDeadlock()
with mock.patch.object(self.central_service.storage, 'commit',
side_effect=fail_once_then_pass):
# Perform the update
recordset = self.central_service.update_recordset(
self.admin_context, recordset)
# Ensure i[0] is True, indicating the side_effect code above was
# triggered
self.assertTrue(i[0])
# Ensure the recordset was updated correctly
self.assertEqual(1800, recordset.ttl)
def test_update_recordset_with_record_create(self):
# Create a domain
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Append two new Records
recordset.records.append(objects.Record(data='192.0.2.1'))
recordset.records.append(objects.Record(data='192.0.2.2'))
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Ensure two Records are attached to the RecordSet correctly
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
def test_update_recordset_with_record_delete(self):
# Create a domain
domain = self.create_domain()
original_serial = domain.serial
# Create a recordset and two records
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Append two new Records
recordset.records.append(objects.Record(data='192.0.2.1'))
recordset.records.append(objects.Record(data='192.0.2.2'))
# Remove one of the Records
recordset.records.pop(0)
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Fetch the Domain again
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
# Ensure only one Record remains attached to the RecordSet
self.assertEqual(1, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertThat(new_serial, GreaterThan(original_serial))
def test_update_recordset_with_record_update(self):
# Create a domain
domain = self.create_domain()
# Create a recordset and two records
recordset = self.create_recordset(domain, 'A')
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Update one of the Records
updated_record_id = recordset.records[0].id
recordset.records[0].data = '192.0.2.255'
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Ensure the Record has been updated
for record in recordset.records:
if record.id != updated_record_id:
continue
self.assertEqual('192.0.2.255', record.data)
return # Exits this test early as we succeeded
raise Exception('Updated record not found')
def test_update_recordset_without_incrementing_serial(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Fetch the domain so we have the latest serial number
domain_before = self.central_service.get_domain(
self.admin_context, domain.id)
# Update the recordset
recordset.ttl = 1800
# Perform the update
self.central_service.update_recordset(
self.admin_context, recordset, increment_serial=False)
# Fetch the resource again
recordset = self.central_service.get_recordset(
self.admin_context, recordset.domain_id, recordset.id)
# Ensure the recordset was updated correctly
self.assertEqual(1800, recordset.ttl)
# Ensure the domains serial number was not updated
domain_after = self.central_service.get_domain(
self.admin_context, domain.id)
self.assertEqual(domain_before.serial, domain_after.serial)
def test_update_recordset_immutable_domain_id(self):
domain = self.create_domain()
other_domain = self.create_domain(fixture=1)
# Create a recordset
recordset = self.create_recordset(domain)
# Update the recordset
recordset.ttl = 1800
recordset.domain_id = other_domain.id
# Ensure we get a BadRequest if we change the domain_id
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_recordset(
self.admin_context, recordset)
def test_update_recordset_immutable_tenant_id(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Update the recordset
recordset.ttl = 1800
recordset.tenant_id = 'other-tenant'
# Ensure we get a BadRequest if we change the tenant_id
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_recordset(
self.admin_context, recordset)
def test_update_recordset_immutable_type(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
cname_recordset = self.create_recordset(domain, type='CNAME')
# Update the recordset
recordset.ttl = 1800
recordset.type = cname_recordset.type
# Ensure we get a BadRequest if we change the recordset type
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_recordset(
self.admin_context, recordset)
def test_delete_recordset(self):
domain = self.create_domain()
original_serial = domain.serial
# Create a recordset
recordset = self.create_recordset(domain)
# Delete the recordset
self.central_service.delete_recordset(
self.admin_context, domain['id'], recordset['id'])
# Fetch the recordset again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_recordset(
self.admin_context, domain['id'], recordset['id'])
# Fetch the domain again to verify serial number increased
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
self.assertThat(new_serial, GreaterThan(original_serial))
def test_delete_recordset_without_incrementing_serial(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Fetch the domain so we have the latest serial number
domain_before = self.central_service.get_domain(
self.admin_context, domain['id'])
# Delete the recordset
self.central_service.delete_recordset(
self.admin_context, domain['id'], recordset['id'],
increment_serial=False)
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_recordset(
self.admin_context, domain['id'], recordset['id'])
# Ensure the domains serial number was not updated
domain_after = self.central_service.get_domain(
self.admin_context, domain['id'])
self.assertEqual(domain_before['serial'], domain_after['serial'])
def test_delete_recordset_incorrect_domain_id(self):
domain = self.create_domain()
other_domain = self.create_domain(fixture=1)
# Create a recordset
recordset = self.create_recordset(domain)
# Ensure we get a 404 if we use the incorrect domain_id
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.delete_recordset(
self.admin_context, other_domain['id'], recordset['id'])
def test_count_recordsets(self):
# in the beginning, there should be nothing
recordsets = self.central_service.count_recordsets(self.admin_context)
self.assertEqual(0, recordsets)
# Create a domain to put our recordset in
domain = self.create_domain()
# Create a recordset
self.create_recordset(domain)
# We should have 1 recordset now, plus SOA & NS recordsets
recordsets = self.central_service.count_recordsets(self.admin_context)
self.assertEqual(3, recordsets)
def test_count_recordsets_policy_check(self):
# Set the policy to reject the authz
self.policy({'count_recordsets': '!'})
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.count_recordsets(self.get_context())
# Record Tests
def test_create_record(self):
domain = self.create_domain()
recordset = self.create_recordset(domain, type='A')
values = dict(
data='127.0.0.1'
)
# Create a record
record = self.central_service.create_record(
self.admin_context, domain['id'], recordset['id'],
objects.Record.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(record['id'])
self.assertEqual(values['data'], record['data'])
self.assertIn('status', record)
def test_create_record_over_domain_quota(self):
# SOA and NS Records exist
self.config(quota_domain_records=3)
# Creating the domain automatically creates SOA & NS records
domain = self.create_domain()
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
with testtools.ExpectedException(exceptions.OverQuota):
self.create_record(domain, recordset)
def test_create_record_over_recordset_quota(self):
self.config(quota_recordset_records=1)
# Creating the domain automatically creates SOA & NS records
domain = self.create_domain()
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
with testtools.ExpectedException(exceptions.OverQuota):
self.create_record(domain, recordset)
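# Two different quota knobs are exercised above: quota_domain_records
# counts every record in the zone, including the automatically created
# SOA and NS records, while quota_recordset_records caps the number of
# records within a single recordset.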
def test_create_record_without_incrementing_serial(self):
domain = self.create_domain()
recordset = self.create_recordset(domain, type='A')
values = dict(
data='127.0.0.1'
)
# Create a record
self.central_service.create_record(
self.admin_context, domain['id'], recordset['id'],
objects.Record.from_dict(values),
increment_serial=False)
# Ensure the domains serial number was not updated
updated_domain = self.central_service.get_domain(
self.admin_context, domain['id'])
self.assertEqual(domain['serial'], updated_domain['serial'])
def test_get_record(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
expected = self.create_record(domain, recordset)
# Retrieve it, and ensure it's the same
record = self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'], expected['id'])
self.assertEqual(expected['id'], record['id'])
self.assertEqual(expected['data'], record['data'])
self.assertIn('status', record)
def test_get_record_incorrect_domain_id(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
other_domain = self.create_domain(fixture=1)
# Create a record
expected = self.create_record(domain, recordset)
# Ensure we get a 404 if we use the incorrect domain_id
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.get_record(
self.admin_context, other_domain['id'], recordset['id'],
expected['id'])
def test_get_record_incorrect_recordset_id(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
other_recordset = self.create_recordset(domain, fixture=1)
# Create a record
expected = self.create_record(domain, recordset)
# Ensure we get a 404 if we use the incorrect recordset_id
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], other_recordset['id'],
expected['id'])
def test_find_records(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id']
}
# Ensure we have no records to start with.
records = self.central_service.find_records(
self.admin_context, criterion)
self.assertEqual(0, len(records))
# Create a single record (using default values)
expected_one = self.create_record(domain, recordset)
# Ensure we can retrieve the newly created record
records = self.central_service.find_records(
self.admin_context, criterion)
self.assertEqual(1, len(records))
self.assertEqual(expected_one['data'], records[0]['data'])
# Create a second record
expected_two = self.create_record(domain, recordset, fixture=1)
# Ensure we can retrieve both records
records = self.central_service.find_records(
self.admin_context, criterion)
self.assertEqual(2, len(records))
self.assertEqual(expected_one['data'], records[0]['data'])
self.assertEqual(expected_two['data'], records[1]['data'])
def test_find_record(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
expected = self.create_record(domain, recordset)
# Retrieve it, and ensure it's the same
criterion = {
'domain_id': domain['id'],
'recordset_id': recordset['id'],
'data': expected['data']
}
record = self.central_service.find_record(
self.admin_context, criterion)
self.assertEqual(expected['id'], record['id'])
self.assertEqual(expected['data'], record['data'])
self.assertIn('status', record)
def test_update_record(self):
domain = self.create_domain()
recordset = self.create_recordset(domain, 'A')
# Create a record
record = self.create_record(domain, recordset)
# Update the Object
record.data = '192.0.2.255'
# Perform the update
self.central_service.update_record(self.admin_context, record)
# Fetch the resource again
record = self.central_service.get_record(
self.admin_context, record.domain_id, record.recordset_id,
record.id)
# Ensure the new value took
self.assertEqual('192.0.2.255', record.data)
def test_update_record_without_incrementing_serial(self):
domain = self.create_domain()
recordset = self.create_recordset(domain, 'A')
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain so we have the latest serial number
domain_before = self.central_service.get_domain(
self.admin_context, domain.id)
# Update the Object
record.data = '192.0.2.255'
# Perform the update
self.central_service.update_record(
self.admin_context, record, increment_serial=False)
# Fetch the resource again
record = self.central_service.get_record(
self.admin_context, record.domain_id, record.recordset_id,
record.id)
# Ensure the new value took
self.assertEqual('192.0.2.255', record.data)
# Ensure the domains serial number was not updated
domain_after = self.central_service.get_domain(
self.admin_context, domain.id)
self.assertEqual(domain_before.serial, domain_after.serial)
def test_update_record_immutable_domain_id(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
other_domain = self.create_domain(fixture=1)
# Create a record
record = self.create_record(domain, recordset)
# Update the record
record.domain_id = other_domain.id
# Ensure we get a BadRequest if we change the domain_id
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_record(self.admin_context, record)
def test_update_record_immutable_recordset_id(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
other_recordset = self.create_recordset(domain, fixture=1)
# Create a record
record = self.create_record(domain, recordset)
# Update the record
record.recordset_id = other_recordset.id
# Ensure we get a BadRequest if we change the recordset_id
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_record(self.admin_context, record)
def test_delete_record(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain serial number
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'])
# Ensure the domain serial number was updated
new_domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.assertNotEqual(new_domain_serial, domain_serial)
# Fetch the record
deleted_record = self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the record is marked for deletion
self.assertEqual(record.id, deleted_record.id)
self.assertEqual(record.data, deleted_record.data)
self.assertEqual(record.domain_id, deleted_record.domain_id)
self.assertEqual('PENDING', deleted_record.status)
self.assertEqual(record.tenant_id, deleted_record.tenant_id)
self.assertEqual(record.recordset_id, deleted_record.recordset_id)
self.assertEqual('DELETE', deleted_record.action)
self.assertEqual(new_domain_serial, deleted_record.serial)
def test_delete_record_without_incrementing_serial(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain serial number
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'],
increment_serial=False)
# Ensure the domains serial number was not updated
new_domain_serial = self.central_service.get_domain(
self.admin_context, domain['id'])['serial']
self.assertEqual(domain_serial, new_domain_serial)
# Fetch the record
deleted_record = self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the record is marked for deletion
self.assertEqual(record.id, deleted_record.id)
self.assertEqual(record.data, deleted_record.data)
self.assertEqual(record.domain_id, deleted_record.domain_id)
self.assertEqual('PENDING', deleted_record.status)
self.assertEqual(record.tenant_id, deleted_record.tenant_id)
self.assertEqual(record.recordset_id, deleted_record.recordset_id)
self.assertEqual('DELETE', deleted_record.action)
self.assertEqual(new_domain_serial, deleted_record.serial)
def test_delete_record_incorrect_domain_id(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
other_domain = self.create_domain(fixture=1)
# Create a record
record = self.create_record(domain, recordset)
# Ensure we get a 404 if we use the incorrect domain_id
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.delete_record(
self.admin_context, other_domain['id'], recordset['id'],
record['id'])
def test_delete_record_incorrect_recordset_id(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
other_recordset = self.create_recordset(domain, fixture=1)
# Create a record
record = self.create_record(domain, recordset)
# Ensure we get a 404 if we use the incorrect recordset_id
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.delete_record(
self.admin_context, domain['id'], other_recordset['id'],
record['id'])
def test_count_records(self):
# in the beginning, there should be nothing
records = self.central_service.count_records(self.admin_context)
self.assertEqual(0, records)
# Create a domain and recordset to put our record in
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
self.create_record(domain, recordset)
# we should have 1 record now, plus SOA & NS records
records = self.central_service.count_records(self.admin_context)
self.assertEqual(3, records)
def test_count_records_policy_check(self):
# Set the policy to reject the authz
self.policy({'count_records': '!'})
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.count_records(self.get_context())
def test_get_floatingip_no_record(self):
context = self.get_context(tenant='a')
fip = self.network_api.fake.allocate_floatingip(context.tenant)
fip_ptr = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
self.assertEqual(fip['region'], fip_ptr['region'])
self.assertEqual(fip['id'], fip_ptr['id'])
self.assertEqual(fip['address'], fip_ptr['address'])
self.assertEqual(None, fip_ptr['ptrdname'])
def test_get_floatingip_with_record(self):
context = self.get_context(tenant='a')
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip(context.tenant)
expected = self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
actual = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
self.assertEqual(expected, actual)
def test_get_floatingip_not_allocated(self):
context = self.get_context(tenant='a')
fip = self.network_api.fake.allocate_floatingip(context.tenant)
self.network_api.fake.deallocate_floatingip(fip['id'])
with testtools.ExpectedException(exceptions.NotFound):
self.central_service.get_floatingip(
context, fip['region'], fip['id'])
def test_get_floatingip_deallocated_and_invalidate(self):
context_a = self.get_context(tenant='a')
elevated_a = context_a.elevated()
elevated_a.all_tenants = True
context_b = self.get_context(tenant='b')
fixture = self.get_ptr_fixture()
# First allocate and create a FIP as tenant a
fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
domain_id = self.central_service.find_record(
elevated_a, criterion).domain_id
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
self.network_api.fake.deallocate_floatingip(fip['id'])
with testtools.ExpectedException(exceptions.NotFound):
self.central_service.get_floatingip(
context_a, fip['region'], fip['id'])
# Ensure that the record is still in DB (No invalidation)
self.central_service.find_record(elevated_a, criterion)
# Now give the fip id to tenant 'b' and see that it gets deleted
self.network_api.fake.allocate_floatingip(
context_b.tenant, fip['id'])
# There should be a fip returned with ptrdname of None
fip_ptr = self.central_service.get_floatingip(
context_b, fip['region'], fip['id'])
self.assertEqual(None, fip_ptr['ptrdname'])
# Simulate the invalidation on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
# Ensure that the old record for tenant a for the fip now owned by
# tenant b is gone
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.find_record(elevated_a, criterion)
def test_list_floatingips_no_allocations(self):
context = self.get_context(tenant='a')
fips = self.central_service.list_floatingips(context)
self.assertEqual(0, len(fips))
def test_list_floatingips_no_record(self):
context = self.get_context(tenant='a')
fip = self.network_api.fake.allocate_floatingip(context.tenant)
fips = self.central_service.list_floatingips(context)
self.assertEqual(1, len(fips))
self.assertEqual(None, fips[0]['ptrdname'])
self.assertEqual(fip['id'], fips[0]['id'])
self.assertEqual(fip['region'], fips[0]['region'])
self.assertEqual(fip['address'], fips[0]['address'])
self.assertEqual(None, fips[0]['description'])
def test_list_floatingips_with_record(self):
context = self.get_context(tenant='a')
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip(context.tenant)
fip_ptr = self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
fips = self.central_service.list_floatingips(context)
self.assertEqual(1, len(fips))
self.assertEqual(fip_ptr['ptrdname'], fips[0]['ptrdname'])
self.assertEqual(fip_ptr['id'], fips[0]['id'])
self.assertEqual(fip_ptr['region'], fips[0]['region'])
self.assertEqual(fip_ptr['address'], fips[0]['address'])
self.assertEqual(fip_ptr['description'], fips[0]['description'])
def test_list_floatingips_deallocated_and_invalidate(self):
context_a = self.get_context(tenant='a')
elevated_a = context_a.elevated()
elevated_a.all_tenants = True
context_b = self.get_context(tenant='b')
fixture = self.get_ptr_fixture()
# First allocate and create a FIP as tenant a
fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
domain_id = self.central_service.find_record(
elevated_a, criterion).domain_id
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
self.network_api.fake.deallocate_floatingip(fip['id'])
fips = self.central_service.list_floatingips(context_a)
        self.assertEqual(0, len(fips))
# Ensure that the record is still in DB (No invalidation)
self.central_service.find_record(elevated_a, criterion)
        # Now give the fip id to tenant 'b' and see that it gets deleted
self.network_api.fake.allocate_floatingip(
context_b.tenant, fip['id'])
# There should be a fip returned with ptrdname of None
fips = self.central_service.list_floatingips(context_b)
self.assertEqual(1, len(fips))
self.assertEqual(None, fips[0]['ptrdname'])
# Simulate the invalidation on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
# Ensure that the old record for tenant a for the fip now owned by
# tenant b is gone
with testtools.ExpectedException(exceptions.RecordNotFound):
self.central_service.find_record(elevated_a, criterion)
def test_set_floatingip(self):
context = self.get_context(tenant='a')
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip(context.tenant)
fip_ptr = self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
self.assertEqual(fixture['ptrdname'], fip_ptr['ptrdname'])
self.assertEqual(fip['address'], fip_ptr['address'])
self.assertEqual(None, fip_ptr['description'])
self.assertIsNotNone(fip_ptr['ttl'])
def test_set_floatingip_no_managed_resource_tenant_id(self):
context = self.get_context(tenant='a')
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip(context.tenant)
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
tenant_id = "00000000-0000-0000-0000-000000000000"
elevated_context = context.elevated()
elevated_context.all_tenants = True
        # The domain created should have the default all-zeros UUID as owner
domain = self.central_service.find_domain(
elevated_context,
{"tenant_id": tenant_id})
self.assertEqual(tenant_id, domain.tenant_id)
def test_set_floatingip_removes_old_record(self):
context_a = self.get_context(tenant='a')
elevated_a = context_a.elevated()
elevated_a.all_tenants = True
context_b = self.get_context(tenant='b')
fixture = self.get_ptr_fixture()
        # Test that, as tenant a, re-setting an already-set floating IP leaves
        # only 1 record
fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture)
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.tenant}
domain_id = self.central_service.find_record(
elevated_a, criterion).domain_id
fixture2 = self.get_ptr_fixture(fixture=1)
self.central_service.update_floatingip(
context_a, fip['region'], fip['id'], fixture2)
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
count = self.central_service.count_records(
elevated_a, {'managed_resource_id': fip['id']})
self.assertEqual(1, count)
self.network_api.fake.deallocate_floatingip(fip['id'])
# Now test that tenant b allocating the same fip and setting a ptr
# deletes any records
fip = self.network_api.fake.allocate_floatingip(
context_b.tenant, fip['id'])
self.central_service.update_floatingip(
context_b, fip['region'], fip['id'], fixture)
# Simulate the update on the backend
domain_serial = self.central_service.get_domain(
elevated_a, domain_id).serial
self.central_service.update_status(
elevated_a, domain_id, "SUCCESS", domain_serial)
count = self.central_service.count_records(
elevated_a, {'managed_resource_id': fip['id']})
self.assertEqual(1, count)
def test_set_floatingip_not_allocated(self):
context = self.get_context(tenant='a')
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip(context.tenant)
self.network_api.fake.deallocate_floatingip(fip['id'])
        # Attempting to set a PTR on a deallocated (or otherwise not-owned) FIP
        # should fail with NotFound
with testtools.ExpectedException(exceptions.NotFound):
fixture = self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
def test_unset_floatingip(self):
context = self.get_context(tenant='a')
fixture = self.get_ptr_fixture()
fip = self.network_api.fake.allocate_floatingip(context.tenant)
fip_ptr = self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
self.assertEqual(fixture['ptrdname'], fip_ptr['ptrdname'])
self.assertEqual(fip['address'], fip_ptr['address'])
self.assertEqual(None, fip_ptr['description'])
self.assertIsNotNone(fip_ptr['ttl'])
self.central_service.update_floatingip(
context, fip['region'], fip['id'],
objects.FloatingIP().from_dict({'ptrdname': None}))
self.central_service.get_floatingip(
context, fip['region'], fip['id'])
# Blacklist Tests
def test_create_blacklist(self):
values = self.get_blacklist_fixture(fixture=0)
blacklist = self.create_blacklist(fixture=0)
# Verify all values have been set correctly
self.assertIsNotNone(blacklist['id'])
self.assertEqual(values['pattern'], blacklist['pattern'])
self.assertEqual(values['description'], blacklist['description'])
def test_get_blacklist(self):
# Create a blacklisted zone
expected = self.create_blacklist(fixture=0)
# Retrieve it, and verify it is the same
blacklist = self.central_service.get_blacklist(
self.admin_context, expected['id'])
self.assertEqual(expected['id'], blacklist['id'])
self.assertEqual(expected['pattern'], blacklist['pattern'])
self.assertEqual(expected['description'], blacklist['description'])
def test_find_blacklists(self):
# Verify there are no blacklisted zones to start with
blacklists = self.central_service.find_blacklists(
self.admin_context)
self.assertEqual(0, len(blacklists))
# Create a single blacklisted zone
self.create_blacklist()
# Verify we can retrieve the newly created blacklist
blacklists = self.central_service.find_blacklists(
self.admin_context)
values1 = self.get_blacklist_fixture(fixture=0)
self.assertEqual(1, len(blacklists))
self.assertEqual(values1['pattern'], blacklists[0]['pattern'])
# Create a second blacklisted zone
self.create_blacklist(fixture=1)
# Verify we can retrieve both blacklisted zones
blacklists = self.central_service.find_blacklists(
self.admin_context)
values2 = self.get_blacklist_fixture(fixture=1)
self.assertEqual(2, len(blacklists))
self.assertEqual(values1['pattern'], blacklists[0]['pattern'])
self.assertEqual(values2['pattern'], blacklists[1]['pattern'])
def test_find_blacklist(self):
# Create a blacklisted zone
expected = self.create_blacklist(fixture=0)
# Retrieve the newly created blacklist
blacklist = self.central_service.find_blacklist(
self.admin_context, {'id': expected['id']})
self.assertEqual(expected['pattern'], blacklist['pattern'])
self.assertEqual(expected['description'], blacklist['description'])
def test_update_blacklist(self):
# Create a blacklisted zone
blacklist = self.create_blacklist(fixture=0)
# Update the Object
blacklist.description = "New Comment"
# Perform the update
self.central_service.update_blacklist(self.admin_context, blacklist)
# Fetch the resource again
blacklist = self.central_service.get_blacklist(self.admin_context,
blacklist.id)
# Verify that the record was updated correctly
self.assertEqual("New Comment", blacklist.description)
def test_delete_blacklist(self):
# Create a blacklisted zone
blacklist = self.create_blacklist()
# Delete the blacklist
self.central_service.delete_blacklist(self.admin_context,
blacklist['id'])
# Try to fetch the blacklist to verify an exception is raised
with testtools.ExpectedException(exceptions.BlacklistNotFound):
self.central_service.get_blacklist(self.admin_context,
blacklist['id'])
# SOA recordset tests
def test_create_SOA(self):
# A SOA record should automatically be created each time
# a zone is created
# Create a zone
zone = self.create_domain(name='example3.org.')
# Retrieve SOA
criterion = {'domain_id': zone['id'], 'type': 'SOA'}
soa = self.central_service.find_recordset(self.admin_context,
criterion)
# Split out the various soa values
soa_record_values = soa.records[0].data.split()
zone_email = zone['email'].replace("@", ".")
zone_email += (".")
# Ensure all values have been set correctly
self.assertIsNotNone(soa.id)
self.assertEqual('SOA', soa.type)
self.assertIsNotNone(soa.records)
self.assertEqual(zone['serial'], int(soa_record_values[2]))
self.assertEqual(zone_email, soa_record_values[1])
self.assertEqual(zone['refresh'], int(soa_record_values[3]))
self.assertEqual(zone['retry'], int(soa_record_values[4]))
self.assertEqual(zone['expire'], int(soa_record_values[5]))
self.assertEqual(zone['minimum'], int(soa_record_values[6]))
def test_update_soa(self):
# Anytime the zone's serial number is incremented
# the SOA recordset should automatically be updated
zone = self.create_domain(email='[email protected]')
# Update the object
zone.email = '[email protected]'
# Perform the update
self.central_service.update_domain(self.admin_context, zone)
# Fetch the domain again
updated_zone = self.central_service.get_domain(self.admin_context,
zone.id)
# Retrieve SOA
criterion = {'domain_id': zone['id'], 'type': 'SOA'}
soa = self.central_service.find_recordset(self.admin_context,
criterion)
# Split out the various soa values
soa_record_values = soa.records[0].data.split()
self.assertEqual(updated_zone['serial'], int(soa_record_values[2]))
# Pool Tests
def test_create_pool(self):
# Get the values
values = self.get_pool_fixture(fixture=0)
# Create the pool using the values
pool = self.central_service.create_pool(
self.admin_context, objects.Pool.from_dict(values))
# Verify that all the values were set correctly
self.assertIsNotNone(pool['id'])
self.assertIsNotNone(pool['created_at'])
self.assertIsNotNone(pool['version'])
self.assertIsNotNone(pool['tenant_id'])
self.assertIsNone(pool['updated_at'])
self.assertIsNotNone(pool['attributes'])
self.assertIsNotNone(pool['ns_records'])
self.assertEqual(values['name'], pool['name'])
# Compare the actual values of attributes and ns_records
for k in range(0, len(values['attributes'])):
self.assertDictContainsSubset(
values['attributes'][k],
pool['attributes'][k].to_primitive()['designate_object.data']
)
for k in range(0, len(values['ns_records'])):
self.assertDictContainsSubset(
values['ns_records'][k],
pool['ns_records'][k].to_primitive()['designate_object.data'])
def test_get_pool(self):
# Create a server pool
expected = self.create_pool(fixture=0)
# GET the pool and verify it is the same
pool = self.central_service.get_pool(self.admin_context,
expected['id'])
self.assertEqual(expected['id'], pool['id'])
self.assertEqual(expected['created_at'], pool['created_at'])
self.assertEqual(expected['version'], pool['version'])
self.assertEqual(expected['tenant_id'], pool['tenant_id'])
self.assertEqual(expected['name'], pool['name'])
# Compare the actual values of attributes and ns_records
for k in range(0, len(expected['attributes'])):
self.assertEqual(
expected['attributes'][k].to_primitive()
['designate_object.data'],
pool['attributes'][k].to_primitive()['designate_object.data'])
for k in range(0, len(expected['ns_records'])):
self.assertEqual(
expected['ns_records'][k].to_primitive()
['designate_object.data'],
pool['ns_records'][k].to_primitive()['designate_object.data'])
def test_find_pools(self):
# Verify no pools exist, except for default pool
pools = self.central_service.find_pools(self.admin_context)
self.assertEqual(1, len(pools))
# Create a pool
self.create_pool(fixture=0)
# Verify we can find the newly created pool
pools = self.central_service.find_pools(self.admin_context)
values = self.get_pool_fixture(fixture=0)
self.assertEqual(2, len(pools))
self.assertEqual(values['name'], pools[1]['name'])
# Compare the actual values of attributes and ns_records
expected_attributes = values['attributes'][0]
actual_attributes = \
pools[1]['attributes'][0].to_primitive()['designate_object.data']
for k in expected_attributes:
self.assertEqual(expected_attributes[k], actual_attributes[k])
expected_ns_records = values['ns_records'][0]
actual_ns_records = \
pools[1]['ns_records'][0].to_primitive()['designate_object.data']
for k in expected_ns_records:
self.assertEqual(expected_ns_records[k], actual_ns_records[k])
def test_find_pool(self):
# Create a server pool
expected = self.create_pool(fixture=0)
# Find the created pool
pool = self.central_service.find_pool(self.admin_context,
{'id': expected['id']})
self.assertEqual(expected['name'], pool['name'])
# Compare the actual values of attributes and ns_records
for k in range(0, len(expected['attributes'])):
self.assertEqual(
expected['attributes'][k].to_primitive()
['designate_object.data'],
pool['attributes'][k].to_primitive()['designate_object.data'])
for k in range(0, len(expected['ns_records'])):
self.assertEqual(
expected['ns_records'][k].to_primitive()
['designate_object.data'],
pool['ns_records'][k].to_primitive()['designate_object.data'])
def test_update_pool(self):
# Create a server pool
pool = self.create_pool(fixture=0)
# Update and save the pool
pool.description = 'New Comment'
self.central_service.update_pool(self.admin_context, pool)
# Fetch the pool
pool = self.central_service.get_pool(self.admin_context, pool.id)
# Verify that the pool was updated correctly
self.assertEqual("New Comment", pool.description)
def test_update_pool_add_ns_record(self):
# Create a server pool and domain
pool = self.create_pool(fixture=0)
domain = self.create_domain(pool_id=pool.id)
ns_record_count = len(pool.ns_records)
new_ns_record = objects.PoolNsRecord(
priority=10,
hostname='ns-new.example.org.')
# Update and save the pool
pool.ns_records.append(new_ns_record)
self.central_service.update_pool(self.admin_context, pool)
# Fetch the pool
pool = self.central_service.get_pool(self.admin_context, pool.id)
# Verify that the pool was updated correctly
self.assertEqual(ns_record_count + 1, len(pool.ns_records))
self.assertIn(new_ns_record.hostname,
[n.hostname for n in pool.ns_records])
        # Fetch the domain's NS recordset
ns_recordset = self.central_service.find_recordset(
self.admin_context,
criterion={'domain_id': domain.id, 'type': "NS"})
        # Verify that the domain's NS records were updated correctly
self.assertEqual(set([n.hostname for n in pool.ns_records]),
set([n.data for n in ns_recordset.records]))
def test_update_pool_remove_ns_record(self):
# Create a server pool and domain
pool = self.create_pool(fixture=0)
domain = self.create_domain(pool_id=pool.id)
ns_record_count = len(pool.ns_records)
# Update and save the pool
removed_ns_record = pool.ns_records.pop(-1)
self.central_service.update_pool(self.admin_context, pool)
# Fetch the pool
pool = self.central_service.get_pool(self.admin_context, pool.id)
# Verify that the pool was updated correctly
self.assertEqual(ns_record_count - 1, len(pool.ns_records))
self.assertNotIn(removed_ns_record.hostname,
[n.hostname for n in pool.ns_records])
        # Fetch the domain's NS recordset
ns_recordset = self.central_service.find_recordset(
self.admin_context,
criterion={'domain_id': domain.id, 'type': "NS"})
        # Verify that the domain's NS records were updated correctly
self.assertEqual(set([n.hostname for n in pool.ns_records]),
set([n.data for n in ns_recordset.records]))
def test_delete_pool(self):
# Create a server pool
pool = self.create_pool()
# Delete the pool
self.central_service.delete_pool(self.admin_context, pool['id'])
# Verify that the pool has been deleted
with testtools.ExpectedException(exceptions.PoolNotFound):
self.central_service.get_pool(self.admin_context, pool['id'])
def test_update_status_delete_domain(self):
# Create a domain
domain = self.create_domain()
# Delete the domain
self.central_service.delete_domain(self.admin_context, domain['id'])
# Simulate the domain having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Fetch the domain again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.DomainNotFound):
self.central_service.get_domain(self.admin_context, domain['id'])
def test_update_status_delete_last_record(self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
record = self.create_record(domain, recordset)
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'])
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
def test_update_status_delete_last_record_without_incrementing_serial(
self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain serial number
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'],
increment_serial=False)
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
        # Ensure the domain's serial number was not updated
new_domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.assertEqual(domain_serial, new_domain_serial)
def test_create_zone_transfer_request(self):
domain = self.create_domain()
zone_transfer_request = self.create_zone_transfer_request(domain)
# Verify all values have been set correctly
self.assertIsNotNone(zone_transfer_request.id)
self.assertIsNotNone(zone_transfer_request.tenant_id)
self.assertIsNotNone(zone_transfer_request.key)
self.assertEqual(domain.id, zone_transfer_request.domain_id)
def test_create_zone_transfer_request_duplicate(self):
domain = self.create_domain()
self.create_zone_transfer_request(domain)
with testtools.ExpectedException(
exceptions.DuplicateZoneTransferRequest):
self.create_zone_transfer_request(domain)
def test_create_scoped_zone_transfer_request(self):
domain = self.create_domain()
values = self.get_zone_transfer_request_fixture(fixture=1)
zone_transfer_request = self.create_zone_transfer_request(domain,
fixture=1)
# Verify all values have been set correctly
self.assertIsNotNone(zone_transfer_request.id)
self.assertIsNotNone(zone_transfer_request.tenant_id)
self.assertEqual(domain.id, zone_transfer_request.domain_id)
self.assertIsNotNone(zone_transfer_request.key)
self.assertEqual(
values['target_tenant_id'],
zone_transfer_request.target_tenant_id)
def test_get_zone_transfer_request(self):
domain = self.create_domain()
zt_request = self.create_zone_transfer_request(domain,
fixture=1)
        retrieved_zt = self.central_service.get_zone_transfer_request(
            self.admin_context,
            zt_request.id)
        self.assertEqual(zt_request.domain_id, retrieved_zt.domain_id)
        self.assertEqual(zt_request.key, retrieved_zt.key)
def test_get_zone_transfer_request_scoped(self):
tenant_1_context = self.get_context(tenant=1)
tenant_2_context = self.get_context(tenant=2)
tenant_3_context = self.get_context(tenant=3)
domain = self.create_domain(context=tenant_1_context)
zt_request = self.create_zone_transfer_request(
domain,
context=tenant_1_context,
target_tenant_id=2)
self.central_service.get_zone_transfer_request(
tenant_2_context, zt_request.id)
self.central_service.get_zone_transfer_request(
tenant_1_context, zt_request.id)
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.get_zone_transfer_request(
tenant_3_context, zt_request.id)
def test_update_zone_transfer_request(self):
domain = self.create_domain()
zone_transfer_request = self.create_zone_transfer_request(domain)
zone_transfer_request.description = 'TEST'
self.central_service.update_zone_transfer_request(
self.admin_context, zone_transfer_request)
# Verify all values have been set correctly
self.assertIsNotNone(zone_transfer_request.id)
self.assertIsNotNone(zone_transfer_request.tenant_id)
self.assertIsNotNone(zone_transfer_request.key)
self.assertEqual('TEST', zone_transfer_request.description)
def test_delete_zone_transfer_request(self):
domain = self.create_domain()
zone_transfer_request = self.create_zone_transfer_request(domain)
self.central_service.delete_zone_transfer_request(
self.admin_context, zone_transfer_request.id)
with testtools.ExpectedException(
exceptions.ZoneTransferRequestNotFound):
self.central_service.get_zone_transfer_request(
self.admin_context,
zone_transfer_request.id)
def test_create_zone_transfer_accept(self):
tenant_1_context = self.get_context(tenant=1)
tenant_2_context = self.get_context(tenant=2)
admin_context = self.get_admin_context()
admin_context.all_tenants = True
domain = self.create_domain(context=tenant_1_context)
recordset = self.create_recordset(domain, context=tenant_1_context)
record = self.create_record(
domain, recordset, context=tenant_1_context)
zone_transfer_request = self.create_zone_transfer_request(
domain, context=tenant_1_context)
zone_transfer_accept = objects.ZoneTransferAccept()
zone_transfer_accept.zone_transfer_request_id =\
zone_transfer_request.id
zone_transfer_accept.key = zone_transfer_request.key
zone_transfer_accept.domain_id = domain.id
zone_transfer_accept = \
self.central_service.create_zone_transfer_accept(
tenant_2_context, zone_transfer_accept)
result = {}
result['domain'] = self.central_service.get_domain(
admin_context, domain.id)
result['recordset'] = self.central_service.get_recordset(
admin_context, domain.id, recordset.id)
result['record'] = self.central_service.get_record(
admin_context, domain.id, recordset.id, record.id)
result['zt_accept'] = self.central_service.get_zone_transfer_accept(
admin_context, zone_transfer_accept.id)
result['zt_request'] = self.central_service.get_zone_transfer_request(
admin_context, zone_transfer_request.id)
self.assertEqual(
str(tenant_2_context.tenant), result['domain'].tenant_id)
self.assertEqual(
str(tenant_2_context.tenant), result['recordset'].tenant_id)
self.assertEqual(
str(tenant_2_context.tenant), result['record'].tenant_id)
self.assertEqual(
'COMPLETE', result['zt_accept'].status)
self.assertEqual(
'COMPLETE', result['zt_request'].status)
def test_create_zone_transfer_accept_scoped(self):
tenant_1_context = self.get_context(tenant=1)
tenant_2_context = self.get_context(tenant=2)
admin_context = self.get_admin_context()
admin_context.all_tenants = True
domain = self.create_domain(context=tenant_1_context)
recordset = self.create_recordset(domain, context=tenant_1_context)
record = self.create_record(
domain, recordset, context=tenant_1_context)
zone_transfer_request = self.create_zone_transfer_request(
domain,
context=tenant_1_context,
target_tenant_id='2')
zone_transfer_accept = objects.ZoneTransferAccept()
zone_transfer_accept.zone_transfer_request_id =\
zone_transfer_request.id
zone_transfer_accept.key = zone_transfer_request.key
zone_transfer_accept.domain_id = domain.id
zone_transfer_accept = \
self.central_service.create_zone_transfer_accept(
tenant_2_context, zone_transfer_accept)
result = {}
result['domain'] = self.central_service.get_domain(
admin_context, domain.id)
result['recordset'] = self.central_service.get_recordset(
admin_context, domain.id, recordset.id)
result['record'] = self.central_service.get_record(
admin_context, domain.id, recordset.id, record.id)
result['zt_accept'] = self.central_service.get_zone_transfer_accept(
admin_context, zone_transfer_accept.id)
result['zt_request'] = self.central_service.get_zone_transfer_request(
admin_context, zone_transfer_request.id)
self.assertEqual(
str(tenant_2_context.tenant), result['domain'].tenant_id)
self.assertEqual(
str(tenant_2_context.tenant), result['recordset'].tenant_id)
self.assertEqual(
str(tenant_2_context.tenant), result['record'].tenant_id)
self.assertEqual(
'COMPLETE', result['zt_accept'].status)
self.assertEqual(
'COMPLETE', result['zt_request'].status)
def test_create_zone_transfer_accept_failed_key(self):
tenant_1_context = self.get_context(tenant=1)
tenant_2_context = self.get_context(tenant=2)
admin_context = self.get_admin_context()
admin_context.all_tenants = True
domain = self.create_domain(context=tenant_1_context)
zone_transfer_request = self.create_zone_transfer_request(
domain,
context=tenant_1_context,
target_tenant_id=2)
zone_transfer_accept = objects.ZoneTransferAccept()
zone_transfer_accept.zone_transfer_request_id =\
zone_transfer_request.id
zone_transfer_accept.key = 'WRONG KEY'
zone_transfer_accept.domain_id = domain.id
with testtools.ExpectedException(exceptions.IncorrectZoneTransferKey):
zone_transfer_accept = \
self.central_service.create_zone_transfer_accept(
tenant_2_context, zone_transfer_accept)
    def test_create_zone_transfer_accept_out_of_tenant_scope(self):
tenant_1_context = self.get_context(tenant=1)
tenant_3_context = self.get_context(tenant=3)
admin_context = self.get_admin_context()
admin_context.all_tenants = True
domain = self.create_domain(context=tenant_1_context)
zone_transfer_request = self.create_zone_transfer_request(
domain,
context=tenant_1_context,
target_tenant_id=2)
zone_transfer_accept = objects.ZoneTransferAccept()
zone_transfer_accept.zone_transfer_request_id =\
zone_transfer_request.id
zone_transfer_accept.key = zone_transfer_request.key
zone_transfer_accept.domain_id = domain.id
with testtools.ExpectedException(exceptions.Forbidden):
zone_transfer_accept = \
self.central_service.create_zone_transfer_accept(
tenant_3_context, zone_transfer_accept)
# Zone Import Tests
def test_create_zone_import(self):
# Create a Zone Import
context = self.get_context()
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(context,
request_body)
# Ensure all values have been set correctly
self.assertIsNotNone(zone_import['id'])
self.assertEqual('PENDING', zone_import.status)
self.assertEqual(None, zone_import.message)
self.assertEqual(None, zone_import.domain_id)
self.wait_for_import(zone_import.id)
def test_find_zone_imports(self):
context = self.get_context()
# Ensure we have no zone_imports to start with.
zone_imports = self.central_service.find_zone_imports(
self.admin_context)
self.assertEqual(0, len(zone_imports))
# Create a single zone_import
request_body = self.get_zonefile_fixture()
zone_import_one = self.central_service.create_zone_import(
context, request_body)
# Wait for the import to complete
self.wait_for_import(zone_import_one.id)
# Ensure we can retrieve the newly created zone_import
zone_imports = self.central_service.find_zone_imports(
self.admin_context)
self.assertEqual(1, len(zone_imports))
# Create a second zone_import
request_body = self.get_zonefile_fixture(variant="two")
zone_import_two = self.central_service.create_zone_import(
context, request_body)
# Wait for the imports to complete
self.wait_for_import(zone_import_two.id)
# Ensure we can retrieve both zone_imports
zone_imports = self.central_service.find_zone_imports(
self.admin_context)
self.assertEqual(2, len(zone_imports))
self.assertEqual('COMPLETE', zone_imports[0].status)
self.assertEqual('COMPLETE', zone_imports[1].status)
def test_get_zone_import(self):
# Create a Zone Import
context = self.get_context()
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(
context, request_body)
# Wait for the import to complete
self.wait_for_import(zone_import.id)
# Retrieve it, and ensure it's the same
zone_import = self.central_service.get_zone_import(
self.admin_context, zone_import.id)
self.assertEqual(zone_import.id, zone_import['id'])
self.assertEqual(zone_import.status, zone_import['status'])
self.assertEqual('COMPLETE', zone_import.status)
def test_update_zone_import(self):
# Create a Zone Import
context = self.get_context()
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(
context, request_body)
self.wait_for_import(zone_import.id)
# Update the Object
zone_import.message = 'test message'
# Perform the update
zone_import = self.central_service.update_zone_import(
self.admin_context, zone_import)
# Fetch the zone_import again
zone_import = self.central_service.get_zone_import(context,
zone_import.id)
# Ensure the zone_import was updated correctly
self.assertEqual('test message', zone_import.message)
def test_delete_zone_import(self):
# Create a Zone Import
context = self.get_context()
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(
context, request_body)
self.wait_for_import(zone_import.id)
# Delete the zone_import
self.central_service.delete_zone_import(context,
zone_import['id'])
# Fetch the zone_import again, ensuring an exception is raised
self.assertRaises(
exceptions.ZoneImportNotFound,
self.central_service.get_zone_import,
context, zone_import['id'])
| {
"content_hash": "95d3a3da5bee359e97f3569687f8d274",
"timestamp": "",
"source": "github",
"line_count": 3372,
"max_line_length": 79,
"avg_line_length": 37.46708185053381,
"alnum_prop": 0.619911507927085,
"repo_name": "muraliselva10/designate",
"id": "204e1d219e2f2b3e4b5a0e768e924aa1a91c7499",
"size": "126994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/tests/test_central/test_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2185372"
},
{
"name": "Ruby",
"bytes": "4170"
},
{
"name": "Shell",
"bytes": "12933"
}
],
"symlink_target": ""
} |
import mock
from testtools import matchers
from neutron.common import exceptions as exc
import neutron.db.api as db
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_vlan
from neutron.tests import base
PROVIDER_NET = 'phys_net1'
TENANT_NET = 'phys_net2'
VLAN_MIN = 200
VLAN_MAX = 209
NETWORK_VLAN_RANGES = {
PROVIDER_NET: [],
TENANT_NET: [(VLAN_MIN, VLAN_MAX)],
}
UPDATED_VLAN_RANGES = {
PROVIDER_NET: [],
TENANT_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)],
}
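# PROVIDER_NET carries no tenant-allocatable ranges (provider segments only),
# while TENANT_NET exposes VLANs 200-209 for tenant allocation. The UPDATED
# ranges shift that window up by 5 so the re-sync test can verify allocations
# are rebuilt when the configured ranges change.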
class VlanTypeTest(base.BaseTestCase):
def setUp(self):
super(VlanTypeTest, self).setUp()
db.configure_db()
self.driver = type_vlan.VlanTypeDriver()
self.driver.network_vlan_ranges = NETWORK_VLAN_RANGES
self.driver._sync_vlan_allocations()
self.session = db.get_session()
self.addCleanup(db.clear_db)
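    # Helper: fetch the VlanAllocation row matching a segment's
    # (physical_network, vlan_id) pair, or None if no such row exists.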
def _get_allocation(self, session, segment):
return session.query(type_vlan.VlanAllocation).filter_by(
physical_network=segment[api.PHYSICAL_NETWORK],
vlan_id=segment[api.SEGMENTATION_ID]).first()
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PROVIDER_NET,
api.SEGMENTATION_ID: 1}
self.assertIsNone(self.driver.validate_provider_segment(segment))
def test_validate_provider_segment_with_missing_physical_network(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.SEGMENTATION_ID: 1}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment)
def test_validate_provider_segment_with_missing_segmentation_id(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PROVIDER_NET}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment)
def test_validate_provider_segment_with_invalid_physical_network(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: 'other_phys_net',
api.SEGMENTATION_ID: 1}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment)
def test_validate_provider_segment_with_invalid_segmentation_id(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PROVIDER_NET,
api.SEGMENTATION_ID: 5000}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment)
def test_validate_provider_segment_with_invalid_input(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PROVIDER_NET,
api.SEGMENTATION_ID: 1,
'invalid': 1}
self.assertRaises(exc.InvalidInput,
self.driver.validate_provider_segment,
segment)
def test_sync_vlan_allocations(self):
def check_in_ranges(network_vlan_ranges):
vlan_min, vlan_max = network_vlan_ranges[TENANT_NET][0]
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: TENANT_NET}
segment[api.SEGMENTATION_ID] = vlan_min - 1
self.assertIsNone(
self._get_allocation(self.session, segment))
segment[api.SEGMENTATION_ID] = vlan_max + 1
self.assertIsNone(
self._get_allocation(self.session, segment))
segment[api.SEGMENTATION_ID] = vlan_min
self.assertFalse(
self._get_allocation(self.session, segment).allocated)
segment[api.SEGMENTATION_ID] = vlan_max
self.assertFalse(
self._get_allocation(self.session, segment).allocated)
check_in_ranges(NETWORK_VLAN_RANGES)
self.driver.network_vlan_ranges = UPDATED_VLAN_RANGES
self.driver._sync_vlan_allocations()
check_in_ranges(UPDATED_VLAN_RANGES)
def test_reserve_provider_segment(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PROVIDER_NET,
api.SEGMENTATION_ID: 101}
alloc = self._get_allocation(self.session, segment)
self.assertIsNone(alloc)
self.driver.reserve_provider_segment(self.session, segment)
alloc = self._get_allocation(self.session, segment)
self.assertTrue(alloc.allocated)
def test_reserve_provider_segment_already_allocated(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PROVIDER_NET,
api.SEGMENTATION_ID: 101}
self.driver.reserve_provider_segment(self.session, segment)
self.assertRaises(exc.VlanIdInUse,
self.driver.reserve_provider_segment,
self.session,
segment)
def test_reserve_provider_segment_in_tenant_pools(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: TENANT_NET,
api.SEGMENTATION_ID: VLAN_MIN}
alloc = self._get_allocation(self.session, segment)
self.assertFalse(alloc.allocated)
self.driver.reserve_provider_segment(self.session, segment)
alloc = self._get_allocation(self.session, segment)
self.assertTrue(alloc.allocated)
def test_allocate_tenant_segment(self):
for __ in range(VLAN_MIN, VLAN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.session)
alloc = self._get_allocation(self.session, segment)
self.assertTrue(alloc.allocated)
vlan_id = segment[api.SEGMENTATION_ID]
self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
self.assertEqual(TENANT_NET, segment[api.PHYSICAL_NETWORK])
def test_allocate_tenant_segment_no_available(self):
for __ in range(VLAN_MIN, VLAN_MAX + 1):
self.driver.allocate_tenant_segment(self.session)
segment = self.driver.allocate_tenant_segment(self.session)
self.assertIsNone(segment)
def test_release_segment(self):
segment = self.driver.allocate_tenant_segment(self.session)
self.driver.release_segment(self.session, segment)
alloc = self._get_allocation(self.session, segment)
self.assertFalse(alloc.allocated)
def test_release_segment_unallocated(self):
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PROVIDER_NET,
api.SEGMENTATION_ID: 101}
with mock.patch.object(type_vlan.LOG, 'warning') as log_warn:
self.driver.release_segment(self.session, segment)
log_warn.assert_called_once_with(
"No vlan_id %(vlan_id)s found on physical network "
"%(physical_network)s",
{'vlan_id': 101, 'physical_network': PROVIDER_NET})
| {
"content_hash": "91d46ccda91ef580083bb2bca9a1076e",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 75,
"avg_line_length": 42.98255813953488,
"alnum_prop": 0.6157175706749628,
"repo_name": "gopal1cloud/neutron",
"id": "deb86c0afcb50b52ee05f6d2f9bff0c790b6bd95",
"size": "8032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/ml2/test_type_vlan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1451"
},
{
"name": "Python",
"bytes": "9138456"
},
{
"name": "Shell",
"bytes": "9202"
}
],
"symlink_target": ""
} |
import os, sys, random
files = os.listdir('readable')
root = os.getcwd()
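# Heuristic header detector: a line is treated as a running header when none of
# its first five characters (or fewer, for shorter lines) is lowercase.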
def isheader(line):
header = True
linelen = len(line)
if linelen > 5:
linelen = 5
for i in range(linelen):
char = line[i]
if char.islower():
header = False
break
return header
pagedict = dict()
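# For each normalized volume, start at a random point between 35% and 70% of
# the file and collect the text of the next <pb>-delimited page (pages of eight
# lines or fewer are merged with the following one), skipping very short lines
# and lines that look like running headers.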
for afile in files:
path = os.path.join(root, 'readable/' + afile)
with open(path, encoding = 'utf-8') as f:
filelines = f.readlines()
filelen = len(filelines)
n = random.randrange(int(filelen * .35), int(filelen * .7))
pagelines = list()
pgct = 0
while pgct < 2:
line = filelines[n].strip()
if line.startswith('<pb>'):
if len(pagelines) > 8 or pgct < 1:
pgct += 1
elif pgct > 0:
if len(line) > 2:
if isheader(line):
print(line)
else:
pagelines.append(line)
n += 1
newfile = afile.replace('.norm.txt', '.poe.txt')
outpath = os.path.join(root, 'poems/' + newfile)
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
for line in pagelines:
f.write(line + '\n')
| {
"content_hash": "b6e01f57da63ece5e24c7394e426a973",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 63,
"avg_line_length": 22.29090909090909,
"alnum_prop": 0.5106035889070146,
"repo_name": "tedunderwood/GenreProject",
"id": "633362c086fb26b5945f44e24f24294456c08407",
"size": "1226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/reception/poetry/selectpoems.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "187389"
},
{
"name": "Python",
"bytes": "645172"
},
{
"name": "R",
"bytes": "34870"
}
],
"symlink_target": ""
} |
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
import boto.cloudsearch
class CertVerificationTest(unittest.TestCase):
cloudsearch = True
ssl = True
def test_certs(self):
for region in boto.cloudsearch.regions():
c = region.connect()
c.describe_domains()
| {
"content_hash": "cca661dd1dd404c1c65d55fef134925e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 20.176470588235293,
"alnum_prop": 0.6647230320699709,
"repo_name": "Timus1712/boto",
"id": "679f8706556db7f911bc807655f2d68d966706a3",
"size": "1522",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integration/cloudsearch/test_cert_verification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "540"
},
{
"name": "Python",
"bytes": "4436964"
}
],
"symlink_target": ""
} |
from test.test_support import TestFailed, have_unicode
class base_set:
def __init__(self, el):
self.el = el
class set(base_set):
def __contains__(self, el):
return self.el == el
class seq(base_set):
def __getitem__(self, n):
return [self.el][n]
def check(ok, *args):
if not ok:
raise TestFailed, " ".join(map(str, args))
a = base_set(1)
b = set(1)
c = seq(1)
check(1 in b, "1 not in set(1)")
check(0 not in b, "0 in set(1)")
check(1 in c, "1 not in seq(1)")
check(0 not in c, "0 in seq(1)")
try:
1 in a
check(0, "in base_set did not raise error")
except TypeError:
pass
try:
1 not in a
check(0, "not in base_set did not raise error")
except TypeError:
pass
# Test char in string
check('c' in 'abc', "'c' not in 'abc'")
check('d' not in 'abc', "'d' in 'abc'")
check('' in '', "'' not in ''")
check('' in 'abc', "'' not in 'abc'")
try:
None in 'abc'
check(0, "None in 'abc' did not raise error")
except TypeError:
pass
if have_unicode:
# Test char in Unicode
check('c' in unicode('abc'), "'c' not in u'abc'")
check('d' not in unicode('abc'), "'d' in u'abc'")
check('' in unicode(''), "'' not in u''")
check(unicode('') in '', "u'' not in ''")
check(unicode('') in unicode(''), "u'' not in u''")
check('' in unicode('abc'), "'' not in u'abc'")
check(unicode('') in 'abc', "u'' not in 'abc'")
check(unicode('') in unicode('abc'), "u'' not in u'abc'")
try:
None in unicode('abc')
check(0, "None in u'abc' did not raise error")
except TypeError:
pass
# Test Unicode char in Unicode
check(unicode('c') in unicode('abc'), "u'c' not in u'abc'")
check(unicode('d') not in unicode('abc'), "u'd' in u'abc'")
# Test Unicode char in string
check(unicode('c') in 'abc', "u'c' not in 'abc'")
check(unicode('d') not in 'abc', "u'd' in 'abc'")
# A collection of tests on builtin sequence types
a = range(10)
for i in a:
check(i in a, "%r not in %r" % (i, a))
check(16 not in a, "16 not in %r" % (a,))
check(a not in a, "%s not in %r" % (a, a))
a = tuple(a)
for i in a:
check(i in a, "%r not in %r" % (i, a))
check(16 not in a, "16 not in %r" % (a,))
check(a not in a, "%r not in %r" % (a, a))
class Deviant1:
"""Behaves strangely when compared
This class is designed to make sure that the contains code
works when the list is modified during the check.
"""
aList = range(15)
def __cmp__(self, other):
if other == 12:
self.aList.remove(12)
self.aList.remove(13)
self.aList.remove(14)
return 1
check(Deviant1() not in Deviant1.aList, "Deviant1 failed")
class Deviant2:
"""Behaves strangely when compared
This class raises an exception during comparison. That in
turn causes the comparison to fail with a TypeError.
"""
def __cmp__(self, other):
if other == 4:
raise RuntimeError, "gotcha"
try:
check(Deviant2() not in a, "oops")
except TypeError:
pass
| {
"content_hash": "ffe8ad0252ae43709fd80bdafaac5343",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 63,
"avg_line_length": 23.13533834586466,
"alnum_prop": 0.5680857978550536,
"repo_name": "MalloyPower/parsing-python",
"id": "e6f5cf727273ebdbcf486cb07bb5c78194eea872",
"size": "3077",
"binary": false,
"copies": "34",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/test/test_contains.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import os
import json
curr_path = os.path.abspath(__file__)
curr_dir = os.path.split(curr_path)[0]
def list_examples():
files = os.listdir(os.path.join(curr_dir, "..", "examples"))
return [filename.replace('.json', '') for filename in files]
def get_example(example_name):
with open(os.path.join(curr_dir, "..", "examples/", example_name+".json"), "r") as file:
return json.load(file)
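# Illustrative usage (assumes an examples/ directory containing foo.json):
#   >>> list_examples()
#   ['foo']
#   >>> get_example('foo')
#   {... parsed contents of examples/foo.json ...}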
| {
"content_hash": "7f268a8c67daddc7b7ec850ebcec8917",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 92,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6439024390243903,
"repo_name": "ALU-CloudBand/yaqluator",
"id": "8d44c01aee28adc2eb4c0605ed5b1970d8c5cdd9",
"size": "410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/utils/files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4999"
},
{
"name": "HTML",
"bytes": "10551"
},
{
"name": "JavaScript",
"bytes": "15037"
},
{
"name": "Python",
"bytes": "8558"
}
],
"symlink_target": ""
} |
"""Miscellaneous file manipulation functions
"""
import os
import re
import shutil
from glob import glob
import logging
from nipype.utils.misc import isdefined
# The md5 module is deprecated in Python 2.6, but hashlib is only
# available as an external package for versions of python before 2.6.
# Both md5 algorithms appear to return the same result.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
# json included in Python 2.6
import json
except ImportError:
# simplejson is the json module that was included in 2.6 (I
# believe). Used here for Python 2.5
import simplejson as json
import numpy as np
from nipype.utils.misc import is_container
fmlogger = logging.getLogger("filemanip")
class FileNotFoundError(Exception):
pass
def split_filename(fname):
"""Split a filename into parts: path, base filename and extension.
Parameters
----------
fname : str
file or path name
Returns
-------
pth : str
base path from fname
fname : str
filename from fname, without extension
ext : str
file extenstion from fname
Examples
--------
>>> from nipype.utils.filemanip import split_filename
>>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
>>> pth
'/home/data'
>>> fname
'subject'
>>> ext
'.nii.gz'
"""
pth, fname = os.path.split(fname)
tmp = '.none'
ext = []
while tmp:
fname, tmp = os.path.splitext(fname)
ext.append(tmp)
ext.reverse()
return pth, fname, ''.join(ext)
def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True):
"""Manipulates path and name of input filename
Parameters
----------
fname : string
A filename (may or may not include path
prefix : string
Characters to prepend to the filename
suffix : string
Characters to append to the filename
newpath : string
Path to replace the path of the input fname
use_ext : boolean
If True (default), appends the extension of the original file
to the output name.
Returns
-------
Absolute path of the modified filename
>>> from nipype.utils.filemanip import fname_presuffix
>>> fname = 'foo.nii.gz'
>>> fname_presuffix(fname,'pre','post','/tmp')
'/tmp/prefoopost.nii.gz'
"""
pth, fname, ext = split_filename(fname)
if not use_ext:
ext = ''
if newpath and isdefined(newpath):
pth = os.path.abspath(newpath)
return os.path.join(pth, prefix+fname+suffix+ext)
def fnames_presuffix(fnames, prefix='', suffix='', newpath=None,use_ext=True):
"""Calls fname_presuffix for a list of files.
"""
f2 = []
for fname in fnames:
f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext))
return f2
def hash_rename(filename, hash):
"""renames a file given original filename and hash
and sets path to output_directory
"""
path, name, ext = split_filename(filename)
newfilename = ''.join((name,'_0x', hash, ext))
return os.path.join(path, newfilename)
def check_forhash(filename):
"""checks if file has a hash in its filename"""
if isinstance(filename,list):
filename = filename[0]
path, name = os.path.split(filename)
if re.search('(_0x[a-z0-9]{32})', name):
hash = re.findall('(_0x[a-z0-9]{32})', name)
return True, hash
else:
return False, None
def hash_infile(afile, chunk_len=8192):
""" Computes md5 hash of a file"""
md5hex = None
if os.path.isfile(afile):
md5obj = md5()
fp = file(afile, 'rb')
while True:
data = fp.read(chunk_len)
if not data:
break
md5obj.update(data)
fp.close()
md5hex = md5obj.hexdigest()
return md5hex
def hash_timestamp(afile):
""" Computes md5 hash of the timestamp of a file """
md5hex = None
if os.path.isfile(afile):
md5obj = md5()
stat = os.stat(afile)
md5obj.update(str(stat.st_size))
md5obj.update(str(stat.st_ctime))
md5obj.update(str(stat.st_mtime))
md5hex = md5obj.hexdigest()
return md5hex
def copyfile(originalfile, newfile, copy=False):
"""Copy or symlink ``originalfile`` to ``newfile``.
Parameters
----------
originalfile : str
full path to original file
newfile : str
full path to new file
copy : Bool
specifies whether to copy or symlink files
(default=False) but only for posix systems
Returns
-------
None
"""
if os.path.lexists(newfile):
fmlogger.warn("File: %s already exists, overwriting with %s, copy:%d" \
% (newfile, originalfile, copy))
    if os.name == 'posix' and not copy:
if os.path.lexists(newfile):
os.unlink(newfile)
os.symlink(originalfile,newfile)
else:
try:
shutil.copyfile(originalfile, newfile)
except shutil.Error, e:
fmlogger.warn(e.message)
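    # Analyze-style images come as .img/.hdr pairs (sometimes with a .mat
    # file); copy the companion header/mat files alongside the .img as well.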
if originalfile.endswith(".img"):
hdrofile = originalfile[:-4] + ".hdr"
hdrnfile = newfile[:-4] + ".hdr"
matofile = originalfile[:-4] + ".mat"
if os.path.exists(matofile):
matnfile = newfile[:-4] + ".mat"
copyfile(matofile, matnfile, copy)
copyfile(hdrofile, hdrnfile, copy)
def copyfiles(filelist, dest, copy=False):
"""Copy or symlink files in ``filelist`` to ``dest`` directory.
Parameters
----------
filelist : list
List of files to copy.
dest : path/files
full path to destination. If it is a list of length greater
than 1, then it assumes that these are the names of the new
files.
copy : Bool
specifies whether to copy or symlink files
(default=False) but only for posix systems
Returns
-------
None
"""
outfiles = filename_to_list(dest)
newfiles = []
for i,f in enumerate(filename_to_list(filelist)):
if isinstance(f, list):
newfiles.insert(i, copyfiles(f, dest, copy=copy))
else:
if len(outfiles) > 1:
destfile = outfiles[i]
else:
destfile = fname_presuffix(f, newpath=outfiles[0])
copyfile(f,destfile,copy)
newfiles.insert(i,destfile)
return newfiles
def filename_to_list(filename):
"""Returns a list given either a string or a list
"""
if isinstance(filename,(str, unicode)):
return [filename]
elif isinstance(filename,list):
return filename
elif is_container(filename):
return [x for x in filename]
else:
return None
def list_to_filename(filelist):
"""Returns a list if filelist is a list of length greater than 1,
otherwise returns the first element
"""
if len(filelist) > 1:
return filelist
else:
return filelist[0]
def cleandir(dir):
"""Cleans all nifti, img/hdr, txt and matfiles from dir"""
filetypes = ['*.nii','*.nii.gz','*.txt','*.img','*.hdr','*.mat','*.json']
for ftype in filetypes:
for f in glob(os.path.join(dir,ftype)):
os.remove(f)
def save_json(filename, data):
"""Save data to a json file
Parameters
----------
filename : str
Filename to save data in.
data : dict
Dictionary to save in json file.
"""
fp = file(filename, 'w')
json.dump(data, fp, sort_keys=True, indent=4)
fp.close()
def debuglog(inputlines,filename='/tmp/dbginputs.txt'):
fp=open(filename,'at')
fp.writelines(inputlines)
fp.close()
def load_json(filename):
"""Load data from a json file
Parameters
----------
filename : str
Filename to load data from.
Returns
-------
data : dict
"""
fp = file(filename, 'r')
data = json.load(fp)
fp.close()
return data
def loadflat(infile, *args):
"""Load an npz file into a dict
"""
data = np.load(infile)
out = {}
if args:
outargs = np.setdiff1d(args,data.files)
if outargs:
raise IOError('File does not contain variables: '+str(outargs))
for k in data.files:
if k in args or not args:
out[k] = [f for f in data[k].flat]
if len(out[k])==1:
out[k] = out[k].pop()
return out
def loadcrash(infile, *args):
return loadflat(infile, *args)
| {
"content_hash": "525f1ba6e1ba916632029def7ad98f39",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 79,
"avg_line_length": 26.31498470948012,
"alnum_prop": 0.5937245787332947,
"repo_name": "satra/NiPypeold",
"id": "c746a1902db611755a6f56844685c6cc7acec7f9",
"size": "8719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/utils/filemanip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "931"
},
{
"name": "Objective-C",
"bytes": "4736"
},
{
"name": "Python",
"bytes": "1389618"
},
{
"name": "Tcl",
"bytes": "43377"
}
],
"symlink_target": ""
} |
'''
Set up the version of Salt
'''
# Import python libs
from __future__ import absolute_import, print_function
import re
import sys
import platform
# pylint: disable=invalid-name,redefined-builtin
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import map
# Don't rely on external packages in this module since it's used at install time
if sys.version_info[0] == 3:
MAX_SIZE = sys.maxsize
string_types = (str,)
else:
MAX_SIZE = sys.maxint
string_types = (six.string_types,)
# pylint: enable=invalid-name,redefined-builtin
# ----- ATTENTION --------------------------------------------------------------------------------------------------->
#
# ALL major version bumps, new release codenames, MUST be defined in the SaltStackVersion.NAMES dictionary, i.e.:
#
# class SaltStackVersion(object):
#
# NAMES = {
# 'Hydrogen': (2014, 1), # <- This is the tuple to bump versions
# ( ... )
# }
#
#
# ONLY UPDATE CODENAMES AFTER BRANCHING
#
# As an example, The Helium codename must only be properly defined with "(2014, 7)" after Hydrogen, "(2014, 1)", has
# been branched out into it's own branch.
#
# ALL OTHER VERSION INFORMATION IS EXTRACTED FROM THE GIT TAGS
#
# <---- ATTENTION ----------------------------------------------------------------------------------------------------
class SaltStackVersion(object):
'''
Handle SaltStack versions class.
Knows how to parse ``git describe`` output, knows about release candidates
and also supports version comparison.
'''
__slots__ = ('name', 'major', 'minor', 'bugfix', 'mbugfix', 'rc', 'noc', 'sha')
git_describe_regex = re.compile(
r'(?:[^\d]+)?(?P<major>[\d]{1,4})'
r'\.(?P<minor>[\d]{1,2})'
r'(?:\.(?P<bugfix>[\d]{0,2}))?'
r'(?:\.(?P<mbugfix>[\d]{0,2}))?'
r'(?:rc(?P<rc>[\d]{1}))?'
r'(?:(?:.*)-(?P<noc>(?:[\d]+|n/a))-(?P<sha>[a-z0-9]{8}))?'
)
git_sha_regex = re.compile(r'(?P<sha>[a-z0-9]{7})')
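    # Example (illustrative, assumed typical ``git describe`` output): matching
    # a string such as 'v2014.1.0-4079-g99e3d90' against git_describe_regex
    # yields major='2014', minor='1', bugfix='0', noc='4079' and
    # sha='g99e3d90' (the final eight characters, including git's leading 'g').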
# Salt versions after 0.17.0 will be numbered like:
# <4-digit-year>.<month>.<bugfix>
#
# Since the actual version numbers will only be know on release dates, the
# periodic table element names will be what's going to be used to name
# versions and to be able to mention them.
NAMES = {
# Let's keep at least 3 version names uncommented counting from the
# latest release so we can map deprecation warnings to versions.
# pylint: disable=E8203
# ----- Please refrain from fixing PEP-8 E203 and E265 ----->
# The idea is to keep this readable.
# -----------------------------------------------------------
'Hydrogen' : (2014, 1),
'Helium' : (2014, 7),
'Lithium' : (2015, 5),
'Beryllium' : (2015, 8),
'Boron' : (2016, 3),
'Carbon' : (MAX_SIZE - 103, 0),
'Nitrogen' : (MAX_SIZE - 102, 0),
'Oxygen' : (MAX_SIZE - 101, 0),
# pylint: disable=E8265
#'Fluorine' : (MAX_SIZE - 100, 0),
#'Neon' : (MAX_SIZE - 99 , 0),
#'Sodium' : (MAX_SIZE - 98 , 0),
#'Magnesium' : (MAX_SIZE - 97 , 0),
#'Aluminium' : (MAX_SIZE - 96 , 0),
#'Silicon' : (MAX_SIZE - 95 , 0),
#'Phosphorus' : (MAX_SIZE - 94 , 0),
#'Sulfur' : (MAX_SIZE - 93 , 0),
#'Chlorine' : (MAX_SIZE - 92 , 0),
#'Argon' : (MAX_SIZE - 91 , 0),
#'Potassium' : (MAX_SIZE - 90 , 0),
#'Calcium' : (MAX_SIZE - 89 , 0),
#'Scandium' : (MAX_SIZE - 88 , 0),
#'Titanium' : (MAX_SIZE - 87 , 0),
#'Vanadium' : (MAX_SIZE - 86 , 0),
#'Chromium' : (MAX_SIZE - 85 , 0),
#'Manganese' : (MAX_SIZE - 84 , 0),
#'Iron' : (MAX_SIZE - 83 , 0),
#'Cobalt' : (MAX_SIZE - 82 , 0),
#'Nickel' : (MAX_SIZE - 81 , 0),
#'Copper' : (MAX_SIZE - 80 , 0),
#'Zinc' : (MAX_SIZE - 79 , 0),
#'Gallium' : (MAX_SIZE - 78 , 0),
#'Germanium' : (MAX_SIZE - 77 , 0),
#'Arsenic' : (MAX_SIZE - 76 , 0),
#'Selenium' : (MAX_SIZE - 75 , 0),
#'Bromine' : (MAX_SIZE - 74 , 0),
#'Krypton' : (MAX_SIZE - 73 , 0),
#'Rubidium' : (MAX_SIZE - 72 , 0),
#'Strontium' : (MAX_SIZE - 71 , 0),
#'Yttrium' : (MAX_SIZE - 70 , 0),
#'Zirconium' : (MAX_SIZE - 69 , 0),
#'Niobium' : (MAX_SIZE - 68 , 0),
#'Molybdenum' : (MAX_SIZE - 67 , 0),
#'Technetium' : (MAX_SIZE - 66 , 0),
#'Ruthenium' : (MAX_SIZE - 65 , 0),
#'Rhodium' : (MAX_SIZE - 64 , 0),
#'Palladium' : (MAX_SIZE - 63 , 0),
#'Silver' : (MAX_SIZE - 62 , 0),
#'Cadmium' : (MAX_SIZE - 61 , 0),
#'Indium' : (MAX_SIZE - 60 , 0),
#'Tin' : (MAX_SIZE - 59 , 0),
#'Antimony' : (MAX_SIZE - 58 , 0),
#'Tellurium' : (MAX_SIZE - 57 , 0),
#'Iodine' : (MAX_SIZE - 56 , 0),
#'Xenon' : (MAX_SIZE - 55 , 0),
#'Caesium' : (MAX_SIZE - 54 , 0),
#'Barium' : (MAX_SIZE - 53 , 0),
#'Lanthanum' : (MAX_SIZE - 52 , 0),
#'Cerium' : (MAX_SIZE - 51 , 0),
#'Praseodymium' : (MAX_SIZE - 50 , 0),
#'Neodymium' : (MAX_SIZE - 49 , 0),
#'Promethium' : (MAX_SIZE - 48 , 0),
#'Samarium' : (MAX_SIZE - 47 , 0),
#'Europium' : (MAX_SIZE - 46 , 0),
#'Gadolinium' : (MAX_SIZE - 45 , 0),
#'Terbium' : (MAX_SIZE - 44 , 0),
#'Dysprosium' : (MAX_SIZE - 43 , 0),
#'Holmium' : (MAX_SIZE - 42 , 0),
#'Erbium' : (MAX_SIZE - 41 , 0),
#'Thulium' : (MAX_SIZE - 40 , 0),
#'Ytterbium' : (MAX_SIZE - 39 , 0),
#'Lutetium' : (MAX_SIZE - 38 , 0),
#'Hafnium' : (MAX_SIZE - 37 , 0),
#'Tantalum' : (MAX_SIZE - 36 , 0),
#'Tungsten' : (MAX_SIZE - 35 , 0),
#'Rhenium' : (MAX_SIZE - 34 , 0),
#'Osmium' : (MAX_SIZE - 33 , 0),
#'Iridium' : (MAX_SIZE - 32 , 0),
#'Platinum' : (MAX_SIZE - 31 , 0),
#'Gold' : (MAX_SIZE - 30 , 0),
#'Mercury' : (MAX_SIZE - 29 , 0),
#'Thallium' : (MAX_SIZE - 28 , 0),
#'Lead' : (MAX_SIZE - 27 , 0),
#'Bismuth' : (MAX_SIZE - 26 , 0),
#'Polonium' : (MAX_SIZE - 25 , 0),
#'Astatine' : (MAX_SIZE - 24 , 0),
#'Radon' : (MAX_SIZE - 23 , 0),
#'Francium' : (MAX_SIZE - 22 , 0),
#'Radium' : (MAX_SIZE - 21 , 0),
#'Actinium' : (MAX_SIZE - 20 , 0),
#'Thorium' : (MAX_SIZE - 19 , 0),
#'Protactinium' : (MAX_SIZE - 18 , 0),
#'Uranium' : (MAX_SIZE - 17 , 0),
#'Neptunium' : (MAX_SIZE - 16 , 0),
#'Plutonium' : (MAX_SIZE - 15 , 0),
#'Americium' : (MAX_SIZE - 14 , 0),
#'Curium' : (MAX_SIZE - 13 , 0),
#'Berkelium' : (MAX_SIZE - 12 , 0),
#'Californium' : (MAX_SIZE - 11 , 0),
#'Einsteinium' : (MAX_SIZE - 10 , 0),
#'Fermium' : (MAX_SIZE - 9 , 0),
#'Mendelevium' : (MAX_SIZE - 8 , 0),
#'Nobelium' : (MAX_SIZE - 7 , 0),
#'Lawrencium' : (MAX_SIZE - 6 , 0),
#'Rutherfordium': (MAX_SIZE - 5 , 0),
#'Dubnium' : (MAX_SIZE - 4 , 0),
#'Seaborgium' : (MAX_SIZE - 3 , 0),
#'Bohrium' : (MAX_SIZE - 2 , 0),
#'Hassium' : (MAX_SIZE - 1 , 0),
#'Meitnerium' : (MAX_SIZE - 0 , 0),
# <---- Please refrain from fixing PEP-8 E203 and E265 ------
# pylint: enable=E8203,E8265
}
LNAMES = dict((k.lower(), v) for (k, v) in iter(NAMES.items()))
VNAMES = dict((v, k) for (k, v) in iter(NAMES.items()))
RMATCH = dict((v[:2], k) for (k, v) in iter(NAMES.items()))
def __init__(self, # pylint: disable=C0103
major,
minor,
bugfix=0,
mbugfix=0,
rc=0, # pylint: disable=C0103
noc=0,
sha=None):
if isinstance(major, string_types):
major = int(major)
if isinstance(minor, string_types):
minor = int(minor)
if bugfix is None:
bugfix = 0
elif isinstance(bugfix, string_types):
bugfix = int(bugfix)
if mbugfix is None:
mbugfix = 0
elif isinstance(mbugfix, string_types):
mbugfix = int(mbugfix)
if rc is None:
rc = 0
elif isinstance(rc, string_types):
rc = int(rc)
if noc is None:
noc = 0
elif isinstance(noc, string_types) and noc == 'n/a':
noc = -1
elif isinstance(noc, string_types):
noc = int(noc)
self.major = major
self.minor = minor
self.bugfix = bugfix
self.mbugfix = mbugfix
self.rc = rc # pylint: disable=C0103
self.name = self.VNAMES.get((major, minor), None)
self.noc = noc
self.sha = sha
@classmethod
def parse(cls, version_string):
if version_string.lower() in cls.LNAMES:
return cls.from_name(version_string)
s = version_string.decode() if isinstance(version_string, bytes) else version_string
match = cls.git_describe_regex.match(s)
if not match:
raise ValueError(
'Unable to parse version string: \'{0}\''.format(version_string)
)
return cls(*match.groups())
@classmethod
def from_name(cls, name):
if name.lower() not in cls.LNAMES:
raise ValueError(
'Named version \'{0}\' is not known'.format(name)
)
return cls(*cls.LNAMES[name.lower()])
@classmethod
def from_last_named_version(cls):
return cls.from_name(
cls.VNAMES[
max([version_info for version_info in
cls.VNAMES if
version_info[0] < (MAX_SIZE - 200)])
]
)
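    # Illustrative note (not part of the original module): the MAX_SIZE - 200
    # cutoff above excludes the placeholder tuples used for unreleased
    # codenames, so from_last_named_version() resolves to the newest
    # date-based codename (Boron, i.e. (2016, 3), given the table as written).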
@property
def sse(self):
        # Higher than 0.17, lower than the first date-based release
return 0 < self.major < 2014
@property
def info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix
)
@property
def rc_info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix,
self.rc
)
@property
def noc_info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix,
self.rc,
self.noc
)
@property
def full_info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix,
self.rc,
self.noc,
self.sha
)
@property
def string(self):
version_string = '{0}.{1}.{2}'.format(
self.major,
self.minor,
self.bugfix
)
if self.mbugfix:
version_string += '.{0}'.format(self.mbugfix)
if self.rc:
version_string += 'rc{0}'.format(self.rc)
if self.noc and self.sha:
noc = self.noc
if noc < 0:
noc = 'n/a'
version_string += '-{0}-{1}'.format(noc, self.sha)
return version_string
@property
def formatted_version(self):
if self.name and self.major > 10000:
version_string = self.name
if self.sse:
version_string += ' Enterprise'
version_string += ' (Unreleased)'
return version_string
version_string = self.string
if self.sse:
version_string += ' Enterprise'
if (self.major, self.minor) in self.RMATCH:
version_string += ' ({0})'.format(self.RMATCH[(self.major, self.minor)])
return version_string
def __str__(self):
return self.string
def __compare__(self, other, method):
if not isinstance(other, SaltStackVersion):
if isinstance(other, string_types):
other = SaltStackVersion.parse(other)
elif isinstance(other, (list, tuple)):
other = SaltStackVersion(*other)
else:
raise ValueError(
'Cannot instantiate Version from type \'{0}\''.format(
type(other)
)
)
if (self.rc and other.rc) or (not self.rc and not other.rc):
# Both have rc information, regular compare is ok
return method(self.noc_info, other.noc_info)
# RC's are always lower versions than non RC's
if self.rc > 0 and other.rc <= 0:
noc_info = list(self.noc_info)
noc_info[3] = -1
return method(tuple(noc_info), other.noc_info)
if self.rc <= 0 and other.rc > 0:
other_noc_info = list(other.noc_info)
other_noc_info[3] = -1
return method(self.noc_info, tuple(other_noc_info))
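        # Illustrative note (not part of the original module): release
        # candidates are forced to sort below final releases here, so e.g.
        # SaltStackVersion.parse('2016.3.0rc1') < SaltStackVersion.parse('2016.3.0')
        # evaluates to True via the rich comparison methods defined below.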
def __lt__(self, other):
return self.__compare__(other, lambda _self, _other: _self < _other)
def __le__(self, other):
return self.__compare__(other, lambda _self, _other: _self <= _other)
def __eq__(self, other):
return self.__compare__(other, lambda _self, _other: _self == _other)
def __ne__(self, other):
return self.__compare__(other, lambda _self, _other: _self != _other)
def __ge__(self, other):
return self.__compare__(other, lambda _self, _other: _self >= _other)
def __gt__(self, other):
return self.__compare__(other, lambda _self, _other: _self > _other)
def __repr__(self):
parts = []
if self.name:
parts.append('name=\'{0}\''.format(self.name))
parts.extend([
'major={0}'.format(self.major),
'minor={0}'.format(self.minor),
'bugfix={0}'.format(self.bugfix)
])
if self.mbugfix:
parts.append('minor-bugfix={0}'.format(self.mbugfix))
if self.rc:
parts.append('rc={0}'.format(self.rc))
noc = self.noc
if noc == -1:
noc = 'n/a'
if noc and self.sha:
parts.extend([
'noc={0}'.format(noc),
'sha={0}'.format(self.sha)
])
return '<{0} {1}>'.format(self.__class__.__name__, ' '.join(parts))
# ----- Hardcoded Salt Codename Version Information ----------------------------------------------------------------->
#
# There's no need to do anything here. The last released codename will be picked up
# --------------------------------------------------------------------------------------------------------------------
__saltstack_version__ = SaltStackVersion.from_last_named_version()
# <---- Hardcoded Salt Version Information ---------------------------------------------------------------------------
# ----- Dynamic/Runtime Salt Version Information -------------------------------------------------------------------->
def __discover_version(saltstack_version):
# This might be a 'python setup.py develop' installation type. Let's
# discover the version information at runtime.
import os
import subprocess
if 'SETUP_DIRNAME' in globals():
# This is from the exec() call in Salt's setup.py
cwd = SETUP_DIRNAME # pylint: disable=E0602
if not os.path.exists(os.path.join(cwd, '.git')):
# This is not a Salt git checkout!!! Don't even try to parse...
return saltstack_version
else:
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(os.path.dirname(cwd), '.git')):
# This is not a Salt git checkout!!! Don't even try to parse...
return saltstack_version
try:
kwargs = dict(
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd
)
if not sys.platform.startswith('win'):
# Let's not import `salt.utils` for the above check
kwargs['close_fds'] = True
process = subprocess.Popen(
['git', 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*', '--always'], **kwargs)
out, err = process.communicate()
if process.returncode != 0:
# The git version running this might not support --first-parent
# Revert to old command
process = subprocess.Popen(
['git', 'describe', '--tags', '--match', 'v[0-9]*', '--always'], **kwargs)
out, err = process.communicate()
out = out.strip()
err = err.strip()
if not out or err:
return saltstack_version
try:
return SaltStackVersion.parse(out)
except ValueError:
if not SaltStackVersion.git_sha_regex.match(out):
raise
            # We only define the parsed SHA and set NOC to -1 (unknown)
saltstack_version.sha = out.strip()
saltstack_version.noc = -1
except OSError as os_err:
if os_err.errno != 2:
            # If the errno is not 2 (The system cannot find the file
            # specified), raise the exception so it can be caught by the
            # developers
raise
return saltstack_version
def __get_version(saltstack_version):
'''
If we can get a version provided at installation time or from Git, use
that instead, otherwise we carry on.
'''
try:
# Try to import the version information provided at install time
from salt._version import __saltstack_version__ # pylint: disable=E0611,F0401
return __saltstack_version__
except ImportError:
return __discover_version(saltstack_version)
# Get additional version information if available
__saltstack_version__ = __get_version(__saltstack_version__)
# This function has executed once, we're done with it. Delete it!
del __get_version
# <---- Dynamic/Runtime Salt Version Information ---------------------------------------------------------------------
# ----- Common version related attributes - NO NEED TO CHANGE ------------------------------------------------------->
__version_info__ = __saltstack_version__.info
__version__ = __saltstack_version__.string
# <---- Common version related attributes - NO NEED TO CHANGE --------------------------------------------------------
def salt_information():
'''
Report version of salt.
'''
yield 'Salt', __version__
def dependency_information(include_salt_cloud=False):
'''
Report versions of library dependencies.
'''
libs = [
('Python', None, sys.version.rsplit('\n')[0].strip()),
('Jinja2', 'jinja2', '__version__'),
('M2Crypto', 'M2Crypto', 'version'),
('msgpack-python', 'msgpack', 'version'),
('msgpack-pure', 'msgpack_pure', 'version'),
('pycrypto', 'Crypto', '__version__'),
('libnacl', 'libnacl', '__version__'),
('PyYAML', 'yaml', '__version__'),
('ioflo', 'ioflo', '__version__'),
('PyZMQ', 'zmq', '__version__'),
('RAET', 'raet', '__version__'),
('ZMQ', 'zmq', 'zmq_version'),
('Mako', 'mako', '__version__'),
('Tornado', 'tornado', 'version'),
('timelib', 'timelib', 'version'),
('dateutil', 'dateutil', '__version__'),
('pygit2', 'pygit2', '__version__'),
('libgit2', 'pygit2', 'LIBGIT2_VERSION'),
('smmap', 'smmap', '__version__'),
('cffi', 'cffi', '__version__'),
('pycparser', 'pycparser', '__version__'),
('gitdb', 'gitdb', '__version__'),
('gitpython', 'git', '__version__'),
('python-gnupg', 'gnupg', '__version__'),
('mysql-python', 'MySQLdb', '__version__'),
('cherrypy', 'cherrypy', '__version__'),
]
if include_salt_cloud:
libs.append(
('Apache Libcloud', 'libcloud', '__version__'),
)
for name, imp, attr in libs:
if imp is None:
yield name, attr
continue
try:
imp = __import__(imp)
version = getattr(imp, attr)
if callable(version):
version = version()
if isinstance(version, (tuple, list)):
version = '.'.join(map(str, version))
yield name, version
except Exception:
yield name, None
def system_information():
'''
Report system versions.
'''
def system_version():
'''
Return host system version.
'''
lin_ver = platform.linux_distribution()
mac_ver = platform.mac_ver()
win_ver = platform.win32_ver()
if lin_ver[0]:
return ' '.join(lin_ver)
elif mac_ver[0]:
if isinstance(mac_ver[1], (tuple, list)) and ''.join(mac_ver[1]):
return ' '.join([mac_ver[0], '.'.join(mac_ver[1]), mac_ver[2]])
else:
return ' '.join([mac_ver[0], mac_ver[2]])
elif win_ver[0]:
return ' '.join(win_ver)
else:
return ''
system = [
('system', platform.system()),
('dist', ' '.join(platform.dist())),
('release', platform.release()),
('machine', platform.machine()),
('version', system_version()),
]
for name, attr in system:
yield name, attr
continue
def versions_information(include_salt_cloud=False):
'''
Report the versions of dependent software.
'''
salt_info = list(salt_information())
lib_info = list(dependency_information(include_salt_cloud))
sys_info = list(system_information())
return {'Salt Version': dict(salt_info),
'Dependency Versions': dict(lib_info),
'System Versions': dict(sys_info)}
def versions_report(include_salt_cloud=False):
'''
Yield each version properly formatted for console output.
'''
ver_info = versions_information(include_salt_cloud)
lib_pad = max(len(name) for name in ver_info['Dependency Versions'])
sys_pad = max(len(name) for name in ver_info['System Versions'])
padding = max(lib_pad, sys_pad) + 1
fmt = '{0:>{pad}}: {1}'
info = []
for ver_type in ('Salt Version', 'Dependency Versions', 'System Versions'):
info.append('{0}:'.format(ver_type))
# List dependencies in alphabetical, case insensitive order
        for name in sorted(ver_info[ver_type], key=lambda x: x.lower()):
ver = fmt.format(name,
ver_info[ver_type][name] or 'Not Installed',
pad=padding)
info.append(ver)
info.append(' ')
for line in info:
yield line
if __name__ == '__main__':
print(__version__)
| {
"content_hash": "7fef73f91f1edfc90c0ad113e2206c7a",
"timestamp": "",
"source": "github",
"line_count": 668,
"max_line_length": 118,
"avg_line_length": 35.0059880239521,
"alnum_prop": 0.48503250085528565,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "d02a49eb775603bf48be3225bb3fe6747968dfff",
"size": "23408",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/salt/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
"""Problem 16 of https://projecteuler.net"""
def problem_16():
"""Solution to problem 16."""
answer = sum([int(x) for x in str(2 ** 1000)])
return answer
| {
"content_hash": "6c10008907d272c8c41fe6d12223615f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 24,
"alnum_prop": 0.6011904761904762,
"repo_name": "hjheath/ProjectEuler",
"id": "3219d789783f4566f6229c24277c82c82d5da6af",
"size": "168",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "projecteuler/problems/problem_16.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46161"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import shutil
import getpass
# from fabric.contrib.files import append
from fabric.api import settings
import burlap
from burlap.common import env, Satchel
from burlap.tests.base import TestCase
from burlap.project import project
from burlap.context import set_cwd
from burlap.deploy import STORAGE_LOCAL
class _TestSatchel(Satchel):
name = 'test'
def configure(self):
pass
class ApacheTests(TestCase):
def get_test_satchel(self):
test = _TestSatchel()
test.genv.hosts = ['localhost']
test.genv.host_string = test.genv.hosts[0]
return test
def test_diff(self):
"""
        Confirm that, on a multi-site multi-host environment, apache correctly reports changes.
"""
test = self.get_test_satchel()
print('Setting paths...')
#env.plan_storage = STORAGE_LOCAL
env.disable_known_hosts = True
burlap_dir = os.path.abspath(os.path.split(burlap.__file__)[0])
print('Initializing tmp directory...')
d = '/tmp/test_apache_change'
if os.path.isdir(d):
shutil.rmtree(d)
os.makedirs(d)
activate_cmd = '. {d}/.env/bin/activate;'.format(d=d)
with set_cwd(d):
print('Creating project skeleton...')
project.create_skeleton(
project_name='test_apache_change',
roles='prod',
components='apache',
)
assert not os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-1' % d)
assert not os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-2' % d)
# Simulate multiple remote hosts by creating aliases of localhost.
# Note, for testing this on your localhost for a user without passwordless sudo,
# you may have to run: `sudo chmod 777 /etc/hosts`
# This won't work on Travis, where these will instead be set in .travis.yml.
print('Modifying /etc/hosts...')
env.host_string = 'localhost'
env.hosts = [env.host_string]
env.user = getpass.getuser()
hosts_updated = False
with settings(warn_only=True):
output = test.run('cat /etc/hosts')
if 'test-dj-migrate-1' not in output:
for use_sudo in (False, True):
print('Attempting to append to /etc/hosts with use_sudo=%s...' % use_sudo)
#ret = append(filename='/etc/hosts', text='127.0.0.1 test-dj-migrate-1\n127.0.0.1 test-dj-migrate-2', use_sudo=use_sudo)
ret = test.append(filename='/etc/hosts', text='127.0.0.1 test-dj-migrate-1\n127.0.0.1 test-dj-migrate-2', use_sudo=use_sudo)
print('ret:', ret)
print('Checking /etc/hosts content...')
output = test.run('cat /etc/hosts')
print('output:', output)
if 'test-dj-migrate-1' in output:
hosts_updated = True
print('Success!')
break
else:
hosts_updated = True
assert hosts_updated
os.system('ln -s %s %s/' % (burlap_dir, d))
project.update_settings({
'plan_storage': STORAGE_LOCAL,
'plan_data_dir': os.path.join(d, 'plans'),
'services': ['apache'],
'default_site': 'testsite1',
'default_role': 'prod',
# This is necessary to stop get_current_hostname() from attempting to lookup our actual hostname.
'_ip_to_hostname': {
'test-dj-migrate-1': 'test-dj-migrate-1',
'test-dj-migrate-2': 'test-dj-migrate-2',
},
'apache_application_name': 'testsite1',
'apache_server_admin_email': '[email protected]',
'apache_server_aliases_template': '{apache_locale}.mydomain.com',
'apache_wsgi_dir_template': '/usr/local/{apache_application_name}/wsgi',
'apache_wsgi_processes': 1,
'apache_wsgi_threads': 0,
},
role='all')
project.update_settings({
'hosts': ['test-dj-migrate-1', 'test-dj-migrate-2'],
'available_sites_by_host':{
'test-dj-migrate-1': [
'testsite1',
],
'test-dj-migrate-2': [
'testsite2',
]
},
'sites': {
'testsite1': {
'apache_domain_template': 'testsite1.test-dj-migrate-1.com',
},
'testsite2': {
'apache_domain_template': 'testsite2.test-dj-migrate-2.com',
},
},
},
role='prod')
# Run a deployment preview.
kwargs = dict(
activate_cmd=activate_cmd,
)
status, output = self.getstatusoutput('{activate_cmd} fab prod deploy.preview'.format(**kwargs))
print('output:\n%s' % output)
assert not status
# The deployment preview should include both hosts.
assert "[test-dj-migrate-1] Executing task 'deploy.preview'" in output
assert "[test-dj-migrate-2] Executing task 'deploy.preview'" in output
assert not os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-1' % d)
assert not os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-2' % d)
status, output = self.getstatusoutput(
'{activate_cmd} '
'fab prod debug.set_satchel_value:apache,site,xyz '
'debug.show_satchel_items:apache'.format(**kwargs))
assert not status
assert ' = xyz' in output
# Fake a deployment.
#status, output = self.getstatusoutput((
#'{activate_cmd} '
#'fab prod debug.set_satchel_value:apache,site,abc '
#'deploy.fake:set_satchels="apache-site-abc"').format(**kwargs))
#assert not status
#assert os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-1' % d)
#assert os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-2' % d)
## Confirm apache now reports no changes needing deployment.
#status, output = self.getstatusoutput('{activate_cmd} fab prod deploy.preview'.format(**kwargs))
#assert not status
#assert "[test-dj-migrate-1] Executing task 'deploy.preview'" in output
#assert "[test-dj-migrate-2] Executing task 'deploy.preview'" in output
#assert os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-1' % d)
#print(open('%s/plans/prod/000/thumbprints/test-dj-migrate-1' % d).read())
#assert os.path.isfile('%s/plans/prod/000/thumbprints/test-dj-migrate-2' % d)
#print(open('%s/plans/prod/000/thumbprints/test-dj-migrate-2' % d).read())
| {
"content_hash": "93eee3a31341cd100ef868fde06193c6",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 148,
"avg_line_length": 43.43023255813954,
"alnum_prop": 0.5243641231593039,
"repo_name": "chrisspen/burlap",
"id": "6db733718d65e8d03107053c6abf9ec3e435c667",
"size": "7470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "burlap/tests/test_apache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "722479"
},
{
"name": "Shell",
"bytes": "11659"
}
],
"symlink_target": ""
} |
from mod_base import*
class Nick(Hybrid):
"""Change the nick of the bot. If nick isn't available, the nick will be reclaimed when it becomes available."""
def init(self):
self.events = [IRC_EVT_INTERVAL, IRC_EVT_USER_NICK_CHANGE, IRC_EVT_USER_QUIT]
def event(self, event):
desired = self.bot.config["identity"]["nicks"][0]
if event.type == IRC_EVT_INTERVAL:
if self.bot.me.nick != desired:
self.ReclaimNick()
elif event.type == IRC_EVT_USER_QUIT:
if event.user.GetNick() == desired:
self.ReclaimNick()
elif event.type == IRC_EVT_USER_NICK_CHANGE:
if event.user.GetNick() == desired:
self.ReclaimNick()
def run(self, win, user, data, caller=None):
args = Args(data)
if len(args) == 1:
self.bot.me.Nick(args[0])
self.bot.config["identity"]["nicks"] = [args[0]]
else:
win.Privmsg("nick can't contain spaces")
return False
def ReclaimNick(self):
self.bot.me.Nick(self.bot.config["identity"]["nicks"][0])
module = {
"class": Nick,
"type": MOD_BOTH,
"level": 5,
"zone": IRC_ZONE_BOTH,
"interval": 60*10,
}
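# Illustrative note (assumed semantics, not part of the original module): with
# "interval" set to 60*10 (presumably seconds), the IRC_EVT_INTERVAL event
# fires roughly every ten minutes, at which point event() calls ReclaimNick()
# whenever the bot is not using the first configured nick.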
| {
"content_hash": "c51a004f7e7a3bb9cce86f2c7175768a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 116,
"avg_line_length": 32.86842105263158,
"alnum_prop": 0.5628502802241794,
"repo_name": "richrd/bx",
"id": "5c772f4a3236a0fc65fb943b8b176b3acee08932",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/nick.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "193806"
}
],
"symlink_target": ""
} |
import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", help="run slow tests")
def pytest_runtest_setup(item):
if "slow" in item.keywords and not item.config.getoption("--runslow"):
pytest.skip("need --runslow option to run")
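# Illustrative usage (assumed, not part of the original file): a test opts in
# to the "slow" keyword via the standard pytest marker, e.g.
#
#   import pytest
#
#   @pytest.mark.slow
#   def test_expensive_operation():
#       ...
#
# and is then skipped unless pytest is invoked with the --runslow flag.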
| {
"content_hash": "b949ad2a4aa9a5a59cc0783a1f77b3ad",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 28.5,
"alnum_prop": 0.6947368421052632,
"repo_name": "GeoscienceAustralia/eo-datasets",
"id": "3171bdd753addfb8ab8d6902bc7caedd5a67342e",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/eodatasets3",
"path": "conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "501206"
},
{
"name": "Shell",
"bytes": "244"
}
],
"symlink_target": ""
} |
import copy
import glob
import json
import logging
import os
import pickle
import re
import shutil
import signal
import time
import urllib.parse
import uuid
import xml.etree.ElementTree as ET
from concurrent import futures
from distutils import dir_util
from functools import reduce
from operator import itemgetter
from pathlib import Path
from typing import List, Optional, Union, ValuesView, Tuple, Dict
from xml.dom import minidom
from zipfile import ZipFile
import requests
import yaml
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.core.cache import cache
from django.db import connection
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils import timezone
from numpy import linspace
from requests import Response
from eventkit_cloud.core.helpers import get_or_update_session, handle_auth
from eventkit_cloud.jobs.enumerations import GeospatialDataType
from eventkit_cloud.jobs.models import ExportFormat, get_data_type_from_provider
from eventkit_cloud.tasks import DEFAULT_CACHE_EXPIRATION, set_cache_value
from eventkit_cloud.tasks.enumerations import Directory, PREVIEW_TAIL, UNSUPPORTED_CARTOGRAPHY_FORMATS
from eventkit_cloud.tasks.exceptions import FailedException
from eventkit_cloud.tasks.models import DataProviderTaskRecord, ExportRunFile, ExportTaskRecord, ExportRun
from eventkit_cloud.utils import gdalutils, s3
from eventkit_cloud.utils.gdalutils import get_band_statistics, get_chunked_bbox
from eventkit_cloud.utils.generic import cd, get_file_paths # NOQA
from eventkit_cloud.utils.s3 import download_folder_from_s3
logger = logging.getLogger()
CHUNK = 1024 * 1024 * 2 # 2MB chunks
def get_run_staging_dir(run_uid):
"""
The run staging dir is where all files are stored while they are being processed.
It is a unique space to ensure that files aren't being improperly modified.
:param run_uid: The unique value to store the directory for the run data.
:return: The path to the run directory.
"""
return os.path.join(settings.EXPORT_STAGING_ROOT.rstrip("\/"), str(run_uid))
def get_download_path(folder_name):
"""
The download dir is where all files are stored after they are processed.
It is a unique space to ensure that files aren't being improperly modified.
    :param folder_name: The unique value to store the directory for the data.
:return: The path to the directory.
"""
return os.path.join(settings.EXPORT_DOWNLOAD_ROOT.rstrip("\/"), str(folder_name))
def get_download_url(file_name):
"""
A URL path to the run data
    :param file_name: The name of the file to build a download URL for.
:return: The url context. (e.g. /downloads/123e4567-e89b-12d3-a456-426655440000)
"""
return f"{settings.EXPORT_MEDIA_ROOT.rstrip('/')}/{str(file_name)}"
def get_provider_staging_dir(run_uid, provider_slug):
"""
The provider staging dir is where all files are stored while they are being processed.
It is a unique space to ensure that files aren't being improperly modified.
:param run_uid: The unique id for the run.
:param provider_slug: The unique value to store the directory for the provider data.
:return: The path to the provider directory.
"""
run_staging_dir = get_run_staging_dir(run_uid)
return os.path.join(run_staging_dir, provider_slug)
def get_provider_staging_preview(run_uid, provider_slug):
"""
The provider staging dir is where all files are stored while they are being processed.
It is a unique space to ensure that files aren't being improperly modified.
:param run_uid: The unique id for the run.
:param provider_slug: The unique value to store the directory for the provider data.
:return: The path to the provider directory.
"""
run_staging_dir = get_run_staging_dir(run_uid)
return os.path.join(run_staging_dir, provider_slug, PREVIEW_TAIL)
def get_archive_data_path(provider_slug=None, file_name=None, archive=True):
"""
Gets a datapath for the files to be placed in the zip file.
:param provider_slug: An optional unique value to store files.
:param file_name: The name of a file.
:return:
"""
if archive:
file_path = Directory.DATA.value
else:
file_path = ""
if provider_slug:
file_path = os.path.join(file_path, provider_slug)
if file_name:
file_path = os.path.join(file_path, file_name)
return file_path
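# Illustrative example (assumed values, not part of the original module): with
# Directory.DATA.value == "data", get_archive_data_path("osm", "test.gpkg")
# would return "data/osm/test.gpkg"; passing archive=False drops the leading
# "data" directory.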
def default_format_time(date_time):
return date_time.strftime("%Y%m%d")
def normalize_name(name):
if not name:
return
# Remove all non-word characters
s = re.sub(r"[^\w\s]", "", name)
# Replace all whitespace with a single underscore
s = re.sub(r"\s+", "_", s)
return s.lower()
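# Illustrative example (not part of the original module):
#   normalize_name("My Job: Test 2020")  ->  "my_job_test_2020"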
def get_export_task_record(export_task_record_uid: str) -> ExportTaskRecord:
"""
Gets the ExportTaskRecord and related models used for export_tasks from the ExportTaskRecord.
:param export_task_record_uid: The UID of an ExportTaskRecord.
:return provider_slug: The associated provider_slug value.
"""
return ExportTaskRecord.objects.select_related(
"export_provider_task__provider", "export_provider_task__run__job"
).get(uid=export_task_record_uid)
def get_supported_projections(export_format: ExportFormat) -> List[int]:
supported_projections = export_format.supported_projections.all().values_list("srid", flat=True)
return supported_projections
def get_default_projection(supported_projections: List[int], selected_projections: List[int]) -> Optional[int]:
"""
Gets a default projection of either 4326 or the first supported projection.
"""
if 4326 in supported_projections and 4326 in selected_projections:
return 4326
for supported_projection in supported_projections:
if supported_projection in selected_projections:
return supported_projection
return None
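# Illustrative examples (not part of the original module):
#   get_default_projection([4326, 3857], [4326, 3857])  ->  4326
#   get_default_projection([4326, 3857], [3857])        ->  3857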
def get_export_filepath(
stage_dir: str, export_task_record: ExportTaskRecord, descriptor: Optional[Union[int, str]], extension: str
):
"""
Gets a filepath for an export.
:param stage_dir: The staging directory to place files in while they process.
    :param export_task_record: The ExportTaskRecord used to derive the job, provider, and event names.
    :param descriptor: A projection (or other description) as an int or string referencing an EPSG code
    (e.g. 4326 = EPSG:4326)
:param extension: The file extension for the filename.
"""
provider = export_task_record.export_provider_task.provider if export_task_record else None
descriptors = "-".join(
filter(
None,
[
normalize_name(export_task_record.export_provider_task.run.job.name) if export_task_record else None,
str(descriptor) if descriptor else None,
provider.slug if provider else None,
normalize_name(export_task_record.export_provider_task.run.job.event) if export_task_record else None,
                default_format_time(timezone.now()),
"eventkit",
normalize_name(provider.label) if provider else None,
],
)
)
if extension == "shp":
filepath = os.path.join(stage_dir, f"{descriptors}_{extension}")
else:
filepath = os.path.join(stage_dir, f"{descriptors}.{extension}")
return filepath
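# Illustrative example (assumed values, not part of the original module): for a
# job named "Test", descriptor 4326, provider slug "osm", event "Nepal" and
# provider label "OSM Data", the joined descriptors would look like
# "test-4326-osm-nepal-<YYYYMMDD>-eventkit-osm_data", producing
# "<stage_dir>/test-4326-osm-nepal-<YYYYMMDD>-eventkit-osm_data.gpkg" for a
# "gpkg" extension (a "shp" extension is appended with "_shp" instead of used
# as a file extension).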
def get_style_files():
"""
:return: A list of all of the static files used for styles (e.g. icons)
"""
style_dir = os.path.join(os.path.dirname(__file__), "static", "tasks", "styles")
return get_file_paths(style_dir)
def generate_qgs_style(metadata, skip_formats=UNSUPPORTED_CARTOGRAPHY_FORMATS) -> Dict[str, str]:
"""
Task to create QGIS project file with styles for osm.
    If the metadata contains a single data provider, a style file will be generated only for that provider; otherwise
    all of the data providers in the run will be added to the style file.
:param metadata: A dict of metadata provided by get_metadata.
:return: The path to the generated qgs file.
"""
from eventkit_cloud.tasks.helpers import normalize_name
cleaned_metadata = remove_formats(metadata, formats=UNSUPPORTED_CARTOGRAPHY_FORMATS)
stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(cleaned_metadata["run_uid"]))
job_name = normalize_name(cleaned_metadata["name"].lower())
provider_details = [provider_detail for provider_slug, provider_detail in cleaned_metadata["data_sources"].items()]
if len(provider_details) == 1:
style_file_name = "{0}-{1}-{2}.qgs".format(
job_name,
normalize_name(provider_details[0]["slug"]),
default_format_time(timezone.now()),
)
else:
style_file_name = "{0}-{1}.qgs".format(job_name, default_format_time(timezone.now()))
style_file = os.path.join(stage_dir, style_file_name)
context = {
"job_name": job_name,
"job_date_time": "{0}".format(timezone.now().strftime("%Y%m%d%H%M%S%f")[:-3]),
"provider_details": provider_details,
"bbox": metadata["bbox"],
"has_raster": metadata["has_raster"],
"has_elevation": metadata["has_elevation"],
"has_vector": metadata["has_vector"],
}
with open(style_file, "wb") as open_file:
open_file.write(
render_to_string(
"styles/Style.qgs",
context=context,
).encode()
)
return {style_file: f"{job_name}.qgs"}
def get_arcgis_templates(metadata: dict) -> dict:
"""
Gets the arcgis template file and if possible uses provided metadata to update the file.
:param metadata: A dict of metadata provided by get_metadata.
:return: A dict with the absolute path to the file and a relative path to desired location in the datapack.
"""
cleaned_metadata = remove_formats(metadata, formats=UNSUPPORTED_CARTOGRAPHY_FORMATS)
files = {}
stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(cleaned_metadata["run_uid"]), Directory.ARCGIS.value)
    if not os.path.isdir(stage_dir):
os.makedirs(stage_dir)
with cd(os.path.join(os.path.dirname(__file__), "arcgis")):
for dirpath, _, arcgis_template_files in os.walk("./"):
if not os.path.isdir(stage_dir):
os.mkdir(stage_dir)
for arcgis_template_file in arcgis_template_files:
basename = os.path.basename(arcgis_template_file)
template_file = os.path.join(stage_dir, basename)
if os.path.splitext(basename)[-1] in [".lyrx"]:
with open(arcgis_template_file, "rb") as open_file:
template = json.load(open_file)
update_arcgis_json_extents(template, metadata["bbox"])
with open(template_file, "w") as open_file:
json.dump(template, open_file)
files[template_file] = os.path.join(
dirpath, Directory.ARCGIS.value, Directory.TEMPLATES.value, arcgis_template_file
)
else:
if basename in ["create_mxd.py", "ReadMe.txt"]:
files[os.path.abspath(os.path.join(dirpath, arcgis_template_file))] = os.path.join(
Directory.ARCGIS.value, "{0}".format(basename)
)
                    # This bit is needed because it's easier to test and reference the file with a standard extension.
elif basename in ["create_aprx.py"]:
files[os.path.abspath(os.path.join(dirpath, arcgis_template_file))] = os.path.join(
Directory.ARCGIS.value, "{0}.pyt".format(os.path.splitext(basename)[0])
)
else:
# Put the support files in the correct directory.
files[os.path.abspath(os.path.join(dirpath, arcgis_template_file))] = os.path.join(
Directory.ARCGIS.value, Directory.TEMPLATES.value, "{0}".format(basename)
)
arcgis_metadata_file = os.path.join(stage_dir, "metadata.json")
arcgis_metadata = get_arcgis_metadata(metadata)
with open(arcgis_metadata_file, "w") as open_md_file:
json.dump(arcgis_metadata, open_md_file)
files[os.path.abspath(arcgis_metadata_file)] = os.path.join(Directory.ARCGIS.value, "metadata.json")
return files
def update_arcgis_json_extents(document, bbox):
extent = {
"xmin": bbox[0],
"ymin": bbox[1],
"xmax": bbox[2],
"ymax": bbox[3],
"spatialReference": {"wkid": 4326, "latestWkid": 4326},
}
layer_definitions = document["layerDefinitions"]
for layer_definition in layer_definitions:
if layer_definition.get("featureTable"):
layer_definition["featureTable"]["dataConnection"]["extent"] = extent
return document
def remove_formats(metadata: dict, formats: List[str] = UNSUPPORTED_CARTOGRAPHY_FORMATS):
"""
Used to remove formats from the metadata especially so that they don't show up in the cartography.
    :param metadata: A dict of metadata provided by get_metadata.
    :param formats: A list of unsupported file extensions (e.g. .gpx)
    :return: A copy of the metadata dict with files in the given formats removed.
"""
# Create a new dict to not alter the input data.
if metadata is None:
metadata = {}
cleaned_metadata = copy.deepcopy(metadata)
for slug, data_source in cleaned_metadata.get("data_sources", {}).items():
cleaned_metadata["data_sources"][slug] = data_source
cleaned_files = []
for file_info in cleaned_metadata["data_sources"][slug].get("files"):
# Add all files that aren't in the remove list.
if file_info.get("file_ext") not in formats:
cleaned_files.append(file_info)
cleaned_metadata["data_sources"][slug]["files"] = cleaned_files
return cleaned_metadata
def get_human_readable_metadata_document(metadata) -> Dict[str, str]:
"""
:param metadata: A dictionary returned by get_metadata.
:return: A filepath to a txt document.
"""
from eventkit_cloud.tasks.helpers import normalize_name
stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(metadata["run_uid"]))
metadata_file = os.path.join(stage_dir, "{0}_ReadMe.txt".format(normalize_name(metadata["name"])))
with open(metadata_file, "wb") as open_file:
open_file.write(
render_to_string("styles/metadata.txt", context={"metadata": metadata})
.replace("\r\n", "\n")
.replace("\n", "\r\n")
.encode()
)
return {metadata_file: "metadata.txt"}
def get_last_update(url, type, cert_info=None):
"""
A wrapper to get different timestamps.
:param url: The url to get the timestamp
:param type: The type of services (e.g. osm)
:param cert_info: Optionally a dict containing cert path and pass
:return: The timestamp as a string.
"""
if type == "osm":
return get_osm_last_update(url, cert_info=cert_info)
def get_metadata_url(url, type):
"""
    A wrapper to get the metadata URL for a service.
    :param url: The service url
    :param type: The type of service (e.g. wms)
    :return: The metadata (e.g. GetCapabilities) URL as a string.
"""
if type in ["wcs", "wms", "wmts"]:
return "{0}?request=GetCapabilities".format(url.split("?")[0])
else:
return url
def get_osm_last_update(url, **kwargs):
"""
:param url: A path to the overpass api.
:param cert_info: Optionally cert info if needed
:return: The default timestamp as a string (2018-06-18T13:09:59Z)
"""
try:
timestamp_url = "{0}timestamp".format(url.rstrip("/").rstrip("interpreter"))
session = get_or_update_session(**kwargs)
response = session.get(timestamp_url)
if response:
return response.content.decode()
raise Exception("Get OSM last update failed with {0}: {1}".format(response.status_code, response.content))
except Exception as e:
logger.warning(e)
logger.warning("Could not get the timestamp from the overpass url.")
return None
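# Illustrative example (not part of the original module): for an overpass url of
# "https://overpass-server.com/overpass/interpreter", the timestamp url built
# above becomes "https://overpass-server.com/overpass/timestamp".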
def progressive_kill(pid):
"""
Tries to kill first with TERM and then with KILL.
:param pid: The process ID to kill
:return: None.
"""
try:
logger.info("Trying to terminate pid {0} with SIGTERM.".format(pid))
os.kill(pid, signal.SIGTERM)
time.sleep(5)
logger.info("Trying to kill pid {0} with SIGKILL.".format(pid))
os.kill(pid, signal.SIGKILL)
time.sleep(1)
except OSError:
logger.info("{0} PID no longer exists.".format(pid))
def pickle_exception(exception):
return pickle.dumps(exception, 0).decode()
def get_metadata(data_provider_task_record_uids: List[str], source_only=False):
"""
    An object to hold metadata about the run for the sake of being passed to various scripts for the creation of
style files or metadata documents for within the datapack.
This also creates a license file which is considered a part of the metadata and adds it to the "include_files"
:param source_only: If enabled only the first task for the data_provider_task_record will be included in the
metadata. This is useful for generating style files for a single layer instead of redundant layers for each file.
:param data_provider_task_record_uids: A list of Provider task uid string for either the run task which will add
all of the provider tasks for the run or for a single data provider task.
:return: A dict containing the run metadata.
Example:
{
"aoi": "GEOJSON representing the selected AOI."
"bbox": [
w, s, e, n
],
"data_sources": {
"osm": {
"copyright": None,
"description": "OpenStreetMap vector data provided in a custom thematic schema. \r\n\t\r\n\tData is
grouped into separate tables (e.g. water, roads...).",
"files": [{"file_path": "data/osm/test-osm-20181101.gpkg",
"file_ext": ".gpkg",
"full_file_path": "/var/lib/eventkit/exports_stage/7fadf34e-58f9-4bb8-ab57-adc1015c4269
/osm/test.gpkg",
"band_stats":
"ramp_shader_steps":}]
"last_update": "2018-10-29T04:35:02Z\n",
"metadata": "https://overpass-server.com/overpass/interpreter",
"name": "OpenStreetMap Data (Themes)",
"slug": "osm",
"type": "osm",
"uid": "0d08ddf6-35c1-464f-b271-75f6911c3f78",
"layers": ["layer1", "layer2"]
}
},
"date": "20181101",
"description": "Test",
"has_elevation": False,
"has_raster": True,
"include_files": [
"/var/lib/eventkit/exports_stage/7fadf34e-58f9-4bb8-ab57-adc1015c4269/osm/test.gpkg",
"/var/lib/eventkit/exports_stage/7fadf34e-58f9-4bb8-ab57-adc1015c4269/osm/osm_selection.geojson"
],
"name": "test",
"project": "Test",
"projections": [4326, 3857]
"run_uid": "7fadf34e-58f9-4bb8-ab57-adc1015c4269",
"url": "http://cloud.eventkit.test/status/2010025c-6d61-4a0b-8d5d-ff9c657259eb"
}
"""
from eventkit_cloud.tasks.enumerations import TaskState
from eventkit_cloud.tasks.export_tasks import create_zip_task
data_provider_task_records = (
DataProviderTaskRecord.objects.select_related("run__job")
.prefetch_related("run__job__projections")
.prefetch_related("provider")
.filter(uid__in=data_provider_task_record_uids)
)
run = data_provider_task_records.first().run
projections = []
for projection in run.job.projections.all():
projections.append(projection.srid)
# To prepare for the zipfile task, the files need to be checked to ensure they weren't
# deleted during cancellation.
include_files = {}
# A dict is used here to ensure that just one file per provider is added,
# this should be updated when multiple formats are supported.
metadata = {
"name": normalize_name(run.job.name),
"url": "{0}/status/{1}".format(getattr(settings, "SITE_URL"), str(run.job.uid)),
"description": run.job.description,
"project": run.job.event,
"projections": projections,
"date": timezone.now().strftime("%Y%m%d"),
"run_uid": str(run.uid),
"data_sources": {},
"bbox": run.job.extents,
"aoi": run.job.bounds_geojson,
"has_raster": False, # TODO: These are used for style groupings and seem frivolous.
"has_elevation": False,
"has_vector": False,
}
for data_provider_task_record in data_provider_task_records:
data_provider = data_provider_task_record.provider
provider_type = data_provider.export_provider_type.type_name
conf = yaml.safe_load(data_provider.config) or dict()
cert_info = conf.get("cert_info", None)
metadata["data_sources"][data_provider_task_record.provider.slug] = {
"uid": str(data_provider_task_record.uid),
"slug": data_provider_task_record.provider.slug,
"name": data_provider_task_record.name,
"files": [],
"type": get_data_type_from_provider(data_provider_task_record.provider),
"description": str(data_provider.service_description).replace("\r\n", "\n").replace("\n", "\r\n\t"),
"last_update": get_last_update(data_provider.url, provider_type, cert_info=cert_info),
"metadata": get_metadata_url(data_provider.url, provider_type),
"copyright": data_provider.service_copyright,
"layers": data_provider.layers,
"level_from": data_provider.level_from,
"level_to": data_provider.level_to,
}
if (
metadata["data_sources"][data_provider_task_record.provider.slug].get("type")
== GeospatialDataType.RASTER.value
):
metadata["has_raster"] = True
if (
metadata["data_sources"][data_provider_task_record.provider.slug].get("type")
== GeospatialDataType.ELEVATION.value
):
metadata["has_elevation"] = True
if metadata["data_sources"][data_provider_task_record.provider.slug].get("type") in [
GeospatialDataType.VECTOR.value,
"osm",
"nome",
]: # TODO: handle osm generically like vector layers
metadata["has_vector"] = True
if data_provider_task_record.preview is not None:
include_files[
data_provider_task_record.preview.get_file_path(staging=True)
] = data_provider_task_record.preview.get_file_path(archive=True)
# Only include tasks with a specific projection in the metadata.
# TODO: Refactor to make explicit which files are included in map documents.
query = reduce(lambda q, value: q | Q(name__icontains=value), projections, Q())
export_tasks = data_provider_task_record.tasks.filter(query)
if source_only:
export_tasks = [export_tasks.first()]
for export_task in export_tasks:
if TaskState[export_task.status] in TaskState.get_incomplete_states():
continue
try:
staging_filepath = export_task.result.get_file_path(staging=True)
archive_filepath = export_task.result.get_file_path(archive=True)
except Exception:
continue
current_files = metadata["data_sources"][data_provider_task_record.provider.slug]["files"]
if staging_filepath not in map(itemgetter("full_file_path"), current_files):
# Only include files relevant to the user that we can actually add to the carto.
if export_task.display and ("project file" not in export_task.name.lower()):
pattern = re.compile(".*EPSG:(?P<projection>3857|4326).*$")
matches = pattern.match(export_task.name)
projection = "4326"
if matches:
projection = pattern.match(export_task.name).groupdict().get("projection")
file_data = {
"file_path": archive_filepath,
"full_file_path": staging_filepath,
"file_ext": os.path.splitext(staging_filepath)[1],
"projection": projection,
}
if (
metadata["data_sources"][data_provider_task_record.provider.slug].get("type")
== GeospatialDataType.ELEVATION.value
):
# Get statistics to update ranges in template.
try:
band_stats = get_band_statistics(staging_filepath)
logger.info("Band Stats {0}: {1}".format(staging_filepath, band_stats))
file_data["band_stats"] = band_stats
# Calculate the value for each elevation step (of 16)
try:
steps = linspace(band_stats[0], band_stats[1], num=16)
file_data["ramp_shader_steps"] = list(map(int, steps))
except TypeError:
file_data["ramp_shader_steps"] = None
except Exception:
# TODO: Allow file paths for vszip or extract zip data.
file_data["ramp_shader_steps"] = None
metadata["data_sources"][data_provider_task_record.provider.slug]["files"] += [file_data]
if not os.path.isfile(staging_filepath):
logger.error("Could not find file {0} for export {1}.".format(staging_filepath, export_task.name))
logger.error(f"Contents of directory: {os.listdir(os.path.dirname(staging_filepath))}")
continue
# Exclude zip files created by zip_export_provider
if not (staging_filepath.endswith(".zip") and export_task.name == create_zip_task.name):
include_files[staging_filepath] = archive_filepath
# add the license for this provider if there are other files already
if include_files:
try:
include_files.update(create_license_file(data_provider_task_record))
except FileNotFoundError:
# This fails if run at beginning of run.
pass
metadata["include_files"] = include_files
return metadata
def get_arcgis_metadata(metadata):
"""
A way to add or remove information which will be used by the arcgis script.
:param metadata: A metadata dict returned from get_metadata
:return: A metadata dict to be provided within the datapack.
"""
arcgis_metadata = remove_formats(metadata, formats=UNSUPPORTED_CARTOGRAPHY_FORMATS)
# remove files which reference the server directories.
arcgis_metadata.pop("include_files")
for data_source, data_source_values in arcgis_metadata["data_sources"].items():
for file_details in data_source_values["files"]:
file_details.pop("full_file_path", "")
return arcgis_metadata
def get_all_rabbitmq_objects(api_url: str, rabbit_class: str) -> list:
"""
:param api_url: The http api url including authentication values.
:param rabbit_class: The type of rabbitmq class (i.e. queues or exchanges) as a string.
:return: An array of dicts with the desired objects.
"""
url = f"{api_url.rstrip('/')}/{rabbit_class}"
params = {"page": 1, "page_size": 100, "pagination": True}
response = None
try:
logger.info(f"Getting all {rabbit_class}")
response = requests.get(url, params=params)
objects_page = response.json()
rabbit_objects = objects_page.get("items")
pages = objects_page.get("page_count", 0)
for page in range(2, pages + 1):
logger.info(f"Getting page: {page} of {pages} for {rabbit_class}")
params["page"] = page
response = requests.get(url, params=params)
if response.ok:
rabbit_objects += response.json()["items"]
else:
raise Exception(f"Failed to fetch {rabbit_class}")
return rabbit_objects
except Exception as e:
if response:
logger.error(response.content.decode())
logger.error(e)
raise e
def delete_rabbit_objects(api_url: str, rabbit_classes: list = ["queues"], force: bool = False) -> None:
api_url = api_url.rstrip("/")
for rabbit_class in rabbit_classes:
for rabbit_object in get_all_rabbitmq_objects(api_url, rabbit_class):
object_name = urllib.parse.quote(rabbit_object.get("name"), safe="")
vhost = urllib.parse.quote(rabbit_object.get("vhost"), safe="")
# Exchanges don't have consumers or messages, so deleting exchanges is always done.
consumers = rabbit_object.get("consumers")
messages = rabbit_object.get("messages")
if not (messages or consumers) or force:
object_url = f"{api_url}/{rabbit_class}/{vhost}/{object_name}"
res = requests.delete(object_url)
if res.ok:
logger.info(f"Removed {rabbit_class}: {object_name}")
else:
logger.info(f"Could not remove {rabbit_class} {object_name}: {res.content}")
else:
logger.info(f"Cannot remove {rabbit_class}: {rabbit_object}")
if consumers:
logger.info(f"There are {consumers} consumers")
if messages:
logger.info(f"There are {messages} messages")
def get_message_count(queue_name: str, message_type: str = "messages") -> int:
"""
:param queue_name: The queue that you want to check messages for.
:param message_type: The type of message you want. e.g. messages or messages_ready
:return: An integer count of pending messages.
"""
broker_api_url = getattr(settings, "BROKER_API_URL")
queue_class = "queues"
for queue in get_all_rabbitmq_objects(broker_api_url, queue_class):
if queue.get("name") == queue_name:
try:
return queue.get(message_type, 0)
except Exception as e:
logger.info(e)
logger.info(f"Cannot find queue named {queue_name}, returning 0 messages.")
return 0
def check_cached_task_failures(task_name, task_uid):
"""
Used to check how many times this task has already attempted to run.
If the task continues to fail, this will fire an exception to be
handled by the task.
"""
cache_key = f"{task_uid}-task-attempts"
task_attempts = cache.get_or_set(cache_key, 0)
task_attempts += 1
cache.set(cache_key, task_attempts)
if task_attempts > settings.MAX_TASK_ATTEMPTS:
raise FailedException(task_name=task_name)
def add_export_run_files_to_zip(zipfile, run_zip_file):
"""
Add additional files stored in ExportRunFile objects to a zipfile.
"""
if not os.path.exists(settings.EXPORT_RUN_FILES):
os.makedirs(settings.EXPORT_RUN_FILES)
export_run_files = ExportRunFile.objects.all()
for export_run_file in export_run_files:
run_zip_file.message = f"Adding {export_run_file.file.name} to zip archive."
export_run_file_path = os.path.join(settings.EXPORT_RUN_FILES, export_run_file.file.name)
if settings.USE_S3:
request = requests.get(export_run_file.file.url)
with open(export_run_file_path, "wb+") as file:
file.write(request.content)
extra_directory = export_run_file.directory or ""
if export_run_file.provider:
arcname = os.path.join("data", export_run_file.provider.slug, extra_directory, export_run_file.file.name)
zipfile.write(export_run_file_path, arcname=arcname)
else:
arcname = os.path.join(extra_directory, export_run_file.file.name)
zipfile.write(export_run_file_path, arcname)
def get_data_package_manifest(metadata: dict, ignore_files: list) -> str:
"""
    Uses metadata to generate a manifest file.
<MissionPackageManifest version="2">
<Configuration>
<Parameter name="uid" value="<UID>"/>
<Parameter name="name" value="<Name>"/>
</Configuration>
<Contents>
<Content ignore="false" zipEntry="<file_path>">
<Parameter name="contentType" value="External Native Data"/>
</Content>
</Contents>
</MissionPackageManifest>
:param metadata: A dict of run contents.
:param ignore_files: A list of files to ignore.
:return: File path to manifest file.
"""
from eventkit_cloud.tasks.helpers import normalize_name
# Placeholder to add unsupported formats.
cleaned_metadata = remove_formats(metadata, formats=[])
if cleaned_metadata:
run_uid = cleaned_metadata.get("run_uid")
job_name = normalize_name(cleaned_metadata["name"].lower())
else:
run_uid = uuid.uuid4()
job_name = "DataPack"
stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid))
root = ET.Element("MissionPackageManifest", attrib={"version": "2"})
# Set up configuration
configuration = ET.SubElement(root, "Configuration")
    ET.SubElement(configuration, "Parameter", attrib={"name": "uid", "value": str(run_uid)})
# use the first 30 characters from the name
ET.SubElement(configuration, "Parameter", attrib={"name": "name", "value": job_name[:30]})
# Add contents
contents = ET.SubElement(root, "Contents")
for data_source_slug, data_source_info in cleaned_metadata.get("data_sources", {}).items():
data_source_type = data_source_info["type"]
for data_file in data_source_info["files"]:
file_path = os.path.relpath(data_file["file_path"])
content = ET.SubElement(contents, "Content", attrib={"ignore": "false", "zipEntry": file_path})
if data_source_type == GeospatialDataType.RASTER.value:
# Let application know that this is raster data.
ET.SubElement(content, "Parameter", attrib={"name": "contentType", "value": "External Native Data"})
# Ignore contents
for data_file in ignore_files:
file_path = os.path.relpath(data_file)
ET.SubElement(contents, "Content", attrib={"ignore": "true", "zipEntry": file_path})
ET.SubElement(contents, "Content", attrib={"ignore": "false", "zipEntry": os.path.join("manifest", "manifest.xml")})
# Pretty print using xml dom
manifest_file = os.path.join(stage_dir, "manifest.xml")
manifest = minidom.parseString(ET.tostring(root)).toprettyxml(indent=" ")
# Strip the header (and newline) that minidom forces. Consider lxml in future.
manifest = "\n".join(manifest.split("\n")[1:-1])
if not os.path.isdir(os.path.dirname(manifest_file)):
os.makedirs(os.path.dirname(manifest_file))
with open(manifest_file, "w") as open_file:
open_file.write(manifest)
return manifest_file
def merge_chunks(
output_file,
layer_name,
projection,
task_uid: str,
bbox: list,
stage_dir: str,
base_url: str,
cert_info=None,
task_points=100,
feature_data=False,
distinct_field=None,
):
chunks = download_chunks(task_uid, bbox, stage_dir, base_url, cert_info, task_points, feature_data)
out = gdalutils.convert(
driver="gpkg",
input_file=chunks,
output_file=output_file,
task_uid=task_uid,
boundary=bbox,
layer_name=layer_name,
projection=projection,
access_mode="append",
distinct_field=distinct_field,
)
return out
def download_chunks_concurrently(layer, task_points, feature_data):
base_path = layer.get("base_path")
if not os.path.exists(base_path):
os.mkdir(base_path)
merge_chunks(
output_file=layer.get("path"),
projection=layer.get("projection"),
layer_name=layer.get("layer_name"),
task_uid=layer.get("task_uid"),
bbox=layer.get("bbox"),
stage_dir=base_path,
base_url=layer.get("url"),
cert_info=layer.get("cert_info"),
task_points=task_points,
feature_data=feature_data,
distinct_field=layer.get("distinct_field"),
)
def download_concurrently(layers: ValuesView, concurrency=None, feature_data=False):
"""
    Concurrently downloads data from a given list of URLs and download paths.
"""
try:
executor = futures.ThreadPoolExecutor(max_workers=concurrency)
# Get the total number of task points to compare against current progress.
task_points = len(layers) * 100
futures_list = [
executor.submit(
download_chunks_concurrently, layer=layer, task_points=task_points, feature_data=feature_data
)
for layer in layers
]
futures.wait(futures_list)
# result() is called for all futures so that any exception raised within is propagated to the caller.
[ftr.result() for ftr in futures_list]
except Exception as e:
logger.error(f"Unable to execute concurrent downloads: {e}")
raise e
return layers
@gdalutils.retry
def download_feature_data(task_uid: str, input_url: str, out_file: str, cert_info=None, task_points=100):
    # This function is necessary because ArcGIS servers often respond with a 200 status
    # code while returning an error message in the response body, or redirect to a parent
    # URL if a resource is not found.
try:
out_file = download_data(task_uid, input_url, out_file, task_points=task_points)
with open(out_file) as f:
json_response = json.load(f)
if json_response.get("error"):
logger.error(json_response)
raise Exception("The service did not receive a valid response.")
if "features" not in json_response:
logger.error(f"No features were returned for {input_url}")
raise Exception("No features were returned.")
except Exception as e:
logger.error(f"Feature data download error: {e}")
raise e
return out_file
def download_chunks(
task_uid: str,
bbox: list,
stage_dir: str,
base_url: str,
cert_info=None,
task_points=100,
feature_data=False,
level=15,
):
tile_bboxes = get_chunked_bbox(bbox, level=level)
chunks = []
for _index, _tile_bbox in enumerate(tile_bboxes):
# Replace bbox placeholder here, allowing for the bbox as either a list or tuple
url = base_url.replace("BBOX_PLACEHOLDER", urllib.parse.quote(str([*_tile_bbox]).strip("[]")))
outfile = os.path.join(stage_dir, f"chunk{_index}.json")
download_function = download_feature_data if feature_data else download_data
download_function(task_uid, url, outfile, cert_info=cert_info, task_points=(task_points * len(tile_bboxes)))
chunks.append(outfile)
return chunks
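# Worked example of the BBOX_PLACEHOLDER substitution above, for a hypothetical tile bbox
# of (-1.0, -1.0, 1.0, 1.0):
#   str([*(-1.0, -1.0, 1.0, 1.0)]).strip("[]")  -> "-1.0, -1.0, 1.0, 1.0"
#   urllib.parse.quote(...)                     -> "-1.0%2C%20-1.0%2C%201.0%2C%201.0"
# so a base_url of "...&bbox=BBOX_PLACEHOLDER" becomes
# "...&bbox=-1.0%2C%20-1.0%2C%201.0%2C%201.0" for that chunk.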
def get_file_name_from_response(response: Response) -> str:
"""
    Creates an arbitrary file name from a content-type; for example, the content-type
    'application/json; charset=UTF-8' would return 'download.json'.
"""
filename = "download"
logger.error(f"Response Headers:{response.headers.get('content-type', '')}")
mimetype = response.headers.get("content-type", "").split(";")
if mimetype:
ext = mimetype[0].split("/")
if ext:
filename = f"{filename}.{ext[1]}"
return filename
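# Worked example of the mapping above: a content-type of "application/json; charset=UTF-8"
# splits into ["application/json", " charset=UTF-8"], the mimetype "application/json"
# splits into ["application", "json"], and the function returns "download.json".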
@handle_auth
def download_data(task_uid: str, input_url: str, out_file: str = None, session=None, task_points=100, *args, **kwargs):
"""
Function for downloading data, optionally using a certificate.
"""
response = None
try:
session = get_or_update_session(session=session, *args, **kwargs)
response = session.get(input_url, stream=True)
response.raise_for_status()
except requests.exceptions.RequestException as e:
logger.error(f"Failed to get data from: {input_url}")
if response:
logger.error(response.text)
raise Exception("Failed to download data.") from e
from audit_logging.file_logging import logging_open
try:
total_size = int(response.headers.get("content-length"))
except (ValueError, TypeError):
if response.content:
total_size = len(response.content)
else:
raise Exception("Request failed to return any data.")
try:
if out_file:
content_type = response.headers.get("content-type")
if Path(out_file).suffix.replace(".", "") not in content_type:
raise Exception("The returned data is not in the expected format.")
else:
out_file = os.path.join(get_run_staging_dir(task_uid), get_file_name_from_response(response))
make_dirs(os.path.dirname(out_file))
except Exception:
logger.error("Unable to verify data type.")
written_size = 0
update_interval = total_size / 100
start_points = cache.get_or_set(get_task_progress_cache_key(task_uid), 0, timeout=DEFAULT_CACHE_EXPIRATION)
start_percent = (start_points / task_points) * 100
logger.info(f"Saving data to: {out_file}")
with logging_open(out_file, "wb") as file_:
for chunk in response.iter_content(CHUNK):
file_.write(chunk)
written_size += CHUNK
last_update = cache.get_or_set(get_last_update_cache_key(task_uid), 0)
last_update += CHUNK
cache.set(get_last_update_cache_key(task_uid), last_update, timeout=DEFAULT_CACHE_EXPIRATION)
if last_update > update_interval:
updated_points = int((last_update / total_size) * 100) if last_update < total_size else 100
cache.incr(get_task_progress_cache_key(task_uid), updated_points)
progress_points = cache.get(get_task_progress_cache_key(task_uid))
progress = progress_points / task_points * 100 if progress_points < task_points else 100
update_progress(task_uid, progress, subtask_percentage=100 / task_points, subtask_start=start_percent)
cache.set(get_last_update_cache_key(task_uid), 0, timeout=DEFAULT_CACHE_EXPIRATION)
if not os.path.isfile(out_file):
raise Exception("Nothing was returned from the vector feature service.")
return out_file
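# Worked example of the progress bookkeeping above, assuming CHUNK is much smaller than
# the payload: for total_size=1_000_000 bytes, update_interval is 10_000 bytes, so roughly
# every 1% of the download last_update exceeds the interval, updated_points becomes
# int((last_update / total_size) * 100) (about 1), the task progress cache is incremented
# by that amount, update_progress is called with progress_points / task_points * 100, and
# last_update is reset to 0.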
def get_task_points_cache_key(task_uid: str):
return f"{task_uid}_task_points"
def get_task_progress_cache_key(task_uid: str):
return f"{task_uid}_progress"
def get_last_update_cache_key(task_uid: str):
return f"{task_uid}_mb_since_update"
def find_in_zip(
zip_filepath: str,
stage_dir: str,
extension: str = None,
archive_extension: str = "zip",
matched_files: list = list(),
extract: bool = False,
):
"""
    Function finds files within archives and returns their VSI path, or extracts them when extract is True.
"""
with ZipFile(zip_filepath) as zip_file:
files_in_zip = zip_file.namelist()
extension = (extension or "").lower()
for filepath in files_in_zip:
file_path = Path(filepath)
if extension and extension in file_path.suffix.lower() and file_path not in matched_files:
if extract:
output_dest = Path(stage_dir).joinpath(file_path.name)
zip_file.extract(member=filepath, path=stage_dir)
os.rename(Path(stage_dir).joinpath(file_path), output_dest)
return str(output_dest)
else:
return f"/vsizip/{zip_filepath}/{filepath}"
elif not extension and file_path.suffix:
file = f"/vsizip/{zip_filepath}/{filepath}"
meta = gdalutils.get_meta(file)
driver = meta["driver"] or None
if driver:
return file
if archive_extension in file_path.suffix:
nested = Path(f"{stage_dir}/{filepath}")
nested.parent.mkdir(parents=True, exist_ok=True)
with open(nested, "wb") as f:
f.write(zip_file.read(filepath))
return find_in_zip(nested.absolute(), stage_dir, extension=extension, matched_files=matched_files)
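# Illustrative call with hypothetical paths: find_in_zip("/stage/data.zip", "/stage",
# extension=".shp") would return "/vsizip//stage/data.zip/folder/layer.shp" when a
# matching member exists and extract is False, letting GDAL read the file without
# unpacking the archive.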
def extract_metadata_files(
zip_filepath: str, destination: str, extensions: list = [".md", ".txt", ".doc", ".docx", ".csv", ".xls", ".xlsx"]
):
"""
    Function extracts metadata files from archives.
The function will look for any files that match the extensions that were provided,
and will extract those files into a metadata directory.
"""
zip_file = ZipFile(zip_filepath)
files_in_zip = zip_file.namelist()
metadata_dir = Path(f"{destination}/metadata/")
metadata_dir.mkdir(parents=True, exist_ok=True)
for filepath in files_in_zip:
file_path = Path(filepath)
if file_path.suffix in extensions:
zip_file.extract(filepath, path=metadata_dir)
return str(metadata_dir)
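# Illustrative call with hypothetical paths: extract_metadata_files("/stage/data.zip",
# "/stage/out") extracts any .md/.txt/.doc/.docx/.csv/.xls/.xlsx members into the
# "/stage/out/metadata" directory and returns that directory's path.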
def get_celery_queue_group(run_uid=None, worker=None):
    # If CELERY_GROUP_NAME is explicitly set, then it makes the most sense to use it.
if getattr(settings, "CELERY_GROUP_NAME"):
return getattr(settings, "CELERY_GROUP_NAME")
if getattr(settings, "CELERY_SCALE_BY_RUN"):
if not run_uid:
logger.warning("Attempted to get a celery_queue_group for scaling by run without a run uid.")
else:
# Celery group names have to be strings, make sure we always return the UID as a string.
return str(run_uid)
# If scaling by run we need to keep tasks for a specific run organized together.
if not worker:
raise Exception(
"Attempted to get a group name without setting CELERY_GROUP_NAME "
"using a RUN_UID or passing a worker explicitly."
)
return worker
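# Illustrative outcomes, assuming the settings attributes exist (values are hypothetical):
#   CELERY_GROUP_NAME = "celery"                                     -> "celery"
#   CELERY_GROUP_NAME falsy, CELERY_SCALE_BY_RUN = True, run_uid set -> str(run_uid)
#   both falsy, worker = "worker-1"                                  -> "worker-1"
#   both falsy, worker = None                                        -> raises Exception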
def get_geometry(bbox: list, selection: str = None) -> GEOSGeometry:
geom = GEOSGeometry(Polygon.from_bbox(bbox))
if selection:
try:
with open(selection, "r") as geojson:
geom = GEOSGeometry(geojson.read())
except Exception as e:
logger.error(e)
return geom
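# Illustrative call with hypothetical values: get_geometry([-1, -1, 1, 1]) returns the
# bbox as a GEOSGeometry polygon; passing selection="aoi.geojson" replaces it with the
# geometry read from that GeoJSON file when the file can be read and parsed.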
def update_progress(
task_uid,
progress=None,
subtask_percentage=100.0,
subtask_start=0,
estimated_finish=None,
eta=None,
msg=None,
):
"""
Updates the progress of the ExportTaskRecord from the given task_uid.
:param task_uid: A uid to reference the ExportTaskRecord.
:param progress: The percent of completion for the task or subtask [0-100]
:param subtask_percentage: is the percentage of the task referenced by task_uid the caller takes up. [0-100]
    :param subtask_start: is the beginning of where this subtask's percentage block begins [0-100]
    (e.g. when progress=0.0, absolute_progress=subtask_start)
:param estimated_finish: The datetime of when the entire task is expected to finish, overrides eta estimator
:param eta: The ETA estimator for this task will be used to automatically determine estimated_finish
:param msg: Message describing the current activity of the task
"""
if task_uid is None:
return
if not progress and not estimated_finish:
return
subtask_percentage = subtask_percentage or 100.0
subtask_start = subtask_start or 0
if progress is not None:
subtask_progress = min(progress, 100.0)
absolute_progress = min(subtask_start + subtask_progress * (subtask_percentage / 100.0), 100.0)
# We need to close the existing connection because the logger could be using a forked process which
# will be invalid and throw an error.
connection.close()
if absolute_progress:
set_cache_value(
uid=task_uid,
attribute="progress",
model_name="ExportTaskRecord",
value=absolute_progress,
)
if eta is not None:
eta.update(absolute_progress / 100.0, dbg_msg=msg) # convert to [0-1.0]
if estimated_finish:
set_cache_value(
uid=task_uid,
attribute="estimated_finish",
model_name="ExportTaskRecord",
value=estimated_finish,
)
elif eta is not None:
# Use the updated ETA estimator to determine an estimated_finish
set_cache_value(
uid=task_uid,
attribute="estimated_finish",
model_name="ExportTaskRecord",
value=eta.eta_datetime(),
)
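# Worked example of the progress arithmetic above, with hypothetical numbers: for
# subtask_start=25, subtask_percentage=50 and progress=40, the absolute progress is
# min(25 + 40 * (50 / 100.0), 100.0) == 45.0, i.e. the subtask occupies the 25-75 band
# of the overall task and is 40% of the way through it.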
def create_license_file(data_provider_task_record: DataProviderTaskRecord) -> Dict[str, str]:
# checks a DataProviderTaskRecord's license file and adds it to the file list if it exists
data_provider_license = data_provider_task_record.provider.license
# DataProviders are not required to have a license
if data_provider_license is None:
return {}
stage_path = Path(data_provider_task_record.tasks.first().result.get_file_path(staging=True)).parent
archive_path = Path(data_provider_task_record.tasks.first().result.get_file_path(archive=True)).parent
stage_license_path = stage_path.joinpath("{0}.txt".format(normalize_name(data_provider_license.name)))
archive_license_path = archive_path.joinpath("{0}.txt".format(normalize_name(data_provider_license.name)))
with open(stage_license_path, "wb") as license_file:
license_file.write(data_provider_license.text.encode())
return {str(stage_license_path): str(archive_license_path)}
def download_run_directory(old_run: ExportRun, new_run: ExportRun):
download_dir = get_download_path(old_run.uid)
old_run_dir = get_run_staging_dir(old_run.uid)
new_run_dir = get_run_staging_dir(new_run.uid)
cache_key = str(new_run.uid)
if not os.path.exists(new_run_dir):
os.mkdir(new_run_dir)
# Download the data from previous exports so we can rezip.
if cache.add(cache_key, True, DEFAULT_CACHE_EXPIRATION):
logger.info(f"Downloading run data {old_run.uid} -> {new_run.uid}")
try:
# TODO: Switch to copytree when migrating to 3.8 after dirs_exist_ok is added.
dir_util.copy_tree(old_run_dir, new_run_dir)
except Exception:
logger.error(
f"Could not copy run data from staging directory {old_run_dir} it might have already been removed."
)
if getattr(settings, "USE_S3", False):
download_folder_from_s3(str(old_run.uid), output_dir=new_run_dir)
else:
try:
dir_util.copy_tree(download_dir, new_run_dir)
except Exception as e:
logger.error(e)
logger.error(
f"Could not copy run data from download directory {download_dir} "
f"it might have already been removed."
)
# TODO: Use ignore on copytree when switching to shutil in python 3.8.
delete_files = glob.glob(os.path.join(new_run_dir, "run/*.zip"))
for delete_file in delete_files:
os.unlink(delete_file)
cache.delete(cache_key)
return new_run_dir
def make_file_downloadable(file_path: Path, skip_copy: bool = False) -> Tuple[Path, str]:
"""Construct the filesystem location and url needed to download the file at filepath.
Copy filepath to the filesystem location required for download.
@provider_slug is specific to ExportTasks, not needed for FinalizeHookTasks
@skip_copy: It looks like sometimes (At least for OverpassQuery) we don't want the file copied,
generally can be ignored
@return A url to reach filepath.
"""
# File name is the relative path, e.g. run/provider_slug/file.ext.
# File path is an absolute path e.g. /var/lib/eventkit/export_stage/run/provider_slug/file.ext.
file_name = Path(file_path)
if Path(settings.EXPORT_STAGING_ROOT) in file_name.parents:
file_name = file_name.relative_to(settings.EXPORT_STAGING_ROOT)
download_url = get_download_url(file_name)
if getattr(settings, "USE_S3", False):
download_url = s3.upload_to_s3(file_path)
else:
download_path = get_download_path(file_name)
make_dirs(os.path.dirname(download_path))
if not skip_copy:
if not os.path.isfile(file_path):
logger.error(f"Cannot make file {file_path} downloadable because it does not exist.")
else:
shutil.copy(file_path, download_path)
return file_name, download_url
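# Illustrative call with hypothetical paths and settings: with
# EXPORT_STAGING_ROOT="/var/lib/eventkit/export_stage" and USE_S3=False,
# make_file_downloadable(Path("/var/lib/eventkit/export_stage/run/osm/file.gpkg"))
# copies the file under the download root and returns
# (Path("run/osm/file.gpkg"), get_download_url("run/osm/file.gpkg")).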
def make_dirs(path):
try:
os.makedirs(path, 0o751, exist_ok=True)
except OSError:
if not os.path.isdir(path):
raise
| {
"content_hash": "bda923d8c3ec23415580c3b1f45d61fb",
"timestamp": "",
"source": "github",
"line_count": 1329,
"max_line_length": 120,
"avg_line_length": 40.24153498871332,
"alnum_prop": 0.6324676053177765,
"repo_name": "terranodo/eventkit-cloud",
"id": "7d7a136d7e847c1a92d400c4841d2b079aa7b6b2",
"size": "53481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventkit_cloud/tasks/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "72684"
},
{
"name": "HTML",
"bytes": "87673"
},
{
"name": "JavaScript",
"bytes": "3699859"
},
{
"name": "Python",
"bytes": "634218"
},
{
"name": "Shell",
"bytes": "15117"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class PyTorchSettings(Model):
"""Specifies the settings for pyTorch job.
All required parameters must be populated in order to send to Azure.
:param python_script_file_path: Required. The path and file name of the
python script to execute the job.
:type python_script_file_path: str
:param python_interpreter_path: The path to python interpreter.
:type python_interpreter_path: str
:param command_line_args: Specifies the command line arguments for the
master task.
:type command_line_args: str
:param process_count: Number of processes to launch for the job execution.
The default value for this property is equal to nodeCount property.
:type process_count: int
:param communication_backend: Type of the communication backend for
distributed jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required
for non-distributed jobs.
:type communication_backend: str
"""
_validation = {
'python_script_file_path': {'required': True},
}
_attribute_map = {
'python_script_file_path': {'key': 'pythonScriptFilePath', 'type': 'str'},
'python_interpreter_path': {'key': 'pythonInterpreterPath', 'type': 'str'},
'command_line_args': {'key': 'commandLineArgs', 'type': 'str'},
'process_count': {'key': 'processCount', 'type': 'int'},
'communication_backend': {'key': 'communicationBackend', 'type': 'str'},
}
def __init__(self, **kwargs):
super(PyTorchSettings, self).__init__(**kwargs)
self.python_script_file_path = kwargs.get('python_script_file_path', None)
self.python_interpreter_path = kwargs.get('python_interpreter_path', None)
self.command_line_args = kwargs.get('command_line_args', None)
self.process_count = kwargs.get('process_count', None)
self.communication_backend = kwargs.get('communication_backend', None)
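# Minimal construction sketch; the values below are hypothetical. Only
# python_script_file_path is required, the remaining parameters are optional.
#
# settings = PyTorchSettings(
#     python_script_file_path="/scripts/train.py",
#     command_line_args="--epochs 10",
#     process_count=2,
#     communication_backend="Gloo",
# )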
| {
"content_hash": "7cd80cfc7535654942357cd285e09060",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 83,
"avg_line_length": 44.38636363636363,
"alnum_prop": 0.6717869943676396,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "a765cafdae9ccc2e233b1440e51fd72f94d0fd2a",
"size": "2427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-batchai/azure/mgmt/batchai/models/py_torch_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
""" Sahana Eden Staff Module Automated Tests
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
class Staff(SeleniumUnitTest):
def test_staff001_create_staff(self):
"""
@case: asset001
@description: Create a Staff Member - IN PROGRESS
* RENE: Insert instructions
"""
print "\n"
| {
"content_hash": "1a3de8c1eddfcf38ee6f5b8573c256f4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 69,
"avg_line_length": 39.1,
"alnum_prop": 0.7116368286445013,
"repo_name": "mrGeen/eden",
"id": "233e4e82f6f73c55bc2ceda80c3a6edb630471f4",
"size": "1589",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/tests/staff/staff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "1070670"
},
{
"name": "HTML",
"bytes": "358005"
},
{
"name": "JavaScript",
"bytes": "14790995"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "22735063"
},
{
"name": "XSLT",
"bytes": "1263876"
}
],
"symlink_target": ""
} |
# Print only the lines in the sample mailbox file that start with "From".
fhand = open("mbox_short.txt")
for line in fhand:
    # Strip trailing whitespace before testing the prefix.
    line = line.rstrip()
    if not line.startswith('From'):
        continue
    print(line)
| {
"content_hash": "8bbf04cafee325323024084c132e3b43",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 32,
"avg_line_length": 19,
"alnum_prop": 0.6691729323308271,
"repo_name": "ttruongdc/py-Learning",
"id": "19b6cfebccc04b2b5ff4d9d57322ab9c1bf44528",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rstrip_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4394"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
class BooleanField(serializers.BooleanField):
TRUE_VALUES = ('true', 't', 'True', '1')
FALSE_VALUES = ('false', 'f', 'False', '0')
def to_internal_value(self, value):
if value in self.TRUE_VALUES:
return True
if value in self.FALSE_VALUES:
return False
return value
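# Illustrative behaviour: unlike the parent BooleanField, unrecognised values are passed
# through unchanged instead of being rejected.
#
# field = BooleanField()
# field.to_internal_value("t")      # -> True
# field.to_internal_value("0")      # -> False
# field.to_internal_value("maybe")  # -> "maybe" (passed through)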
| {
"content_hash": "e8d374a4f105062aec8da78a3be0b85d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 47,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6027027027027027,
"repo_name": "awemulya/fieldsight-kobocat",
"id": "0238f7f14c93cfd12a4d1b412eee0e18e5a8556a",
"size": "370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onadata/libs/serializers/fields/boolean_field.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "Dockerfile",
"bytes": "2462"
},
{
"name": "HTML",
"bytes": "1488442"
},
{
"name": "JavaScript",
"bytes": "674757"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "5340355"
},
{
"name": "Shell",
"bytes": "16493"
}
],
"symlink_target": ""
} |
from typing import Optional
from pytest import raises
from graphql.error import GraphQLSyntaxError
from graphql.language import Lexer, Source, TokenKind, parse
from graphql.utilities import strip_ignored_characters
from ..fixtures import kitchen_sink_query, kitchen_sink_sdl # noqa: F401
from ..utils import dedent
def lex_value(s: str) -> Optional[str]:
lexer = Lexer(Source(s))
value = lexer.advance().value
assert lexer.advance().kind == TokenKind.EOF, "Expected EOF"
return value
class ExpectStripped:
def __init__(self, doc_string: str):
self.doc_string = doc_string
def to_equal(self, expected: str):
doc_string = self.doc_string
stripped = strip_ignored_characters(doc_string)
assert stripped == expected
stripped_twice = strip_ignored_characters(stripped)
assert stripped == stripped_twice
def to_stay_the_same(self):
self.to_equal(self.doc_string)
def describe_strip_ignored_characters():
def strips_ignored_characters_from_graphql_query_document():
query = dedent(
"""
query SomeQuery($foo: String!, $bar: String) {
someField(foo: $foo, bar: $bar) {
a
b {
c
d
}
}
}
"""
)
assert strip_ignored_characters(query) == (
"query SomeQuery($foo:String!$bar:String)"
"{someField(foo:$foo bar:$bar){a b{c d}}}"
)
def strips_ignored_characters_from_graphql_sdl_document():
sdl = dedent(
'''
"""
Type description
"""
type Foo {
"""
Field description
"""
bar: String
}
'''
)
assert strip_ignored_characters(sdl) == (
'"""Type description""" type Foo{"""Field description""" bar:String}'
)
def strips_ignored_characters_from_source():
source = Source(
dedent(
"""
{
foo {
bar
}
}
"""
)
)
assert strip_ignored_characters(source) == "{foo{bar}}"
def report_document_with_invalid_token():
with raises(GraphQLSyntaxError) as exc_info:
strip_ignored_characters('{ foo(arg: "\n"')
assert str(exc_info.value) == dedent(
"""
Syntax Error: Unterminated string.
GraphQL request:1:13
1 | { foo(arg: "
| ^
2 | "
"""
)
def strips_non_parsable_document():
ExpectStripped('{ foo(arg: "str"').to_equal('{foo(arg:"str"')
def strips_documents_with_only_ignored_characters():
ExpectStripped("\n").to_equal("")
ExpectStripped(",").to_equal("")
ExpectStripped(",,").to_equal("")
ExpectStripped("#comment\n, \n").to_equal("")
def strips_leading_and_trailing_ignored_tokens():
ExpectStripped("\n1").to_equal("1")
ExpectStripped(",1").to_equal("1")
ExpectStripped(",,1").to_equal("1")
ExpectStripped("#comment\n, \n1").to_equal("1")
ExpectStripped("1\n").to_equal("1")
ExpectStripped("1,").to_equal("1")
ExpectStripped("1,,").to_equal("1")
ExpectStripped("1#comment\n, \n").to_equal("1")
def strips_ignored_tokens_between_punctuator_tokens():
ExpectStripped("[,)").to_equal("[)")
ExpectStripped("[\r)").to_equal("[)")
ExpectStripped("[\r\r)").to_equal("[)")
ExpectStripped("[\r,)").to_equal("[)")
ExpectStripped("[,\n)").to_equal("[)")
def strips_ignored_tokens_between_punctuator_and_non_punctuator_tokens():
ExpectStripped("[,1").to_equal("[1")
ExpectStripped("[\r1").to_equal("[1")
ExpectStripped("[\r\r1").to_equal("[1")
ExpectStripped("[\r,1").to_equal("[1")
ExpectStripped("[,\n1").to_equal("[1")
def strips_ignored_tokens_between_non_punctuator_and_punctuator_tokens():
ExpectStripped("1,[").to_equal("1[")
ExpectStripped("1\r[").to_equal("1[")
ExpectStripped("1\r\r[").to_equal("1[")
ExpectStripped("1\r,[").to_equal("1[")
ExpectStripped("1,\n[").to_equal("1[")
def replace_ignored_tokens_between_non_punctuator_tokens_and_spread_with_space():
ExpectStripped("a ...").to_equal("a ...")
ExpectStripped("1 ...").to_equal("1 ...")
ExpectStripped("1 ... ...").to_equal("1 ......")
def replace_ignored_tokens_between_non_punctuator_tokens_with_space():
ExpectStripped("1 2").to_stay_the_same()
ExpectStripped('"" ""').to_stay_the_same()
ExpectStripped("a b").to_stay_the_same()
ExpectStripped("a,1").to_equal("a 1")
ExpectStripped("a,,1").to_equal("a 1")
ExpectStripped("a 1").to_equal("a 1")
ExpectStripped("a \t 1").to_equal("a 1")
def does_not_strip_ignored_tokens_embedded_in_the_string():
ExpectStripped('" "').to_stay_the_same()
ExpectStripped('","').to_stay_the_same()
ExpectStripped('",,"').to_stay_the_same()
ExpectStripped('",|"').to_stay_the_same()
def does_not_strip_ignored_tokens_embedded_in_the_block_string():
ExpectStripped('""","""').to_stay_the_same()
ExpectStripped('""",,"""').to_stay_the_same()
ExpectStripped('""",|"""').to_stay_the_same()
def strips_ignored_characters_inside_block_strings():
# noinspection PyShadowingNames
def expect_stripped_string(block_str: str):
original_value = lex_value(block_str)
stripped_value = lex_value(strip_ignored_characters(block_str))
assert original_value == stripped_value, dedent(
f"""
Expected lexValue(stripIgnoredCharacters({block_str!r})
to equal {original_value!r}
but got {stripped_value!r}
"""
)
return ExpectStripped(block_str)
expect_stripped_string('""""""').to_stay_the_same()
expect_stripped_string('""" """').to_equal('""""""')
expect_stripped_string('"""a"""').to_stay_the_same()
expect_stripped_string('""" a"""').to_equal('""" a"""')
expect_stripped_string('""" a """').to_equal('""" a """')
expect_stripped_string('"""\n"""').to_equal('""""""')
expect_stripped_string('"""a\nb"""').to_equal('"""a\nb"""')
expect_stripped_string('"""a\rb"""').to_equal('"""a\nb"""')
expect_stripped_string('"""a\r\nb"""').to_equal('"""a\nb"""')
expect_stripped_string('"""a\r\n\nb"""').to_equal('"""a\n\nb"""')
expect_stripped_string('"""\\\n"""').to_stay_the_same()
expect_stripped_string('""""\n"""').to_stay_the_same()
expect_stripped_string('"""\\"""\n"""').to_equal('"""\\""""""')
expect_stripped_string('"""\na\n b"""').to_stay_the_same()
expect_stripped_string('"""\n a\n b"""').to_equal('"""a\nb"""')
expect_stripped_string('"""\na\n b\nc"""').to_equal('"""a\n b\nc"""')
# noinspection PyShadowingNames
def strips_kitchen_sink_query_but_maintains_the_exact_same_ast(
kitchen_sink_query, # noqa: F811
):
stripped_query = strip_ignored_characters(kitchen_sink_query)
assert strip_ignored_characters(stripped_query) == stripped_query
query_ast = parse(kitchen_sink_query, no_location=True)
stripped_ast = parse(stripped_query, no_location=True)
assert stripped_ast == query_ast
# noinspection PyShadowingNames
def strips_kitchen_sink_sdl_but_maintains_the_exact_same_ast(
kitchen_sink_sdl, # noqa: F811
):
stripped_sdl = strip_ignored_characters(kitchen_sink_sdl)
assert strip_ignored_characters(stripped_sdl) == stripped_sdl
sdl_ast = parse(kitchen_sink_sdl, no_location=True)
stripped_ast = parse(stripped_sdl, no_location=True)
assert stripped_ast == sdl_ast
| {
"content_hash": "56803820dab59fb44526e521d876dee9",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 85,
"avg_line_length": 35.54585152838428,
"alnum_prop": 0.5511056511056511,
"repo_name": "graphql-python/graphql-core",
"id": "2e026af8ae3164ea4bad3fa98341001623cdbc32",
"size": "8140",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/utilities/test_strip_ignored_characters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2235538"
}
],
"symlink_target": ""
} |
import socketio
import asyncio
import json
import os
class PlaylistList:
def __init__(self, _config, _socketio, _loop, _processor):
self.config = _config
self.socketio = _socketio
self.loop = _loop
self.processor = _processor
if not os.path.isfile('savedPlaylists.json'):
with open('savedPlaylists.json', 'w') as file:
json.dump({}, file)
self.loadFile()
def loadFile(self):
with open('savedPlaylists.json', 'r') as file:
self.playlists = json.load(file)
def saveFile(self):
with open('savedPlaylists.json', 'w') as file:
json.dump(self.playlists, file)
def getPlaylists(self):
msg = {}
for index in self.playlists:
msg[index] = self.playlists[index]['data']
return msg
def getsongs(self, name):
try:
songs = self.playlists[name]
        except KeyError:
            # Unknown playlist name; fall back to an empty result.
            songs = {}
return songs
async def addqueue(self, songs):
name = songs['data']['name']
name = self.checkUnique(name)
songs['data']['name'] = name
self.playlists[name] = songs
self.saveFile()
self.loadFile()
await self.socketio.emit('playlistList', self.getPlaylists(), namespace='/main', broadcast = True)
async def newPlaylist(self, name):
name = self.checkUnique(name)
data = {'name': name, 'dur' : 0}
self.playlists[name] = {}
self.playlists[name]['data'] = data
self.saveFile()
self.loadFile()
await self.socketio.emit('playlistList', self.getPlaylists(), namespace='/main', broadcast = True)
async def removePlaylist(self, playlistName):
del self.playlists[playlistName]
self.saveFile()
self.loadFile()
await self.socketio.emit('playlistList', self.getPlaylists(), namespace='/main', broadcast = True)
async def addSong(self, playlistName, title):
entry = []
await self.processor.process(entry, title, requester='playlist')
for songs in entry:
song = {'url': songs.url, 'title': songs.title, 'dur': songs.duration}
index = len(self.playlists[playlistName]) - 1
self.playlists[playlistName][index] = song
self.playlists[playlistName]['data']['dur'] += entry[0].duration
self.saveFile()
self.loadFile()
await self.socketio.emit('playlistList', self.getPlaylists(), namespace='/main', broadcast = True)
async def removeSong(self, playlistName, index, title):
temp = {}
ind = 0
duration = 0
for song in self.playlists[playlistName]:
if song == str(index) and self.playlists[playlistName][song]['title'] == title:
print('Song removed from playlist - {} - at index - {}'.format(playlistName, index))
pass
elif song == 'data':
temp['data'] = self.playlists[playlistName][song]
else:
temp[ind] = self.playlists[playlistName][song]
duration += self.playlists[playlistName][song]['dur']
ind += 1
temp['data']['dur'] = duration
self.playlists[playlistName] = temp
self.saveFile()
self.loadFile()
async def beingModified(self, playlistName):
pass
def checkUnique(self, name):
        append = 1
dupe = True
test = name
while dupe:
dupe = False
for names in self.playlists:
if self.playlists[names]['data']['name'] == test:
dupe = True
test = name + '-' + str(append)
append += 1
        return test
| {
"content_hash": "abb75ccb717ab36c8294d20a168729e3",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 106,
"avg_line_length": 30.739837398373982,
"alnum_prop": 0.5604337476857975,
"repo_name": "8BitJosh/JukeBot",
"id": "fea1fe5963252709a18291a5c13125d18c9025dc",
"size": "3781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JukeBot/playlistList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "135"
},
{
"name": "HTML",
"bytes": "21603"
},
{
"name": "JavaScript",
"bytes": "19641"
},
{
"name": "Python",
"bytes": "48730"
},
{
"name": "Shell",
"bytes": "1573"
}
],
"symlink_target": ""
} |