code | repo_name | path | language | license | size
stringlengths 2 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int32 2 to 1.05M
---|---|---|---|---|---|
#!/usr/bin/env python3
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
from os import path, environ
from subprocess import check_output, CalledProcessError
from sys import stderr
parser = argparse.ArgumentParser()
parser.add_argument('--repository', help='maven repository id')
parser.add_argument('--url', help='maven repository url')
parser.add_argument('-o')
parser.add_argument('-a', help='action (valid actions are: install,deploy)')
parser.add_argument('-v', help='gerrit version')
parser.add_argument('-s', action='append', help='triplet of artifactId:type:path')
args = parser.parse_args()
if not args.v:
print('version is empty', file=stderr)
exit(1)
root = path.abspath(__file__)
while not path.exists(path.join(root, 'WORKSPACE')):
root = path.dirname(root)
if 'install' == args.a:
cmd = [
'mvn',
'install:install-file',
'-Dversion=%s' % args.v,
]
elif 'deploy' == args.a:
cmd = [
'mvn',
'gpg:sign-and-deploy-file',
'-Dversion=%s' % args.v,
'-DrepositoryId=%s' % args.repository,
'-Durl=%s' % args.url,
]
else:
print("unknown action -a %s" % args.a, file=stderr)
exit(1)
for spec in args.s:
artifact, packaging_type, src = spec.split(':')
exe = cmd + [
'-DpomFile=%s' % path.join(root, 'tools', 'maven',
'%s_pom.xml' % artifact),
'-Dpackaging=%s' % packaging_type,
'-Dfile=%s' % src,
]
try:
if environ.get('VERBOSE'):
print(' '.join(exe), file=stderr)
check_output(exe)
except Exception as e:
print('%s command failed: %s\n%s' % (args.a, ' '.join(exe), e),
file=stderr)
if environ.get('VERBOSE') and isinstance(e, CalledProcessError):
print('Command output\n%s' % e.output, file=stderr)
exit(1)
out = stderr
if args.o:
out = open(args.o, 'w')
with out as fd:
if args.repository:
print('Repository: %s' % args.repository, file=fd)
if args.url:
print('URL: %s' % args.url, file=fd)
print('Version: %s' % args.v, file=fd)
| GerritCodeReview/gerrit | tools/maven/mvn.py | Python | apache-2.0 | 2,729 |
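A hypothetical driver for the tools/maven/mvn.py wrapper above, showing the argument shape it expects. The artifact triplets, version string and output path are illustrative only, not real Gerrit build outputs.

# Hedged sketch: invoking tools/maven/mvn.py from a checkout root.
import subprocess

cmd = [
    'python3', 'tools/maven/mvn.py',
    '-a', 'install',                                  # or 'deploy' together with --repository/--url
    '-v', '3.0.0-SNAPSHOT',                           # becomes -Dversion=... for mvn
    '-s', 'gerrit-war:war:bazel-bin/gerrit.war',      # artifactId:type:path triplet (illustrative)
    '-s', 'gerrit-plugin-api:jar:bazel-bin/api.jar',  # -s may be repeated for each artifact
    '-o', '/tmp/mvn-summary.txt',                     # where the summary lines are written
]
subprocess.check_call(cmd)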
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from typing import Callable, Iterator, Optional
import setproctitle
from pants.base.exiter import Exiter
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.osutil import Pid
logger = logging.getLogger(__name__)
class SignalHandler:
"""A specification for how to handle a fixed set of nonfatal signals.
This is subclassed and registered with ExceptionSink.reset_signal_handler() whenever the signal
handling behavior is modified for different pants processes, for example in the remote client when
pantsd is enabled. The default behavior is to exit "gracefully" by leaving a detailed log of which
signal was received, then exiting with failure.
Note that the terminal will convert a ctrl-c from the user into a SIGINT.
"""
@property
def signal_handler_mapping(self):
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self._handle_sigint_if_enabled,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
}
def __init__(self):
self._ignore_sigint_lock = threading.Lock()
self._threads_ignoring_sigint = 0
self._ignoring_sigint_v2_engine = False
def _check_sigint_gate_is_correct(self):
assert (
self._threads_ignoring_sigint >= 0
), "This should never happen, someone must have modified the counter outside of SignalHandler."
def _handle_sigint_if_enabled(self, signum, _frame):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
threads_ignoring_sigint = self._threads_ignoring_sigint
ignoring_sigint_v2_engine = self._ignoring_sigint_v2_engine
if threads_ignoring_sigint == 0 and not ignoring_sigint_v2_engine:
self.handle_sigint(signum, _frame)
def _toggle_ignoring_sigint_v2_engine(self, toggle: bool):
with self._ignore_sigint_lock:
self._ignoring_sigint_v2_engine = toggle
@contextmanager
def _ignoring_sigint(self):
with self._ignore_sigint_lock:
self._check_sigint_gate_is_correct()
self._threads_ignoring_sigint += 1
try:
yield
finally:
with self._ignore_sigint_lock:
self._threads_ignoring_sigint -= 1
self._check_sigint_gate_is_correct()
def handle_sigint(self, signum, _frame):
raise KeyboardInterrupt("User interrupted execution with control-c!")
# TODO(#7406): figure out how to let sys.exit work in a signal handler instead of having to raise
# this exception!
class SignalHandledNonLocalExit(Exception):
"""Raised in handlers for non-fatal signals to overcome Python limitations.
When waiting on a subprocess and in a signal handler, sys.exit appears to be ignored, and
causes the signal handler to return. We want to (eventually) exit after these signals, not
ignore them, so we raise this exception instead and check it in our sys.excepthook override.
"""
def __init__(self, signum, signame):
self.signum = signum
self.signame = signame
self.traceback_lines = traceback.format_stack()
super(SignalHandler.SignalHandledNonLocalExit, self).__init__()
def handle_sigquit(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, "SIGQUIT")
def handle_sigterm(self, signum, _frame):
raise self.SignalHandledNonLocalExit(signum, "SIGTERM")
class ExceptionSink:
"""A mutable singleton object representing where exceptions should be logged to."""
# NB: see the bottom of this file where we call reset_log_location() and other mutators in order
# to properly setup global state.
_log_dir = None
# We need an exiter in order to know what to do after we log a fatal exception or handle a
# catchable signal.
_exiter: Optional[Exiter] = None
# Where to log stacktraces to in a SIGUSR2 handler.
_interactive_output_stream = None
# Whether to print a stacktrace in any fatal error message printed to the terminal.
_should_print_backtrace_to_terminal = True
# An instance of `SignalHandler` which is invoked to handle a static set of specific
# nonfatal signals (these signal handlers are allowed to make pants exit, but unlike SIGSEGV they
# don't need to exit immediately).
_signal_handler: Optional[SignalHandler] = None
# These persistent open file descriptors are kept so the signal handler can do almost no work
# (and lets faulthandler figure out signal safety).
_pid_specific_error_fileobj = None
_shared_error_fileobj = None
def __new__(cls, *args, **kwargs):
raise TypeError("Instances of {} are not allowed to be constructed!".format(cls.__name__))
class ExceptionSinkError(Exception):
pass
@classmethod
def reset_should_print_backtrace_to_terminal(cls, should_print_backtrace):
"""Set whether a backtrace gets printed to the terminal error stream on a fatal error.
Class state:
- Overwrites `cls._should_print_backtrace_to_terminal`.
"""
cls._should_print_backtrace_to_terminal = should_print_backtrace
# All reset_* methods are ~idempotent!
@classmethod
def reset_log_location(cls, new_log_location: str) -> None:
"""Re-acquire file handles to error logs based in the new location.
Class state:
- Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
`cls._shared_error_fileobj`.
OS state:
- May create a new directory.
- Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).
:raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
writable.
"""
# We could no-op here if the log locations are the same, but there's no reason not to have the
# additional safety of re-acquiring file descriptors each time (and erroring out early if the
# location is no longer writable).
try:
safe_mkdir(new_log_location)
except Exception as e:
raise cls.ExceptionSinkError(
"The provided log location path at '{}' is not writable or could not be created: {}.".format(
new_log_location, str(e)
),
e,
)
pid = os.getpid()
pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
assert pid_specific_log_path != shared_log_path
try:
pid_specific_error_stream = safe_open(pid_specific_log_path, mode="w")
shared_error_stream = safe_open(shared_log_path, mode="a")
except Exception as e:
raise cls.ExceptionSinkError(
"Error opening fatal error log streams for log location '{}': {}".format(
new_log_location, str(e)
)
)
# NB: mutate process-global state!
if faulthandler.is_enabled():
logger.debug("re-enabling faulthandler")
# Call Py_CLEAR() on the previous error stream:
# https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
faulthandler.disable()
# Send a stacktrace to this file if interrupted by a fatal error.
faulthandler.enable(file=pid_specific_error_stream, all_threads=True)
# NB: mutate the class variables!
cls._log_dir = new_log_location
cls._pid_specific_error_fileobj = pid_specific_error_stream
cls._shared_error_fileobj = shared_error_stream
class AccessGlobalExiterMixin:
@property
def _exiter(self) -> Optional[Exiter]:
return ExceptionSink.get_global_exiter()
@classmethod
def get_global_exiter(cls) -> Optional[Exiter]:
return cls._exiter
@classmethod
@contextmanager
def exiter_as(cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]) -> Iterator[None]:
"""Temporarily override the global exiter.
NB: We don't want to try/finally here, because we want exceptions to propagate
with the most recent exiter installed in sys.excepthook.
If we wrap this in a try:finally, exceptions will be caught and exiters unset.
"""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
cls._reset_exiter(new_exiter)
yield
cls._reset_exiter(previous_exiter)
@classmethod
@contextmanager
def exiter_as_until_exception(
cls, new_exiter_fun: Callable[[Optional[Exiter]], Exiter]
) -> Iterator[None]:
"""Temporarily override the global exiter, except this will unset it when an exception
happens."""
previous_exiter = cls._exiter
new_exiter = new_exiter_fun(previous_exiter)
try:
cls._reset_exiter(new_exiter)
yield
finally:
cls._reset_exiter(previous_exiter)
@classmethod
def _reset_exiter(cls, exiter: Optional[Exiter]) -> None:
"""Class state:
- Overwrites `cls._exiter`.
Python state:
- Overwrites sys.excepthook.
"""
logger.debug(f"overriding the global exiter with {exiter} (from {cls._exiter})")
# NB: mutate the class variables! This is done before mutating the exception hook, because the
# uncaught exception handler uses cls._exiter to exit.
cls._exiter = exiter
# NB: mutate process-global state!
sys.excepthook = cls._log_unhandled_exception_and_exit
@classmethod
def reset_interactive_output_stream(
cls, interactive_output_stream, override_faulthandler_destination=True
):
"""Class state:
- Overwrites `cls._interactive_output_stream`.
OS state:
- Overwrites the SIGUSR2 handler.
This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
stacktrace retrieval. This is also where the error message on a fatal exit will be printed.
"""
try:
# NB: mutate process-global state!
# This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
if override_faulthandler_destination:
faulthandler.register(
signal.SIGUSR2, interactive_output_stream, all_threads=True, chain=False
)
# NB: mutate the class variables!
cls._interactive_output_stream = interactive_output_stream
except ValueError:
# Warn about "ValueError: IO on closed file" when the stream is closed.
cls.log_exception(
"Cannot reset interactive_output_stream -- stream (probably stderr) is closed"
)
@classmethod
def exceptions_log_path(cls, for_pid=None, in_dir=None):
"""Get the path to either the shared or pid-specific fatal errors log file."""
if for_pid is None:
intermediate_filename_component = ""
else:
assert isinstance(for_pid, Pid)
intermediate_filename_component = ".{}".format(for_pid)
in_dir = in_dir or cls._log_dir
return os.path.join(
in_dir, ".pids", "exceptions{}.log".format(intermediate_filename_component)
)
@classmethod
def log_exception(cls, msg):
"""Try to log an error message to this process's error log and the shared error log.
NB: Doesn't raise (logs an error instead).
"""
pid = os.getpid()
fatal_error_log_entry = cls._format_exception_message(msg, pid)
# We care more about this log than the shared log, so write to it first.
try:
cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}".format(
msg, cls._log_dir, pid, e
)
)
# Write to the shared log.
try:
# TODO: we should probably guard this against concurrent modification by other pants
# subprocesses somehow.
cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
except Exception as e:
logger.error(
"Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}".format(
msg, cls._log_dir, pid, e
)
)
@classmethod
def _try_write_with_flush(cls, fileobj, payload):
"""This method is here so that it can be patched to simulate write errors.
This is because mock can't patch primitive objects like file objects.
"""
fileobj.write(payload)
fileobj.flush()
@classmethod
def reset_signal_handler(cls, signal_handler):
"""Class state:
- Overwrites `cls._signal_handler`.
OS state:
- Overwrites signal handlers for SIGINT, SIGQUIT, and SIGTERM.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
:returns: The :class:`SignalHandler` that was previously registered, or None if this is
the first time this method was called.
"""
assert isinstance(signal_handler, SignalHandler)
# NB: Modify process-global state!
for signum, handler in signal_handler.signal_handler_mapping.items():
signal.signal(signum, handler)
# Retry any system calls interrupted by any of the signals we just installed handlers for
# (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
# Linux and OSX.
signal.siginterrupt(signum, False)
previous_signal_handler = cls._signal_handler
# NB: Mutate the class variables!
cls._signal_handler = signal_handler
return previous_signal_handler
@classmethod
@contextmanager
def trapped_signals(cls, new_signal_handler):
"""A contextmanager which temporarily overrides signal handling.
NB: This method calls signal.signal(), which will crash if not called from the main thread!
"""
previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
try:
yield
finally:
cls.reset_signal_handler(previous_signal_handler)
@classmethod
@contextmanager
def ignoring_sigint(cls):
"""A contextmanager which disables handling sigint in the current signal handler. This
allows threads that are not the main thread to ignore sigint.
NB: Only use this if you can't use ExceptionSink.trapped_signals().
Class state:
- Toggles `self._ignore_sigint` in `cls._signal_handler`.
"""
with cls._signal_handler._ignoring_sigint():
yield
@classmethod
def toggle_ignoring_sigint_v2_engine(cls, toggle: bool) -> None:
assert cls._signal_handler is not None
cls._signal_handler._toggle_ignoring_sigint_v2_engine(toggle)
@classmethod
def _iso_timestamp_for_now(cls):
return datetime.datetime.now().isoformat()
# NB: This includes a trailing newline, but no leading newline.
_EXCEPTION_LOG_FORMAT = """\
timestamp: {timestamp}
process title: {process_title}
sys.argv: {args}
pid: {pid}
{message}
"""
@classmethod
def _format_exception_message(cls, msg, pid):
return cls._EXCEPTION_LOG_FORMAT.format(
timestamp=cls._iso_timestamp_for_now(),
process_title=setproctitle.getproctitle(),
args=sys.argv,
pid=pid,
message=msg,
)
_traceback_omitted_default_text = "(backtrace omitted)"
@classmethod
def _format_traceback(cls, traceback_lines, should_print_backtrace):
if should_print_backtrace:
traceback_string = "\n{}".format("".join(traceback_lines))
else:
traceback_string = " {}".format(cls._traceback_omitted_default_text)
return traceback_string
_UNHANDLED_EXCEPTION_LOG_FORMAT = """\
Exception caught: ({exception_type}){backtrace}
Exception message: {exception_message}{maybe_newline}
"""
@classmethod
def _format_unhandled_exception_log(cls, exc, tb, add_newline, should_print_backtrace):
exc_type = type(exc)
exception_full_name = "{}.{}".format(exc_type.__module__, exc_type.__name__)
exception_message = str(exc) if exc else "(no message)"
maybe_newline = "\n" if add_newline else ""
return cls._UNHANDLED_EXCEPTION_LOG_FORMAT.format(
exception_type=exception_full_name,
backtrace=cls._format_traceback(
traceback_lines=traceback.format_tb(tb),
should_print_backtrace=should_print_backtrace,
),
exception_message=exception_message,
maybe_newline=maybe_newline,
)
_EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT = """\
{timestamp_msg}{terminal_msg}{details_msg}
"""
@classmethod
def _exit_with_failure(cls, terminal_msg):
timestamp_msg = (
f"timestamp: {cls._iso_timestamp_for_now()}\n"
if cls._should_print_backtrace_to_terminal
else ""
)
details_msg = (
""
if cls._should_print_backtrace_to_terminal
else "\n\n(Use --print-exception-stacktrace to see more error details.)"
)
terminal_msg = terminal_msg or "<no exit reason provided>"
formatted_terminal_msg = cls._EXIT_FAILURE_TERMINAL_MESSAGE_FORMAT.format(
timestamp_msg=timestamp_msg, terminal_msg=terminal_msg, details_msg=details_msg
)
# Exit with failure, printing a message to the terminal (or whatever the interactive stream is).
cls._exiter.exit_and_fail(msg=formatted_terminal_msg, out=cls._interactive_output_stream)
@classmethod
def _log_unhandled_exception_and_exit(
cls, exc_class=None, exc=None, tb=None, add_newline=False
):
"""A sys.excepthook implementation which logs the error and exits with failure."""
exc_class = exc_class or sys.exc_info()[0]
exc = exc or sys.exc_info()[1]
tb = tb or sys.exc_info()[2]
# This exception was raised by a signal handler with the intent to exit the program.
if exc_class == SignalHandler.SignalHandledNonLocalExit:
return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)
extra_err_msg = None
try:
# Always output the unhandled exception details into a log file, including the traceback.
exception_log_entry = cls._format_unhandled_exception_log(
exc, tb, add_newline, should_print_backtrace=True
)
cls.log_exception(exception_log_entry)
except Exception as e:
extra_err_msg = "Additional error logging unhandled exception {}: {}".format(exc, e)
logger.error(extra_err_msg)
# Generate an unhandled exception report fit to be printed to the terminal (respecting the
# Exiter's should_print_backtrace field).
if cls._should_print_backtrace_to_terminal:
stderr_printed_error = cls._format_unhandled_exception_log(
exc, tb, add_newline, should_print_backtrace=cls._should_print_backtrace_to_terminal
)
if extra_err_msg:
stderr_printed_error = "{}\n{}".format(stderr_printed_error, extra_err_msg)
else:
# If the user didn't ask for a backtrace, show a succinct error message without
# all the exception-related preamble. A power-user/pants developer can still
# get all the preamble info along with the backtrace, but the end user shouldn't
# see that boilerplate by default.
error_msgs = getattr(exc, "end_user_messages", lambda: [str(exc)])()
stderr_printed_error = "\n" + "\n".join(f"ERROR: {msg}" for msg in error_msgs)
cls._exit_with_failure(stderr_printed_error)
_CATCHABLE_SIGNAL_ERROR_LOG_FORMAT = """\
Signal {signum} ({signame}) was raised. Exiting with failure.{formatted_traceback}
"""
@classmethod
def _handle_signal_gracefully(cls, signum, signame, traceback_lines):
"""Signal handler for non-fatal signals which raises or logs an error and exits with
failure."""
# Extract the stack, and format an entry to be written to the exception log.
formatted_traceback = cls._format_traceback(
traceback_lines=traceback_lines, should_print_backtrace=True
)
signal_error_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum, signame=signame, formatted_traceback=formatted_traceback
)
# TODO: determine the appropriate signal-safe behavior here (to avoid writing to our file
# descriptors re-entrantly, which raises an IOError).
# This method catches any exceptions raised within it.
cls.log_exception(signal_error_log_entry)
# Create a potentially-abbreviated traceback for the terminal or other interactive stream.
formatted_traceback_for_terminal = cls._format_traceback(
traceback_lines=traceback_lines,
should_print_backtrace=cls._should_print_backtrace_to_terminal,
)
terminal_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format(
signum=signum, signame=signame, formatted_traceback=formatted_traceback_for_terminal
)
# Exit, printing the output to the terminal.
cls._exit_with_failure(terminal_log_entry)
# Setup global state such as signal handlers and sys.excepthook with probably-safe values at module
# import time.
# Set the log location for writing logs before bootstrap options are parsed.
ExceptionSink.reset_log_location(os.getcwd())
# Sets except hook for exceptions at import time.
ExceptionSink._reset_exiter(Exiter(exiter=sys.exit))
# Sets a SIGUSR2 handler.
ExceptionSink.reset_interactive_output_stream(sys.stderr.buffer)
# Sets a handler that logs nonfatal signals to the exception sink before exiting.
ExceptionSink.reset_signal_handler(SignalHandler())
# Set whether to print stacktraces on exceptions or signals during import time.
# NB: This will be overridden by bootstrap options in PantsRunner, so we avoid printing out a full
# stacktrace when a user presses control-c during import time unless the environment variable is set
# to explicitly request it. The exception log will have any stacktraces regardless so this should
# not hamper debugging.
ExceptionSink.reset_should_print_backtrace_to_terminal(
should_print_backtrace=os.environ.get("PANTS_PRINT_EXCEPTION_STACKTRACE", "True") == "True"
)
| tdyas/pants | src/python/pants/base/exception_sink.py | Python | apache-2.0 | 23,733 |
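A minimal sketch of plugging a custom handler into the ExceptionSink API defined above. It assumes the module is importable as pants.base.exception_sink (per its path); the "log instead of interrupting" behaviour of the subclass is illustrative, not something pants itself does.

# Hedged sketch: a custom SignalHandler installed via ExceptionSink.trapped_signals().
import logging

from pants.base.exception_sink import ExceptionSink, SignalHandler

log = logging.getLogger(__name__)


class PatientSigintHandler(SignalHandler):
    def handle_sigint(self, signum, _frame):
        # Log instead of raising KeyboardInterrupt, so the current step can finish.
        log.warning("Got SIGINT (signal %s); continuing until the current step completes.", signum)


def run_step(work):
    # Temporarily install the handler; the previous handler is restored afterwards.
    with ExceptionSink.trapped_signals(PatientSigintHandler()):
        work()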
# -*- coding: utf-8 -*-
"""
Add compatibility for gevent and multiprocessing.
Source based on project GIPC 0.6.0
https://bitbucket.org/jgehrcke/gipc/
"""
import os, sys, signal, multiprocessing, multiprocessing.process, multiprocessing.reduction
gevent=None
geventEvent=None
def _tryGevent():
global gevent, geventEvent
if gevent and geventEvent: return False
try:
import gevent
from gevent import event as geventEvent
return True
except ImportError:
raise ValueError('gevent not found')
def Process(target, args=(), kwargs={}, name=None): # daemon=None
# Check whether gevent is available.
try: _tryGevent()
except ValueError:
print('Gevent not found, switching to native multiprocessing')
return multiprocessing.Process(target=target, args=args, kwargs=kwargs, name=name)
if int(gevent.__version__[0])<1:
raise NotImplementedError('gmultiprocessing supports only gevent>=1.0; your version is %s' % gevent.__version__)
if not isinstance(args, tuple):
raise TypeError('<args> must be a tuple')
if not isinstance(kwargs, dict):
raise TypeError('<kwargs> must be a dict')
p = _GProcess(
target=_child,
name=name,
kwargs={"target": target, "args": args, "kwargs": kwargs}
)
# if daemon is not None: p.daemon = daemon
return p
def _child(target, args, kwargs):
"""Wrapper function that runs in child process. Resets gevent/libev state
and executes user-given function.
"""
_tryGevent()
_reset_signal_handlers()
gevent.reinit()
hub = gevent.get_hub()
del hub.threadpool
hub._threadpool = None
hub.destroy(destroy_loop=True)
h = gevent.get_hub(default=True)
assert h.loop.default, 'Could not create libev default event loop.'
target(*args, **kwargs)
class _GProcess(multiprocessing.Process):
"""
Compatible with the ``multiprocessing.Process`` API.
"""
try:
from multiprocessing.forking import Popen as mp_Popen
except ImportError:
# multiprocessing's internal structure has changed from 3.3 to 3.4.
from multiprocessing.popen_fork import Popen as mp_Popen
# Monkey-patch and forget about the name.
mp_Popen.poll = lambda *a, **b: None
del mp_Popen
def start(self):
_tryGevent()
# Start grabbing SIGCHLD within libev event loop.
gevent.get_hub().loop.install_sigchld()
# Run new process (based on `fork()` on POSIX-compliant systems).
super(_GProcess, self).start()
# The occurrence of SIGCHLD is recorded asynchronously in libev.
# This guarantees proper behavior even if the child watcher is
# started after the child exits. Start child watcher now.
self._sigchld_watcher = gevent.get_hub().loop.child(self.pid)
self._returnevent = gevent.event.Event()
self._sigchld_watcher.start(self._on_sigchld, self._sigchld_watcher)
def _on_sigchld(self, watcher):
"""Callback of libev child watcher. Called when libev event loop
catches corresponding SIGCHLD signal.
"""
watcher.stop()
# Status evaluation copied from `multiprocessing.forking` in Py2.7.
if os.WIFSIGNALED(watcher.rstatus):
self._popen.returncode = -os.WTERMSIG(watcher.rstatus)
else:
assert os.WIFEXITED(watcher.rstatus)
self._popen.returncode = os.WEXITSTATUS(watcher.rstatus)
self._returnevent.set()
def is_alive(self):
assert self._popen is not None, "Process not yet started."
if self._popen.returncode is None:
return True
return False
@property
def exitcode(self):
if self._popen is None:
return None
return self._popen.returncode
def __repr__(self):
exitcodedict = multiprocessing.process._exitcode_to_name
status = 'started'
if self._parent_pid != os.getpid(): status = 'unknown'
elif self.exitcode is not None: status = self.exitcode
if status == 0: status = 'stopped'
elif isinstance(status, int):
status = 'stopped[%s]' % exitcodedict.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self.daemon and ' daemon' or '')
def join(self, timeout=None):
"""
Wait cooperatively until child process terminates or timeout occurs.
:arg timeout: ``None`` (default) or a time in seconds. The method
simply returns upon timeout expiration. The state of the process
has to be identified via ``is_alive()``.
"""
assert self._parent_pid == os.getpid(), "I'm not parent of this child."
assert self._popen is not None, 'Can only join a started process.'
# Resemble multiprocessing's join() method while replacing
# `self._popen.wait(timeout)` with
# `self._returnevent.wait(timeout)`
self._returnevent.wait(timeout)
if self._popen.returncode is not None:
if hasattr(multiprocessing.process, '_children'): # This is for Python 3.4.
kids = multiprocessing.process._children
else: # For Python 2.6, 2.7, 3.3.
kids = multiprocessing.process._current_process._children
kids.discard(self)
# Inspect signal module for signals whose action is to be restored to the default action right after fork.
_signals_to_reset = [getattr(signal, s) for s in
set([s for s in dir(signal) if s.startswith("SIG")]) -
# Exclude constants that are not signals such as SIG_DFL and SIG_BLOCK.
set([s for s in dir(signal) if s.startswith("SIG_")]) -
# Leave handlers for SIG(STOP/KILL/PIPE) untouched.
set(['SIGSTOP', 'SIGKILL', 'SIGPIPE'])]
def _reset_signal_handlers():
for s in _signals_to_reset:
if s < signal.NSIG:
signal.signal(s, signal.SIG_DFL)
PY3 = sys.version_info[0] == 3
if PY3:
def _reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def __exec(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
__exec("""def _reraise(tp, value, tb=None): raise tp, value, tb""")
| byaka/flaskJSONRPCServer | flaskJSONRPCServer/gmultiprocessing.py | Python | apache-2.0 | 6,393 |
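A hedged usage sketch for the gevent-friendly Process wrapper above. The worker function and its arguments are made up, and the import assumes the package layout matches the file path shown; with gevent missing, Process() falls back to a plain multiprocessing.Process.

# Hedged sketch: spawning a CPU-bound worker without blocking gevent greenlets.
from flaskJSONRPCServer.gmultiprocessing import Process

def _square_sum(n, label='job'):
    total = sum(i * i for i in range(n))
    print('%s finished with %d' % (label, total))

def run_job():
    p = Process(target=_square_sum, args=(10 ** 6,), kwargs={'label': 'demo'})
    p.start()
    p.join(timeout=30)   # cooperative join: other greenlets keep running while we wait
    return p.exitcode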
import pytest
import math
import io
import time
import base64
import hashlib
from http import client
from unittest import mock
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import metadata
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.azureblobstorage import AzureBlobStorageProvider
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFileMetadata
from waterbutler.providers.azureblobstorage.metadata import AzureBlobStorageFolderMetadata
from waterbutler.providers.azureblobstorage.provider import (
MAX_UPLOAD_BLOCK_SIZE,
)
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': '[email protected]',
}
@pytest.fixture
def credentials():
return {
'account_name': 'dontdead',
'account_key': base64.b64encode(b'open inside'),
}
@pytest.fixture
def settings():
return {
'container': 'thatkerning'
}
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock(return_value=1454684930.0)
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def provider(auth, credentials, settings):
provider = AzureBlobStorageProvider(auth, credentials, settings)
return provider
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def large_file_content():
# 71MB (4MB * 17 + 3MB)
return b'a' * (71 * (2 ** 20))
@pytest.fixture
def large_file_like(large_file_content):
return io.BytesIO(large_file_content)
@pytest.fixture
def large_file_stream(large_file_like):
return streams.FileStreamReader(large_file_like)
@pytest.fixture
def folder_metadata():
return b'''<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://vrosf.blob.core.windows.net/" ContainerName="sample-container1">
<Blobs>
<Blob>
<Name>Photos/test-text.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
<Blob>
<Name>Photos/a/test.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
<Blob>
<Name>top.txt</Name>
<Properties>
<Last-Modified>Thu, 10 Nov 2016 11:04:45 GMT</Last-Modified>
<Etag>0x8D40959613D32F6</Etag>
<Content-Length>0</Content-Length>
<Content-Type>text/plain</Content-Type>
<Content-Encoding />
<Content-Language />
<Content-MD5 />
<Cache-Control />
<Content-Disposition />
<BlobType>BlockBlob</BlobType>
<LeaseStatus>unlocked</LeaseStatus>
<LeaseState>available</LeaseState>
</Properties>
</Blob>
</Blobs>
<NextMarker />
</EnumerationResults>'''
@pytest.fixture
def file_metadata():
return {
'CONTENT-LENGTH': '0',
'CONTENT-TYPE': 'text/plain',
'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
'ACCEPT-RANGES': 'bytes',
'ETAG': '"0x8D40959613D32F6"',
'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
'X-MS-VERSION': '2015-07-08',
'X-MS-LEASE-STATUS': 'unlocked',
'X-MS-LEASE-STATE': 'available',
'X-MS-BLOB-TYPE': 'BlockBlob',
'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
}
@pytest.fixture
def large_file_metadata(large_file_content):
return {
'CONTENT-LENGTH': str(len(large_file_content)),
'CONTENT-TYPE': 'text/plain',
'LAST-MODIFIED': 'Thu, 10 Nov 2016 11:04:45 GMT',
'ACCEPT-RANGES': 'bytes',
'ETAG': '"0x8D40959613D32F6"',
'SERVER': 'Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0',
'X-MS-REQUEST-ID': '5b4a3cb6-0001-00ea-4575-895e2c000000',
'X-MS-VERSION': '2015-07-08',
'X-MS-LEASE-STATUS': 'unlocked',
'X-MS-LEASE-STATE': 'available',
'X-MS-BLOB-TYPE': 'BlockBlob',
'DATE': 'Fri, 17 Feb 2017 23:28:33 GMT'
}
class TestValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file(self, provider, file_metadata,
mock_time):
file_path = 'foobah'
for good_metadata_url in provider.generate_urls(file_path, secondary=True):
aiohttpretty.register_uri('HEAD', good_metadata_url, headers=file_metadata)
for bad_metadata_url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', bad_metadata_url,
params={'restype': 'container', 'comp': 'list'}, status=404)
try:
wb_path_v1 = await provider.validate_v1_path('/' + file_path)
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + file_path + '/')
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + file_path)
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder(self, provider, folder_metadata, mock_time):
folder_path = 'Photos'
for good_metadata_url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri(
'GET', good_metadata_url, params={'restype': 'container', 'comp': 'list'},
body=folder_metadata, headers={'Content-Type': 'application/xml'}
)
for bad_metadata_url in provider.generate_urls(folder_path, secondary=True):
aiohttpretty.register_uri('HEAD', bad_metadata_url, status=404)
try:
wb_path_v1 = await provider.validate_v1_path('/' + folder_path + '/')
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + folder_path)
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + folder_path + '/')
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
async def test_normal_name(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/path.txt')
assert path.name == 'path.txt'
assert path.parent.name == 'a'
assert path.is_file
assert not path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_folder(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_root(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download(self, provider, mock_time):
path = WaterButlerPath('/muhtriangle')
for url in provider.generate_urls(path.path, secondary=True):
aiohttpretty.register_uri('GET', url, body=b'delicious', auto_length=True)
result = await provider.download(path)
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_folder_400s(self, provider, mock_time):
with pytest.raises(exceptions.DownloadError) as e:
await provider.download(WaterButlerPath('/cool/folder/mom/'))
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete(self, provider, mock_time):
path = WaterButlerPath('/some-file')
for url in provider.generate_urls(path.path):
aiohttpretty.register_uri('DELETE', url, status=200)
await provider.delete(path)
assert aiohttpretty.has_call(method='DELETE', uri=url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_folder_delete(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/Photos/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri(
'GET', url, params={'restype': 'container', 'comp': 'list'},
body=folder_metadata, headers={'Content-Type': 'application/xml'}
)
delete_urls = []
for url in provider.generate_urls(path.path + "test-text.txt"):
aiohttpretty.register_uri('DELETE', url, status=200)
delete_urls.append(url)
for url in provider.generate_urls(path.path + "a/test.txt"):
aiohttpretty.register_uri('DELETE', url, status=200)
delete_urls.append(url)
await provider.delete(path)
assert aiohttpretty.has_call(method='DELETE', uri=delete_urls[0])
assert aiohttpretty.has_call(method='DELETE', uri=delete_urls[1])
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_root(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/')
assert path.is_root
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].path == '/Photos/'
assert result[0].name == 'Photos'
assert result[0].is_folder
assert result[1].path == '/top.txt'
assert result[1].name == 'top.txt'
assert not result[1].is_folder
assert result[1].extra['md5'] == None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/Photos/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 2
assert result[0].path == '/Photos/a/'
assert result[0].name == 'a'
assert result[0].is_folder
assert result[1].path == '/Photos/test-text.txt'
assert result[1].name == 'test-text.txt'
assert not result[1].is_folder
assert result[1].extra['md5'] == None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file(self, provider, file_metadata, mock_time):
path = WaterButlerPath('/Foo/Bar/my-image.jpg')
provider.url = 'http://test_url'
provider.token = 'test'
for url in provider.generate_urls(path.path, secondary=True):
aiohttpretty.register_uri('HEAD', url, headers=file_metadata)
result = await provider.metadata(path)
assert isinstance(result, metadata.BaseFileMetadata)
assert result.path == str(path)
assert result.name == 'my-image.jpg'
assert result.modified is not None
assert result.extra['md5'] == None
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_missing(self, provider, mock_time):
path = WaterButlerPath('/notfound.txt')
provider.url = 'http://test_url'
provider.token = 'test'
for url in provider.generate_urls(path.path, secondary=True):
aiohttpretty.register_uri('HEAD', url, status=404)
with pytest.raises(exceptions.MetadataError):
await provider.metadata(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload(self, provider, file_content, file_stream, file_metadata, mock_time):
path = WaterButlerPath('/foobah')
for url in provider.generate_urls(path.path):
aiohttpretty.register_uri('PUT', url, status=200)
for metadata_url in provider.generate_urls(path.path):
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': file_metadata},
],
)
metadata, created = await provider.upload(file_stream, path)
assert metadata.kind == 'file'
assert created
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_large(self, provider, large_file_content, large_file_stream, large_file_metadata, mock_time):
# upload 4MB data 17 times and 3MB once, and request block_list
upload_times = math.floor(len(large_file_content) / MAX_UPLOAD_BLOCK_SIZE)
block_id_prefix = 'hogefuga'
block_id_list = [AzureBlobStorageProvider._format_block_id(block_id_prefix, i) for i in range(upload_times)]
block_req_params_list = [{'comp': 'block', 'blockid': block_id} for block_id in block_id_list]
block_list_req_params = {'comp': 'blocklist'}
path = WaterButlerPath('/large_foobah')
for url in provider.generate_urls(path.path):
for block_req_params in block_req_params_list:
aiohttpretty.register_uri('PUT', url, status=200, params=block_req_params)
aiohttpretty.register_uri('PUT', url, status=200, params=block_list_req_params)
for metadata_url in provider.generate_urls(path.path):
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': large_file_metadata},
],
)
metadata, created = await provider.upload(large_file_stream, path, block_id_prefix=block_id_prefix)
assert metadata.kind == 'file'
assert created
for block_req_params in block_req_params_list:
assert aiohttpretty.has_call(method='PUT', uri=url, params=block_req_params)
assert aiohttpretty.has_call(method='PUT', uri=url, params=block_list_req_params)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
class TestCreateFolder:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_must_start_with_slash(self, provider, mock_time):
path = WaterButlerPath('/alreadyexists')
with pytest.raises(exceptions.CreateFolderError) as e:
await provider.create_folder(path)
assert e.value.code == 400
assert e.value.message == 'Path must be a directory'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_errors_conflict(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/alreadyexists/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
for url in provider.generate_urls('alreadyexists', secondary=True):
aiohttpretty.register_uri('HEAD', url, status=200)
for url in provider.generate_urls('alreadyexists/.osfkeep'):
aiohttpretty.register_uri('PUT', url, status=200)
with pytest.raises(exceptions.FolderNamingConflict) as e:
await provider.create_folder(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_creates(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/doesntalreadyexists/')
for url in provider.generate_urls(secondary=True):
aiohttpretty.register_uri('GET', url,
params={'restype': 'container', 'comp': 'list'},
body=folder_metadata,
headers={'Content-Type': 'application/xml'})
for url in provider.generate_urls('doesntalreadyexists', secondary=True):
aiohttpretty.register_uri('HEAD', url, status=404)
for url in provider.generate_urls('doesntalreadyexists/.osfkeep'):
aiohttpretty.register_uri('PUT', url, status=200)
resp = await provider.create_folder(path)
assert resp.kind == 'folder'
assert resp.name == 'doesntalreadyexists'
assert resp.path == '/doesntalreadyexists/'
class TestOperations:
async def test_equality(self, provider, mock_time):
assert provider.can_intra_copy(provider)
assert provider.can_intra_move(provider)
| RCOSDP/waterbutler | tests/providers/azureblobstorage/test_provider.py | Python | apache-2.0 | 18,530 |
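The block-count bookkeeping in test_upload_large above follows directly from the fixture size. A small worked example, assuming MAX_UPLOAD_BLOCK_SIZE is 4 MiB as the large_file_content fixture comment implies:

# Worked example of the chunking arithmetic (4 MiB block size is an assumption).
import math

content_len = 71 * (2 ** 20)              # the 71 MiB fixture
block_size = 4 * (2 ** 20)                # assumed maximum upload block size
full_blocks = math.floor(content_len / block_size)    # 17 full 4 MiB blocks
tail = content_len - full_blocks * block_size         # 3 MiB remainder
assert full_blocks == 17 and tail == 3 * (2 ** 20)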
__author__ = 'mark'
"""
User profile extension based on the one-to-one field example in the Django docs:
https://docs.djangoproject.com/en/1.7/topics/auth/customizing/
"""
from django.db import models
from django.contrib.auth.models import User
from uuid import uuid4
class Member(models.Model):
user = models.OneToOneField(User)
member_guid = models.CharField(max_length=100, null=True, blank=True)
ext_uid = models.CharField(max_length=100, null=True, blank=True)
user_token = models.CharField(max_length=100, null=True, blank=True)
| ekivemark/my_device | bbp/bbp/member/models.py | Python | apache-2.0 | 548 |
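A common companion to the one-to-one profile model above is a post_save hook that creates the Member row automatically, in the spirit of the Django docs the module cites. This is a hypothetical sketch, not code from the repository; the import path and field values are assumptions.

# Hypothetical signal handler that auto-creates a Member profile for each new User.
from uuid import uuid4

from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver

from bbp.member.models import Member


@receiver(post_save, sender=User)
def create_member_profile(sender, instance, created, **kwargs):
    if created:
        Member.objects.create(user=instance, member_guid=str(uuid4()))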
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from threading import Timer
from oslo_log import log as logging
from networking_vsphere._i18n import _LI
from networking_vsphere.utils.rpc_translator import update_rules
from neutron.agent import securitygroups_rpc
LOG = logging.getLogger(__name__)
class DVSSecurityGroupRpc(securitygroups_rpc.SecurityGroupAgentRpc):
def __init__(self, context, plugin_rpc,
defer_refresh_firewall=False):
self.context = context
self.plugin_rpc = plugin_rpc
self._devices_to_update = set()
self.init_firewall(defer_refresh_firewall)
def prepare_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info(_LI("Preparing filters for devices %s"), device_ids)
if self.use_enhanced_rpc:
devices_info = self.plugin_rpc.security_group_info_for_devices(
self.context, list(device_ids))
devices = update_rules(devices_info)
else:
devices = self.plugin_rpc.security_group_rules_for_devices(
self.context, list(device_ids))
self.firewall.prepare_port_filter(devices.values())
def remove_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info(_LI("Remove device filter for %r"), device_ids)
self.firewall.remove_port_filter(device_ids)
def _refresh_ports(self):
device_ids = self._devices_to_update
self._devices_to_update = self._devices_to_update - device_ids
if not device_ids:
return
if self.use_enhanced_rpc:
devices_info = self.plugin_rpc.security_group_info_for_devices(
self.context, device_ids)
devices = update_rules(devices_info)
else:
devices = self.plugin_rpc.security_group_rules_for_devices(
self.context, device_ids)
self.firewall.update_port_filter(devices.values())
def refresh_firewall(self, device_ids=None):
LOG.info(_LI("Refresh firewall rules"))
self._devices_to_update |= device_ids
if device_ids:
Timer(2, self._refresh_ports).start()
| VTabolin/networking-vsphere | networking_vsphere/agent/firewalls/dvs_securitygroup_rpc.py | Python | apache-2.0 | 2,782 |
#!/usr/bin/env python3
# encoding: utf-8
"""
main.py
The entry point for the book reader application.
"""
__version_info__ = (0, 0, 1)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "[email protected]"
import time
import sqlite3
import pdb
import signal
import sys, os
import rfid
import config
import RPi.GPIO as GPIO
from player import Player
from status_light import StatusLight
from threading import Thread
class BookReader(object):
"""The main class that controls the player, the GPIO pins and the RFID reader"""
def __init__(self):
"""Initialize all the things"""
self.rfid_reader = rfid.Reader(**config.serial)
# setup signal handlers. SIGINT for KeyboardInterrupt
# and SIGTERM for when running from supervisord
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
self.status_light = StatusLight(config.status_light_pin)
thread = Thread(target=self.status_light.start)
thread.start()
self.setup_db()
self.player = Player(config.mpd_conn, self.status_light)
self.setup_gpio()
def setup_db(self):
"""Setup a connection to the SQLite db"""
self.db_conn = sqlite3.connect(config.db_file)
self.db_cursor = self.db_conn.cursor()
def setup_gpio(self):
"""Setup all GPIO pins"""
GPIO.setmode(GPIO.BCM)
# input pins for buttons
for pin in config.gpio_pins:
GPIO.setup(pin['pin_id'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(pin['pin_id'], GPIO.FALLING, callback=getattr(self.player, pin['callback']), bouncetime=pin['bounce_time'])
def signal_handler(self, signal, frame):
"""When quiting, stop playback, close the player and release GPIO pins"""
self.player.close()
self.status_light.exit()
GPIO.cleanup()
sys.exit(0)
def loop(self):
"""The main event loop. This is where we look for new RFID cards on the RFID reader. If one is
present and different from the book that is currently playing, we:
1. Stop playback of the current book, if one is playing
2. Start playing the newly detected book
"""
while True:
if self.player.is_playing():
self.on_playing()
elif self.player.finished_book():
# when at the end of a book, delete its progress from the db
# so we can listen to it again
self.db_cursor.execute(
'DELETE FROM progress WHERE book_id = %d' % self.player.book.book_id)
self.db_conn.commit()
self.player.book.reset()
rfid_card = self.rfid_reader.read()
if not rfid_card:
continue
book_id = rfid_card.get_id()
if book_id and book_id != self.player.book.book_id: # a change in book id
progress = self.db_cursor.execute(
'SELECT * FROM progress WHERE book_id = "%s"' % book_id).fetchone()
self.player.play(book_id, progress)
def on_playing(self):
"""Executed for each loop execution. Here we update self.player.book with the latest known position
and save the prigress to db"""
status = self.player.get_status()
self.player.book.elapsed = float(status['elapsed'])
self.player.book.part = int(status['song']) + 1
#print "%s second of part %s" % (self.player.book.elapsed, self.player.book.part)
self.db_cursor.execute(
'INSERT OR REPLACE INTO progress (book_id, part, elapsed) VALUES (%s, %d, %f)' %\
(self.player.book.book_id, self.player.book.part, self.player.book.elapsed))
self.db_conn.commit()
if __name__ == '__main__':
reader = BookReader()
reader.loop()
| siliconchris1973/fairytale | RASPI-stuff/python-codeline/fairytale/main.py | Python | apache-2.0 | 3,954 |
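The loop above assumes a `progress` table keyed by book id. A plausible bootstrap matching the queries in main.py is sketched below; the column names come from those queries, while the types and PRIMARY KEY constraint (which makes 'INSERT OR REPLACE' behave as an upsert) are assumptions.

# Hedged sketch of the SQLite schema implied by BookReader's queries.
import sqlite3

def ensure_progress_table(db_file):
    conn = sqlite3.connect(db_file)
    conn.execute(
        'CREATE TABLE IF NOT EXISTS progress ('
        ' book_id TEXT PRIMARY KEY,'   # matched against the RFID card id
        ' part INTEGER,'               # 1-based track number within the book
        ' elapsed REAL)'               # seconds into the current part
    )
    conn.commit()
    return conn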
from ..user_namespaces import UserNamespaces
from ...parsers.cmdline import CmdLine
from ...parsers.grub_conf import Grub2Config
from ...tests import context_wrap
ENABLE_TOK_A = '''
user_namespaces.enable=1
'''.strip() # noqa
ENABLE_TOK_B = '''
user-namespaces.enable=1
'''.strip() # noqa
CMDLINE = '''
BOOT_IMAGE=/vmlinuz-3.10.0-514.6.1.el7.x86_64 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap {0}
'''.strip() # noqa
GRUB2_CONF = '''
### BEGIN /etc/grub.d/10_linux ###
menuentry 'Red Hat Enterprise Linux Server (3.10.0-514.16.1.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586' {{
load_video
set gfxpayload=keep
insmod gzio
insmod part_gpt
insmod xfs
set root='hd0,gpt2'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 d80fa96c-ffa1-4894-9282-aeda37f0befe
else
search --no-floppy --fs-uuid --set=root d80fa96c-ffa1-4894-9282-aeda37f0befe
fi
linuxefi /vmlinuz-3.10.0-514.16.1.el7.x86_64 root=/dev/mapper/rhel-root ro rd.luks.uuid=luks-a40b320e-0711-4cd6-8f9e-ce32810e2a79 rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8 {0}
initrdefi /initramfs-3.10.0-514.16.1.el7.x86_64.img
}}
menuentry 'Red Hat Enterprise Linux Server (3.10.0-514.10.2.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586' {{
load_video
set gfxpayload=keep
insmod gzio
insmod part_gpt
insmod xfs
set root='hd0,gpt2'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,gpt2 --hint-efi=hd0,gpt2 --hint-baremetal=ahci0,gpt2 d80fa96c-ffa1-4894-9282-aeda37f0befe
else
search --no-floppy --fs-uuid --set=root d80fa96c-ffa1-4894-9282-aeda37f0befe
fi
linuxefi /vmlinuz-3.10.0-514.10.2.el7.x86_64 root=/dev/mapper/rhel-root ro rd.luks.uuid=luks-a40b320e-0711-4cd6-8f9e-ce32810e2a79 rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8 {1}
initrdefi /initramfs-3.10.0-514.10.2.el7.x86_64.img
}}
''' # noqa
MENUENTRY_0 = '''
'Red Hat Enterprise Linux Server (3.10.0-514.16.1.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586'
'''.strip() # noqa
MENUENTRY_1 = '''
'Red Hat Enterprise Linux Server (3.10.0-514.10.2.el7.x86_64) 7.3 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-514.el7.x86_64-advanced-9727cab4-12c2-41a8-9527-9644df34e586'
'''.strip() # noqa
CASES = [
# noqa
# |-- provided --| |---- expected results ---|
# ((cmdline, grub), (enabled, enabled_configs))
# Not enabled, no grub data
((CMDLINE.format(''), None), (False, [])),
# Not enabled, not enabled in grub
((CMDLINE.format(''), GRUB2_CONF.format('', '')), (False, [])),
# Not enabled, but enabled in menuentry 1
((CMDLINE.format(''), GRUB2_CONF.format('', ENABLE_TOK_A)),
(False, [MENUENTRY_1])),
# Enabled, no grub data
((CMDLINE.format(ENABLE_TOK_A), None), (True, [])),
# Enabled, but not enabled in grub
((CMDLINE.format(ENABLE_TOK_A), GRUB2_CONF.format('', '')),
(True, [])),
# Enabled, enabled in menuentry 0
((CMDLINE.format(ENABLE_TOK_A), GRUB2_CONF.format(ENABLE_TOK_A, '')),
(True, [MENUENTRY_0])),
# Dash syntax, rather than underscore
((CMDLINE.format(ENABLE_TOK_B), GRUB2_CONF.format(ENABLE_TOK_B, '')),
(True, [MENUENTRY_0]))
]
def test_integration():
for case in CASES:
context = {}
context[CmdLine] = CmdLine(context_wrap(case[0][0]))
if case[0][1] is not None:
context[Grub2Config] = Grub2Config(context_wrap(case[0][1]))
un = UserNamespaces(context.get(CmdLine), context.get(Grub2Config))
assert un.enabled() == case[1][0]
assert un.enabled_configs() == case[1][1]
| wcmitchell/insights-core | insights/combiners/tests/test_user_namespaces.py | Python | apache-2.0 | 4,522 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.api import auth
from solum.api.handlers import assembly_handler
from solum.common import exception
from solum.common import repo_utils
from solum.objects import assembly
from solum.openstack.common.fixture import config
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
STATES = assembly.States
@mock.patch('solum.objects.registry')
class TestAssemblyHandler(base.BaseTestCase):
def setUp(self):
super(TestAssemblyHandler, self).setUp()
self.ctx = utils.dummy_context()
self.CONF = self.useFixture(config.Config())
self.CONF.config(auth_uri='http://fakeidentity.com',
group=auth.OPT_GROUP_NAME)
self.CONF.config(keystone_version='3')
def test_assembly_get(self, mock_registry):
mock_registry.return_value.Assembly.get_by_uuid.return_value = {
'plan_id': '1234'
}
handler = assembly_handler.AssemblyHandler(self.ctx)
res = handler.get('test_id')
self.assertIsNotNone(res)
get_by_uuid = mock_registry.Assembly.get_by_uuid
get_by_uuid.assert_called_once_with(self.ctx, 'test_id')
def test_assembly_get_all(self, mock_registry):
mock_registry.AssemblyList.get_all.return_value = {}
handler = assembly_handler.AssemblyHandler(self.ctx)
res = handler.get_all()
self.assertIsNotNone(res)
mock_registry.AssemblyList.get_all.assert_called_once_with(self.ctx)
def test_update(self, mock_registry):
data = {'user_id': 'new_user_id',
'plan_uuid': 'input_plan_uuid'}
handler = assembly_handler.AssemblyHandler(self.ctx)
handler.update('test_id', data)
mock_registry.Assembly.update_and_save.assert_called_once_with(
self.ctx, 'test_id', data)
@mock.patch('solum.worker.api.API.build_app')
@mock.patch('solum.common.clients.OpenStackClients.keystone')
def test_create(self, mock_kc, mock_pa, mock_registry):
data = {'user_id': 'new_user_id',
'uuid': 'input_uuid',
'plan_uuid': 'input_plan_uuid'}
db_obj = fakes.FakeAssembly()
mock_registry.Assembly.return_value = db_obj
fp = fakes.FakePlan()
mock_registry.Plan.get_by_id.return_value = fp
fp.raw_content = {
'name': 'theplan',
'artifacts': [{'name': 'nodeus',
'artifact_type': 'heroku',
'content': {'private': False,
'href': 'https://example.com/ex.git'},
'language_pack': 'auto'}]}
mock_registry.Image.return_value = fakes.FakeImage()
handler = assembly_handler.AssemblyHandler(self.ctx)
res = handler.create(data)
db_obj.update.assert_called_once_with(data)
db_obj.create.assert_called_once_with(self.ctx)
self.assertEqual(db_obj, res)
git_info = {
'source_url': "https://example.com/ex.git",
'commit_sha': '',
'repo_token': None,
'status_url': None,
}
mock_pa.assert_called_once_with(
verb='launch_workflow', workflow=['unittest', 'build', 'deploy'],
build_id=8, name='nodeus', assembly_id=8,
git_info=git_info, test_cmd=None, ports=[80],
base_image_id='auto', source_format='heroku',
image_format='qcow2', run_cmd=None)
@mock.patch('solum.common.clients.OpenStackClients.keystone')
def test_create_with_username_in_ctx(self, mock_kc, mock_registry):
data = {'plan_uuid': 'input_plan_uuid'}
db_obj = fakes.FakeAssembly()
mock_registry.Assembly.return_value = db_obj
fp = fakes.FakePlan()
mock_registry.Plan.get_by_id.return_value = fp
fp.raw_content = {'name': 'theplan'}
handler = assembly_handler.AssemblyHandler(self.ctx)
res = handler.create(data)
self.assertEqual(res.username, self.ctx.user_name)
@mock.patch('solum.common.clients.OpenStackClients.keystone')
def test_create_without_username_in_ctx(self, mock_kc, mock_registry):
data = {'plan_uuid': 'input_plan_uuid'}
ctx = utils.dummy_context()
ctx.user_name = ''
db_obj = fakes.FakeAssembly()
mock_registry.Assembly.return_value = db_obj
fp = fakes.FakePlan()
mock_registry.Plan.get_by_id.return_value = fp
fp.raw_content = {'name': 'theplan'}
handler = assembly_handler.AssemblyHandler(ctx)
res = handler.create(data)
self.assertEqual(res.username, '')
@mock.patch('solum.worker.api.API.build_app')
@mock.patch('solum.common.clients.OpenStackClients.keystone')
def test_create_with_private_github_repo(self, mock_kc, mock_pa,
mock_registry):
data = {'user_id': 'new_user_id',
'uuid': 'input_uuid',
'plan_uuid': 'input_plan_uuid'}
db_obj = fakes.FakeAssembly()
mock_registry.Assembly.return_value = db_obj
fp = fakes.FakePlan()
mock_registry.Plan.get_by_id.return_value = fp
fp.raw_content = {
'name': 'theplan',
'artifacts': [{'name': 'nodeus',
'artifact_type': 'heroku',
'content': {'private': True,
'href': 'https://example.com/ex.git',
'public_key': 'ssh-rsa abc'},
'language_pack': 'auto'}]}
fp.deploy_keys_uri = 'secret_ref_uri'
mock_registry.Image.return_value = fakes.FakeImage()
handler = assembly_handler.AssemblyHandler(self.ctx)
res = handler.create(data)
db_obj.update.assert_called_once_with(data)
db_obj.create.assert_called_once_with(self.ctx)
self.assertEqual(db_obj, res)
git_info = {
'source_url': "https://example.com/ex.git",
'commit_sha': '',
'repo_token': None,
'status_url': None,
}
mock_pa.assert_called_once_with(
verb='launch_workflow', workflow=['unittest', 'build', 'deploy'],
build_id=8, name='nodeus', assembly_id=8,
git_info=git_info, ports=[80],
test_cmd=None, base_image_id='auto', source_format='heroku',
image_format='qcow2', run_cmd=None)
@mock.patch('solum.common.clients.OpenStackClients.keystone')
@mock.patch('solum.deployer.api.API.destroy_assembly')
@mock.patch('solum.conductor.api.API.update_assembly')
def test_delete(self, mock_cond, mock_deploy, mock_kc, mock_registry):
db_obj = fakes.FakeAssembly()
mock_registry.Assembly.get_by_uuid.return_value = db_obj
handler = assembly_handler.AssemblyHandler(self.ctx)
handler.delete('test_id')
mock_registry.Assembly.get_by_uuid.assert_called_once_with(self.ctx,
'test_id')
mock_cond.assert_called_once_with(db_obj.id, {'status': 'DELETING'})
mock_deploy.assert_called_once_with(assem_id=db_obj.id)
@mock.patch('httplib2.Http.request')
def test_verify_artifact_raise_exp(self, http_mock, mock_registry):
artifact = {"name": "Test",
"artifact_type": "heroku",
"content": {"href": "https://github.com/some/project"},
"language_pack": "auto",
"repo_token": "abcd"}
http_mock.return_value = ({'status': '404'}, '') # Not a collaborator
collab_url = 'https://api.github.com/repos/u/r/collaborators/foo'
self.assertRaises(exception.RequestForbidden,
repo_utils.verify_artifact,
artifact, collab_url)
| devdattakulkarni/test-solum | solum/tests/api/handlers/test_assembly.py | Python | apache-2.0 | 8,505 |
"""
Copyright 2013 OpERA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/python
## @package algorithm
# ::TODO:: Discover how to include patches externally
import sys
import os
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.insert(0, path)
import unittest
import random
# Modules tested
from feedbackAlgorithm import FeedbackAlgorithm, ExponentialTimeFeedback, KunstTimeFeedback
# Other modules needed
from device import radioDevice
from abstractAlgorithm import AbstractAlgorithm
class QaAlgorithm(unittest.TestCase):
"""
Test algorithm module.
"""
def test_feedback_001(self):
"""
Test the feedback algorithm.
"""
        mi = 1
ma = 256
base = 3
obj = ExponentialTimeFeedback(min_time=mi,
max_time=ma,
base=base
)
        # Initial state.
self.assertEqual(False, obj.feedback())
obj.wait() # wait = 1
# 3 ^ 0 == 1 (wait is 1)
self.assertEqual(True, obj.feedback())
        # Test if it got back correctly.
self.assertEqual(False, obj.feedback())
        # We increase the sensing time to 3^1 = 3.
obj.increase_time()
self.assertEqual(False, obj.feedback())
obj.wait() # wait = 1
self.assertEqual(False, obj.feedback())
obj.wait() # wait = 2
obj.wait() # wait = 3
        self.assertEqual(True, obj.feedback())  # wait gets back to 0
        self.assertEqual(False, obj.feedback())
        obj.decrease_time()  # reset time 3^0 = 1
        obj.wait()  # wait = 1
        self.assertEqual(True, obj.feedback())  # wait gets back to 0
def test_feedback_002(self):
"""
Test the feedback algorithm
"""
obj = KunstTimeFeedback()
        # Initial state.
self.assertEqual(False, obj.feedback())
obj.wait() # wait = 1
# 2 ^ 0 == 1
# wait = 0
self.assertEqual(True, obj.feedback())
        # We increase the sensing time to 2^1 = 2.
obj.increase_time()
self.assertEqual(False, obj.feedback())
obj.wait() # wait = 1
self.assertEqual(False, obj.feedback())
obj.wait() # wait = 2
        self.assertEqual(True, obj.feedback())  # wait gets back to 0
        self.assertEqual(False, obj.feedback())
obj.wait() # wait = 1
obj.wait() # wait = 2
obj.wait() # wait = 3
obj.wait() # wait = 4
obj.increase_time() # 2^2 = 4
        self.assertEqual(True, obj.feedback())  # wait gets back to 0
        self.assertEqual(False, obj.feedback())
obj.decrease_time() # Should be 2^1 = 2
obj.wait()
obj.wait()
        self.assertEqual(True, obj.feedback())  # wait gets back to 0
        self.assertEqual(False, obj.feedback())
if __name__ == '__main__':
unittest.main()
| ComputerNetworks-UFRGS/OpERA | python/algorithm/qa_test.py | Python | apache-2.0 | 3,935 |
# -*- coding:utf-8 -*-
'''
Created on March 12, 2015
@author: wanhao01
'''
import os
class SpiderFileUtils(object):
'''
deal with file related operations.
'''
def __save_page(self, data, url, outputdir):
'''
save the page content with the specific url to the local path.
'''
if(not os.path.exists(outputdir)):
os.makedirs(outputdir)
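        # __validate_name is assumed (it is not defined in this snippet) to map
        # the URL to a filesystem-safe filename.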
filename = self.__validate_name(url)
        with open(os.path.join(outputdir, filename), 'w') as f:
            f.writelines(data)
if __name__ == '__main__':
pass | onehao/opensource | pyml/crawler/minispider/SpiderFileUtils.py | Python | apache-2.0 | 610 |
#!/usr/bin/env python
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Configment interface
>>> class TestCfg(Configment):
... CONFIGSPEC_SOURCE = '''
... [abc]
... x = integer(default=3)
... '''
>>> cfg = TestCfg()
>>> cfg["abc"]["x"]
3
>>>
"""
import os
import validate
import six
from .configobj_wrap import ConfigObjWrap
from .meta_configment import MetaConfigment
from .configment_validator import ConfigmentValidator
from .pathname import Pathname
from .environment import load_configspec
__author__ = "Simone Campagna"
__all__ = [
'create_configment_class',
'Configment',
'ConfigmentValidateError',
]
class ConfigmentValidateError(validate.ValidateError):
def __str__(self):
return "validation failed: {}".format(self.args[0])
class BaseConfigment(ConfigObjWrap):
CONFIGSPEC = None
DEFAULT_MODE_HIDE = "hide"
DEFAULT_MODE_SHOW = "show"
DEFAULT_MODES = [DEFAULT_MODE_HIDE, DEFAULT_MODE_SHOW]
DEFAULT_MODE = DEFAULT_MODE_HIDE
def __init__(self, filename=None, default_mode=None):
super(BaseConfigment, self).__init__(
infile=None,
configspec=self.__class__.CONFIGSPEC,
unrepr=True,
interpolation=False,
indent_type=" ",
stringify=True,
)
if default_mode is None:
default_mode = self.DEFAULT_MODE
self.default_mode = default_mode
self.set_filename(filename)
if self.filename is not None:
self.load_file(filename, throw_on_errors=True)
else:
self.initialize(throw_on_errors=False)
def set_filename(self, filename=None):
super(BaseConfigment, self).set_filename(filename)
if self.filename is None:
self._base_dir = os.getcwd()
else:
self._base_dir = os.path.dirname(os.path.abspath(filename))
def do_validation(self, base_dir=None, reset=False, throw_on_errors=False):
if base_dir is None:
base_dir = self._base_dir
validator = ConfigmentValidator()
copy = self.default_mode == self.DEFAULT_MODE_SHOW
result = super(BaseConfigment, self).validate(validator, preserve_errors=True, copy=copy)
result = self.filter_validation_result(result)
self.set_paths(base_dir, reset=reset)
if throw_on_errors and result:
raise ConfigmentValidateError(result)
c_result = ConfigObjWrap(
infile=result,
stringify=True,
unrepr=True,
indent_type=' ',
)
return c_result
@six.add_metaclass(MetaConfigment)
class Configment(BaseConfigment):
def __init__(self, filename=None, default_mode=None):
super(Configment, self).__init__(
filename=filename,
default_mode=default_mode,
)
def impl_initialize(self, throw_on_errors=False):
try:
return self.do_validation(reset=False, throw_on_errors=throw_on_errors)
except: # pylint: disable=bare-except
return False
def impl_load_file(self, filename, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
Pathname.set_default_base_dir(self._base_dir)
self.set_filename(filename)
self.reload()
try:
result = self.do_validation(base_dir=self._base_dir, reset=True, throw_on_errors=throw_on_errors)
finally:
Pathname.set_default_base_dir(default_base_dir)
return result
def impl_dump_s(self, stream=None, filename=None, throw_on_errors=False):
default_base_dir = Pathname.get_default_base_dir()
try:
if filename is not None:
base_dir = os.path.dirname(os.path.normpath(os.path.abspath(filename)))
else:
base_dir = self._base_dir
Pathname.set_default_base_dir(base_dir)
self.do_validation(base_dir=base_dir, reset=False, throw_on_errors=throw_on_errors)
self.write(stream)
finally:
Pathname.set_default_base_dir(default_base_dir)
def create_configment_class(configspec_filename, class_name=None, dir_list=None):
if class_name is None:
class_name = os.path.splitext(os.path.basename(configspec_filename))[0]
class_bases = (Configment, )
class_dict = {
'CONFIGSPEC_SOURCE': load_configspec(configspec_filename, dir_list=dir_list),
}
return MetaConfigment(class_name, class_bases, class_dict)
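# Minimal usage sketch (filenames are illustrative; behavior mirrors the class
# docstring at the top of this module):
#
#     MyCfg = create_configment_class('tool.configspec')
#     cfg = MyCfg()
#     value = cfg["section"]["key"]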
| simone-campagna/py-configment | src/configment/configment.py | Python | apache-2.0 | 5,069 |
"""Example training a memory neural net on the bAbI dataset.
References Keras and is based off of https://keras.io/examples/babi_memnn/.
"""
from __future__ import print_function
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import (Input, Activation, Dense, Permute,
Dropout)
from tensorflow.keras.layers import add, dot, concatenate
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import get_file
from tensorflow.keras.preprocessing.sequence import pad_sequences
from filelock import FileLock
import os
import argparse
import tarfile
import numpy as np
import re
from ray import tune
def tokenize(sent):
"""Return the tokens of a sentence including punctuation.
>>> tokenize("Bob dropped the apple. Where is the apple?")
["Bob", "dropped", "the", "apple", ".", "Where", "is", "the", "apple", "?"]
"""
    return [x.strip() for x in re.split(r"(\W+)", sent) if x and x.strip()]
def parse_stories(lines, only_supporting=False):
"""Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences
that support the answer are kept.
"""
data = []
story = []
for line in lines:
line = line.decode("utf-8").strip()
nid, line = line.split(" ", 1)
nid = int(nid)
if nid == 1:
story = []
if "\t" in line:
q, a, supporting = line.split("\t")
q = tokenize(q)
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append("")
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
"""Given a file name, read the file,
retrieve the stories,
and then convert the sentences into a single story.
If max_length is supplied,
any stories longer than max_length tokens will be discarded.
"""
def flatten(data):
return sum(data, [])
data = parse_stories(f.readlines(), only_supporting=only_supporting)
data = [(flatten(story), q, answer) for story, q, answer in data
if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(word_idx, story_maxlen, query_maxlen, data):
inputs, queries, answers = [], [], []
for story, query, answer in data:
inputs.append([word_idx[w] for w in story])
queries.append([word_idx[w] for w in query])
answers.append(word_idx[answer])
return (pad_sequences(inputs, maxlen=story_maxlen),
pad_sequences(queries, maxlen=query_maxlen), np.array(answers))
def read_data(finish_fast=False):
# Get the file
try:
path = get_file(
"babi-tasks-v1-2.tar.gz",
origin="https://s3.amazonaws.com/text-datasets/"
"babi_tasks_1-20_v1-2.tar.gz")
except Exception:
print(
"Error downloading dataset, please download it manually:\n"
"$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2" # noqa: E501
".tar.gz\n"
"$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz" # noqa: E501
)
raise
# Choose challenge
challenges = {
# QA1 with 10,000 samples
"single_supporting_fact_10k": "tasks_1-20_v1-2/en-10k/qa1_"
"single-supporting-fact_{}.txt",
# QA2 with 10,000 samples
"two_supporting_facts_10k": "tasks_1-20_v1-2/en-10k/qa2_"
"two-supporting-facts_{}.txt",
}
challenge_type = "single_supporting_fact_10k"
challenge = challenges[challenge_type]
with tarfile.open(path) as tar:
train_stories = get_stories(tar.extractfile(challenge.format("train")))
test_stories = get_stories(tar.extractfile(challenge.format("test")))
if finish_fast:
train_stories = train_stories[:64]
test_stories = test_stories[:64]
return train_stories, test_stories
class MemNNModel(tune.Trainable):
def build_model(self):
"""Helper method for creating the model"""
vocab = set()
for story, q, answer in self.train_stories + self.test_stories:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(
len(x) for x, _, _ in self.train_stories + self.test_stories)
query_maxlen = max(
len(x) for _, x, _ in self.train_stories + self.test_stories)
word_idx = {c: i + 1 for i, c in enumerate(vocab)}
self.inputs_train, self.queries_train, self.answers_train = (
vectorize_stories(word_idx, story_maxlen, query_maxlen,
self.train_stories))
self.inputs_test, self.queries_test, self.answers_test = (
vectorize_stories(word_idx, story_maxlen, query_maxlen,
self.test_stories))
# placeholders
input_sequence = Input((story_maxlen, ))
question = Input((query_maxlen, ))
# encoders
# embed the input sequence into a sequence of vectors
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64))
input_encoder_m.add(Dropout(self.config.get("dropout", 0.3)))
# output: (samples, story_maxlen, embedding_dim)
# embed the input into a sequence of vectors of size query_maxlen
input_encoder_c = Sequential()
input_encoder_c.add(
Embedding(input_dim=vocab_size, output_dim=query_maxlen))
input_encoder_c.add(Dropout(self.config.get("dropout", 0.3)))
# output: (samples, story_maxlen, query_maxlen)
# embed the question into a sequence of vectors
question_encoder = Sequential()
question_encoder.add(
Embedding(
input_dim=vocab_size, output_dim=64,
input_length=query_maxlen))
question_encoder.add(Dropout(self.config.get("dropout", 0.3)))
# output: (samples, query_maxlen, embedding_dim)
# encode input sequence and questions (which are indices)
# to sequences of dense vectors
input_encoded_m = input_encoder_m(input_sequence)
input_encoded_c = input_encoder_c(input_sequence)
question_encoded = question_encoder(question)
# compute a "match" between the first input vector sequence
# and the question vector sequence
# shape: `(samples, story_maxlen, query_maxlen)`
match = dot([input_encoded_m, question_encoded], axes=(2, 2))
match = Activation("softmax")(match)
# add the match matrix with the second input vector sequence
response = add(
[match, input_encoded_c]) # (samples, story_maxlen, query_maxlen)
response = Permute(
(2, 1))(response) # (samples, query_maxlen, story_maxlen)
# concatenate the match matrix with the question vector sequence
answer = concatenate([response, question_encoded])
# the original paper uses a matrix multiplication.
# we choose to use a RNN instead.
answer = LSTM(32)(answer) # (samples, 32)
# one regularization layer -- more would probably be needed.
answer = Dropout(self.config.get("dropout", 0.3))(answer)
answer = Dense(vocab_size)(answer) # (samples, vocab_size)
# we output a probability distribution over the vocabulary
answer = Activation("softmax")(answer)
# build the final model
model = Model([input_sequence, question], answer)
return model
def setup(self, config):
with FileLock(os.path.expanduser("~/.tune.lock")):
self.train_stories, self.test_stories = read_data(
config["finish_fast"])
model = self.build_model()
rmsprop = RMSprop(
lr=self.config.get("lr", 1e-3), rho=self.config.get("rho", 0.9))
model.compile(
optimizer=rmsprop,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
self.model = model
def step(self):
# train
self.model.fit(
[self.inputs_train, self.queries_train],
self.answers_train,
batch_size=self.config.get("batch_size", 32),
epochs=self.config.get("epochs", 1),
validation_data=([self.inputs_test, self.queries_test],
self.answers_test),
verbose=0)
_, accuracy = self.model.evaluate(
[self.inputs_train, self.queries_train],
self.answers_train,
verbose=0)
return {"mean_accuracy": accuracy}
def save_checkpoint(self, checkpoint_dir):
file_path = checkpoint_dir + "/model"
self.model.save(file_path)
return file_path
def load_checkpoint(self, path):
# See https://stackoverflow.com/a/42763323
del self.model
self.model = load_model(path)
if __name__ == "__main__":
import ray
from ray.tune.schedulers import PopulationBasedTraining
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using "
"Ray Client.")
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=2)
elif args.server_address:
ray.util.connect(args.server_address)
pbt = PopulationBasedTraining(
perturbation_interval=2,
hyperparam_mutations={
"dropout": lambda: np.random.uniform(0, 1),
"lr": lambda: 10**np.random.randint(-10, 0),
"rho": lambda: np.random.uniform(0, 1)
})
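    # With this scheduler, under-performing trials periodically clone the state
    # of better-performing ones and then perturb or resample the hyperparameters
    # listed in `hyperparam_mutations`; `perturbation_interval` controls how
    # often (in units of the scheduler's time attribute) that exploit/explore
    # step may happen.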
results = tune.run(
MemNNModel,
name="pbt_babi_memnn",
scheduler=pbt,
metric="mean_accuracy",
mode="max",
stop={"training_iteration": 4 if args.smoke_test else 100},
num_samples=2,
config={
"finish_fast": args.smoke_test,
"batch_size": 32,
"epochs": 1,
"dropout": 0.3,
"lr": 0.01,
"rho": 0.9
})
| pcmoritz/ray-1 | python/ray/tune/examples/pbt_memnn_example.py | Python | apache-2.0 | 10,874 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 Model Implementation
A mapping between variable/parameter names found in the TR-55 document
and variables used in this program are as follows:
* `precip` is referred to as P in the report
* `runoff` is Q
* `evaptrans` maps to ET, the evapotranspiration
* `inf` is the amount of water that infiltrates into the soil (in inches)
* `init_abs` is Ia, the initial abstraction, another form of infiltration
"""
import copy
import numpy as np
from tr55.tablelookup import lookup_cn, lookup_bmp_storage, \
lookup_ki, is_bmp, is_built_type, make_precolumbian, \
get_pollutants, get_bmps, lookup_pitt_runoff, lookup_bmp_drainage_ratio
from tr55.water_quality import get_volume_of_runoff, get_pollutant_load
from tr55.operations import dict_plus
def runoff_pitt(precip, evaptrans, soil_type, land_use):
"""
The Pitt Small Storm Hydrology method. The output is a runoff
value in inches.
This uses numpy to make a linear interpolation between tabular values to
    calculate the exact runoff for a given value.
`precip` is the amount of precipitation in inches.
"""
runoff_ratios = lookup_pitt_runoff(soil_type, land_use)
runoff_ratio = np.interp(precip, runoff_ratios['precip'], runoff_ratios['Rv'])
runoff = precip*runoff_ratio
return min(runoff, precip - evaptrans)
def nrcs_cutoff(precip, curve_number):
"""
A function to find the cutoff between precipitation/curve number
pairs that have zero runoff by definition, and those that do not.
"""
if precip <= -1 * (2 * (curve_number - 100.0) / curve_number):
return True
else:
return False
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
"""
The runoff equation from the TR-55 document. The output is a
runoff value in inches.
`precip` is the amount of precipitation in inches.
"""
curve_number = lookup_cn(soil_type, land_use)
if nrcs_cutoff(precip, curve_number):
return 0.0
potential_retention = (1000.0 / curve_number) - 10
initial_abs = 0.2 * potential_retention
precip_minus_initial_abs = precip - initial_abs
numerator = pow(precip_minus_initial_abs, 2)
denominator = (precip_minus_initial_abs + potential_retention)
runoff = numerator / denominator
return min(runoff, precip - evaptrans)
def simulate_cell_day(precip, evaptrans, cell, cell_count):
"""
Simulate a bunch of cells of the same type during a one-day event.
`precip` is the amount of precipitation in inches.
`evaptrans` is evapotranspiration in inches per day - this is the
ET for the cell after taking the crop/landscape factor into account
this is NOT the ETmax.
`cell` is a string which contains a soil type and land use
separated by a colon.
`cell_count` is the number of cells to simulate.
The return value is a dictionary of runoff, evapotranspiration, and
infiltration as a volume (inches * #cells).
"""
def clamp(runoff, et, inf, precip):
"""
This function ensures that runoff + et + inf <= precip.
NOTE: Infiltration is normally independent of the
precipitation level, but this function introduces a slight
dependency (that is, at very low levels of precipitation, this
function can cause infiltration to be smaller than it
        ordinarily would be).
"""
total = runoff + et + inf
if (total > precip):
scale = precip / total
runoff *= scale
et *= scale
inf *= scale
return (runoff, et, inf)
precip = max(0.0, precip)
soil_type, land_use, bmp = cell.lower().split(':')
# If there is no precipitation, then there is no runoff or
# infiltration; however, there is evapotranspiration. (It is
# understood that over a period of time, this can lead to the sum
# of the three values exceeding the total precipitation.)
if precip == 0.0:
return {
'runoff-vol': 0.0,
'et-vol': 0.0,
'inf-vol': 0.0,
}
# If the BMP is cluster_housing or no_till, then make it the
# land-use. This is done because those two types of BMPs behave
# more like land-uses than they do BMPs.
if bmp and not is_bmp(bmp):
land_use = bmp or land_use
    # When the land use is a built type, use the Pitt Small Storm Hydrology
    # Model until the runoff predicted by the NRCS model is greater than that
    # predicted by the Pitt model.
if is_built_type(land_use):
pitt_runoff = runoff_pitt(precip, evaptrans, soil_type, land_use)
nrcs_runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
runoff = max(pitt_runoff, nrcs_runoff)
else:
runoff = runoff_nrcs(precip, evaptrans, soil_type, land_use)
inf = max(0.0, precip - (evaptrans + runoff))
# (runoff, evaptrans, inf) = clamp(runoff, evaptrans, inf, precip)
return {
'runoff-vol': cell_count * runoff,
'et-vol': cell_count * evaptrans,
'inf-vol': cell_count * inf,
}
def create_unmodified_census(census):
"""
This creates a cell census, ignoring any modifications. The
output is suitable for use as input to `simulate_water_quality`.
"""
unmod = copy.deepcopy(census)
unmod.pop('modifications', None)
return unmod
def create_modified_census(census):
"""
This creates a cell census, with modifications, that is suitable
for use as input to `simulate_water_quality`.
For every type of cell that undergoes modification, the
modifications are indicated with a sub-distribution under that
cell type.
"""
mod = copy.deepcopy(census)
mod.pop('modifications', None)
for (cell, subcensus) in mod['distribution'].items():
n = subcensus['cell_count']
changes = {
'distribution': {
cell: {
'distribution': {
cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
for modification in (census.get('modifications') or []):
for (orig_cell, subcensus) in modification['distribution'].items():
n = subcensus['cell_count']
soil1, land1 = orig_cell.split(':')
soil2, land2, bmp = modification['change'].split(':')
changed_cell = '%s:%s:%s' % (soil2 or soil1, land2 or land1, bmp)
changes = {
'distribution': {
orig_cell: {
'distribution': {
orig_cell: {'cell_count': -n},
changed_cell: {'cell_count': n}
}
}
}
}
mod = dict_plus(mod, changes)
return mod
def simulate_water_quality(tree, cell_res, fn,
pct=1.0, current_cell=None, precolumbian=False):
"""
Perform a water quality simulation by doing simulations on each of
the cell types (leaves), then adding them together by summing the
values of a node's subtrees and storing them at that node.
`tree` is the (sub)tree of cell distributions that is currently
under consideration.
`pct` is the percentage of calculated water volume to retain.
`cell_res` is the size of each cell/pixel in meters squared
(used for turning inches of water into volumes of water).
`fn` is a function that takes a cell type and a number of cells
and returns a dictionary containing runoff, et, and inf as
volumes.
`current_cell` is the cell type for the present node.
"""
# Internal node.
if 'cell_count' in tree and 'distribution' in tree:
n = tree['cell_count']
# simulate subtrees
if n != 0:
tally = {}
for cell, subtree in tree['distribution'].items():
simulate_water_quality(subtree, cell_res, fn,
pct, cell, precolumbian)
subtree_ex_dist = subtree.copy()
subtree_ex_dist.pop('distribution', None)
tally = dict_plus(tally, subtree_ex_dist)
tree.update(tally) # update this node
# effectively a leaf
elif n == 0:
for pol in get_pollutants():
tree[pol] = 0.0
# Leaf node.
elif 'cell_count' in tree and 'distribution' not in tree:
# the number of cells covered by this leaf
n = tree['cell_count']
# canonicalize the current_cell string
split = current_cell.split(':')
if (len(split) == 2):
split.append('')
if precolumbian:
split[1] = make_precolumbian(split[1])
current_cell = '%s:%s:%s' % tuple(split)
# run the runoff model on this leaf
result = fn(current_cell, n) # runoff, et, inf
runoff_adjustment = result['runoff-vol'] - (result['runoff-vol'] * pct)
result['runoff-vol'] -= runoff_adjustment
result['inf-vol'] += runoff_adjustment
tree.update(result)
# perform water quality calculation
if n != 0:
soil_type, land_use, bmp = split
runoff_per_cell = result['runoff-vol'] / n
liters = get_volume_of_runoff(runoff_per_cell, n, cell_res)
for pol in get_pollutants():
tree[pol] = get_pollutant_load(land_use, pol, liters)
def postpass(tree):
"""
Remove volume units and replace them with inches.
"""
if 'cell_count' in tree:
if tree['cell_count'] > 0:
n = tree['cell_count']
tree['runoff'] = tree['runoff-vol'] / n
tree['et'] = tree['et-vol'] / n
tree['inf'] = tree['inf-vol'] / n
else:
tree['runoff'] = 0
tree['et'] = 0
tree['inf'] = 0
tree.pop('runoff-vol', None)
tree.pop('et-vol', None)
tree.pop('inf-vol', None)
if 'distribution' in tree:
for subtree in tree['distribution'].values():
postpass(subtree)
def compute_bmp_effect(census, m2_per_pixel, precip):
"""
Compute the overall amount of water retained by infiltration/retention
type BMP's.
    Result is the fraction of runoff remaining after water is trapped in
    infiltration/retention BMPs.
"""
meters_per_inch = 0.0254
cubic_meters = census['runoff-vol'] * meters_per_inch * m2_per_pixel
# 'runoff-vol' in census is in inches*#cells
bmp_dict = census.get('BMPs', {})
bmp_keys = set(bmp_dict.keys())
reduction = 0.0
for bmp in set.intersection(set(get_bmps()), bmp_keys):
bmp_area = bmp_dict[bmp]
storage_space = (lookup_bmp_storage(bmp) * bmp_area)
max_reduction = lookup_bmp_drainage_ratio(bmp) * bmp_area * precip * meters_per_inch
bmp_reduction = min(max_reduction, storage_space)
reduction += bmp_reduction
return 0 if not cubic_meters else \
max(0.0, cubic_meters - reduction) / cubic_meters
def simulate_modifications(census, fn, cell_res, precip, pc=False):
"""
Simulate effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`fn` is as described in `simulate_water_quality`.
`cell_res` is as described in `simulate_water_quality`.
"""
mod = create_modified_census(census)
simulate_water_quality(mod, cell_res, fn, precolumbian=pc)
pct = compute_bmp_effect(mod, cell_res, precip)
simulate_water_quality(mod, cell_res, fn, pct=pct, precolumbian=pc)
postpass(mod)
unmod = create_unmodified_census(census)
simulate_water_quality(unmod, cell_res, fn, precolumbian=pc)
postpass(unmod)
return {
'unmodified': unmod,
'modified': mod
}
def simulate_day(census, precip, cell_res=10, precolumbian=False):
"""
Simulate a day, including water quality effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`cell_res` is as described in `simulate_water_quality`.
`precolumbian` indicates that artificial types should be turned
into forest.
"""
et_max = 0.207
# From the EPA WaterSense data finder for the Philadelphia airport (19153)
# Converted to daily number in inches per day.
# http://www3.epa.gov/watersense/new_homes/wb_data_finder.html
# TODO: include Potential Max ET as a data layer from CGIAR
# http://csi.cgiar.org/aridity/Global_Aridity_PET_Methodolgy.asp
if 'modifications' in census:
verify_census(census)
def fn(cell, cell_count):
# Compute et for cell type
split = cell.split(':')
if (len(split) == 2):
(land_use, bmp) = split
else:
(_, land_use, bmp) = split
et = et_max * lookup_ki(bmp or land_use)
# Simulate the cell for one day
return simulate_cell_day(precip, et, cell, cell_count)
return simulate_modifications(census, fn, cell_res, precip, precolumbian)
def verify_census(census):
"""
Assures that there is no soil type/land cover pair
in a modification census that isn't in the AoI census.
"""
for modification in census['modifications']:
for land_cover in modification['distribution']:
if land_cover not in census['distribution']:
raise ValueError("Invalid modification census")
| WikiWatershed/tr-55 | tr55/model.py | Python | apache-2.0 | 13,671 |
'''
Kurgan AI Web Application Security Analyzer.
http://www.kurgan.com.br/
Author: Glaudson Ocampos - <[email protected]>
Created in May, 11th 2016.
'''
import db.db as db
import config as cf
class WebServer(object):
banner = None
os = None
server = None
framework = None
version = None
options = None
def set_banner(self, val):
self.banner = val
def get_banner(self):
return self.banner
def set_os(self, val):
self.os = val
def get_os(self):
return self.os
def set_server(self, val):
self.server = val
def get_server(self):
return self.server
def set_version(self,val):
self.version = val
def get_version(self):
return self.version
def set_options(self,val):
self.options = val
def get_options(self):
return self.options
def check_os(self):
os_possibles = {"Debian","Fedora","Windows","SuSE","marrakesh","RedHat","Unix"}
for i in os_possibles:
if i in self.banner:
self.os = i
break
def check_server(self):
#server_possibles = {"nginx", "Apache", "Tomcat", "JBoss", "IIS", "X-Varnish"}
        mydb = db.DB()
query = "SELECT DISTINCT name FROM server"
database = cf.DB_WEBSERVERS
servers_in_database = mydb.getData(query,database)
server_possibles = list(servers_in_database)
for j in server_possibles:
for i in j:
if i in self.banner:
self.server = i
break
def check_version(self):
if self.server is None:
return None
else:
            mydb = db.DB()
            name = self.server
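            # NOTE: `name` is interpolated directly into the SQL string below;
            # if it could ever carry untrusted input, a parameterized query
            # would be safer.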
query = "SELECT DISTINCT version FROM server WHERE name='" + name + "'"
database = cf.DB_WEBSERVERS
servers_in_database = mydb.getData(query,database)
v_possibles = list(servers_in_database)
for j in v_possibles:
for i in j:
if i in self.banner:
self.version = i
break
def check_options(self):
op_possibles = {'GET','POST','PUT','HEAD','OPTIONS','DELETE','TRACE','PATCH','CONNECT'}
op_in_server = []
for i in op_possibles:
if i in self.options:
op_in_server.append(i)
return op_in_server
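# Minimal usage sketch (the banner string is illustrative; check_server() and
# check_version() also need the configured web-server database):
#
#     ws = WebServer()
#     ws.set_banner('Apache/2.4.10 (Debian)')
#     ws.check_os()        # sets ws.os to 'Debian' for this banner
#     ws.check_server()    # matches the banner against known server names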
class Framework(object):
framework = None
X_Powered_By = None
def set_X_Powered_By(self, val):
self.X_Powered_By = val
def get_X_Powered_By(self):
return self.X_Powered_By
def set_framework(self, val):
self.framework = val
def get_framework(self):
return self.framework
    # also check the extension
def check_framework(self):
fw_possibles = {"PHP","ASP.NET","JSP","Perl","CGI"}
for i in fw_possibles:
if i in self.X_Powered_By:
self.framework = i
break
class Application(object):
extension = None
cookie = None
has_javascript = None
def set_extension(self, val):
self.extension = val
def get_extension(self):
return self.extension
def set_cookie(self, val):
self.cookie = val
def get_cookie(self):
return self.cookie
def set_has_javascript(self, val):
self.has_javascript = val
def get_has_javascript(self):
return self.has_javascript
def check_extension(self):
        weight_html_framework = 0
        if self.extension == 'html':
            weight_html_framework += 10
        return weight_html_framework
| glaudsonml/kurgan-ai | libs/WebServer.py | Python | apache-2.0 | 3,716 |
#############################################################################
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
#
# Project Name : IEEE 802.11 Timeline Tool *
#
# Author : Alex Ashley
#
#############################################################################
from django.db import models
from django.utils.translation import ugettext_lazy as _
class MeetingType(object):
def __init__(self,code,descr):
self.code = code
self.description = descr
def __str__(self):
return '%s'%self.description
def __unicode__(self):
return self.description
class MeetingReport(models.Model):
Plenary = MeetingType('P', 'Plenary')
Interim = MeetingType('I', 'Interim')
Special = MeetingType('S', 'Special')
    _MEETING_TYPES = [(b.code, b.description) for b in (Plenary, Interim, Special)]
id = models.AutoField(primary_key=True)
session = models.DecimalField(unique=True, db_index=True, decimal_places=1, max_digits=5, help_text=_('Session number'))
start = models.DateField(help_text=_('Session start date'))
end = models.DateField(help_text=_('Session end date'))
cancelled = models.BooleanField(default=False,help_text=_(u'Session was cancelled'))
pending = models.BooleanField(default=True,help_text=_(u'Reports are in-progress and will be provided later'))
#null=True, blank=True,
report = models.URLField(null=True, blank=True, help_text=_('URL pointing to meeting report'))
minutes_doc = models.URLField(null=True, blank=True,
help_text=_('URL pointing to meeting minutes in Word format'))
minutes_pdf = models.URLField(null=True, blank=True,
help_text=_('URL pointing to meeting minutes in PDF format'))
venue = models.CharField(max_length=100, help_text=_('Name of meeting venue'))
location = models.CharField(max_length=100, help_text=_('Location of meeting venue'))
meeting_type = models.CharField(max_length=2, choices=_MEETING_TYPES, help_text=_('Plenary or Interim'))
@property
def session_num(self):
s = int(self.session)
return s if s==self.session else self.session
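        # e.g. a session of Decimal('97.0') yields 97, while Decimal('97.5') is
        # returned unchanged (illustrative values).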
def __unicode__(self):
try:
return '%03.1f: %s'%(int(self.session),self.location)
except (ValueError,TypeError):
return self.location
| asrashley/ieee-802-11-timeline | report/models.py | Python | apache-2.0 | 3,172 |
# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_serialization import jsonutils
from magnum.conductor import k8s_monitor
from magnum.conductor import mesos_monitor
from magnum.conductor import monitors
from magnum.conductor import swarm_monitor
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.db import utils
class MonitorsTestCase(base.TestCase):
test_metrics_spec = {
'metric1': {
'unit': 'metric1_unit',
'func': 'metric1_func',
},
'metric2': {
'unit': 'metric2_unit',
'func': 'metric2_func',
},
}
def setUp(self):
super(MonitorsTestCase, self).setUp()
bay = utils.get_test_bay(node_addresses=['1.2.3.4'],
api_address='https://5.6.7.8:2376')
self.bay = objects.Bay(self.context, **bay)
self.monitor = swarm_monitor.SwarmMonitor(self.context, self.bay)
self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.bay)
self.mesos_monitor = mesos_monitor.MesosMonitor(self.context,
self.bay)
p = mock.patch('magnum.conductor.swarm_monitor.SwarmMonitor.'
'metrics_spec', new_callable=mock.PropertyMock)
self.mock_metrics_spec = p.start()
self.mock_metrics_spec.return_value = self.test_metrics_spec
self.addCleanup(p.stop)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_success(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'swarm'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsInstance(monitor, swarm_monitor.SwarmMonitor)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_k8s_bay(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'kubernetes'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsInstance(monitor, k8s_monitor.K8sMonitor)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_mesos_bay(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'mesos'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsInstance(monitor, mesos_monitor.MesosMonitor)
@mock.patch('magnum.objects.BayModel.get_by_uuid')
def test_create_monitor_unsupported_coe(self, mock_baymodel_get_by_uuid):
baymodel = mock.MagicMock()
baymodel.coe = 'unsupported'
mock_baymodel_get_by_uuid.return_value = baymodel
monitor = monitors.create_monitor(self.context, self.bay)
self.assertIsNone(monitor)
@mock.patch('magnum.common.docker_utils.docker_for_bay')
def test_swarm_monitor_pull_data_success(self, mock_docker_for_bay):
mock_docker = mock.MagicMock()
mock_docker.info.return_value = {'DriverStatus': [[
u' \u2514 Reserved Memory', u'0 B / 1 GiB']]}
mock_docker.containers.return_value = [mock.MagicMock()]
mock_docker.inspect_container.return_value = 'test_container'
mock_docker_for_bay.return_value.__enter__.return_value = mock_docker
self.monitor.pull_data()
self.assertEqual([{'MemTotal': 1073741824.0}],
self.monitor.data['nodes'])
self.assertEqual(['test_container'], self.monitor.data['containers'])
@mock.patch('magnum.common.docker_utils.docker_for_bay')
def test_swarm_monitor_pull_data_raise(self, mock_docker_for_bay):
mock_container = mock.MagicMock()
mock_docker = mock.MagicMock()
mock_docker.info.return_value = {'DriverStatus': [[
u' \u2514 Reserved Memory', u'0 B / 1 GiB']]}
mock_docker.containers.return_value = [mock_container]
mock_docker.inspect_container.side_effect = Exception("inspect error")
mock_docker_for_bay.return_value.__enter__.return_value = mock_docker
self.monitor.pull_data()
self.assertEqual([{'MemTotal': 1073741824.0}],
self.monitor.data['nodes'])
self.assertEqual([mock_container], self.monitor.data['containers'])
def test_swarm_monitor_get_metric_names(self):
names = self.monitor.get_metric_names()
self.assertEqual(sorted(['metric1', 'metric2']), sorted(names))
def test_swarm_monitor_get_metric_unit(self):
unit = self.monitor.get_metric_unit('metric1')
self.assertEqual('metric1_unit', unit)
def test_swarm_monitor_compute_metric_value(self):
mock_func = mock.MagicMock()
mock_func.return_value = 'metric1_value'
self.monitor.metric1_func = mock_func
value = self.monitor.compute_metric_value('metric1')
self.assertEqual('metric1_value', value)
def test_swarm_monitor_compute_memory_util(self):
test_data = {
'nodes': [
{
'Name': 'node',
'MemTotal': 20,
},
],
'containers': [
{
'Name': 'container',
'HostConfig': {
'Memory': 10,
},
},
],
}
self.monitor.data = test_data
mem_util = self.monitor.compute_memory_util()
self.assertEqual(50, mem_util)
test_data = {
'nodes': [],
'containers': [],
}
self.monitor.data = test_data
mem_util = self.monitor.compute_memory_util()
self.assertEqual(0, mem_util)
@mock.patch('magnum.conductor.k8s_api.create_k8s_api')
def test_k8s_monitor_pull_data_success(self, mock_k8s_api):
mock_nodes = mock.MagicMock()
mock_node = mock.MagicMock()
mock_node.status = mock.MagicMock()
mock_node.status.capacity = "{'memory': '2000Ki'}"
mock_nodes.items = [mock_node]
mock_k8s_api.return_value.list_namespaced_node.return_value = (
mock_nodes)
mock_pods = mock.MagicMock()
mock_pod = mock.MagicMock()
mock_pod.spec = mock.MagicMock()
mock_container = mock.MagicMock()
mock_container.resources = mock.MagicMock()
mock_container.resources.limits = "{'memory':'100Mi'}"
mock_pod.spec.containers = [mock_container]
mock_pods.items = [mock_pod]
mock_k8s_api.return_value.list_namespaced_pod.return_value = mock_pods
self.k8s_monitor.pull_data()
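        # Expected values: 2000Ki -> 2000 * 1024 = 2048000 and
        # 100Mi -> 100 * 1024**2 = 104857600.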
self.assertEqual(self.k8s_monitor.data['nodes'],
[{'Memory': 2048000.0}])
self.assertEqual(self.k8s_monitor.data['pods'],
[{'Memory': 104857600.0}])
def test_k8s_monitor_get_metric_names(self):
k8s_metric_spec = 'magnum.conductor.k8s_monitor.K8sMonitor.'\
'metrics_spec'
with mock.patch(k8s_metric_spec,
new_callable=mock.PropertyMock) as mock_k8s_metric:
mock_k8s_metric.return_value = self.test_metrics_spec
names = self.k8s_monitor.get_metric_names()
self.assertEqual(sorted(['metric1', 'metric2']), sorted(names))
def test_k8s_monitor_get_metric_unit(self):
k8s_metric_spec = 'magnum.conductor.k8s_monitor.K8sMonitor.' \
'metrics_spec'
with mock.patch(k8s_metric_spec,
new_callable=mock.PropertyMock) as mock_k8s_metric:
mock_k8s_metric.return_value = self.test_metrics_spec
unit = self.k8s_monitor.get_metric_unit('metric1')
self.assertEqual('metric1_unit', unit)
def test_k8s_monitor_compute_memory_util(self):
test_data = {
'nodes': [
{
'Memory': 20,
},
],
'pods': [
{
'Memory': 10,
},
],
}
self.k8s_monitor.data = test_data
mem_util = self.k8s_monitor.compute_memory_util()
self.assertEqual(50, mem_util)
test_data = {
'nodes': [],
'pods': [],
}
self.k8s_monitor.data = test_data
mem_util = self.k8s_monitor.compute_memory_util()
self.assertEqual(0, mem_util)
@mock.patch('magnum.common.urlfetch.get')
def test_mesos_monitor_pull_data_success(self, mock_url_get):
state_json = {
'slaves': [{
'resources': {
'mem': 100
},
'used_resources': {
'mem': 50
}
}]
}
state_json = jsonutils.dumps(state_json)
mock_url_get.return_value = state_json
self.mesos_monitor.pull_data()
self.assertEqual(self.mesos_monitor.data['mem_total'],
100)
self.assertEqual(self.mesos_monitor.data['mem_used'],
50)
def test_mesos_monitor_get_metric_names(self):
mesos_metric_spec = 'magnum.conductor.mesos_monitor.MesosMonitor.'\
'metrics_spec'
with mock.patch(mesos_metric_spec,
new_callable=mock.PropertyMock) as mock_mesos_metric:
mock_mesos_metric.return_value = self.test_metrics_spec
names = self.mesos_monitor.get_metric_names()
self.assertEqual(sorted(['metric1', 'metric2']), sorted(names))
def test_mesos_monitor_get_metric_unit(self):
mesos_metric_spec = 'magnum.conductor.mesos_monitor.MesosMonitor.' \
'metrics_spec'
with mock.patch(mesos_metric_spec,
new_callable=mock.PropertyMock) as mock_mesos_metric:
mock_mesos_metric.return_value = self.test_metrics_spec
unit = self.mesos_monitor.get_metric_unit('metric1')
self.assertEqual('metric1_unit', unit)
def test_mesos_monitor_compute_memory_util(self):
test_data = {
'mem_total': 100,
'mem_used': 50
}
self.mesos_monitor.data = test_data
mem_util = self.mesos_monitor.compute_memory_util()
self.assertEqual(50, mem_util)
test_data = {
'mem_total': 0,
'pods': 0,
}
self.mesos_monitor.data = test_data
mem_util = self.mesos_monitor.compute_memory_util()
self.assertEqual(0, mem_util)
| dimtruck/magnum | magnum/tests/unit/conductor/test_monitors.py | Python | apache-2.0 | 11,331 |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from deckhand.db.sqlalchemy import api as db_api
from deckhand.tests import test_utils
from deckhand.tests.unit.db import base
class TestRevisionDiffing(base.TestDbBase):
def _verify_buckets_status(self, revision_id, comparison_revision_id,
expected):
# Verify that actual and expected results match, despite the order of
# `comparison_revision_id` and `revision_id` args.
revision_ids = [revision_id, comparison_revision_id]
for rev_ids in (revision_ids, reversed(revision_ids)):
actual = db_api.revision_diff(*rev_ids)
self.assertEqual(expected, actual)
def test_revision_diff_null(self):
self._verify_buckets_status(0, 0, {})
def test_revision_diff_created(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
self._verify_buckets_status(
0, revision_id, {bucket_name: 'created'})
def test_revision_diff_multi_bucket_created(self):
revision_ids = []
bucket_names = []
for _ in range(3):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
bucket_names.append(bucket_name)
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
revision_ids.append(revision_id)
# Between revision 1 and 0, 1 bucket is created.
self._verify_buckets_status(
0, revision_ids[0], {b: 'created' for b in bucket_names[:1]})
# Between revision 2 and 0, 2 buckets are created.
self._verify_buckets_status(
0, revision_ids[1], {b: 'created' for b in bucket_names[:2]})
# Between revision 3 and 0, 3 buckets are created.
self._verify_buckets_status(
0, revision_ids[2], {b: 'created' for b in bucket_names})
def test_revision_diff_self(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
self._verify_buckets_status(
revision_id, revision_id, {bucket_name: 'unmodified'})
def test_revision_diff_multi_bucket_self(self):
bucket_names = []
revision_ids = []
for _ in range(3):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
# Store each bucket that was created.
bucket_names.append(bucket_name)
documents = self.create_documents(bucket_name, payload)
# Store each revision that was created.
revision_id = documents[0]['revision_id']
revision_ids.append(revision_id)
# The last revision should contain history for the previous 2 revisions
# such that its diff history will show history for 3 buckets. Similarly
# the 2nd revision will have history for 2 buckets and the 1st revision
# for 1 bucket.
# 1st revision has revision history for 1 bucket.
self._verify_buckets_status(
revision_ids[0], revision_ids[0], {bucket_names[0]: 'unmodified'})
# 2nd revision has revision history for 2 buckets.
self._verify_buckets_status(
revision_ids[1], revision_ids[1],
{b: 'unmodified' for b in bucket_names[:2]})
# 3rd revision has revision history for 3 buckets.
self._verify_buckets_status(
revision_ids[2], revision_ids[2],
{b: 'unmodified' for b in bucket_names})
def test_revision_diff_modified(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
payload[0]['data'] = {'modified': 'modified'}
comparison_documents = self.create_documents(bucket_name, payload)
comparison_revision_id = comparison_documents[0]['revision_id']
self._verify_buckets_status(
revision_id, comparison_revision_id, {bucket_name: 'modified'})
def test_revision_diff_multi_revision_modified(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
revision_ids = []
for _ in range(3):
payload[0]['data'] = {'modified': test_utils.rand_name('modified')}
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
revision_ids.append(revision_id)
for pair in [(0, 1), (0, 2), (1, 2)]:
self._verify_buckets_status(
revision_ids[pair[0]], revision_ids[pair[1]],
{bucket_name: 'modified'})
def test_revision_diff_multi_revision_multi_bucket_modified(self):
revision_ids = []
bucket_name = test_utils.rand_name('bucket')
alt_bucket_name = test_utils.rand_name('bucket')
bucket_names = [bucket_name, alt_bucket_name] * 2
# Create revisions by modifying documents in `bucket_name` and
# `alt_bucket_name`.
for bucket_idx in range(4):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
documents = self.create_documents(
bucket_names[bucket_idx], payload)
revision_id = documents[0]['revision_id']
revision_ids.append(revision_id)
# Between revision_ids[0] and [1], bucket_name is unmodified and
# alt_bucket_name is created.
self._verify_buckets_status(
revision_ids[0], revision_ids[1],
{bucket_name: 'unmodified', alt_bucket_name: 'created'})
# Between revision_ids[0] and [2], bucket_name is modified (by 2) and
# alt_bucket_name is created (by 1).
self._verify_buckets_status(
revision_ids[0], revision_ids[2],
{bucket_name: 'modified', alt_bucket_name: 'created'})
# Between revision_ids[0] and [3], bucket_name is modified (by [2]) and
# alt_bucket_name is created (by [1]) (as well as modified by [3]).
self._verify_buckets_status(
revision_ids[0], revision_ids[3],
{bucket_name: 'modified', alt_bucket_name: 'created'})
# Between revision_ids[1] and [2], bucket_name is modified but
# alt_bucket_name remains unmodified.
self._verify_buckets_status(
revision_ids[1], revision_ids[2],
{bucket_name: 'modified', alt_bucket_name: 'unmodified'})
# Between revision_ids[1] and [3], bucket_name is modified (by [2]) and
# alt_bucket_name is modified by [3].
self._verify_buckets_status(
revision_ids[1], revision_ids[3],
{bucket_name: 'modified', alt_bucket_name: 'modified'})
# Between revision_ids[2] and [3], alt_bucket_name is modified but
# bucket_name remains unmodified.
self._verify_buckets_status(
revision_ids[2], revision_ids[3],
{bucket_name: 'unmodified', alt_bucket_name: 'modified'})
def test_revision_diff_ignore_bucket_with_unrelated_documents(self):
payload = base.DocumentFixture.get_minimal_fixture()
alt_payload = base.DocumentFixture.get_minimal_fixture()
bucket_name = test_utils.rand_name('bucket')
alt_bucket_name = test_utils.rand_name('bucket')
# Create a bucket with a single document.
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
# Create another bucket with an entirely different document (different
# schema and metadata.name).
self.create_documents(alt_bucket_name, alt_payload)
# Modify the document from the 1st bucket.
payload['data'] = {'modified': 'modified'}
documents = self.create_documents(bucket_name, payload)
comparison_revision_id = documents[0]['revision_id']
# The `alt_bucket_name` should be created.
self._verify_buckets_status(
revision_id, comparison_revision_id,
{bucket_name: 'modified', alt_bucket_name: 'created'})
def test_revision_diff_ignore_bucket_with_all_unrelated_documents(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
alt_payload = copy.deepcopy(payload)
bucket_name = test_utils.rand_name('bucket')
alt_bucket_name = test_utils.rand_name('bucket')
# Create a bucket with 3 documents.
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
# Modify all 3 documents from first bucket.
for idx in range(3):
alt_payload[idx]['name'] = test_utils.rand_name('name')
alt_payload[idx]['schema'] = test_utils.rand_name('schema')
self.create_documents(
alt_bucket_name, alt_payload)
# Modify the document from the 1st bucket.
payload[0]['data'] = {'modified': 'modified'}
documents = self.create_documents(bucket_name, payload)
comparison_revision_id = documents[0]['revision_id']
# The alt_bucket_name should be created.
self._verify_buckets_status(
revision_id, comparison_revision_id,
{bucket_name: 'modified', alt_bucket_name: 'created'})
def test_revision_diff_deleted(self):
payload = base.DocumentFixture.get_minimal_fixture()
bucket_name = test_utils.rand_name('bucket')
created_documents = self.create_documents(bucket_name, payload)
revision_id = created_documents[0]['revision_id']
# Delete the previously created document.
deleted_documents = self.create_documents(bucket_name, [])
comparison_revision_id = deleted_documents[0]['revision_id']
self._verify_buckets_status(
revision_id, comparison_revision_id, {bucket_name: 'deleted'})
def test_revision_diff_delete_then_recreate(self):
payload = base.DocumentFixture.get_minimal_fixture()
bucket_name = test_utils.rand_name('bucket')
created_documents = self.create_documents(bucket_name, payload)
revision_id_1 = created_documents[0]['revision_id']
# Delete the previously created document.
deleted_documents = self.create_documents(bucket_name, [])
revision_id_2 = deleted_documents[0]['revision_id']
# Recreate the previously deleted document.
recreated_documents = self.create_documents(bucket_name, payload)
revision_id_3 = recreated_documents[0]['revision_id']
# Verify that the revision for recreated document compared to revision
# for deleted document is created, ignoring order.
self._verify_buckets_status(
revision_id_2, revision_id_3, {bucket_name: 'created'})
# Verify that the revision for recreated document compared to revision
# for created document is unmodified, ignoring order.
self._verify_buckets_status(
revision_id_1, revision_id_3, {bucket_name: 'unmodified'})
def test_revision_diff_ignore_mistake_document(self):
payload = base.DocumentFixture.get_minimal_fixture()
bucket_name = test_utils.rand_name('first_bucket')
created_documents = self.create_documents(bucket_name, payload)
revision_id_1 = created_documents[0]['revision_id']
# Create then delete an "accidental" document create request.
alt_payload = base.DocumentFixture.get_minimal_fixture()
alt_bucket_name = test_utils.rand_name('mistake_bucket')
created_documents = self.create_documents(alt_bucket_name, alt_payload)
revision_id_2 = created_documents[0]['revision_id']
deleted_documents = self.create_documents(alt_bucket_name, [])
revision_id_3 = deleted_documents[0]['revision_id']
alt_payload_2 = base.DocumentFixture.get_minimal_fixture()
alt_bucket_name_2 = test_utils.rand_name('second_bucket')
created_documents = self.create_documents(
alt_bucket_name_2, alt_payload_2)
revision_id_4 = created_documents[0]['revision_id']
self._verify_buckets_status(
revision_id_1, revision_id_2, {bucket_name: 'unmodified',
alt_bucket_name: 'created'})
self._verify_buckets_status(
revision_id_2, revision_id_3, {bucket_name: 'unmodified',
alt_bucket_name: 'deleted'})
self._verify_buckets_status(
revision_id_1, revision_id_3, {bucket_name: 'unmodified'})
# Should not contain information about `alt_bucket_name` as it was a
# "mistake": created then deleted between the revisions in question.
self._verify_buckets_status(
revision_id_1, revision_id_4,
{bucket_name: 'unmodified', alt_bucket_name_2: 'created'})
| att-comdev/deckhand | deckhand/tests/unit/db/test_revision_diffing.py | Python | apache-2.0 | 14,036 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover CampaignTargetService."""
__author__ = '[email protected] (Stan Grinberg)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.adwords import HTTP_PROXY
from tests.adspygoogle.adwords import SERVER_V201109
from tests.adspygoogle.adwords import TEST_VERSION_V201109
from tests.adspygoogle.adwords import VERSION_V201109
from tests.adspygoogle.adwords import client
class CampaignTargetServiceTestV201109(unittest.TestCase):
"""Unittest suite for CampaignTargetService using v201109."""
SERVER = SERVER_V201109
VERSION = VERSION_V201109
client.debug = False
service = None
campaign_id = '0'
def setUp(self):
"""Prepare unittest."""
print self.id()
if not self.__class__.service:
self.__class__.service = client.GetCampaignTargetService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
if self.__class__.campaign_id == '0':
campaign_service = client.GetCampaignService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'name': 'Campaign #%s' % Utils.GetUniqueName(),
'status': 'PAUSED',
'biddingStrategy': {
'xsi_type': 'ManualCPC'
},
'budget': {
'period': 'DAILY',
'amount': {
'microAmount': '1000000'
},
'deliveryMethod': 'STANDARD'
}
}
}]
self.__class__.campaign_id = campaign_service.Mutate(
operations)[0]['value'][0]['id']
def testGetAllTargets(self):
"""Test whether we can fetch all existing targets for given campaign."""
selector = {
'campaignIds': [self.__class__.campaign_id]
}
self.assert_(isinstance(self.__class__.service.Get(selector), tuple))
def testAddAdScheduleTarget(self):
"""Test whether we can add an ad schedule target to campaign."""
operations = [{
'operator': 'SET',
'operand': {
'xsi_type': 'AdScheduleTargetList',
'campaignId': self.__class__.campaign_id,
'targets': [{
'xsi_type': 'AdScheduleTarget',
'dayOfWeek': 'MONDAY',
'startHour': '8',
'startMinute': 'ZERO',
'endHour': '17',
'endMinute': 'ZERO',
'bidMultiplier': '1.0',
}]
}
}]
self.assert_(isinstance(self.__class__.service.Mutate(operations), tuple))
def makeTestSuiteV201109():
"""Set up test suite using v201109.
Returns:
TestSuite test suite using v201109.
"""
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(CampaignTargetServiceTestV201109))
return suite
if __name__ == '__main__':
suites = []
if TEST_VERSION_V201109:
suites.append(makeTestSuiteV201109())
if suites:
alltests = unittest.TestSuite(suites)
unittest.main(defaultTest='alltests')
| nearlyfreeapps/python-googleadwords | tests/adspygoogle/adwords/campaign_target_service_unittest.py | Python | apache-2.0 | 3,781 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BadgeByCourse.title_en'
db.add_column('badges_badgebycourse', 'title_en',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_es'
db.add_column('badges_badgebycourse', 'title_es',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_it'
db.add_column('badges_badgebycourse', 'title_it',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_pt'
db.add_column('badges_badgebycourse', 'title_pt',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_fr'
db.add_column('badges_badgebycourse', 'title_fr',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.title_de'
db.add_column('badges_badgebycourse', 'title_de',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_en'
db.add_column('badges_badgebycourse', 'description_en',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_es'
db.add_column('badges_badgebycourse', 'description_es',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_it'
db.add_column('badges_badgebycourse', 'description_it',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_pt'
db.add_column('badges_badgebycourse', 'description_pt',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_fr'
db.add_column('badges_badgebycourse', 'description_fr',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.description_de'
db.add_column('badges_badgebycourse', 'description_de',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'BadgeByCourse.title_en'
db.delete_column('badges_badgebycourse', 'title_en')
# Deleting field 'BadgeByCourse.title_es'
db.delete_column('badges_badgebycourse', 'title_es')
# Deleting field 'BadgeByCourse.title_it'
db.delete_column('badges_badgebycourse', 'title_it')
# Deleting field 'BadgeByCourse.title_pt'
db.delete_column('badges_badgebycourse', 'title_pt')
# Deleting field 'BadgeByCourse.title_fr'
db.delete_column('badges_badgebycourse', 'title_fr')
# Deleting field 'BadgeByCourse.title_de'
db.delete_column('badges_badgebycourse', 'title_de')
# Deleting field 'BadgeByCourse.description_en'
db.delete_column('badges_badgebycourse', 'description_en')
# Deleting field 'BadgeByCourse.description_es'
db.delete_column('badges_badgebycourse', 'description_es')
# Deleting field 'BadgeByCourse.description_it'
db.delete_column('badges_badgebycourse', 'description_it')
# Deleting field 'BadgeByCourse.description_pt'
db.delete_column('badges_badgebycourse', 'description_pt')
# Deleting field 'BadgeByCourse.description_fr'
db.delete_column('badges_badgebycourse', 'description_fr')
# Deleting field 'BadgeByCourse.description_de'
db.delete_column('badges_badgebycourse', 'description_de')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254'})
},
'badges.alignment': {
'Meta': {'object_name': 'Alignment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.award': {
'Meta': {'ordering': "['-modified', '-awarded']", 'unique_together': "(('user', 'badge'),)", 'object_name': 'Award'},
'awarded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'awards_set'", 'to': "orm['badges.Badge']"}),
'evidence': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'identity_hashed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'identity_salt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'identity_type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '255', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_awards'", 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'4d5a6e5e-c0cb-11e4-a589-08002759738a'", 'max_length': '255', 'db_index': 'True'})
},
'badges.badge': {
'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
'alignments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'alignments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Alignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'criteria': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'tags'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'badges.badgebycourse': {
'Meta': {'object_name': 'BadgeByCourse'},
'color': ('django.db.models.fields.TextField', [], {}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'criteria': ('django.db.models.fields.TextField', [], {}),
'criteria_type': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'description_de': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_es': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_pt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title_de': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_es': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_it': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title_pt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'badges.identity': {
'Meta': {'object_name': 'Identity'},
'hashed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'identity'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'badges.revocation': {
'Meta': {'object_name': 'Revocation'},
'award': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revocations'", 'to': "orm['badges.Award']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'badges.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courses.course': {
'Meta': {'ordering': "['order']", 'object_name': 'Course'},
'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'certification_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'null': 'True', 'to': "orm['badges.Badge']"}),
'created_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'courses_created_of'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['courses.Course']"}),
'description': ('tinymce.models.HTMLField', [], {}),
'description_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'description_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'ects': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
'estimated_effort': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_de': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_en': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_es': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_it': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'estimated_effort_pt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'external_certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forum_slug': ('django.db.models.fields.CharField', [], {'max_length': '350', 'null': 'True', 'blank': 'True'}),
'group_max_size': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '50'}),
'has_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hashtag': ('django.db.models.fields.CharField', [], {'default': "'Hashtag'", 'max_length': '128'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'is_activity_clonable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['courses.Language']", 'symmetrical': 'False'}),
'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'learning_goals_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'learning_goals_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'max_mass_emails_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_pt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'official_course': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'requirements_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'requirements_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'static_page': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['courses.StaticPage']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'through': "orm['courses.CourseStudent']", 'to': "orm['auth.User']"}),
'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_alt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'courses.coursestudent': {
'Meta': {'object_name': 'CourseStudent'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'old_course_status': ('django.db.models.fields.CharField', [], {'default': "'f'", 'max_length': '1'}),
'pos_lat': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'pos_lon': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rate': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'timestamp': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'})
},
'courses.courseteacher': {
'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courses.language': {
'Meta': {'object_name': 'Language'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'courses.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'body': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_de': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_en': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_es': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_fr': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_it': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'body_pt': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_de': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_es': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_it': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_pt': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['badges'] | GeographicaGS/moocng | moocng/badges/migrations/0011_badgesbycoursemultilang.py | Python | apache-2.0 | 26,422 |
import numpy as np
import matplotlib.pyplot as plt
def readmesh(fname):
"""
input
-----
fname: string
gmsh file name
output
------
V: array
vertices
E: array
element ids
"""
import gmsh
mesh = gmsh.Mesh()
mesh.read_msh(fname)
return mesh.Verts[:, :2], mesh.Elmts[2][1]
def identify_boundary(V):
"""
input
-----
V: array
vertices
output
------
d: dictionary
inflow, outflow, wall, cylinder
(unsorted)
"""
d = {}
II = np.where(np.abs(V[:, 0]) < 1e-13)
d["inflow"] = II
II = np.where(np.abs(V[:, 0] - 22.0) < 1e-13)
d["outflow"] = II
II = np.where(np.abs(V[:, 1]) < 1e-13)
J = np.where(np.abs(V[:, 1] - 4.1) < 1e-13)
d["wall"] = np.vstack((II, J)).ravel()
II = np.where(
np.abs(2 * np.sqrt((V[:, 0] - 2.0) ** 2 + (V[:, 1] - 2.0) ** 2) - 1.0) < 1e-13
)
d["cylinder"] = II
return d
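# Note (derived from the tolerances above, as a reading aid): the mesh is
# assumed to span x in [0, 22] and y in [0, 4.1], with a cylinder of radius
# 0.5 centred at (2, 2). np.where returns a tuple of index arrays, so the
# stored values can be used directly to index rows of V, as done in the
# plotting block below.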
if __name__ == "__main__":
V, E = readmesh("ns.msh")
d = identify_boundary(V)
plt.ion()
plt.triplot(V[:, 0], V[:, 1], E)
plt.axis("scaled")
II = d["inflow"]
plt.plot(V[II, 0], V[II, 1], "ro", markersize=10)
II = d["outflow"]
plt.plot(V[II, 0], V[II, 1], "bo", markersize=10)
II = d["wall"]
plt.plot(V[II, 0], V[II, 1], "gs", markersize=10)
II = d["cylinder"]
plt.plot(V[II, 0], V[II, 1], "m*", markersize=10)
plt.show()
| lukeolson/python-mesh-scripts | ns.py | Python | apache-2.0 | 1,505 |
CAMERA_MODE_PHOTO2 = 100
CAMERA_MODE_PHOTO = 0
CAMERA_MODE_FACE_BEAUTY = 1
CAMERA_MODE_PANORAMA = 2
CAMERA_MODE_SELF_WIDEVIEW = 3
CAMERA_MODE_SCENE_FRAME = 4
CAMERA_MODE_GESTURE_SHOT = 5
CAMERA_MODE_LIVE_PHOTO = 6
CAMERA_MODE_VIDEO = 7
CAMERA_MODE_PROFESSIONAL = 8
CAMERA_MODE_NIGHTSHOT = 9
CAMERA_MODE_PIP = 10
CAMERA_MODE_SPORTS = 11
CAMERA_MODE_VIV = 12
CAMERA_MODE_ZBAR_CODE = 13
CAMERA_MODE_REFOCUS = 14
CAMERA_MODE_CHROMAFLASH = 15
CAMERA_MODE_SUPERPIXEL = 16
CAMERA_MODE_CLEARSIGHT = 17
CAMERA_MODE_VIDEOBEAUTY = 18
CAMERA_MODE_VIDEOTIMELAPSE = 19
CAMERA_MODE_MONOCHROME = 20
CAMERA_MODE_PORTRAIT = 21
VALUE_CAPTURE_MODE_VIDEO = "video"
VALUE_CAPTURE_MODE_AUTO = "normal"
VALUE_CAPTURE_MODE_BEAUTYSHOT = "beautyshot"
VALUE_CAPTURE_MODE_NIGHTSHOT = "nightshot"
VALUE_CAPTURE_MODE_PANORAMA = "panorama"
VALUE_CAPTURE_MODE_WIDESELF = "wideself"
VALUE_CAPTURE_MODE_PROFESSIONAL = "professional"
VALUE_CAPTURE_MODE_SCENE_FRAME = "sceneframe"
VALUE_CAPTURE_MODE_SPORT = "sports"
VALUE_CAPTURE_MODE_PIP = "pip"
VALUE_CAPTURE_MODE_VIV = "viv"
VALUE_CAPTURE_MODE_ZBAR = "zbarcode"
VALUE_CAPTURE_MODE_REFOCUS = "refocus"
VALUE_CAPTURE_MODE_CHROMAFLASH = "chromaflash"
VALUE_CAPTURE_MODE_SUPERPIXEL = "superphoto"
VALUE_CAPTURE_MODE_VEDOBEAUTY = "videobeauty"
VALUE_CAPTURE_MODE_CLEARSIGHT = "clearsight"
VALUE_CAPTURE_MODE_VEDOTIMELAPSE = "videotimelapse"
VALUE_CAPTURE_MODE_MONOCHROME = "monochrome"
VALUE_CAPTURE_MODE_PORTRAIT = "picselfie"
VALUE_CAPTURE_MODE_VIDEOAUTOZOOM = "videoautozoom"
VALUE_CAPTURE_MODE_UNKNOWN = "unknown"
def get_mode_name(mode):
return {
CAMERA_MODE_PHOTO: VALUE_CAPTURE_MODE_AUTO,
CAMERA_MODE_FACE_BEAUTY: VALUE_CAPTURE_MODE_BEAUTYSHOT,
CAMERA_MODE_PANORAMA: VALUE_CAPTURE_MODE_PANORAMA,
CAMERA_MODE_SELF_WIDEVIEW: VALUE_CAPTURE_MODE_WIDESELF,
CAMERA_MODE_SCENE_FRAME: VALUE_CAPTURE_MODE_SCENE_FRAME,
CAMERA_MODE_GESTURE_SHOT: VALUE_CAPTURE_MODE_UNKNOWN,
CAMERA_MODE_LIVE_PHOTO: VALUE_CAPTURE_MODE_UNKNOWN,
CAMERA_MODE_VIDEO: VALUE_CAPTURE_MODE_VIDEO,
CAMERA_MODE_PROFESSIONAL: VALUE_CAPTURE_MODE_PROFESSIONAL,
CAMERA_MODE_NIGHTSHOT: VALUE_CAPTURE_MODE_NIGHTSHOT,
CAMERA_MODE_PIP: VALUE_CAPTURE_MODE_PIP,
CAMERA_MODE_SPORTS: VALUE_CAPTURE_MODE_SPORT,
CAMERA_MODE_VIV: VALUE_CAPTURE_MODE_VIV,
CAMERA_MODE_ZBAR_CODE: VALUE_CAPTURE_MODE_ZBAR,
CAMERA_MODE_REFOCUS: VALUE_CAPTURE_MODE_REFOCUS,
CAMERA_MODE_CHROMAFLASH: VALUE_CAPTURE_MODE_CHROMAFLASH,
CAMERA_MODE_SUPERPIXEL: VALUE_CAPTURE_MODE_SUPERPIXEL,
CAMERA_MODE_CLEARSIGHT: VALUE_CAPTURE_MODE_CLEARSIGHT,
CAMERA_MODE_VIDEOBEAUTY: VALUE_CAPTURE_MODE_VEDOBEAUTY,
CAMERA_MODE_VIDEOTIMELAPSE: VALUE_CAPTURE_MODE_VEDOTIMELAPSE,
CAMERA_MODE_MONOCHROME: VALUE_CAPTURE_MODE_MONOCHROME,
CAMERA_MODE_PORTRAIT: VALUE_CAPTURE_MODE_PORTRAIT
}.get(mode)
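# Illustrative usage (not called in this module): get_mode_name() returns the
# reporting name for a known mode id and None for ids missing from the table,
# e.g. get_mode_name(CAMERA_MODE_NIGHTSHOT) -> "nightshot", while
# get_mode_name(CAMERA_MODE_PHOTO2) -> None because 100 has no entry above.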
action = "android.myos.action.%s"
ACTION_NOMARL_CAMERA = "NOMARLCAMERA" # normal
ACTION_NS_CAMERA = "NSCAMERA" # night
ACTION_BEATY_CAMERA = "BEATYCAMERA" # beauty
ACTION_SUPERPIXEL_CAMERA = "SUPERPIXELCAMERA" # super photo
# ACTION_PRO_CAMERA = "PROCAMERA"
# ACTION_WIDESELFT_CAMERA = "WIDESELFCAMERA"
# ACTION_SPORT_CAMERA = "SPORTCAMERA"
# ACTION_SMARTFOCUS_CAMERA = "SMARTFOCUSCAMERA"
# ACTION_SMARTFLASH_CAMERA = "SMARTFLASHCAMERA"
# ACTION_PANORAMA_CAMERA = "PANORAMACAMERA"
# ACTION_MONOCHROME_CAMERA = "MONOCHROMECAMERA"
def get_actions():
actions = [action % ACTION_NS_CAMERA,
action % ACTION_BEATY_CAMERA,
action % ACTION_NOMARL_CAMERA,
action % ACTION_SUPERPIXEL_CAMERA]
return actions
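# Illustrative note: with action = "android.myos.action.%s", get_actions()
# yields strings such as "android.myos.action.NSCAMERA" and
# "android.myos.action.NOMARLCAMERA" (spelling follows the constants above).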
| liuxuyang/tools | kpiTest/data/mode_tag.py | Python | apache-2.0 | 3,667 |
# coding=utf-8
"""
This module, problem_019.py, solves the nineteenth Project Euler problem.
"""
from project_euler_problems.problem import Problem
from datetime import date
'''
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
# Solution from captainsafia, thanks! Link : https://gist.github.com/captainsafia/3390092
class ImplementedProblem(Problem):
"""This specific problem's implementation.
"""
def get_solution(self):
"""Solves the solution for problem 019.
:return: The solution for problem 019.
"""
number_of_sundays = 0
for year in range(1901, 2001):
for month in range(1, 13):
# date(...) will create a Date() instance.
# weekday() gets the current day as an integer between 0-6.
if date(year, month, 1).weekday() == 6:
number_of_sundays += 1
return number_of_sundays
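# Sanity check on the date arithmetic: 1900 is not a leap year (divisible by
# 100 but not 400), so it has 365 days and 1 Jan 1901 falls on a Tuesday,
# i.e. date(1901, 1, 1).weekday() == 1; only month-firsts with weekday() == 6
# (Sundays) are counted above.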
| utarsuno/urbtek | project_euler_problems/problems/problem_019.py | Python | apache-2.0 | 1,284 |
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import math
import paramiko
import random
import re
import time
import unicodedata
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import ssh_utils
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.ibm.storwize_svc import (
replication as storwize_rep)
from cinder.volume.drivers.ibm.storwize_svc import storwize_const
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import utils
from cinder.volume import volume_types
INTERVAL_1_SEC = 1
DEFAULT_TIMEOUT = 15
LOG = logging.getLogger(__name__)
storwize_svc_opts = [
cfg.ListOpt('storwize_svc_volpool_name',
default=['volpool'],
help='Comma separated list of storage system storage '
'pools for volumes.'),
cfg.IntOpt('storwize_svc_vol_rsize',
default=2,
min=-1, max=100,
help='Storage system space-efficiency parameter for volumes '
'(percentage)'),
cfg.IntOpt('storwize_svc_vol_warning',
default=0,
min=-1, max=100,
help='Storage system threshold for volume capacity warnings '
'(percentage)'),
cfg.BoolOpt('storwize_svc_vol_autoexpand',
default=True,
help='Storage system autoexpand parameter for volumes '
'(True/False)'),
cfg.IntOpt('storwize_svc_vol_grainsize',
default=256,
help='Storage system grain size parameter for volumes '
'(32/64/128/256)'),
cfg.BoolOpt('storwize_svc_vol_compression',
default=False,
help='Storage system compression option for volumes'),
cfg.BoolOpt('storwize_svc_vol_easytier',
default=True,
help='Enable Easy Tier for volumes'),
cfg.StrOpt('storwize_svc_vol_iogrp',
default='0',
help='The I/O group in which to allocate volumes. It can be a '
'comma-separated list in which case the driver will select an '
'io_group based on least number of volumes associated with the '
'io_group.'),
cfg.IntOpt('storwize_svc_flashcopy_timeout',
default=120,
min=1, max=600,
help='Maximum number of seconds to wait for FlashCopy to be '
'prepared.'),
cfg.BoolOpt('storwize_svc_multihostmap_enabled',
default=True,
                help='This option no longer has any effect. It is deprecated '
'and will be removed in the next release.',
deprecated_for_removal=True),
cfg.BoolOpt('storwize_svc_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('storwize_svc_stretched_cluster_partner',
default=None,
help='If operating in stretched cluster mode, specify the '
                    'name of the pool in which mirrored copies are stored. '
'Example: "pool2"'),
cfg.StrOpt('storwize_san_secondary_ip',
default=None,
help='Specifies secondary management IP or hostname to be '
'used if san_ip is invalid or becomes inaccessible.'),
cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
default=False,
help='Specifies that the volume not be formatted during '
'creation.'),
cfg.IntOpt('storwize_svc_flashcopy_rate',
default=50,
min=1, max=100,
help='Specifies the Storwize FlashCopy copy rate to be used '
                    'when creating a full volume copy. The default rate '
                    'is 50, and the valid rates are 1-100.'),
cfg.StrOpt('storwize_svc_mirror_pool',
default=None,
help='Specifies the name of the pool in which mirrored copy '
'is stored. Example: "pool2"'),
cfg.IntOpt('cycle_period_seconds',
default=300,
min=60, max=86400,
help='This defines an optional cycle period that applies to '
'Global Mirror relationships with a cycling mode of multi. '
'A Global Mirror relationship using the multi cycling_mode '
'performs a complete cycle at most once each period. '
'The default is 300 seconds, and the valid seconds '
'are 60-86400.'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts, group=configuration.SHARED_CONF_GROUP)
class StorwizeSSH(object):
"""SSH interface to IBM Storwize family and SVC storage systems."""
def __init__(self, run_ssh):
self._ssh = run_ssh
def _run_ssh(self, ssh_cmd):
try:
return self._ssh(ssh_cmd)
except processutils.ProcessExecutionError as e:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': e.stdout,
'err': e.stderr})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
"""Run an SSH command and return parsed output."""
raw = self._run_ssh(ssh_cmd)
return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
with_header=with_header)
def run_ssh_assert_no_output(self, ssh_cmd):
"""Run an SSH command and assert no output returned."""
out, err = self._run_ssh(ssh_cmd)
if len(out.strip()) != 0:
msg = (_('Expected no output from CLI command %(cmd)s, '
'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def run_ssh_check_created(self, ssh_cmd):
"""Run an SSH command and return the ID of the created object."""
out, err = self._run_ssh(ssh_cmd)
try:
match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
return match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def lsnode(self, node_id=None):
with_header = True
ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
if node_id:
with_header = False
ssh_cmd.append(node_id)
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def lslicense(self):
ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lsguicapabilities(self):
ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lssystem(self):
ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
return self.run_ssh_info(ssh_cmd)[0]
def lsmdiskgrp(self, pool):
ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!',
'"%s"' % pool]
try:
return self.run_ssh_info(ssh_cmd)[0]
except exception.VolumeBackendAPIException as ex:
LOG.warning("Failed to get pool %(pool)s info. "
"Exception: %(ex)s.", {'pool': pool,
'ex': ex})
return None
def lsiogrp(self):
ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsportip(self):
ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
@staticmethod
def _create_port_arg(port_type, port_name):
if port_type == 'initiator':
port = ['-iscsiname']
else:
port = ['-hbawwpn']
port.append(port_name)
return port
def mkhost(self, host_name, port_type, port_name):
port = self._create_port_arg(port_type, port_name)
ssh_cmd = ['svctask', 'mkhost', '-force'] + port
ssh_cmd += ['-name', '"%s"' % host_name]
return self.run_ssh_check_created(ssh_cmd)
def addhostport(self, host, port_type, port_name):
port = self._create_port_arg(port_type, port_name)
ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def lshost(self, host=None):
with_header = True
ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
if host:
with_header = False
ssh_cmd.append('"%s"' % host)
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def add_chap_secret(self, secret, host):
ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def lsiscsiauth(self):
ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfabric(self, wwpn=None, host=None):
ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!']
if wwpn:
ssh_cmd.extend(['-wwpn', wwpn])
elif host:
ssh_cmd.extend(['-host', '"%s"' % host])
else:
msg = (_('Must pass wwpn or host to lsfabric.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return self.run_ssh_info(ssh_cmd, with_header=True)
def mkvdiskhostmap(self, host, vdisk, lun, multihostmap):
"""Map vdisk to host.
If vdisk already mapped and multihostmap is True, use the force flag.
"""
ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host, vdisk]
if lun:
ssh_cmd.insert(ssh_cmd.index(vdisk), '-scsi')
ssh_cmd.insert(ssh_cmd.index(vdisk), lun)
if multihostmap:
ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
try:
self.run_ssh_check_created(ssh_cmd)
result_lun = self.get_vdiskhostmapid(vdisk, host)
if result_lun is None or (lun and lun != result_lun):
msg = (_('mkvdiskhostmap error:\n command: %(cmd)s\n '
'lun: %(lun)s\n result_lun: %(result_lun)s') %
{'cmd': ssh_cmd,
'lun': lun,
'result_lun': result_lun})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return result_lun
except Exception as ex:
if (not multihostmap and hasattr(ex, 'message') and
'CMMVC6071E' in ex.message):
LOG.error('storwize_svc_multihostmap_enabled is set '
'to False, not allowing multi host mapping.')
raise exception.VolumeDriverException(
message=_('CMMVC6071E The VDisk-to-host mapping was not '
'created because the VDisk is already mapped '
'to a host.\n"'))
with excutils.save_and_reraise_exception():
LOG.error('Error mapping VDisk-to-host')
def mkrcrelationship(self, master, aux, system, asyncmirror,
cyclingmode=False):
ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master,
'-aux', aux, '-cluster', system]
if asyncmirror:
ssh_cmd.append('-global')
if cyclingmode:
ssh_cmd.extend(['-cyclingmode', 'multi'])
return self.run_ssh_check_created(ssh_cmd)
def rmrcrelationship(self, relationship, force=False):
ssh_cmd = ['svctask', 'rmrcrelationship']
if force:
ssh_cmd += ['-force']
ssh_cmd += [relationship]
self.run_ssh_assert_no_output(ssh_cmd)
def switchrelationship(self, relationship, aux=True):
primary = 'aux' if aux else 'master'
ssh_cmd = ['svctask', 'switchrcrelationship', '-primary',
primary, relationship]
self.run_ssh_assert_no_output(ssh_cmd)
def startrcrelationship(self, rc_rel, primary=None):
ssh_cmd = ['svctask', 'startrcrelationship', '-force']
if primary:
ssh_cmd.extend(['-primary', primary])
ssh_cmd.append(rc_rel)
self.run_ssh_assert_no_output(ssh_cmd)
def ch_rcrelationship_cycleperiod(self, relationship,
cycle_period_seconds):
# Note: Can only change one attribute at a time,
# so define two ch_rcrelationship_xxx here
if cycle_period_seconds:
ssh_cmd = ['svctask', 'chrcrelationship']
ssh_cmd.extend(['-cycleperiodseconds',
six.text_type(cycle_period_seconds)])
ssh_cmd.append(relationship)
self.run_ssh_assert_no_output(ssh_cmd)
def ch_rcrelationship_changevolume(self, relationship,
changevolume, master):
# Note: Can only change one attribute at a time,
# so define two ch_rcrelationship_xxx here
if changevolume:
ssh_cmd = ['svctask', 'chrcrelationship']
if master:
ssh_cmd.extend(['-masterchange', changevolume])
else:
ssh_cmd.extend(['-auxchange', changevolume])
ssh_cmd.append(relationship)
self.run_ssh_assert_no_output(ssh_cmd)
def stoprcrelationship(self, relationship, access=False):
ssh_cmd = ['svctask', 'stoprcrelationship']
if access:
ssh_cmd.append('-access')
ssh_cmd.append(relationship)
self.run_ssh_assert_no_output(ssh_cmd)
def lsrcrelationship(self, rc_rel):
ssh_cmd = ['svcinfo', 'lsrcrelationship', '-delim', '!', rc_rel]
return self.run_ssh_info(ssh_cmd)
def lspartnership(self, system_name):
key_value = 'name=%s' % system_name
ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue',
key_value, '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lspartnershipcandidate(self):
ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def mkippartnership(self, ip_v4, bandwith=1000, backgroundcopyrate=50):
ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4',
'-clusterip', ip_v4, '-linkbandwidthmbits',
six.text_type(bandwith),
'-backgroundcopyrate', six.text_type(backgroundcopyrate)]
return self.run_ssh_assert_no_output(ssh_cmd)
def mkfcpartnership(self, system_name, bandwith=1000,
backgroundcopyrate=50):
ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits',
six.text_type(bandwith),
'-backgroundcopyrate', six.text_type(backgroundcopyrate),
system_name]
return self.run_ssh_assert_no_output(ssh_cmd)
def chpartnership(self, partnership_id, start=True):
action = '-start' if start else '-stop'
ssh_cmd = ['svctask', 'chpartnership', action, partnership_id]
return self.run_ssh_assert_no_output(ssh_cmd)
def rmvdiskhostmap(self, host, vdisk):
ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host,
'"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdiskhostmap(self, vdisk):
ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)
def lshostvdiskmap(self, host):
ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host]
return self.run_ssh_info(ssh_cmd, with_header=True)
def get_vdiskhostmapid(self, vdisk, host):
resp = self.lsvdiskhostmap(vdisk)
for mapping_info in resp:
if mapping_info['host_name'] == host:
lun_id = mapping_info['SCSI_id']
return lun_id
return None
def rmhost(self, host):
ssh_cmd = ['svctask', 'rmhost', '"%s"' % host]
self.run_ssh_assert_no_output(ssh_cmd)
def mkvdisk(self, name, size, units, pool, opts, params):
ssh_cmd = ['svctask', 'mkvdisk', '-name', '"%s"' % name, '-mdiskgrp',
'"%s"' % pool, '-iogrp', six.text_type(opts['iogrp']),
'-size', size, '-unit', units] + params
try:
return self.run_ssh_check_created(ssh_cmd)
except Exception as ex:
if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg:
vdisk = self.lsvdisk(name)
if vdisk:
LOG.warning('CMMVC6372W The virtualized storage '
'capacity that the cluster is using is '
'approaching the virtualized storage '
'capacity that is licensed.')
return vdisk['id']
with excutils.save_and_reraise_exception():
LOG.exception('Failed to create vdisk %(vol)s.',
{'vol': name})
def rmvdisk(self, vdisk, force=True):
ssh_cmd = ['svctask', 'rmvdisk']
if force:
ssh_cmd += ['-force']
ssh_cmd += ['"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdisk(self, vdisk):
"""Return vdisk attributes or None if it doesn't exist."""
ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
'"%s"' % vdisk]
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if not err:
return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
with_header=False)[0]
if 'CMMVC5754E' in err:
return None
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def lsvdisks_from_filter(self, filter_name, value):
"""Performs an lsvdisk command, filtering the results as specified.
Returns an iterable for all matching vdisks.
"""
ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
'-filtervalue', '%s=%s' % (filter_name, value)]
return self.run_ssh_info(ssh_cmd, with_header=True)
def chvdisk(self, vdisk, params):
ssh_cmd = ['svctask', 'chvdisk'] + params + ['"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def movevdisk(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def expandvdisksize(self, vdisk, amount):
ssh_cmd = (
['svctask', 'expandvdisksize', '-size', six.text_type(amount),
'-unit', 'gb', '"%s"' % vdisk])
self.run_ssh_assert_no_output(ssh_cmd)
def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None):
ssh_cmd = ['svctask', 'mkfcmap', '-source', '"%s"' % source, '-target',
'"%s"' % target, '-autodelete']
if not full_copy:
ssh_cmd.extend(['-copyrate', '0'])
else:
ssh_cmd.extend(['-copyrate', six.text_type(copy_rate)])
if consistgrp:
ssh_cmd.extend(['-consistgrp', consistgrp])
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if 'successfully created' not in out:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
try:
match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], '
'successfully created', out)
fc_map_id = match_obj.group(1)
except (AttributeError, IndexError):
msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s.') %
{'cmd': ssh_cmd,
'out': out,
'err': err})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return fc_map_id
def prestartfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'prestartfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def startfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'startfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def prestartfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def startfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def stopfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group]
self.run_ssh_assert_no_output(ssh_cmd)
def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
'-autodelete', autodel, fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def stopfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'stopfcmap', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def rmfcmap(self, fc_map_id):
ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id]
self.run_ssh_assert_no_output(ssh_cmd)
def lsvdiskfcmappings(self, vdisk):
ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!',
'"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfcmap(self, fc_map_id):
ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
'id=%s' % fc_map_id, '-delim', '!']
return self.run_ssh_info(ssh_cmd, with_header=True)
def lsfcconsistgrp(self, fc_consistgrp):
ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp]
out, err = self._ssh(ssh_cmd)
return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
with_header=False)
def mkfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group]
return self.run_ssh_check_created(ssh_cmd)
def rmfcconsistgrp(self, fc_consist_group):
ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group]
return self.run_ssh_assert_no_output(ssh_cmd)
def addvdiskcopy(self, vdisk, dest_pool, params, auto_delete):
ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
'"%s"' % dest_pool])
if auto_delete:
ssh_cmd += ['-autodelete']
ssh_cmd += ['"%s"' % vdisk]
return self.run_ssh_check_created(ssh_cmd)
def lsvdiskcopy(self, vdisk, copy_id=None):
ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!']
with_header = True
if copy_id:
ssh_cmd += ['-copy', copy_id]
with_header = False
ssh_cmd += ['"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=with_header)
def lsvdisksyncprogress(self, vdisk, copy_id):
ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!',
'-copy', copy_id, '"%s"' % vdisk]
return self.run_ssh_info(ssh_cmd, with_header=True)[0]
def rmvdiskcopy(self, vdisk, copy_id):
ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def addvdiskaccess(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp,
'"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def rmvdiskaccess(self, vdisk, iogrp):
ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
def lsportfc(self, node_id):
ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!',
'-filtervalue', 'node_id=%s' % node_id]
return self.run_ssh_info(ssh_cmd, with_header=True)
def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool, '-copy',
copy_id, '-vdisk', vdisk]
self.run_ssh_assert_no_output(ssh_cmd)
class StorwizeHelpers(object):
    # All the supported QoS keys are saved in this dict. When a new
    # key is added, three values MUST be set:
    # 'default': to indicate the value when the parameter is disabled.
# 'param': to indicate the corresponding parameter in the command.
# 'type': to indicate the type of this value.
WAIT_TIME = 5
svc_qos_keys = {'IOThrottling': {'default': '0',
'param': 'rate',
'type': int}}
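    # Illustrative only (not part of the upstream driver): a hypothetical new
    # QoS key would follow the same shape, e.g.
    #   'bandwidth': {'default': '0', 'param': 'bandwidth', 'type': int}
    # where 'param' names the corresponding CLI parameter and 'type' is the
    # Python type used for the value.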
def __init__(self, run_ssh):
self.ssh = StorwizeSSH(run_ssh)
self.check_fcmapping_interval = 3
@staticmethod
def handle_keyerror(cmd, out):
msg = (_('Could not find key in output of command %(cmd)s: %(out)s.')
% {'out': out, 'cmd': cmd})
raise exception.VolumeBackendAPIException(data=msg)
def compression_enabled(self):
"""Return whether or not compression is enabled for this system."""
resp = self.ssh.lslicense()
keys = ['license_compression_enclosures',
'license_compression_capacity']
for key in keys:
if resp.get(key, '0') != '0':
return True
# lslicense is not used for V9000 compression check
# compression_enclosures and compression_capacity are
# always 0. V9000 uses license_scheme 9846 as an
# indicator and can always do compression
try:
resp = self.ssh.lsguicapabilities()
if resp.get('license_scheme', '0') == '9846':
return True
except exception.VolumeBackendAPIException:
LOG.exception("Failed to fetch licensing scheme.")
return False
def replication_licensed(self):
"""Return whether or not replication is enabled for this system."""
# Uses product_key as an indicator to check
# whether replication is supported in storage.
try:
resp = self.ssh.lsguicapabilities()
product_key = resp.get('product_key', '0')
if product_key in storwize_const.REP_CAP_DEVS:
return True
except exception.VolumeBackendAPIException as war:
LOG.warning("Failed to run lsguicapability. Exception: %s.", war)
return False
def get_system_info(self):
"""Return system's name, ID, and code level."""
resp = self.ssh.lssystem()
level = resp['code_level']
match_obj = re.search('([0-9].){3}[0-9]', level)
if match_obj is None:
msg = _('Failed to get code level (%s).') % level
raise exception.VolumeBackendAPIException(data=msg)
code_level = match_obj.group().split('.')
return {'code_level': tuple([int(x) for x in code_level]),
'system_name': resp['name'],
'system_id': resp['id']}
def get_pool_attrs(self, pool):
"""Return attributes for the specified pool."""
return self.ssh.lsmdiskgrp(pool)
def is_pool_defined(self, pool_name):
"""Check if vdisk is defined."""
attrs = self.get_pool_attrs(pool_name)
return attrs is not None
def get_available_io_groups(self):
"""Return list of available IO groups."""
iogrps = []
resp = self.ssh.lsiogrp()
for iogrp in resp:
try:
if int(iogrp['node_count']) > 0:
iogrps.append(int(iogrp['id']))
except KeyError:
self.handle_keyerror('lsiogrp', iogrp)
except ValueError:
msg = (_('Expected integer for node_count, '
'svcinfo lsiogrp returned: %(node)s.') %
{'node': iogrp['node_count']})
raise exception.VolumeBackendAPIException(data=msg)
return iogrps
def get_vdisk_count_by_io_group(self):
res = {}
resp = self.ssh.lsiogrp()
for iogrp in resp:
try:
if int(iogrp['node_count']) > 0:
res[int(iogrp['id'])] = int(iogrp['vdisk_count'])
except KeyError:
self.handle_keyerror('lsiogrp', iogrp)
except ValueError:
msg = (_('Expected integer for node_count, '
'svcinfo lsiogrp returned: %(node)s') %
{'node': iogrp['node_count']})
raise exception.VolumeBackendAPIException(data=msg)
return res
def select_io_group(self, state, opts):
selected_iog = 0
iog_list = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
if len(iog_list) == 0:
raise exception.InvalidInput(
reason=_('Given I/O group(s) %(iogrp)s not valid; available '
'I/O groups are %(avail)s.')
% {'iogrp': opts['iogrp'],
'avail': state['available_iogrps']})
iog_vdc = self.get_vdisk_count_by_io_group()
LOG.debug("IO group current balance %s", iog_vdc)
min_vdisk_count = iog_vdc[iog_list[0]]
selected_iog = iog_list[0]
for iog in iog_list:
if iog_vdc[iog] < min_vdisk_count:
min_vdisk_count = iog_vdc[iog]
selected_iog = iog
LOG.debug("Selected io_group is %d", selected_iog)
return selected_iog
def get_volume_io_group(self, vol_name):
vdisk = self.ssh.lsvdisk(vol_name)
if vdisk:
resp = self.ssh.lsiogrp()
for iogrp in resp:
if iogrp['name'] == vdisk['IO_group_name']:
return int(iogrp['id'])
return None
def get_node_info(self):
"""Return dictionary containing information on system's nodes."""
nodes = {}
resp = self.ssh.lsnode()
for node_data in resp:
try:
if node_data['status'] != 'online':
continue
node = {}
node['id'] = node_data['id']
node['name'] = node_data['name']
node['IO_group'] = node_data['IO_group_id']
node['iscsi_name'] = node_data['iscsi_name']
node['WWNN'] = node_data['WWNN']
node['status'] = node_data['status']
node['WWPN'] = []
node['ipv4'] = []
node['ipv6'] = []
node['enabled_protocols'] = []
nodes[node['id']] = node
except KeyError:
self.handle_keyerror('lsnode', node_data)
return nodes
def add_iscsi_ip_addrs(self, storage_nodes):
"""Add iSCSI IP addresses to system node information."""
resp = self.ssh.lsportip()
for ip_data in resp:
try:
state = ip_data['state']
if ip_data['node_id'] in storage_nodes and (
state == 'configured' or state == 'online'):
node = storage_nodes[ip_data['node_id']]
if len(ip_data['IP_address']):
node['ipv4'].append(ip_data['IP_address'])
if len(ip_data['IP_address_6']):
node['ipv6'].append(ip_data['IP_address_6'])
except KeyError:
self.handle_keyerror('lsportip', ip_data)
def add_fc_wwpns(self, storage_nodes):
"""Add FC WWPNs to system node information."""
for key in storage_nodes:
node = storage_nodes[key]
wwpns = set(node['WWPN'])
resp = self.ssh.lsportfc(node_id=node['id'])
for port_info in resp:
if (port_info['type'] == 'fc' and
port_info['status'] == 'active'):
wwpns.add(port_info['WWPN'])
node['WWPN'] = list(wwpns)
LOG.info('WWPN on node %(node)s: %(wwpn)s.',
{'node': node['id'], 'wwpn': node['WWPN']})
def add_chap_secret_to_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
chap_secret = utils.generate_password()
self.ssh.add_chap_secret(chap_secret, host_name)
return chap_secret
def get_chap_secret_for_host(self, host_name):
"""Generate and store a randomly-generated CHAP secret for the host."""
resp = self.ssh.lsiscsiauth()
host_found = False
for host_data in resp:
try:
if host_data['name'] == host_name:
host_found = True
if host_data['iscsi_auth_method'] == 'chap':
return host_data['iscsi_chap_secret']
except KeyError:
self.handle_keyerror('lsiscsiauth', host_data)
if not host_found:
msg = _('Failed to find host %s.') % host_name
raise exception.VolumeBackendAPIException(data=msg)
return None
def get_conn_fc_wwpns(self, host):
wwpns = set()
resp = self.ssh.lsfabric(host=host)
for wwpn in resp.select('local_wwpn'):
if wwpn is not None:
wwpns.add(wwpn)
return list(wwpns)
def get_host_from_connector(self, connector, volume_name=None,
iscsi=False):
"""Return the Storwize host described by the connector."""
LOG.debug('Enter: get_host_from_connector: %s.', connector)
# If we have FC information, we have a faster lookup option
host_name = None
if 'wwpns' in connector and not iscsi:
for wwpn in connector['wwpns']:
resp = self.ssh.lsfabric(wwpn=wwpn)
for wwpn_info in resp:
try:
if (wwpn_info['remote_wwpn'] and
wwpn_info['name'] and
wwpn_info['remote_wwpn'].lower() ==
wwpn.lower()):
host_name = wwpn_info['name']
break
except KeyError:
self.handle_keyerror('lsfabric', wwpn_info)
if host_name:
break
if host_name:
LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
return host_name
def update_host_list(host, host_list):
idx = host_list.index(host)
del host_list[idx]
host_list.insert(0, host)
# That didn't work, so try exhaustive search
hosts_info = self.ssh.lshost()
host_list = list(hosts_info.select('name'))
# If we have a "real" connector, we might be able to find the
# host entry with fewer queries if we move the host entries
# that contain the connector's host property value to the front
# of the list
if 'host' in connector:
# order host_list such that the host entries that
# contain the connector's host name are at the
# beginning of the list
for host in host_list:
if re.search(connector['host'], host):
update_host_list(host, host_list)
# If we have a volume name we have a potential fast path
# for finding the matching host for that volume.
# Add the host_names that have mappings for our volume to the
# head of the list of host names to search them first
if volume_name:
hosts_map_info = self.ssh.lsvdiskhostmap(volume_name)
hosts_map_info_list = list(hosts_map_info.select('host_name'))
            # Move the fast-path host names to the front of the list so
            # they are searched first.
for host in hosts_map_info_list:
update_host_list(host, host_list)
found = False
for name in host_list:
try:
resp = self.ssh.lshost(host=name)
except exception.VolumeBackendAPIException as ex:
LOG.debug("Exception message: %s", ex.msg)
if 'CMMVC5754E' in ex.msg:
LOG.debug("CMMVC5754E found in CLI exception.")
# CMMVC5754E: The specified object does not exist
# The host has been deleted while walking the list.
# This is a result of a host change on the SVC that
# is out of band to this request.
continue
# unexpected error so reraise it
with excutils.save_and_reraise_exception():
pass
if iscsi:
if 'initiator' in connector:
                    # Use a distinct loop variable so the 'iscsi' argument
                    # is not shadowed.
                    for iscsi_name in resp.select('iscsi_name'):
                        if iscsi_name == connector['initiator']:
host_name = name
found = True
break
elif 'wwpns' in connector and len(connector['wwpns']):
connector_wwpns = [str(x).lower() for x in connector['wwpns']]
for wwpn in resp.select('WWPN'):
if wwpn and wwpn.lower() in connector_wwpns:
host_name = name
found = True
break
if found:
break
LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
return host_name
def create_host(self, connector, iscsi=False):
"""Create a new host on the storage system.
We create a host name and associate it with the given connection
        information. The host name will be a cleaned-up version of the given
        host name (at most 55 characters), plus a hyphen and a random
        8-character suffix to avoid collisions, for a total length of at
        most 64 characters.
"""
LOG.debug('Enter: create_host: host %s.', connector['host'])
# Before we start, make sure host name is a string and that we have at
# least one port.
host_name = connector['host']
if not isinstance(host_name, six.string_types):
msg = _('create_host: Host name is not unicode or string.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
ports = []
if iscsi:
if 'initiator' in connector:
ports.append(['initiator', '%s' % connector['initiator']])
else:
msg = _('create_host: No initiators supplied.')
else:
if 'wwpns' in connector:
for wwpn in connector['wwpns']:
ports.append(['wwpn', '%s' % wwpn])
else:
msg = _('create_host: No wwpns supplied.')
if not len(ports):
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Build a host name for the Storwize host - first clean up the name
if isinstance(host_name, six.text_type):
host_name = unicodedata.normalize('NFKD', host_name).encode(
'ascii', 'replace').decode('ascii')
for num in range(0, 128):
ch = str(chr(num))
if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
host_name = host_name.replace(ch, '-')
        # Storwize requires host names to start with a letter or '_'.
if not re.match('^[A-Za-z]', host_name):
host_name = '_' + host_name
# Add a random 8-character suffix to avoid collisions
rand_id = str(random.randint(0, 99999999)).zfill(8)
host_name = '%s-%s' % (host_name[:55], rand_id)
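        # Illustrative example (hypothetical connector): a host named
        # 'web-01.example.com' could become 'web-01.example.com-04219387'.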
# Create a host with one port
port = ports.pop(0)
self.ssh.mkhost(host_name, port[0], port[1])
# Add any additional ports to the host
for port in ports:
self.ssh.addhostport(host_name, port[0], port[1])
LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.',
{'host': connector['host'], 'host_name': host_name})
return host_name
def delete_host(self, host_name):
self.ssh.rmhost(host_name)
def map_vol_to_host(self, volume_name, host_name, multihostmap):
"""Create a mapping between a volume to a host."""
LOG.debug('Enter: map_vol_to_host: volume %(volume_name)s to '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
# Check if this volume is already mapped to this host
result_lun = self.ssh.get_vdiskhostmapid(volume_name, host_name)
if result_lun is None:
result_lun = self.ssh.mkvdiskhostmap(host_name, volume_name, None,
multihostmap)
LOG.debug('Leave: map_vol_to_host: LUN %(result_lun)s, volume '
'%(volume_name)s, host %(host_name)s.',
{'result_lun': result_lun,
'volume_name': volume_name,
'host_name': host_name})
return int(result_lun)
def unmap_vol_from_host(self, volume_name, host_name):
"""Unmap the volume and delete the host if it has no more mappings."""
LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
# Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp):
LOG.warning('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to any host found.',
{'vol_name': volume_name})
return host_name
if host_name is None:
if len(resp) > 1:
LOG.warning('unmap_vol_from_host: Multiple mappings of '
'volume %(vol_name)s found, no host '
'specified.', {'vol_name': volume_name})
return
else:
host_name = resp[0]['host_name']
else:
found = False
for h in resp.select('host_name'):
if h == host_name:
found = True
if not found:
LOG.warning('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to host %(host)s found.',
{'vol_name': volume_name, 'host': host_name})
return host_name
# We now know that the mapping exists
self.ssh.rmvdiskhostmap(host_name, volume_name)
LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s.',
{'volume_name': volume_name, 'host_name': host_name})
return host_name
def check_host_mapped_vols(self, host_name):
return self.ssh.lshostvdiskmap(host_name)
@staticmethod
def build_default_opts(config):
# Ignore capitalization
cluster_partner = config.storwize_svc_stretched_cluster_partner
opt = {'rsize': config.storwize_svc_vol_rsize,
'warning': config.storwize_svc_vol_warning,
'autoexpand': config.storwize_svc_vol_autoexpand,
'grainsize': config.storwize_svc_vol_grainsize,
'compression': config.storwize_svc_vol_compression,
'easytier': config.storwize_svc_vol_easytier,
'iogrp': config.storwize_svc_vol_iogrp,
'qos': None,
'stretched_cluster': cluster_partner,
'replication': False,
'nofmtdisk': config.storwize_svc_vol_nofmtdisk,
'mirror_pool': config.storwize_svc_mirror_pool,
'cycle_period_seconds': config.cycle_period_seconds}
return opt
@staticmethod
def check_vdisk_opts(state, opts):
# Check that grainsize is 32/64/128/256
if opts['grainsize'] not in [32, 64, 128, 256]:
raise exception.InvalidInput(
reason=_('Illegal value specified for '
'storwize_svc_vol_grainsize: set to either '
'32, 64, 128, or 256.'))
# Check that compression is supported
if opts['compression'] and not state['compression_enabled']:
raise exception.InvalidInput(
reason=_('System does not support compression.'))
# Check that rsize is set if compression is set
if opts['compression'] and opts['rsize'] == -1:
raise exception.InvalidInput(
reason=_('If compression is set to True, rsize must '
'also be set (not equal to -1).'))
# Check cycle_period_seconds are in 60-86400
if opts['cycle_period_seconds'] not in range(60, 86401):
raise exception.InvalidInput(
reason=_('cycle_period_seconds should be integer '
'between 60 and 86400.'))
iogs = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
if len(iogs) == 0:
raise exception.InvalidInput(
reason=_('Given I/O group(s) %(iogrp)s not valid; available '
'I/O groups are %(avail)s.')
% {'iogrp': opts['iogrp'],
'avail': state['available_iogrps']})
if opts['nofmtdisk'] and opts['rsize'] != -1:
raise exception.InvalidInput(
reason=_('If nofmtdisk is set to True, rsize must '
'also be set to -1.'))
@staticmethod
def _get_valid_requested_io_groups(state, opts):
given_iogs = str(opts['iogrp'])
iog_list = given_iogs.split(',')
# convert to int
iog_list = list(map(int, iog_list))
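        # Illustrative example (assumed option value): opts['iogrp'] = '0,1'
        # yields iog_list == [0, 1].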
LOG.debug("Requested iogroups %s", iog_list)
LOG.debug("Available iogroups %s", state['available_iogrps'])
filtiog = set(iog_list).intersection(state['available_iogrps'])
iog_list = list(filtiog)
LOG.debug("Filtered (valid) requested iogroups %s", iog_list)
return iog_list
def _get_opts_from_specs(self, opts, specs):
qos = {}
for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
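            # Illustrative examples (assumed extra-spec keys):
            # 'drivers:easytier' -> scope 'drivers', key 'easytier';
            # 'qos:IOThrottling' -> scope 'qos', key 'IOThrottling';
            # a bare 'compression' key has no scope.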
# We generally do not look at capabilities in the driver, but
# replication is a special case where the user asks for
# a volume to be replicated, and we want both the scheduler and
# the driver to act on the value.
if ((not scope or scope == 'capabilities') and
key == 'replication'):
scope = None
key = 'replication'
words = value.split()
if not (words and len(words) == 2 and words[0] == '<is>'):
LOG.error('Replication must be specified as '
'\'<is> True\' or \'<is> False\'.')
del words[0]
value = words[0]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
# Any keys that the driver should look at should have the
# 'drivers' scope.
if scope and scope != 'drivers':
continue
if key in opts:
this_type = type(opts[key]).__name__
if this_type == 'int':
value = int(value)
elif this_type == 'bool':
value = strutils.bool_from_string(value)
opts[key] = value
if len(qos) != 0:
opts['qos'] = qos
return opts
def _get_qos_from_volume_metadata(self, volume_metadata):
"""Return the QoS information from the volume metadata."""
qos = {}
for i in volume_metadata:
k = i.get('key', None)
value = i.get('value', None)
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
return qos
def _wait_for_a_condition(self, testmethod, timeout=None,
interval=INTERVAL_1_SEC,
raise_exception=False):
start_time = time.time()
if timeout is None:
timeout = DEFAULT_TIMEOUT
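        # Illustrative usage (hypothetical call): self._wait_for_a_condition(
        #     lambda: self.is_vdisk_copy_synced(vdisk, copy_id), timeout=600)
        # polls the test method every 'interval' seconds until it returns
        # True or the timeout expires.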
def _inner():
try:
testValue = testmethod()
except Exception as ex:
if raise_exception:
LOG.exception("_wait_for_a_condition: %s"
" execution failed.",
testmethod.__name__)
raise exception.VolumeBackendAPIException(data=ex)
else:
testValue = False
                    LOG.debug('Helper._wait_for_a_condition: '
                              '%(method_name)s execution failed '
                              'for %(exception)s.',
                              {'method_name': testmethod.__name__,
                               'exception': six.text_type(ex)})
if testValue:
raise loopingcall.LoopingCallDone()
if int(time.time()) - start_time > timeout:
                msg = (_('StorwizeHelpers._wait_for_a_condition: %s timeout.')
% testmethod.__name__)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
timer = loopingcall.FixedIntervalLoopingCall(_inner)
timer.start(interval=interval).wait()
def get_vdisk_params(self, config, state, type_id,
volume_type=None, volume_metadata=None):
"""Return the parameters for creating the vdisk.
Takes volume type and defaults from config options into account.
"""
opts = self.build_default_opts(config)
ctxt = context.get_admin_context()
if volume_type is None and type_id is not None:
volume_type = volume_types.get_volume_type(ctxt, type_id)
if volume_type:
qos_specs_id = volume_type.get('qos_specs_id')
specs = dict(volume_type).get('extra_specs')
            # NOTE(vhou): We prefer the qos_specs association and
            # override any existing extra-specs settings if present.
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
                # Merge the qos_specs into extra_specs; qos_specs take
                # priority over extra_specs when both define a value for
                # the same key.
specs.update(kvs)
opts = self._get_opts_from_specs(opts, specs)
if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos
and volume_metadata):
qos = self._get_qos_from_volume_metadata(volume_metadata)
if len(qos) != 0:
opts['qos'] = qos
self.check_vdisk_opts(state, opts)
return opts
@staticmethod
def _get_vdisk_create_params(opts, add_copies=False):
easytier = 'on' if opts['easytier'] else 'off'
if opts['rsize'] == -1:
params = []
if opts['nofmtdisk']:
params.append('-nofmtdisk')
else:
params = ['-rsize', '%s%%' % str(opts['rsize']),
'-autoexpand', '-warning',
'%s%%' % str(opts['warning'])]
if not opts['autoexpand']:
params.remove('-autoexpand')
if opts['compression']:
params.append('-compressed')
else:
params.extend(['-grainsize', str(opts['grainsize'])])
if add_copies and opts['mirror_pool']:
params.extend(['-copies', '2'])
params.extend(['-easytier', easytier])
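        # Illustrative example (hypothetical opts): rsize=2, warning=80,
        # autoexpand=True, compression=False, grainsize=256, easytier=True
        # produces ['-rsize', '2%', '-autoexpand', '-warning', '80%',
        # '-grainsize', '256', '-easytier', 'on'].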
return params
def create_vdisk(self, name, size, units, pool, opts):
LOG.debug('Enter: create_vdisk: vdisk %s.', name)
mdiskgrp = pool
if opts['mirror_pool']:
if not self.is_pool_defined(opts['mirror_pool']):
raise exception.InvalidInput(
reason=_('The pool %s in which mirrored copy is stored '
'is invalid') % opts['mirror_pool'])
            # For a mirrored volume, SVC expects the mdiskgrp argument in
            # the form pool:mirror_pool.
mdiskgrp = '%s:%s' % (pool, opts['mirror_pool'])
params = self._get_vdisk_create_params(
opts, add_copies=True if opts['mirror_pool'] else False)
self.ssh.mkvdisk(name, size, units, mdiskgrp, opts, params)
LOG.debug('Leave: _create_vdisk: volume %s.', name)
def get_vdisk_attributes(self, vdisk):
attrs = self.ssh.lsvdisk(vdisk)
return attrs
def is_vdisk_defined(self, vdisk_name):
"""Check if vdisk is defined."""
attrs = self.get_vdisk_attributes(vdisk_name)
return attrs is not None
def find_vdisk_copy_id(self, vdisk, pool):
resp = self.ssh.lsvdiskcopy(vdisk)
for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
if mdisk_grp == pool:
return copy_id
msg = _('Failed to find a vdisk copy in the expected pool.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def get_vdisk_copy_attrs(self, vdisk, copy_id):
return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
def get_vdisk_copies(self, vdisk):
copies = {'primary': None,
'secondary': None}
resp = self.ssh.lsvdiskcopy(vdisk)
for copy_id, status, sync, primary, mdisk_grp in (
resp.select('copy_id', 'status', 'sync',
'primary', 'mdisk_grp_name')):
copy = {'copy_id': copy_id,
'status': status,
'sync': sync,
'primary': primary,
'mdisk_grp_name': mdisk_grp,
'sync_progress': None}
if copy['sync'] != 'yes':
progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
copy['sync_progress'] = progress_info['progress']
if copy['primary'] == 'yes':
copies['primary'] = copy
else:
copies['secondary'] = copy
return copies
def _prepare_fc_map(self, fc_map_id, timeout):
self.ssh.prestartfcmap(fc_map_id)
mapping_ready = False
max_retries = (timeout // self.WAIT_TIME) + 1
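        # e.g. (illustrative) timeout=120 with WAIT_TIME=5 allows up to 24
        # polling attempts at 5-second intervals.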
for try_number in range(1, max_retries):
mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
if (mapping_attrs is None or
'status' not in mapping_attrs):
break
if mapping_attrs['status'] == 'prepared':
mapping_ready = True
break
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcmap(fc_map_id)
elif mapping_attrs['status'] != 'preparing':
                msg = (_('Unexpected mapping status %(status)s for mapping '
'%(id)s. Attributes: %(attr)s.')
% {'status': mapping_attrs['status'],
'id': fc_map_id,
'attr': mapping_attrs})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
greenthread.sleep(self.WAIT_TIME)
if not mapping_ready:
            msg = (_('Mapping %(id)s prepare failed to complete within the '
'allotted %(to)d seconds timeout. Terminating.')
% {'id': fc_map_id,
'to': timeout})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def start_fc_consistgrp(self, fc_consistgrp):
self.ssh.startfcconsistgrp(fc_consistgrp)
def create_fc_consistgrp(self, fc_consistgrp):
self.ssh.mkfcconsistgrp(fc_consistgrp)
def delete_fc_consistgrp(self, fc_consistgrp):
self.ssh.rmfcconsistgrp(fc_consistgrp)
def stop_fc_consistgrp(self, fc_consistgrp):
self.ssh.stopfcconsistgrp(fc_consistgrp)
def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state,
config, timeout):
model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
snapshots_model_update = []
try:
for snapshot in snapshots:
opts = self.get_vdisk_params(config, state,
snapshot['volume_type_id'])
volume = snapshot.volume
if not volume:
msg = (_("Can't get volume from snapshot: %(id)s")
% {"id": snapshot.id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
pool = utils.extract_host(volume.host, 'pool')
self.create_flashcopy_to_consistgrp(snapshot['volume_name'],
snapshot['name'],
fc_consistgrp,
config, opts, False,
pool=pool)
self.prepare_fc_consistgrp(fc_consistgrp, timeout)
self.start_fc_consistgrp(fc_consistgrp)
            # The storage has a limit of 128 FlashCopy consistency groups.
            # After starting the CG we delete it to stay under that limit;
            # Cinder itself maintains the CG/snapshot relationship.
self.delete_fc_consistgrp(fc_consistgrp)
except exception.VolumeBackendAPIException as err:
model_update['status'] = fields.GroupSnapshotStatus.ERROR
# Release cg
self.delete_fc_consistgrp(fc_consistgrp)
LOG.error("Failed to create CGSnapshot. "
"Exception: %s.", err)
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': model_update['status']})
return model_update, snapshots_model_update
def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots):
"""Delete flashcopy maps and consistent group."""
model_update = {'status': fields.GroupSnapshotStatus.DELETED}
snapshots_model_update = []
try:
for snapshot in snapshots:
self.delete_vdisk(snapshot['name'], True)
except exception.VolumeBackendAPIException as err:
model_update['status'] = (
fields.GroupSnapshotStatus.ERROR_DELETING)
LOG.error("Failed to delete the snapshot %(snap)s of "
"CGSnapshot. Exception: %(exception)s.",
{'snap': snapshot['name'], 'exception': err})
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': model_update['status']})
return model_update, snapshots_model_update
def prepare_fc_consistgrp(self, fc_consistgrp, timeout):
"""Prepare FC Consistency Group."""
self.ssh.prestartfcconsistgrp(fc_consistgrp)
def prepare_fc_consistgrp_success():
mapping_ready = False
mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp)
            if (mapping_attrs is None or
                    'status' not in mapping_attrs):
                # Attributes not available yet; report not ready so the
                # caller keeps polling.
                return mapping_ready
if mapping_attrs['status'] == 'prepared':
mapping_ready = True
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcconsistgrp(fc_consistgrp)
elif mapping_attrs['status'] != 'preparing':
                msg = (_('Unexpected mapping status %(status)s for mapping '
'%(id)s. Attributes: %(attr)s.') %
{'status': mapping_attrs['status'],
'id': fc_consistgrp,
'attr': mapping_attrs})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return mapping_ready
self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout)
def create_cg_from_source(self, group, fc_consistgrp,
sources, targets, state,
config, timeout):
"""Create consistence group from source"""
LOG.debug('Enter: create_cg_from_source: cg %(cg)s'
' source %(source)s, target %(target)s',
{'cg': fc_consistgrp, 'source': sources, 'target': targets})
model_update = {'status': fields.GroupStatus.AVAILABLE}
ctxt = context.get_admin_context()
try:
for source, target in zip(sources, targets):
opts = self.get_vdisk_params(config, state,
source['volume_type_id'])
pool = utils.extract_host(target['host'], 'pool')
self.create_flashcopy_to_consistgrp(source['name'],
target['name'],
fc_consistgrp,
config, opts,
True, pool=pool)
self.prepare_fc_consistgrp(fc_consistgrp, timeout)
self.start_fc_consistgrp(fc_consistgrp)
self.delete_fc_consistgrp(fc_consistgrp)
volumes_model_update = self._get_volume_model_updates(
ctxt, targets, group['id'], model_update['status'])
except exception.VolumeBackendAPIException as err:
model_update['status'] = fields.GroupStatus.ERROR
volumes_model_update = self._get_volume_model_updates(
ctxt, targets, group['id'], model_update['status'])
with excutils.save_and_reraise_exception():
# Release cg
self.delete_fc_consistgrp(fc_consistgrp)
LOG.error("Failed to create CG from CGsnapshot. "
"Exception: %s", err)
return model_update, volumes_model_update
LOG.debug('Leave: create_cg_from_source.')
return model_update, volumes_model_update
def _get_volume_model_updates(self, ctxt, volumes, cgId,
status='available'):
"""Update the volume model's status and return it."""
volume_model_updates = []
LOG.info("Updating status for CG: %(id)s.",
{'id': cgId})
if volumes:
for volume in volumes:
volume_model_updates.append({'id': volume['id'],
'status': status})
else:
LOG.info("No volume found for CG: %(cg)s.",
{'cg': cgId})
return volume_model_updates
def run_flashcopy(self, source, target, timeout, copy_rate,
full_copy=True):
"""Create a FlashCopy mapping from the source to the target."""
LOG.debug('Enter: run_flashcopy: execute FlashCopy from source '
'%(source)s to target %(target)s.',
{'source': source, 'target': target})
fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate)
self._prepare_fc_map(fc_map_id, timeout)
self.ssh.startfcmap(fc_map_id)
LOG.debug('Leave: run_flashcopy: FlashCopy started from '
'%(source)s to %(target)s.',
{'source': source, 'target': target})
def create_flashcopy_to_consistgrp(self, source, target, consistgrp,
config, opts, full_copy=False,
pool=None):
"""Create a FlashCopy mapping and add to consistent group."""
LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
                  ' from source %(source)s to target %(target)s, '
                  'then add the flashcopy to %(cg)s.',
{'source': source, 'target': target, 'cg': consistgrp})
src_attrs = self.get_vdisk_attributes(source)
if src_attrs is None:
msg = (_('create_copy: Source vdisk %(src)s '
'does not exist.') % {'src': source})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
src_size = src_attrs['capacity']
# In case we need to use a specific pool
if not pool:
pool = src_attrs['mdisk_grp_name']
opts['iogrp'] = src_attrs['IO_group_id']
self.create_vdisk(target, src_size, 'b', pool, opts)
self.ssh.mkfcmap(source, target, full_copy,
config.storwize_svc_flashcopy_rate,
consistgrp=consistgrp)
LOG.debug('Leave: create_flashcopy_to_consistgrp: '
'FlashCopy started from %(source)s to %(target)s.',
{'source': source, 'target': target})
def _get_vdisk_fc_mappings(self, vdisk):
"""Return FlashCopy mappings that this vdisk is associated with."""
mapping_ids = []
resp = self.ssh.lsvdiskfcmappings(vdisk)
for id in resp.select('id'):
mapping_ids.append(id)
return mapping_ids
def _get_flashcopy_mapping_attributes(self, fc_map_id):
resp = self.ssh.lsfcmap(fc_map_id)
if not len(resp):
return None
return resp[0]
def _get_flashcopy_consistgrp_attr(self, fc_map_id):
resp = self.ssh.lsfcconsistgrp(fc_map_id)
if not len(resp):
return None
return resp[0]
def _check_vdisk_fc_mappings(self, name,
allow_snaps=True, allow_fctgt=False):
"""FlashCopy mapping check helper."""
LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s.', name)
mapping_ids = self._get_vdisk_fc_mappings(name)
wait_for_copy = False
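        # For each mapping we decide, based on copy_rate and status, whether
        # to stop it, remove it, throttle it via chfcmap, or keep waiting;
        # the surrounding looping call re-runs this check until no mappings
        # remain.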
for map_id in mapping_ids:
attrs = self._get_flashcopy_mapping_attributes(map_id)
# We should ignore GMCV flash copies
if not attrs or 'yes' == attrs['rc_controlled']:
continue
source = attrs['source_vdisk_name']
target = attrs['target_vdisk_name']
copy_rate = attrs['copy_rate']
status = attrs['status']
if allow_fctgt and target == name and status == 'copying':
self.ssh.stopfcmap(map_id)
attrs = self._get_flashcopy_mapping_attributes(map_id)
if attrs:
status = attrs['status']
if copy_rate == '0':
if source == name:
# Vdisk with snapshots. Return False if snapshot
# not allowed.
if not allow_snaps:
raise loopingcall.LoopingCallDone(retvalue=False)
self.ssh.chfcmap(map_id, copyrate='50', autodel='on')
wait_for_copy = True
else:
# A snapshot
if target != name:
msg = (_('Vdisk %(name)s not involved in '
'mapping %(src)s -> %(tgt)s.') %
{'name': name, 'src': source, 'tgt': target})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if status in ['copying', 'prepared']:
self.ssh.stopfcmap(map_id)
# Need to wait for the fcmap to change to
# stopped state before remove fcmap
wait_for_copy = True
elif status in ['stopping', 'preparing']:
wait_for_copy = True
else:
self.ssh.rmfcmap(map_id)
            # Copy in progress (non-zero copy rate) - wait; the mapping
            # will auto-delete when the copy completes.
else:
if status == 'prepared':
self.ssh.stopfcmap(map_id)
self.ssh.rmfcmap(map_id)
elif status in ['idle_or_copied', 'stopped']:
# Prepare failed or stopped
self.ssh.rmfcmap(map_id)
else:
wait_for_copy = True
if not wait_for_copy or not len(mapping_ids):
raise loopingcall.LoopingCallDone(retvalue=True)
def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True,
allow_fctgt=False):
"""Ensure vdisk has no flashcopy mappings."""
timer = loopingcall.FixedIntervalLoopingCall(
self._check_vdisk_fc_mappings, name,
allow_snaps, allow_fctgt)
        # Create a timer greenthread. The default volume service heartbeat
        # is every 10 seconds, while the flashcopy usually takes hours to
        # finish. Don't set the sleep interval shorter than the heartbeat;
        # otherwise the volume service heartbeat will not be serviced.
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s.',
name)
ret = timer.start(interval=self.check_fcmapping_interval).wait()
timer.stop()
return ret
def start_relationship(self, volume_name, primary=None):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name']:
self.ssh.startrcrelationship(vol_attrs['RC_name'], primary)
def stop_relationship(self, volume_name, access=False):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name']:
self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=access)
def create_relationship(self, master, aux, system, asyncmirror,
cyclingmode=False, masterchange=None,
cycle_period_seconds=None):
        rc_id = None
        try:
            rc_id = self.ssh.mkrcrelationship(master, aux, system,
                                              asyncmirror, cyclingmode)
        except exception.VolumeBackendAPIException as e:
            # CMMVC5959E is the code in Storwize storage, meaning that
            # there is a relationship that already has this name on the
            # master cluster.
            if 'CMMVC5959E' not in six.text_type(e):
                # Any other error means the relationship between the primary
                # and the secondary back-end storage could not be created,
                # so re-raise the exception.
                raise
if rc_id:
# We need setup master and aux change volumes for gmcv
# before we can start remote relationship
# aux change volume must be set on target site
if cycle_period_seconds:
self.change_relationship_cycleperiod(master,
cycle_period_seconds)
if masterchange:
self.change_relationship_changevolume(master,
masterchange, True)
else:
self.start_relationship(master)
def change_relationship_changevolume(self, volume_name,
change_volume, master):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name'] and change_volume:
self.ssh.ch_rcrelationship_changevolume(vol_attrs['RC_name'],
change_volume, master)
def change_relationship_cycleperiod(self, volume_name,
cycle_period_seconds):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name'] and cycle_period_seconds:
self.ssh.ch_rcrelationship_cycleperiod(vol_attrs['RC_name'],
cycle_period_seconds)
def delete_relationship(self, volume_name):
vol_attrs = self.get_vdisk_attributes(volume_name)
if vol_attrs['RC_name']:
self.ssh.rmrcrelationship(vol_attrs['RC_name'], True)
def get_relationship_info(self, volume_name):
vol_attrs = self.get_vdisk_attributes(volume_name)
if not vol_attrs or not vol_attrs['RC_name']:
LOG.info("Unable to get remote copy information for "
"volume %s", volume_name)
return
relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
return relationship[0] if len(relationship) > 0 else None
def delete_rc_volume(self, volume_name, target_vol=False):
vol_name = volume_name
if target_vol:
vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume_name
try:
rel_info = self.get_relationship_info(vol_name)
if rel_info:
self.delete_relationship(vol_name)
# Delete change volume
self.delete_vdisk(
storwize_const.REPLICA_CHG_VOL_PREFIX + vol_name, False)
self.delete_vdisk(vol_name, False)
except Exception as e:
            msg = (_('Unable to delete volume %(vol)s. '
                     'Exception: %(err)s.')
                   % {'vol': vol_name, 'err': e})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
def switch_relationship(self, relationship, aux=True):
self.ssh.switchrelationship(relationship, aux)
def get_partnership_info(self, system_name):
partnership = self.ssh.lspartnership(system_name)
return partnership[0] if len(partnership) > 0 else None
def get_partnershipcandidate_info(self, system_name):
candidates = self.ssh.lspartnershipcandidate()
for candidate in candidates:
if system_name == candidate['name']:
return candidate
return None
def mkippartnership(self, ip_v4, bandwith=1000, copyrate=50):
self.ssh.mkippartnership(ip_v4, bandwith, copyrate)
def mkfcpartnership(self, system_name, bandwith=1000, copyrate=50):
self.ssh.mkfcpartnership(system_name, bandwith, copyrate)
def chpartnership(self, partnership_id):
self.ssh.chpartnership(partnership_id)
def delete_vdisk(self, vdisk, force):
"""Ensures that vdisk is not part of FC mapping and deletes it."""
LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk)
if not self.is_vdisk_defined(vdisk):
LOG.info('Tried to delete non-existent vdisk %s.', vdisk)
return
self.ensure_vdisk_no_fc_mappings(vdisk, allow_snaps=True,
allow_fctgt=True)
self.ssh.rmvdisk(vdisk, force=force)
LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk)
def create_copy(self, src, tgt, src_id, config, opts,
full_copy, pool=None):
"""Create a new snapshot using FlashCopy."""
LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.',
{'tgt': tgt, 'src': src})
src_attrs = self.get_vdisk_attributes(src)
if src_attrs is None:
msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
'does not exist.') % {'src': src, 'src_id': src_id})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
src_size = src_attrs['capacity']
# In case we need to use a specific pool
if not pool:
pool = src_attrs['mdisk_grp_name']
opts['iogrp'] = src_attrs['IO_group_id']
self.create_vdisk(tgt, src_size, 'b', pool, opts)
timeout = config.storwize_svc_flashcopy_timeout
try:
self.run_flashcopy(src, tgt, timeout,
config.storwize_svc_flashcopy_rate,
full_copy=full_copy)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_vdisk(tgt, True)
LOG.debug('Leave: _create_copy: snapshot %(tgt)s from '
'vdisk %(src)s.',
{'tgt': tgt, 'src': src})
def extend_vdisk(self, vdisk, amount):
self.ssh.expandvdisksize(vdisk, amount)
def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config,
auto_delete=False):
"""Add a vdisk copy in the given pool."""
resp = self.ssh.lsvdiskcopy(vdisk)
if len(resp) > 1:
msg = (_('add_vdisk_copy failed: A copy of volume %s exists. '
'Adding another copy would exceed the limit of '
'2 copies.') % vdisk)
raise exception.VolumeDriverException(message=msg)
orig_copy_id = resp[0].get("copy_id", None)
if orig_copy_id is None:
msg = (_('add_vdisk_copy started without a vdisk copy in the '
'expected pool.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if volume_type is None:
opts = self.get_vdisk_params(config, state, None)
else:
opts = self.get_vdisk_params(config, state, volume_type['id'],
volume_type=volume_type)
params = self._get_vdisk_create_params(opts)
try:
new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params,
auto_delete)
except exception.VolumeBackendAPIException as e:
            msg = (_('Unable to add vdiskcopy for volume %(vol)s. '
                     'Exception: %(err)s.')
                   % {'vol': vdisk, 'err': e})
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
return (orig_copy_id, new_copy_id)
def is_vdisk_copy_synced(self, vdisk, copy_id):
sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync']
if sync == 'yes':
return True
return False
def rm_vdisk_copy(self, vdisk, copy_id):
self.ssh.rmvdiskcopy(vdisk, copy_id)
def lsvdiskcopy(self, vdisk, copy_id=None):
return self.ssh.lsvdiskcopy(vdisk, copy_id)
@staticmethod
def can_migrate_to_host(host, state):
if 'location_info' not in host['capabilities']:
return None
info = host['capabilities']['location_info']
try:
(dest_type, dest_id, dest_pool) = info.split(':')
except ValueError:
return None
if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']):
return None
return dest_pool
def add_vdisk_qos(self, vdisk, qos):
"""Add the QoS configuration to the volume."""
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
def update_vdisk_qos(self, vdisk, qos):
"""Update all the QoS in terms of a key and value.
svc_qos_keys saves all the supported QoS parameters. Going through
this dict, we set the new values to all the parameters. If QoS is
available in the QoS configuration, the value is taken from it;
if not, the value will be set to default.
"""
for key, value in self.svc_qos_keys.items():
param = value['param']
if key in qos.keys():
# If the value is set in QoS, take the value from
# the QoS configuration.
v = qos[key]
else:
# If not, set the value to default.
v = value['default']
self.ssh.chvdisk(vdisk, ['-' + param, str(v)])
def disable_vdisk_qos(self, vdisk, qos):
"""Disable the QoS."""
for key, value in qos.items():
if key in self.svc_qos_keys.keys():
param = self.svc_qos_keys[key]['param']
# Take the default value.
value = self.svc_qos_keys[key]['default']
self.ssh.chvdisk(vdisk, ['-' + param, value])
def change_vdisk_options(self, vdisk, changes, opts, state):
if 'warning' in opts:
opts['warning'] = '%s%%' % str(opts['warning'])
if 'easytier' in opts:
opts['easytier'] = 'on' if opts['easytier'] else 'off'
if 'autoexpand' in opts:
opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
for key in changes:
self.ssh.chvdisk(vdisk, ['-' + key, opts[key]])
def change_vdisk_iogrp(self, vdisk, state, iogrp):
if state['code_level'] < (6, 4, 0, 0):
LOG.debug('Ignore change IO group as storage code level is '
'%(code_level)s, below the required 6.4.0.0.',
{'code_level': state['code_level']})
else:
self.ssh.movevdisk(vdisk, str(iogrp[0]))
self.ssh.addvdiskaccess(vdisk, str(iogrp[0]))
self.ssh.rmvdiskaccess(vdisk, str(iogrp[1]))
def vdisk_by_uid(self, vdisk_uid):
"""Returns the properties of the vdisk with the specified UID.
Returns None if no such disk exists.
"""
vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid)
if len(vdisks) == 0:
return None
if len(vdisks) != 1:
msg = (_('Expected single vdisk returned from lsvdisk when '
'filtering on vdisk_UID. %(count)s were returned.') %
{'count': len(vdisks)})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
vdisk = vdisks.result[0]
return self.ssh.lsvdisk(vdisk['name'])
def is_vdisk_in_use(self, vdisk):
"""Returns True if the specified vdisk is mapped to at least 1 host."""
resp = self.ssh.lsvdiskhostmap(vdisk)
return len(resp) != 0
def rename_vdisk(self, vdisk, new_name):
self.ssh.chvdisk(vdisk, ['-name', new_name])
def change_vdisk_primary_copy(self, vdisk, copy_id):
self.ssh.chvdisk(vdisk, ['-primary', copy_id])
def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
self.ssh.migratevdisk(vdisk, dest_pool, copy_id)
class CLIResponse(object):
"""Parse SVC CLI output and generate iterable."""
def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
super(CLIResponse, self).__init__()
if ssh_cmd:
self.ssh_cmd = ' '.join(ssh_cmd)
else:
self.ssh_cmd = 'None'
self.raw = raw
self.delim = delim
self.with_header = with_header
self.result = self._parse()
def select(self, *keys):
for a in self.result:
vs = []
for k in keys:
v = a.get(k, None)
if isinstance(v, six.string_types) or v is None:
v = [v]
if isinstance(v, list):
vs.append(v)
for item in zip(*vs):
if len(item) == 1:
yield item[0]
else:
yield item
def __getitem__(self, key):
try:
return self.result[key]
        except (KeyError, IndexError):
msg = (_('Did not find the expected key %(key)s in %(fun)s: '
'%(raw)s.') % {'key': key, 'fun': self.ssh_cmd,
'raw': self.raw})
raise exception.VolumeBackendAPIException(data=msg)
def __iter__(self):
for a in self.result:
yield a
def __len__(self):
return len(self.result)
def _parse(self):
def get_reader(content, delim):
for line in content.lstrip().splitlines():
line = line.strip()
if line:
yield line.split(delim)
else:
yield []
if isinstance(self.raw, six.string_types):
stdout, stderr = self.raw, ''
else:
stdout, stderr = self.raw
reader = get_reader(stdout, self.delim)
result = []
if self.with_header:
hds = tuple()
for row in reader:
hds = row
break
for row in reader:
cur = dict()
if len(hds) != len(row):
msg = (_('Unexpected CLI response: header/row mismatch. '
'header: %(header)s, row: %(row)s.')
% {'header': hds,
'row': row})
raise exception.VolumeBackendAPIException(data=msg)
for k, v in zip(hds, row):
CLIResponse.append_dict(cur, k, v)
result.append(cur)
else:
cur = dict()
for row in reader:
if row:
CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
elif cur: # start new section
result.append(cur)
cur = dict()
if cur:
result.append(cur)
return result
@staticmethod
def append_dict(dict_, key, value):
key, value = key.strip(), value.strip()
obj = dict_.get(key, None)
if obj is None:
dict_[key] = value
elif isinstance(obj, list):
obj.append(value)
dict_[key] = obj
else:
dict_[key] = [obj, value]
return dict_
class StorwizeSVCCommonDriver(san.SanDriver,
driver.ManageableVD,
driver.MigrateVD,
driver.CloneableImageVD):
"""IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers.
Version history:
.. code-block:: none
1.0 - Initial driver
1.1 - FC support, create_cloned_volume, volume type support,
get_volume_stats, minor bug fixes
1.2.0 - Added retype
1.2.1 - Code refactor, improved exception handling
1.2.2 - Fix bug #1274123 (races in host-related functions)
1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim
to lsfabric, clear unused data from connections, ensure
                matching WWPNs by comparing lower case)
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
1.3.1 - Added support for volume replication
1.3.2 - Added support for consistency group
1.3.3 - Update driver to use ABC metaclasses
2.0 - Code refactor, split init file and placed shared methods
for FC and iSCSI within the StorwizeSVCCommonDriver class
2.1 - Added replication V2 support to the global/metro mirror
mode
2.1.1 - Update replication to version 2.1
"""
VERSION = "2.1.1"
VDISKCOPYOPS_INTERVAL = 600
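    # Random sleep (chosen once at class definition) between 0.2 and 5.0
    # seconds, used between SSH retry attempts in _ssh_execute().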
DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0
def __init__(self, *args, **kwargs):
super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(storwize_svc_opts)
self._backend_name = self.configuration.safe_get('volume_backend_name')
self.active_ip = self.configuration.san_ip
self.inactive_ip = self.configuration.storwize_san_secondary_ip
self._master_backend_helpers = StorwizeHelpers(self._run_ssh)
self._aux_backend_helpers = None
self._helpers = self._master_backend_helpers
self._vdiskcopyops = {}
self._vdiskcopyops_loop = None
self.protocol = None
self._state = {'storage_nodes': {},
'enabled_protocols': set(),
'compression_enabled': False,
'available_iogrps': [],
'system_name': None,
'system_id': None,
'code_level': None,
}
self._active_backend_id = kwargs.get('active_backend_id')
# This dictionary is used to map each replication target to certain
# replication manager object.
self.replica_manager = {}
# One driver can be configured with only one replication target
# to failover.
self._replica_target = {}
# This boolean is used to indicate whether replication is supported
# by this storage.
self._replica_enabled = False
# This list is used to save the supported replication modes.
self._supported_replica_types = []
# This is used to save the available pools in failed-over status
self._secondary_pools = None
        # Storwize cannot accept more than 3 new SSH connections within
        # 1 second, so slow down initialization.
time.sleep(1)
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug('enter: do_setup')
# v2.1 replication setup
self._get_storwize_config()
# Update the storwize state
self._update_storwize_state()
# Validate that the pool exists
self._validate_pools_exist()
# Build the list of in-progress vdisk copy operations
if ctxt is None:
admin_context = context.get_admin_context()
else:
admin_context = ctxt.elevated()
volumes = objects.VolumeList.get_all_by_host(admin_context, self.host)
for volume in volumes:
metadata = volume.admin_metadata
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
self._vdiskcopyops[volume['id']] = ops
# if vdiskcopy exists in database, start the looping call
if len(self._vdiskcopyops) >= 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
LOG.debug('leave: do_setup')
def _update_storwize_state(self):
# Get storage system name, id, and code level
self._state.update(self._helpers.get_system_info())
# Check if compression is supported
self._state['compression_enabled'] = (self._helpers.
compression_enabled())
# Get the available I/O groups
self._state['available_iogrps'] = (self._helpers.
get_available_io_groups())
# Get the iSCSI and FC names of the Storwize/SVC nodes
self._state['storage_nodes'] = self._helpers.get_node_info()
# Add the iSCSI IP addresses and WWPNs to the storage node info
self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
self._helpers.add_fc_wwpns(self._state['storage_nodes'])
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
for k, node in self._state['storage_nodes'].items():
if ((len(node['ipv4']) or len(node['ipv6']))
and len(node['iscsi_name'])):
node['enabled_protocols'].append('iSCSI')
self._state['enabled_protocols'].add('iSCSI')
if len(node['WWPN']):
node['enabled_protocols'].append('FC')
self._state['enabled_protocols'].add('FC')
if not len(node['enabled_protocols']):
to_delete.append(k)
for delkey in to_delete:
del self._state['storage_nodes'][delkey]
def _get_backend_pools(self):
if not self._active_backend_id:
return self.configuration.storwize_svc_volpool_name
elif not self._secondary_pools:
self._secondary_pools = [self._replica_target.get('pool_name')]
return self._secondary_pools
def _validate_pools_exist(self):
# Validate that the pool exists
pools = self._get_backend_pools()
for pool in pools:
if not self._helpers.is_pool_defined(pool):
reason = (_('Failed getting details for pool %s.') % pool)
raise exception.InvalidInput(reason=reason)
def check_for_setup_error(self):
"""Ensure that the flags are set properly."""
LOG.debug('enter: check_for_setup_error')
# Check that we have the system ID information
if self._state['system_name'] is None:
exception_msg = (_('Unable to determine system name.'))
raise exception.VolumeBackendAPIException(data=exception_msg)
if self._state['system_id'] is None:
exception_msg = (_('Unable to determine system id.'))
raise exception.VolumeBackendAPIException(data=exception_msg)
# Make sure we have at least one node configured
if not len(self._state['storage_nodes']):
msg = _('do_setup: No configured nodes.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if self.protocol not in self._state['enabled_protocols']:
# TODO(mc_nair): improve this error message by looking at
# self._state['enabled_protocols'] to tell user what driver to use
raise exception.InvalidInput(
reason=_('The storage device does not support %(prot)s. '
'Please configure the device to support %(prot)s or '
'switch to a driver using a different protocol.')
% {'prot': self.protocol})
required_flags = ['san_ip', 'san_ssh_port', 'san_login',
'storwize_svc_volpool_name']
for flag in required_flags:
if not self.configuration.safe_get(flag):
raise exception.InvalidInput(reason=_('%s is not set.') % flag)
# Ensure that either password or keyfile were set
if not (self.configuration.san_password or
self.configuration.san_private_key):
raise exception.InvalidInput(
reason=_('Password or SSH private key is required for '
'authentication: set either san_password or '
'san_private_key option.'))
opts = self._helpers.build_default_opts(self.configuration)
self._helpers.check_vdisk_opts(self._state, opts)
LOG.debug('leave: check_for_setup_error')
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
cinder_utils.check_ssh_injection(cmd_list)
command = ' '.join(cmd_list)
if not self.sshpool:
try:
self.sshpool = self._set_up_sshpool(self.active_ip)
except paramiko.SSHException:
LOG.warning('Unable to use san_ip to create SSHPool. Now '
'attempting to use storwize_san_secondary_ip '
'to create SSHPool.')
if self._toggle_ip():
self.sshpool = self._set_up_sshpool(self.active_ip)
else:
LOG.warning('Unable to create SSHPool using san_ip '
'and not able to use '
'storwize_san_secondary_ip since it is '
'not configured.')
raise
try:
return self._ssh_execute(self.sshpool, command,
check_exit_code, attempts)
except Exception:
            # Need to check whether an SSHPool can be created using
            # storwize_san_secondary_ip before raising an error.
try:
if self._toggle_ip():
LOG.warning("Unable to execute SSH command with "
"%(inactive)s. Attempting to execute SSH "
"command with %(active)s.",
{'inactive': self.inactive_ip,
'active': self.active_ip})
self.sshpool = self._set_up_sshpool(self.active_ip)
return self._ssh_execute(self.sshpool, command,
check_exit_code, attempts)
else:
LOG.warning('Not able to use '
'storwize_san_secondary_ip since it is '
'not configured.')
raise
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error running SSH command: %s",
command)
def _set_up_sshpool(self, ip):
password = self.configuration.san_password
privatekey = self.configuration.san_private_key
min_size = self.configuration.ssh_min_pool_conn
max_size = self.configuration.ssh_max_pool_conn
sshpool = ssh_utils.SSHPool(
ip,
self.configuration.san_ssh_port,
self.configuration.ssh_conn_timeout,
self.configuration.san_login,
password=password,
privatekey=privatekey,
min_size=min_size,
max_size=max_size)
return sshpool
def _ssh_execute(self, sshpool, command,
                     check_exit_code=True, attempts=1):
try:
with sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
LOG.error('Error has occurred: %s', e)
last_exception = e
greenthread.sleep(self.DEFAULT_GR_SLEEP)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error running SSH command: %s", command)
def _toggle_ip(self):
# Change active_ip if storwize_san_secondary_ip is set.
if self.configuration.storwize_san_secondary_ip is None:
return False
self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip
LOG.info('Toggle active_ip from %(old)s to %(new)s.',
{'old': self.inactive_ip,
'new': self.active_ip})
return True
def ensure_export(self, ctxt, volume):
"""Check that the volume exists on the storage.
The system does not "export" volumes as a Linux iSCSI target does,
and therefore we just check that the volume exists on the storage.
"""
vol_name = self._get_target_vol(volume)
volume_defined = self._helpers.is_vdisk_defined(vol_name)
if not volume_defined:
LOG.error('ensure_export: Volume %s not found on storage.',
volume['name'])
def create_export(self, ctxt, volume, connector):
model_update = None
return model_update
def remove_export(self, ctxt, volume):
pass
def _get_vdisk_params(self, type_id, volume_type=None,
volume_metadata=None):
return self._helpers.get_vdisk_params(self.configuration,
self._state, type_id,
volume_type=volume_type,
volume_metadata=volume_metadata)
def create_volume(self, volume):
LOG.debug('enter: create_volume: volume %s', volume['name'])
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
pool = utils.extract_host(volume['host'], 'pool')
if opts['mirror_pool'] and rep_type:
reason = _('Create mirror volume with replication enabled is '
'not supported.')
raise exception.InvalidInput(reason=reason)
opts['iogrp'] = self._helpers.select_io_group(self._state, opts)
self._helpers.create_vdisk(volume['name'], str(volume['size']),
'gb', pool, opts)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
model_update = None
if rep_type:
replica_obj = self._get_replica_obj(rep_type)
replica_obj.volume_replication_setup(ctxt, volume)
model_update = {'replication_status':
fields.ReplicationStatus.ENABLED}
LOG.debug('leave: create_volume:\n volume: %(vol)s\n '
'model_update %(model_update)s',
{'vol': volume['name'],
'model_update': model_update})
return model_update
def delete_volume(self, volume):
LOG.debug('enter: delete_volume: volume %s', volume['name'])
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
if self._aux_backend_helpers:
self._aux_backend_helpers.delete_rc_volume(volume['name'],
target_vol=True)
if not self._active_backend_id:
self._master_backend_helpers.delete_rc_volume(volume['name'])
else:
# If it's in fail over state, also try to delete the volume
# in master backend
try:
self._master_backend_helpers.delete_rc_volume(
volume['name'])
except Exception as ex:
                    LOG.error('Failed to delete volume %(volume)s in the '
'master backend. Exception: %(err)s.',
{'volume': volume['name'],
'err': ex})
else:
if self._active_backend_id:
                msg = (_('Error: delete non-replicated volume in failover mode'
' is not allowed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self._helpers.delete_vdisk(volume['name'], False)
if volume['id'] in self._vdiskcopyops:
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
LOG.debug('leave: delete_volume: volume %s', volume['name'])
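    # create_snapshot: takes a FlashCopy of the source vdisk. Snapshots of
    # GMCV-replicated volumes are rejected because an extra FlashCopy
    # relationship interferes with failback.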
def create_snapshot(self, snapshot):
ctxt = context.get_admin_context()
try:
# TODO(zhaochy): change to use snapshot.volume
source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
except Exception:
msg = (_('create_snapshot: get source volume failed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
rep_type = self._get_volume_replicated_type(
ctxt, None, source_vol['volume_type_id'])
if rep_type == storwize_const.GMCV:
# GMCV volume will have problem to failback
# when it has flash copy relationship besides change volumes
msg = _('create_snapshot: Create snapshot to '
'gmcv replication volume is not allowed.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
pool = utils.extract_host(source_vol['host'], 'pool')
opts = self._get_vdisk_params(source_vol['volume_type_id'])
self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
snapshot['volume_id'], self.configuration,
opts, False, pool=pool)
def delete_snapshot(self, snapshot):
self._helpers.delete_vdisk(snapshot['name'], False)
def create_volume_from_snapshot(self, volume, snapshot):
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
pool = utils.extract_host(volume['host'], 'pool')
self._helpers.create_copy(snapshot['name'], volume['name'],
snapshot['id'], self.configuration,
opts, True, pool=pool)
# The volume size is equal to the snapshot size in most
# of the cases. But in some scenario, the volume size
# may be bigger than the source volume size.
# SVC does not support flashcopy between two volumes
# with two different size. So use the snapshot size to
# create volume first and then extend the volume to-
# the target size.
if volume['size'] > snapshot['volume_size']:
# extend the new created target volume to expected size.
self._extend_volume_op(volume, volume['size'],
snapshot['volume_size'])
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
self._validate_replication_enabled()
replica_obj = self._get_replica_obj(rep_type)
replica_obj.volume_replication_setup(ctxt, volume)
return {'replication_status': fields.ReplicationStatus.ENABLED}
def create_cloned_volume(self, tgt_volume, src_volume):
"""Creates a clone of the specified volume."""
opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
volume_metadata=
tgt_volume.get('volume_metadata'))
pool = utils.extract_host(tgt_volume['host'], 'pool')
self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
src_volume['id'], self.configuration,
opts, True, pool=pool)
# The source volume size is equal to target volume size
# in most of the cases. But in some scenarios, the target
# volume size may be bigger than the source volume size.
# SVC does not support flashcopy between two volumes
# with two different sizes. So use source volume size to
# create target volume first and then extend target
# volume to original size.
if tgt_volume['size'] > src_volume['size']:
# extend the new created target volume to expected size.
self._extend_volume_op(tgt_volume, tgt_volume['size'],
src_volume['size'])
if opts['qos']:
self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, tgt_volume)
if rep_type:
self._validate_replication_enabled()
replica_obj = self._get_replica_obj(rep_type)
replica_obj.volume_replication_setup(ctxt, tgt_volume)
return {'replication_status': fields.ReplicationStatus.ENABLED}
def extend_volume(self, volume, new_size):
self._extend_volume_op(volume, new_size)
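    # _extend_volume_op: volumes with FlashCopy mappings cannot be extended.
    # For remote-copy volumes the relationship is deleted, the master, aux
    # (and, for GMCV, the change) volumes are extended, and the relationship
    # is then recreated.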
def _extend_volume_op(self, volume, new_size, old_size=None):
LOG.debug('enter: _extend_volume_op: volume %s', volume['id'])
volume_name = self._get_target_vol(volume)
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume_name,
allow_snaps=False)
if not ret:
msg = (_('_extend_volume_op: Extending a volume with snapshots is '
'not supported.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if old_size is None:
old_size = volume.size
extend_amt = int(new_size) - old_size
rel_info = self._helpers.get_relationship_info(volume_name)
if rel_info:
LOG.warning('_extend_volume_op: Extending a volume with '
'remote copy is not recommended.')
try:
rep_type = rel_info['copy_type']
cyclingmode = rel_info['cycling_mode']
self._master_backend_helpers.delete_relationship(
volume.name)
tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
volume.name)
self._master_backend_helpers.extend_vdisk(volume.name,
extend_amt)
self._aux_backend_helpers.extend_vdisk(tgt_vol, extend_amt)
tgt_sys = self._aux_backend_helpers.get_system_info()
if storwize_const.GMCV_MULTI == cyclingmode:
tgt_change_vol = (
storwize_const.REPLICA_CHG_VOL_PREFIX +
tgt_vol)
source_change_vol = (
storwize_const.REPLICA_CHG_VOL_PREFIX +
volume.name)
self._master_backend_helpers.extend_vdisk(
source_change_vol, extend_amt)
self._aux_backend_helpers.extend_vdisk(
tgt_change_vol, extend_amt)
src_change_opts = self._get_vdisk_params(
volume.volume_type_id)
cycle_period_seconds = src_change_opts.get(
'cycle_period_seconds')
self._master_backend_helpers.create_relationship(
volume.name, tgt_vol, tgt_sys.get('system_name'),
True, True, source_change_vol, cycle_period_seconds)
self._aux_backend_helpers.change_relationship_changevolume(
tgt_vol, tgt_change_vol, False)
self._master_backend_helpers.start_relationship(
volume.name)
else:
self._master_backend_helpers.create_relationship(
volume.name, tgt_vol, tgt_sys.get('system_name'),
True if storwize_const.GLOBAL == rep_type else False)
except Exception as e:
msg = (_('Failed to extend a volume with remote copy '
'%(volume)s. Exception: '
'%(err)s.') % {'volume': volume.id,
'err': e})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self._helpers.extend_vdisk(volume_name, extend_amt)
LOG.debug('leave: _extend_volume_op: volume %s', volume.id)
def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False):
return self._helpers.add_vdisk_copy(volume, dest_pool,
vol_type, self._state,
self.configuration,
auto_delete=auto_delete)
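    # _add_vdisk_copy_op: records a pending (orig_copy_id, new_copy_id) pair
    # in the volume admin metadata (key 'vdiskcopyops') and in the in-memory
    # map, and starts the periodic sync-check loop on the first entry.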
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
            # list.append() returns None; keep the mutated list itself.
            curr_ops_list.append(new_op)
            new_ops_list = curr_ops_list
else:
new_ops_list = [new_op]
new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
if volume['id'] in self._vdiskcopyops:
self._vdiskcopyops[volume['id']].append(new_op)
else:
self._vdiskcopyops[volume['id']] = [new_op]
# We added the first copy operation, so start the looping call
if len(self._vdiskcopyops) == 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
try:
self._vdiskcopyops[volume['id']].remove((orig_copy_id,
new_copy_id))
if not len(self._vdiskcopyops[volume['id']]):
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
except KeyError:
LOG.error('_rm_vdisk_copy_op: Volume %s does not have any '
'registered vdisk copy operations.', volume['id'])
return
except ValueError:
LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have '
'the specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.',
{'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
return
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if not curr_ops:
LOG.error('_rm_vdisk_copy_op: Volume metadata %s does not '
'have any registered vdisk copy operations.',
volume['id'])
return
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
try:
curr_ops_list.remove((orig_copy_id, new_copy_id))
except ValueError:
LOG.error('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
'not have the specified vdisk copy operation: '
'orig=%(orig)s new=%(new)s.',
{'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
return
if len(curr_ops_list):
new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
else:
self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
'vdiskcopyops')
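    # _check_volume_copy_ops: periodic task; once a new vdisk copy reports
    # synchronized, the old copy is removed and the bookkeeping entry is
    # cleared via _rm_vdisk_copy_op.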
def _check_volume_copy_ops(self):
LOG.debug("Enter: update volume copy status.")
ctxt = context.get_admin_context()
copy_items = list(self._vdiskcopyops.items())
for vol_id, copy_ops in copy_items:
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
LOG.warning('Volume %s does not exist.', vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
continue
for copy_op in copy_ops:
try:
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
LOG.info('_check_volume_copy_ops: Volume %(vol)s does '
'not have the specified vdisk copy '
'operation: orig=%(orig)s new=%(new)s.',
{'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
copy_op[1])
LOG.debug("Exit: update volume copy status.")
# #### V2.1 replication methods #### #
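    # failover_host dispatches to _replication_failback when the special
    # FAILBACK_VALUE is requested and to _replication_failover when the
    # configured replication target (or None) is given.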
def failover_host(self, context, volumes, secondary_id=None, groups=None):
LOG.debug('enter: failover_host: secondary_id=%(id)s',
{'id': secondary_id})
if not self._replica_enabled:
msg = _("Replication is not properly enabled on backend.")
LOG.error(msg)
raise exception.UnableToFailOver(reason=msg)
if storwize_const.FAILBACK_VALUE == secondary_id:
# In this case the administrator would like to fail back.
secondary_id, volumes_update = self._replication_failback(context,
volumes)
elif (secondary_id == self._replica_target['backend_id']
or secondary_id is None):
# In this case the administrator would like to fail over.
secondary_id, volumes_update = self._replication_failover(context,
volumes)
else:
msg = (_("Invalid secondary id %s.") % secondary_id)
LOG.error(msg)
raise exception.InvalidReplicationTarget(reason=msg)
LOG.debug('leave: failover_host: secondary_id=%(id)s',
{'id': secondary_id})
return secondary_id, volumes_update, []
def _replication_failback(self, ctxt, volumes):
"""Fail back all the volume on the secondary backend."""
volumes_update = []
if not self._active_backend_id:
LOG.info("Host has been failed back. doesn't need "
"to fail back again")
return None, volumes_update
try:
self._master_backend_helpers.get_system_info()
except Exception:
msg = (_("Unable to failback due to primary is not reachable."))
LOG.error(msg)
raise exception.UnableToFailOver(reason=msg)
unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
# start synchronize from aux volume to master volume
self._sync_with_aux(ctxt, rep_volumes)
self._wait_replica_ready(ctxt, rep_volumes)
rep_volumes_update = self._failback_replica_volumes(ctxt,
rep_volumes)
volumes_update.extend(rep_volumes_update)
unrep_volumes_update = self._failover_unreplicated_volume(
unrep_volumes)
volumes_update.extend(unrep_volumes_update)
self._helpers = self._master_backend_helpers
self._active_backend_id = None
# Update the storwize state
self._update_storwize_state()
self._update_volume_stats()
return storwize_const.FAILBACK_VALUE, volumes_update
def _failback_replica_volumes(self, ctxt, rep_volumes):
LOG.debug('enter: _failback_replica_volumes')
volumes_update = []
for volume in rep_volumes:
rep_type = self._get_volume_replicated_type(ctxt, volume)
replica_obj = self._get_replica_obj(rep_type)
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
rep_info = self._helpers.get_relationship_info(tgt_volume)
if not rep_info:
volumes_update.append(
{'volume_id': volume['id'],
'updates':
{'replication_status':
fields.ReplicationStatus.ERROR,
'status': 'error'}})
                LOG.error('_failback_replica_volumes: no rc-relationship '
'is established between master: %(master)s and '
'aux %(aux)s. Please re-establish the '
'relationship and synchronize the volumes on '
'backend storage.',
{'master': volume['name'], 'aux': tgt_volume})
continue
            LOG.debug('_failback_replica_volumes: vol=%(vol)s, master_vol='
                      '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
                      'primary=%(primary)s',
{'vol': volume['name'],
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
try:
model_updates = replica_obj.replication_failback(volume)
volumes_update.append(
{'volume_id': volume['id'],
'updates': model_updates})
except exception.VolumeDriverException:
LOG.error('Unable to fail back volume %(volume_id)s',
{'volume_id': volume.id})
volumes_update.append(
{'volume_id': volume['id'],
'updates': {'replication_status':
fields.ReplicationStatus.ERROR,
'status': 'error'}})
LOG.debug('leave: _failback_replica_volumes '
'volumes_update=%(volumes_update)s',
{'volumes_update': volumes_update})
return volumes_update
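    # Non-replicated volumes cannot actually fail over; their previous
    # status is stashed in (or restored from) replication_driver_data and
    # the volume is set to 'error' while the host is failed over.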
def _failover_unreplicated_volume(self, unreplicated_vols):
volumes_update = []
for vol in unreplicated_vols:
if vol.replication_driver_data:
rep_data = json.loads(vol.replication_driver_data)
update_status = rep_data['previous_status']
rep_data = ''
else:
update_status = 'error'
rep_data = json.dumps({'previous_status': vol.status})
volumes_update.append(
{'volume_id': vol.id,
'updates': {'status': update_status,
'replication_driver_data': rep_data}})
return volumes_update
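    # _sync_with_aux: re-establishes the partnership with the target cluster
    # and (re)starts each remote-copy relationship so data is copied from
    # the aux volume back to the master before failback.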
def _sync_with_aux(self, ctxt, volumes):
LOG.debug('enter: _sync_with_aux ')
try:
rep_mgr = self._get_replica_mgr()
rep_mgr.establish_target_partnership()
except Exception as ex:
            LOG.warning('Failed to establish partnership in backend. '
                        'error=%(error)s', {'error': ex})
for volume in volumes:
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
rep_info = self._helpers.get_relationship_info(tgt_volume)
if not rep_info:
                LOG.error('_sync_with_aux: no rc-relationship is '
'established between master: %(master)s and aux '
'%(aux)s. Please re-establish the relationship '
'and synchronize the volumes on backend '
'storage.', {'master': volume['name'],
'aux': tgt_volume})
continue
LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol='
'%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
'primary=%(primary)s',
{'volume': volume['name'],
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
try:
if (rep_info['state'] not in
[storwize_const.REP_CONSIS_SYNC,
storwize_const.REP_CONSIS_COPYING]):
if rep_info['primary'] == 'master':
self._helpers.start_relationship(tgt_volume)
else:
self._helpers.start_relationship(tgt_volume,
primary='aux')
except Exception as ex:
                LOG.warning('Failed to copy data from aux to master. master:'
                            ' %(master)s and aux %(aux)s. Please '
                            're-establish the relationship and synchronize'
                            ' the volumes on backend storage. error='
                            '%(error)s', {'master': volume['name'],
                                          'aux': tgt_volume,
                                          'error': ex})
LOG.debug('leave: _sync_with_aux.')
def _wait_replica_ready(self, ctxt, volumes):
for volume in volumes:
tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
try:
self._wait_replica_vol_ready(ctxt, tgt_volume)
except Exception as ex:
LOG.error('_wait_replica_ready: wait for volume:%(volume)s'
' remote copy synchronization failed due to '
'error:%(err)s.', {'volume': tgt_volume,
'err': ex})
def _wait_replica_vol_ready(self, ctxt, volume):
LOG.debug('enter: _wait_replica_vol_ready: volume=%(volume)s',
{'volume': volume})
def _replica_vol_ready():
rep_info = self._helpers.get_relationship_info(volume)
if not rep_info:
                msg = (_('_wait_replica_vol_ready: no rc-relationship '
                         'is established for volume:%(volume)s. Please '
                         're-establish the rc-relationship and '
                         'synchronize the volumes on backend storage.') %
                       {'volume': volume})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('_replica_vol_ready:volume: %(volume)s rep_info: '
'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
'state=%(state)s, primary=%(primary)s',
{'volume': volume,
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
if (rep_info['state'] in
[storwize_const.REP_CONSIS_SYNC,
storwize_const.REP_CONSIS_COPYING]):
return True
elif rep_info['state'] == storwize_const.REP_IDL_DISC:
                msg = (_('Wait synchronize failed. volume: %(volume)s') %
                       {'volume': volume})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return False
self._helpers._wait_for_a_condition(
_replica_vol_ready, timeout=storwize_const.DEFAULT_RC_TIMEOUT,
interval=storwize_const.DEFAULT_RC_INTERVAL,
raise_exception=True)
LOG.debug('leave: _wait_replica_vol_ready: volume=%(volume)s',
{'volume': volume})
def _replication_failover(self, ctxt, volumes):
volumes_update = []
if self._active_backend_id:
LOG.info("Host has been failed over to %s",
self._active_backend_id)
return self._active_backend_id, volumes_update
try:
self._aux_backend_helpers.get_system_info()
except Exception as ex:
msg = (_("Unable to failover due to replication target is not "
"reachable. error=%(ex)s"), {'error': ex})
LOG.error(msg)
raise exception.UnableToFailOver(reason=msg)
unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes)
volumes_update.extend(rep_volumes_update)
unrep_volumes_update = self._failover_unreplicated_volume(
unrep_volumes)
volumes_update.extend(unrep_volumes_update)
self._helpers = self._aux_backend_helpers
self._active_backend_id = self._replica_target['backend_id']
self._secondary_pools = [self._replica_target['pool_name']]
# Update the storwize state
self._update_storwize_state()
self._update_volume_stats()
return self._active_backend_id, volumes_update
def _failover_replica_volumes(self, ctxt, rep_volumes):
LOG.debug('enter: _failover_replica_volumes')
volumes_update = []
for volume in rep_volumes:
rep_type = self._get_volume_replicated_type(ctxt, volume)
replica_obj = self._get_replica_obj(rep_type)
# Try do the fail-over.
try:
rep_info = self._aux_backend_helpers.get_relationship_info(
storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'])
if not rep_info:
volumes_update.append(
{'volume_id': volume['id'],
'updates':
{'replication_status':
fields.ReplicationStatus.FAILOVER_ERROR,
'status': 'error'}})
LOG.error('_failover_replica_volumes: no rc-'
                              'relationship is established for master:'
'%(master)s. Please re-establish the rc-'
'relationship and synchronize the volumes on'
' backend storage.',
{'master': volume['name']})
continue
LOG.debug('_failover_replica_volumes: vol=%(vol)s, '
'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
'state=%(state)s, primary=%(primary)s',
{'vol': volume['name'],
'master_vol': rep_info['master_vdisk_name'],
'aux_vol': rep_info['aux_vdisk_name'],
'state': rep_info['state'],
'primary': rep_info['primary']})
model_updates = replica_obj.failover_volume_host(ctxt, volume)
volumes_update.append(
{'volume_id': volume['id'],
'updates': model_updates})
except exception.VolumeDriverException:
LOG.error('Unable to failover to aux volume. Please make '
'sure that the aux volume is ready.')
volumes_update.append(
{'volume_id': volume['id'],
'updates': {'status': 'error',
'replication_status':
fields.ReplicationStatus.FAILOVER_ERROR}})
LOG.debug('leave: _failover_replica_volumes '
'volumes_update=%(volumes_update)s',
{'volumes_update': volumes_update})
return volumes_update
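    # Split volumes into non-replicated and replicated groups; only
    # 'available' volumes with a replicated volume type are treated as
    # replica volumes here.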
def _classify_volume(self, ctxt, volumes):
normal_volumes = []
replica_volumes = []
for v in volumes:
volume_type = self._get_volume_replicated_type(ctxt, v)
if volume_type and v['status'] == 'available':
replica_volumes.append(v)
else:
normal_volumes.append(v)
return normal_volumes, replica_volumes
def _get_replica_obj(self, rep_type):
replica_manager = self.replica_manager[
self._replica_target['backend_id']]
return replica_manager.get_replica_obj(rep_type)
def _get_replica_mgr(self):
replica_manager = self.replica_manager[
self._replica_target['backend_id']]
return replica_manager
def _get_target_vol(self, volume):
tgt_vol = volume['name']
if self._active_backend_id:
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
if rep_type:
tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
volume['name'])
return tgt_vol
def _validate_replication_enabled(self):
if not self._replica_enabled:
msg = _("Replication is not properly configured on backend.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
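    # Replication is driven by volume-type extra specs. Illustrative example
    # (values are hypothetical, not taken from this file):
    #   extra_specs = {'replication_enabled': '<is> True',
    #                  'replication_type': '<in> global'}
    # The type may be metro, global or gmcv and defaults to global when only
    # 'replication_enabled' is set.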
def _get_specs_replicated_type(self, volume_type):
replication_type = None
extra_specs = volume_type.get("extra_specs", {})
rep_val = extra_specs.get('replication_enabled')
if rep_val == "<is> True":
replication_type = extra_specs.get('replication_type',
storwize_const.GLOBAL)
# The format for replication_type in extra spec is in
# "<in> global". Otherwise, the code will
# not reach here.
if replication_type != storwize_const.GLOBAL:
# Pick up the replication type specified in the
# extra spec from the format like "<in> global".
replication_type = replication_type.split()[1]
if replication_type not in storwize_const.VALID_REP_TYPES:
msg = (_("Invalid replication type %s.") % replication_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return replication_type
def _get_volume_replicated_type(self, ctxt, volume, vol_type_id=None):
replication_type = None
volume_type = None
volume_type_id = volume.volume_type_id if volume else vol_type_id
if volume_type_id:
volume_type = objects.VolumeType.get_by_name_or_id(
ctxt, volume_type_id)
if volume_type:
replication_type = self._get_specs_replicated_type(volume_type)
return replication_type
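    # _get_storwize_config: runs replication setup, points self._helpers at
    # the aux backend when the driver starts in a failed-over state, and
    # marks replication enabled only if it is licensed and a target exists.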
def _get_storwize_config(self):
self._do_replication_setup()
if self._active_backend_id and self._replica_target:
self._helpers = self._aux_backend_helpers
self._replica_enabled = (True if (self._helpers.replication_licensed()
and self._replica_target) else False)
if self._replica_enabled:
self._supported_replica_types = storwize_const.VALID_REP_TYPES
def _do_replication_setup(self):
rep_devs = self.configuration.safe_get('replication_device')
if not rep_devs:
return
if len(rep_devs) > 1:
raise exception.InvalidInput(
reason='Multiple replication devices are configured. '
                       'Only one replication_device is currently supported.')
required_flags = ['san_ip', 'backend_id', 'san_login',
'san_password', 'pool_name']
for flag in required_flags:
if flag not in rep_devs[0]:
raise exception.InvalidInput(
reason=_('%s is not set.') % flag)
rep_target = {}
rep_target['san_ip'] = rep_devs[0].get('san_ip')
rep_target['backend_id'] = rep_devs[0].get('backend_id')
rep_target['san_login'] = rep_devs[0].get('san_login')
rep_target['san_password'] = rep_devs[0].get('san_password')
rep_target['pool_name'] = rep_devs[0].get('pool_name')
# Each replication target will have a corresponding replication.
self._replication_initialize(rep_target)
def _replication_initialize(self, target):
rep_manager = storwize_rep.StorwizeSVCReplicationManager(
self, target, StorwizeHelpers)
if self._active_backend_id:
if self._active_backend_id != target['backend_id']:
msg = (_("Invalid secondary id %s.") % self._active_backend_id)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
# Setup partnership only in non-failover state
else:
try:
rep_manager.establish_target_partnership()
except exception.VolumeDriverException:
LOG.error('The replication src %(src)s has not '
'successfully established partnership with the '
'replica target %(tgt)s.',
{'src': self.configuration.san_ip,
'tgt': target['backend_id']})
self._aux_backend_helpers = rep_manager.get_target_helpers()
self.replica_manager[target['backend_id']] = rep_manager
self._replica_target = target
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage.
We create a new vdisk copy in the desired pool, and add the original
vdisk copy to the admin_metadata of the volume to be deleted. The
deletion will occur using a periodic task once the new copy is synced.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return false_ret
ctxt = context.get_admin_context()
volume_type_id = volume['volume_type_id']
if volume_type_id is not None:
vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
else:
vol_type = None
resp = self._helpers.lsvdiskcopy(volume.name)
if len(resp) > 1:
copies = self._helpers.get_vdisk_copies(volume.name)
self._helpers.migratevdisk(volume.name, dest_pool,
copies['primary']['copy_id'])
else:
self.add_vdisk_copy(volume.name, dest_pool, vol_type,
auto_delete=True)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume.id, 'host': host['host']})
return (True, None)
def _verify_retype_params(self, volume, new_opts, old_opts, need_copy,
change_mirror, new_rep_type, old_rep_type):
# Some volume parameters can not be changed or changed at the same
# time during volume retype operation. This function checks the
# retype parameters.
resp = self._helpers.lsvdiskcopy(volume.name)
if old_opts['mirror_pool'] and len(resp) == 1:
            msg = (_('Unable to retype: volume %s is a mirrored vol. But it '
'has only one copy in storage.') % volume.name)
raise exception.VolumeDriverException(message=msg)
if need_copy:
# mirror volume can not add volume-copy again.
if len(resp) > 1:
msg = (_('Unable to retype: current action needs volume-copy. '
'A copy of volume %s exists. Adding another copy '
'would exceed the limit of 2 copies.') % volume.name)
raise exception.VolumeDriverException(message=msg)
if old_opts['mirror_pool'] or new_opts['mirror_pool']:
msg = (_('Unable to retype: current action needs volume-copy, '
'it is not allowed for mirror volume '
'%s.') % volume.name)
raise exception.VolumeDriverException(message=msg)
if change_mirror:
if (new_opts['mirror_pool'] and
not self._helpers.is_pool_defined(
new_opts['mirror_pool'])):
msg = (_('Unable to retype: The pool %s in which mirror copy '
'is stored is not valid') % new_opts['mirror_pool'])
raise exception.VolumeDriverException(message=msg)
# There are four options for rep_type: None, metro, global, gmcv
if new_rep_type or old_rep_type:
# If volume is replicated, can't copy
if need_copy or new_opts['mirror_pool'] or old_opts['mirror_pool']:
msg = (_('Unable to retype: current action needs volume-copy, '
'it is not allowed for replication type. '
'Volume = %s') % volume.id)
raise exception.VolumeDriverException(message=msg)
if new_rep_type != old_rep_type:
old_io_grp = self._helpers.get_volume_io_group(volume.name)
if (old_io_grp not in
StorwizeHelpers._get_valid_requested_io_groups(
self._state, new_opts)):
msg = (_('Unable to retype: it is not allowed to change '
'replication type and io group at the same time.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
if new_rep_type and old_rep_type:
msg = (_('Unable to retype: it is not allowed to change '
'%(old_rep_type)s volume to %(new_rep_type)s '
'volume.') %
{'old_rep_type': old_rep_type,
'new_rep_type': new_rep_type})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
elif storwize_const.GMCV == new_rep_type:
# To gmcv, we may change cycle_period_seconds if needed
previous_cps = old_opts.get('cycle_period_seconds')
new_cps = new_opts.get('cycle_period_seconds')
if previous_cps != new_cps:
self._helpers.change_relationship_cycleperiod(volume.name,
new_cps)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def retype_iogrp_property(volume, new, old):
if new != old:
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
no_copy_keys = ['warning', 'autoexpand', 'easytier']
copy_keys = ['rsize', 'grainsize', 'compression']
all_keys = no_copy_keys + copy_keys
old_opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
                                          volume.get('volume_metadata'))
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
vdisk_changes = []
need_copy = False
change_mirror = False
for key in all_keys:
if old_opts[key] != new_opts[key]:
if key in copy_keys:
need_copy = True
break
elif key in no_copy_keys:
vdisk_changes.append(key)
if (utils.extract_host(volume['host'], 'pool') !=
utils.extract_host(host['host'], 'pool')):
need_copy = True
if old_opts['mirror_pool'] != new_opts['mirror_pool']:
change_mirror = True
# Check if retype affects volume replication
model_update = None
new_rep_type = self._get_specs_replicated_type(new_type)
old_rep_type = self._get_volume_replicated_type(ctxt, volume)
old_io_grp = self._helpers.get_volume_io_group(volume['name'])
new_io_grp = self._helpers.select_io_group(self._state, new_opts)
self._verify_retype_params(volume, new_opts, old_opts, need_copy,
change_mirror, new_rep_type, old_rep_type)
if need_copy:
self._check_volume_copy_ops()
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return False
retype_iogrp_property(volume,
new_io_grp, old_io_grp)
try:
self.add_vdisk_copy(volume['name'], dest_pool, new_type,
auto_delete=True)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_io_grp, new_io_grp)
msg = (_('Unable to retype: A copy of volume %s exists. '
'Retyping would exceed the limit of 2 copies.'),
volume['id'])
raise exception.VolumeDriverException(message=msg)
else:
retype_iogrp_property(volume, new_io_grp, old_io_grp)
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
if change_mirror:
copies = self._helpers.get_vdisk_copies(volume.name)
if not old_opts['mirror_pool'] and new_opts['mirror_pool']:
# retype from non mirror vol to mirror vol
self.add_vdisk_copy(volume['name'],
new_opts['mirror_pool'], new_type)
elif old_opts['mirror_pool'] and not new_opts['mirror_pool']:
# retype from mirror vol to non mirror vol
secondary = copies['secondary']
if secondary:
self._helpers.rm_vdisk_copy(
volume.name, secondary['copy_id'])
else:
# migrate the second copy to another pool.
self._helpers.migratevdisk(
volume.name, new_opts['mirror_pool'],
copies['secondary']['copy_id'])
if new_opts['qos']:
# Add the new QoS setting to the volume. If the volume has an
# old QoS setting, it will be overwritten.
self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
elif old_opts['qos']:
# If the old_opts contain QoS keys, disable them.
self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
# Delete replica if needed
if old_rep_type and not new_rep_type:
self._aux_backend_helpers.delete_rc_volume(volume['name'],
target_vol=True)
if storwize_const.GMCV == old_rep_type:
self._helpers.delete_vdisk(
storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'],
False)
model_update = {'replication_status':
fields.ReplicationStatus.DISABLED,
'replication_driver_data': None,
'replication_extended_status': None}
# Add replica if needed
if not old_rep_type and new_rep_type:
replica_obj = self._get_replica_obj(new_rep_type)
replica_obj.volume_replication_setup(ctxt, volume)
if storwize_const.GMCV == new_rep_type:
# Set cycle_period_seconds if needed
self._helpers.change_relationship_cycleperiod(
volume['name'],
new_opts.get('cycle_period_seconds'))
model_update = {'replication_status':
fields.ReplicationStatus.ENABLED}
        LOG.debug('exit: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True, model_update
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from Storwize for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self._helpers.rename_vdisk(current_name, original_volume_name)
rep_type = self._get_volume_replicated_type(ctxt, new_volume)
if rep_type:
rel_info = self._helpers.get_relationship_info(current_name)
aux_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
original_volume_name)
self._aux_backend_helpers.rename_vdisk(
rel_info['aux_vdisk_name'], aux_vol)
except exception.VolumeBackendAPIException:
LOG.error('Unable to rename the logical volume '
'for volume: %s', volume['id'])
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
# If the back-end name(id) for the volume has been renamed,
# it is OK for the volume to keep the original name(id) and there is
# no need to use the column "_name_id" to establish the mapping
# relationship between the volume id and the back-end volume
# name(id).
# Set the key "_name_id" to None for a successful rename.
model_update = {'_name_id': None}
return model_update
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
Renames the vdisk to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated -
if we got here then we have a vdisk that isn't in use (or we don't
        care if it is in use).
"""
# Check that the reference is valid
vdisk = self._manage_input_check(ref)
vdisk_io_grp = self._helpers.get_volume_io_group(vdisk['name'])
if vdisk_io_grp not in self._state['available_iogrps']:
msg = (_("Failed to manage existing volume due to "
"the volume to be managed is not in a valid "
"I/O group."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
# Add replication check
ctxt = context.get_admin_context()
rep_type = self._get_volume_replicated_type(ctxt, volume)
vol_rep_type = None
rel_info = self._helpers.get_relationship_info(vdisk['name'])
copies = self._helpers.get_vdisk_copies(vdisk['name'])
if rel_info:
vol_rep_type = (
storwize_const.GMCV if
storwize_const.GMCV_MULTI == rel_info['cycling_mode']
else rel_info['copy_type'])
aux_info = self._aux_backend_helpers.get_system_info()
if rel_info['aux_cluster_id'] != aux_info['system_id']:
msg = (_("Failed to manage existing volume due to the aux "
"cluster for volume %(volume)s is %(aux_id)s. The "
"configured cluster id is %(cfg_id)s") %
{'volume': vdisk['name'],
'aux_id': rel_info['aux_cluster_id'],
'cfg_id': aux_info['system_id']})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if vol_rep_type != rep_type:
msg = (_("Failed to manage existing volume due to "
"the replication type of the volume to be managed is "
"mismatch with the provided replication type."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
elif storwize_const.GMCV == rep_type:
if volume['volume_type_id']:
rep_opts = self._get_vdisk_params(
volume['volume_type_id'],
volume_metadata=volume.get('volume_metadata'))
# Check cycle_period_seconds
rep_cps = six.text_type(rep_opts.get('cycle_period_seconds'))
if rel_info['cycle_period_seconds'] != rep_cps:
msg = (_("Failed to manage existing volume due to "
"the cycle_period_seconds %(vol_cps)s of "
"the volume to be managed is mismatch with "
"cycle_period_seconds %(type_cps)s in "
"the provided gmcv replication type.") %
{'vol_cps': rel_info['cycle_period_seconds'],
'type_cps': rep_cps})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if volume['volume_type_id']:
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
resp = self._helpers.lsvdiskcopy(vdisk['name'])
expected_copy_num = 2 if opts['mirror_pool'] else 1
if len(resp) != expected_copy_num:
msg = (_("Failed to manage existing volume due to mirror type "
"mismatch. Volume to be managed has %(resp_len)s "
"copies. mirror_pool of the chosen type is "
"%(mirror_pool)s.") %
{'resp_len': len(resp),
'mirror_pool': opts['mirror_pool']})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
            if (opts['mirror_pool'] and opts['mirror_pool'] !=
copies['secondary']['mdisk_grp_name']):
msg = (_("Failed to manage existing volume due to mirror pool "
"mismatch. The secondary pool of the volume to be "
"managed is %(sec_copy_pool)s. mirror_pool of the "
"chosen type is %(mirror_pool)s.") %
{'sec_copy_pool': copies['secondary']['mdisk_grp_name'],
'mirror_pool': opts['mirror_pool']})
raise exception.ManageExistingVolumeTypeMismatch(
reason=msg)
vdisk_copy = self._helpers.get_vdisk_copy_attrs(vdisk['name'], '0')
if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1:
msg = (_("Failed to manage existing volume due to "
"the volume to be managed is thin, but "
"the volume type chosen is thick."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if not vdisk_copy['autoexpand'] and opts['rsize'] != -1:
msg = (_("Failed to manage existing volume due to "
"the volume to be managed is thick, but "
"the volume type chosen is thin."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if (vdisk_copy['compressed_copy'] == 'no' and
opts['compression']):
msg = (_("Failed to manage existing volume due to the "
"volume to be managed is not compress, but "
"the volume type chosen is compress."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if (vdisk_copy['compressed_copy'] == 'yes' and
not opts['compression']):
msg = (_("Failed to manage existing volume due to the "
"volume to be managed is compress, but "
"the volume type chosen is not compress."))
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if (vdisk_io_grp not in
StorwizeHelpers._get_valid_requested_io_groups(
self._state, opts)):
msg = (_("Failed to manage existing volume due to "
"I/O group mismatch. The I/O group of the "
"volume to be managed is %(vdisk_iogrp)s. I/O group"
"of the chosen type is %(opt_iogrp)s.") %
{'vdisk_iogrp': vdisk['IO_group_name'],
'opt_iogrp': opts['iogrp']})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
pool = utils.extract_host(volume['host'], 'pool')
if copies['primary']['mdisk_grp_name'] != pool:
msg = (_("Failed to manage existing volume due to the "
"pool of the volume to be managed does not "
"match the backend pool. Pool of the "
"volume to be managed is %(vdisk_pool)s. Pool "
"of the backend is %(backend_pool)s.") %
{'vdisk_pool': copies['primary']['mdisk_grp_name'],
'backend_pool': pool})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
model_update = {}
self._helpers.rename_vdisk(vdisk['name'], volume['name'])
if vol_rep_type:
aux_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
self._aux_backend_helpers.rename_vdisk(rel_info['aux_vdisk_name'],
aux_vol)
if storwize_const.GMCV == vol_rep_type:
self._helpers.rename_vdisk(
rel_info['master_change_vdisk_name'],
storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'])
self._aux_backend_helpers.rename_vdisk(
rel_info['aux_change_vdisk_name'],
storwize_const.REPLICA_CHG_VOL_PREFIX + aux_vol)
model_update = {'replication_status':
fields.ReplicationStatus.ENABLED}
return model_update
def manage_existing_get_size(self, volume, ref):
"""Return size of an existing Vdisk for manage_existing.
existing_ref is a dictionary of the form:
{'source-id': <uid of disk>} or
{'source-name': <name of the disk>}
Optional elements are:
'manage_if_in_use': True/False (default is False)
If set to True, a volume will be managed even if it is currently
attached to a host system.
"""
# Check that the reference is valid
vdisk = self._manage_input_check(ref)
# Check if the disk is in use, if we need to.
manage_if_in_use = ref.get('manage_if_in_use', False)
if (not manage_if_in_use and
self._helpers.is_vdisk_in_use(vdisk['name'])):
reason = _('The specified vdisk is mapped to a host.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return int(math.ceil(float(vdisk['capacity']) / units.Gi))
def unmanage(self, volume):
"""Remove the specified volume from Cinder management."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If we haven't gotten stats yet or 'refresh' is True,
        update the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
# Add CG capability to generic volume groups
def create_group(self, context, group):
"""Creates a group.
:param context: the context of the caller.
:param group: the group object.
:returns: model_update
"""
LOG.debug("Creating group.")
model_update = {'status': fields.GroupStatus.AVAILABLE}
for vol_type_id in group.volume_type_ids:
replication_type = self._get_volume_replicated_type(
context, None, vol_type_id)
if replication_type:
# An unsupported configuration
LOG.error('Unable to create group: create group with '
'replication volume type is not supported.')
model_update = {'status': fields.GroupStatus.ERROR}
return model_update
if utils.is_group_a_cg_snapshot_type(group):
return {'status': fields.GroupStatus.AVAILABLE}
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
def delete_group(self, context, group, volumes):
"""Deletes a group.
:param context: the context of the caller.
:param group: the group object.
:param volumes: a list of volume objects in the group.
:returns: model_update, volumes_model_update
"""
LOG.debug("Deleting group.")
if not utils.is_group_a_cg_snapshot_type(group):
# we'll rely on the generic group implementation if it is
# not a consistency group request.
raise NotImplementedError()
model_update = {'status': fields.GroupStatus.DELETED}
volumes_model_update = []
for volume in volumes:
try:
self._helpers.delete_vdisk(volume['name'], True)
volumes_model_update.append(
{'id': volume['id'], 'status': 'deleted'})
except exception.VolumeBackendAPIException as err:
model_update['status'] = (
fields.GroupStatus.ERROR_DELETING)
LOG.error("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s.",
{'vol': volume['name'], 'exception': err})
volumes_model_update.append(
{'id': volume['id'],
'status': fields.GroupStatus.ERROR_DELETING})
return model_update, volumes_model_update
def update_group(self, context, group, add_volumes=None,
remove_volumes=None):
"""Updates a group.
:param context: the context of the caller.
:param group: the group object.
:param add_volumes: a list of volume objects to be added.
:param remove_volumes: a list of volume objects to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
"""
LOG.debug("Updating group.")
if utils.is_group_a_cg_snapshot_type(group):
return None, None, None
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of Volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
LOG.debug('Enter: create_group_from_src.')
if not utils.is_group_a_cg_snapshot_type(group):
# we'll rely on the generic volume groups implementation if it is
# not a consistency group request.
raise NotImplementedError()
if group_snapshot and snapshots:
cg_name = 'cg-' + group_snapshot.id
sources = snapshots
elif source_group and source_vols:
cg_name = 'cg-' + source_group.id
sources = source_vols
else:
error_msg = _("create_group_from_src must be creating from a "
"group snapshot, or a source group.")
raise exception.InvalidInput(reason=error_msg)
LOG.debug('create_group_from_src: cg_name %(cg_name)s'
' %(sources)s', {'cg_name': cg_name, 'sources': sources})
self._helpers.create_fc_consistgrp(cg_name)
timeout = self.configuration.storwize_svc_flashcopy_timeout
model_update, snapshots_model = (
self._helpers.create_cg_from_source(group,
cg_name,
sources,
volumes,
self._state,
self.configuration,
timeout))
LOG.debug("Leave: create_group_from_src.")
return model_update, snapshots_model
def create_group_snapshot(self, context, group_snapshot, snapshots):
"""Creates a group_snapshot.
:param context: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be created.
:param snapshots: a list of Snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
if not utils.is_group_a_cg_snapshot_type(group_snapshot):
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
# Use group_snapshot id as cg name
cg_name = 'cg_snap-' + group_snapshot.id
# Create new cg as cg_snapshot
self._helpers.create_fc_consistgrp(cg_name)
timeout = self.configuration.storwize_svc_flashcopy_timeout
model_update, snapshots_model = (
self._helpers.run_consistgrp_snapshots(cg_name,
snapshots,
self._state,
self.configuration,
timeout))
return model_update, snapshots_model
def delete_group_snapshot(self, context, group_snapshot, snapshots):
"""Deletes a group_snapshot.
:param context: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be deleted.
:param snapshots: a list of snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
if not utils.is_group_a_cg_snapshot_type(group_snapshot):
# we'll rely on the generic group implementation if it is not a
# consistency group request.
raise NotImplementedError()
cgsnapshot_id = group_snapshot.id
cg_name = 'cg_snap-' + cgsnapshot_id
model_update, snapshots_model = (
self._helpers.delete_consistgrp_snapshots(cg_name,
snapshots))
return model_update, snapshots_model
def get_pool(self, volume):
attr = self._helpers.get_vdisk_attributes(volume['name'])
if attr is None:
msg = (_('get_pool: Failed to get attributes for volume '
'%s') % volume['name'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return attr['mdisk_grp_name']
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
data = {}
data['vendor_name'] = 'IBM'
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.protocol
data['pools'] = []
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = (backend_name or
self._state['system_name'])
data['pools'] = [self._build_pool_stats(pool)
for pool in
self._get_backend_pools()]
if self._replica_enabled:
data['replication'] = self._replica_enabled
data['replication_enabled'] = self._replica_enabled
data['replication_targets'] = self._get_replication_targets()
self._stats = data
def _build_pool_stats(self, pool):
"""Build pool status"""
QoS_support = True
pool_stats = {}
try:
pool_data = self._helpers.get_pool_attrs(pool)
if pool_data:
easy_tier = pool_data['easy_tier'] in ['on', 'auto']
total_capacity_gb = float(pool_data['capacity']) / units.Gi
free_capacity_gb = float(pool_data['free_capacity']) / units.Gi
allocated_capacity_gb = (float(pool_data['used_capacity']) /
units.Gi)
provisioned_capacity_gb = float(
pool_data['virtual_capacity']) / units.Gi
rsize = self.configuration.safe_get(
'storwize_svc_vol_rsize')
# rsize of -1 or 100 means fully allocate the mdisk
use_thick_provisioning = rsize == -1 or rsize == 100
over_sub_ratio = self.configuration.safe_get(
'max_over_subscription_ratio')
location_info = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
{'sys_id': self._state['system_id'],
'pool': pool_data['name']})
multiattach = (self.configuration.
storwize_svc_multihostmap_enabled)
pool_stats = {
'pool_name': pool_data['name'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'allocated_capacity_gb': allocated_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'compression_support': self._state['compression_enabled'],
'reserved_percentage':
self.configuration.reserved_percentage,
'QoS_support': QoS_support,
'consistencygroup_support': True,
'location_info': location_info,
'easytier_support': easy_tier,
'multiattach': multiattach,
'thin_provisioning_support': not use_thick_provisioning,
'thick_provisioning_support': use_thick_provisioning,
'max_over_subscription_ratio': over_sub_ratio,
'consistent_group_snapshot_enabled': True,
}
if self._replica_enabled:
pool_stats.update({
'replication_enabled': self._replica_enabled,
'replication_type': self._supported_replica_types,
'replication_targets': self._get_replication_targets(),
'replication_count': len(self._get_replication_targets())
})
except exception.VolumeBackendAPIException:
msg = _('Failed getting details for pool %s.') % pool
raise exception.VolumeBackendAPIException(data=msg)
return pool_stats
def _get_replication_targets(self):
return [self._replica_target['backend_id']]
def _manage_input_check(self, ref):
"""Verify the input of manage function."""
# Check that the reference is valid
if 'source-name' in ref:
manage_source = ref['source-name']
vdisk = self._helpers.get_vdisk_attributes(manage_source)
elif 'source-id' in ref:
manage_source = ref['source-id']
vdisk = self._helpers.vdisk_by_uid(manage_source)
else:
reason = _('Reference must contain source-id or '
'source-name element.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
if vdisk is None:
reason = (_('No vdisk with the UID specified by ref %s.')
% manage_source)
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return vdisk
| eharney/cinder | cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py | Python | apache-2.0 | 179,526 |
# Copyright (c) 2013 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import stat
import subprocess
import tempfile
from oslo_log import log as logging
from oslo_utils import netutils
import pexpect
import six
from trove.common import cfg
from trove.common.db import models
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common import utils as utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent.datastore import service
from trove.guestagent import pkg
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
packager = pkg.Package()
class CouchbaseApp(object):
"""
Handles installation and configuration of couchbase
on a trove instance.
"""
def __init__(self, status, state_change_wait_time=None):
"""
Sets default status and state_change_wait_time
"""
if state_change_wait_time:
self.state_change_wait_time = state_change_wait_time
else:
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status
def install_if_needed(self, packages):
"""
Install couchbase if needed, do nothing if it is already installed.
"""
LOG.info(_('Preparing Guest as Couchbase Server.'))
if not packager.pkg_is_installed(packages):
LOG.debug('Installing Couchbase.')
self._install_couchbase(packages)
def initial_setup(self):
self.ip_address = netutils.get_my_ipv4()
mount_point = CONF.couchbase.mount_point
try:
LOG.info(_('Couchbase Server change data dir path.'))
operating_system.chown(mount_point, 'couchbase', 'couchbase',
as_root=True)
pwd = CouchbaseRootAccess.get_password()
utils.execute_with_timeout(
(system.cmd_node_init
% {'data_path': mount_point,
'IP': self.ip_address,
'PWD': pwd}), shell=True)
operating_system.remove(system.INSTANCE_DATA_DIR, force=True,
as_root=True)
LOG.debug('Couchbase Server initialize cluster.')
utils.execute_with_timeout(
(system.cmd_cluster_init
% {'IP': self.ip_address, 'PWD': pwd}),
shell=True)
utils.execute_with_timeout(system.cmd_set_swappiness, shell=True)
utils.execute_with_timeout(system.cmd_update_sysctl_conf,
shell=True)
LOG.info(_('Couchbase Server initial setup finished.'))
except exception.ProcessExecutionError:
LOG.exception(_('Error performing initial Couchbase setup.'))
raise RuntimeError(_("Couchbase Server initial setup failed"))
def _install_couchbase(self, packages):
"""
Install the Couchbase Server.
"""
LOG.debug('Installing Couchbase Server. Creating %s' %
system.COUCHBASE_CONF_DIR)
operating_system.create_directory(system.COUCHBASE_CONF_DIR,
as_root=True)
pkg_opts = {}
packager.pkg_install(packages, pkg_opts, system.TIME_OUT)
self.start_db()
LOG.debug('Finished installing Couchbase Server.')
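    # The service-control methods below delegate to the status manager,
    # which handles init-system specifics for the candidate couchbase
    # services.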
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
self.status.stop_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time,
disable_on_boot=do_not_start_on_reboot, update_db=update_db)
def restart(self):
self.status.restart_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time)
def start_db(self, update_db=False):
self.status.start_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time,
enable_on_boot=True, update_db=update_db)
def enable_root(self, root_password=None):
return CouchbaseRootAccess.enable_root(root_password)
def start_db_with_conf_changes(self, config_contents):
LOG.info(_("Starting Couchbase with configuration changes.\n"
"Configuration contents:\n %s.") % config_contents)
if self.status.is_running:
LOG.error(_("Cannot start Couchbase with configuration changes. "
"Couchbase state == %s.") % self.status)
raise RuntimeError(_("Couchbase is not stopped."))
self._write_config(config_contents)
self.start_db(True)
def reset_configuration(self, configuration):
config_contents = configuration['config_contents']
LOG.debug("Resetting configuration.")
self._write_config(config_contents)
def _write_config(self, config_contents):
"""
Update contents of Couchbase configuration file
"""
return
class CouchbaseAppStatus(service.BaseDbStatus):
"""
Handles all of the status updating for the couchbase guest agent.
"""
def _get_actual_db_status(self):
self.ip_address = netutils.get_my_ipv4()
pwd = None
try:
pwd = CouchbaseRootAccess.get_password()
return self._get_status_from_couchbase(pwd)
except exception.ProcessExecutionError:
# log the exception, but continue with native config approach
LOG.exception(_("Error getting the Couchbase status."))
try:
out, err = utils.execute_with_timeout(
system.cmd_get_password_from_config, shell=True)
except exception.ProcessExecutionError:
LOG.exception(_("Error getting the root password from the "
"native Couchbase config file."))
return rd_instance.ServiceStatuses.SHUTDOWN
config_pwd = out.strip() if out is not None else None
if not config_pwd or config_pwd == pwd:
LOG.debug("The root password from the native Couchbase config "
"file is either empty or already matches the "
"stored value.")
return rd_instance.ServiceStatuses.SHUTDOWN
try:
status = self._get_status_from_couchbase(config_pwd)
except exception.ProcessExecutionError:
LOG.exception(_("Error getting Couchbase status using the "
"password parsed from the native Couchbase "
"config file."))
return rd_instance.ServiceStatuses.SHUTDOWN
# if the parsed root password worked, update the stored value to
# avoid having to consult/parse the couchbase config file again.
LOG.debug("Updating the stored value for the Couchbase "
"root password.")
CouchbaseRootAccess().write_password_to_file(config_pwd)
return status
def _get_status_from_couchbase(self, pwd):
out, err = utils.execute_with_timeout(
(system.cmd_couchbase_status %
{'IP': self.ip_address, 'PWD': pwd}),
shell=True)
server_stats = json.loads(out)
if not err and server_stats["clusterMembership"] == "active":
return rd_instance.ServiceStatuses.RUNNING
else:
return rd_instance.ServiceStatuses.SHUTDOWN
def cleanup_stalled_db_services(self):
utils.execute_with_timeout(system.cmd_kill)
class CouchbaseRootAccess(object):
@classmethod
def enable_root(cls, root_password=None):
user = models.DatastoreUser.root(password=root_password)
if root_password:
CouchbaseRootAccess().write_password_to_file(root_password)
else:
CouchbaseRootAccess().set_password(user.password)
return user.serialize()
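    # Note: a caller-supplied password is only persisted to the local password
    # file, while a generated one is first applied to Couchbase through
    # set_password() (which persists it as well).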
def set_password(self, root_password):
self.ip_address = netutils.get_my_ipv4()
child = pexpect.spawn(system.cmd_reset_pwd % {'IP': self.ip_address})
try:
child.expect('.*password.*')
child.sendline(root_password)
child.expect('.*(yes/no).*')
child.sendline('yes')
child.expect('.*successfully.*')
except pexpect.TIMEOUT:
child.delayafterclose = 1
child.delayafterterminate = 1
try:
child.close(force=True)
except pexpect.ExceptionPexpect:
# Close fails to terminate a sudo process on some OSes.
subprocess.call(['sudo', 'kill', str(child.pid)])
self.write_password_to_file(root_password)
def write_password_to_file(self, root_password):
operating_system.create_directory(system.COUCHBASE_CONF_DIR,
as_root=True)
try:
tempfd, tempname = tempfile.mkstemp()
os.fchmod(tempfd, stat.S_IRUSR | stat.S_IWUSR)
if isinstance(root_password, six.text_type):
root_password = root_password.encode('utf-8')
os.write(tempfd, root_password)
os.fchmod(tempfd, stat.S_IRUSR)
os.close(tempfd)
except OSError as err:
message = _("An error occurred in saving password "
"(%(errno)s). %(strerror)s.") % {
"errno": err.errno,
"strerror": err.strerror}
LOG.exception(message)
raise RuntimeError(message)
operating_system.move(tempname, system.pwd_file, as_root=True)
@staticmethod
def get_password():
pwd = "password"
if os.path.exists(system.pwd_file):
with open(system.pwd_file) as file:
pwd = file.readline().strip()
return pwd
| hplustree/trove | trove/guestagent/datastore/experimental/couchbase/service.py | Python | apache-2.0 | 10,461 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
DEV SCRIPT
This is a hacky script meant to be run mostly automatically with the option of
interactions.
dev.py is supposed to be a developer non-gui interface into the IBEIS software.
dev.py runs experiments and serves as a scratchpad for new code and quick scripts
TODO:
Test to find typical "good" descriptor scores. Find nearest neighbors and
    normalizers for each feature in a query image. Based on ground truth and
    spatial verification, mark feature matches as true or false. Visualize the
    feature scores of good matches vs bad matches. Lowe shows the PDF of
    correct matches and the PDF of incorrect matches. We should also show the
same thing.
Done:
    Cache nearest neighbors so different parameters later in the pipeline don't
take freaking forever.
CommandLine:
python dev.py --wshow -t query --db PZ_MTEST --qaid 110 --cfg score_method:nsum prescore_method:nsum
python dev.py --wshow -t query --db PZ_MTEST --qaid 110
python dev.py --wshow -t query --db PZ_MTEST --qaid 110 --cfg fg_on=True
python dev.py --wshow -t query --db PZ_MTEST --qaid 110 --cfg
"""
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
import multiprocessing
import sys
#from ibeis._devscript import devcmd, DEVCMD_FUNCTIONS, DEVPRECMD_FUNCTIONS, DEVCMD_FUNCTIONS2, devcmd2
from ibeis._devscript import devcmd, devprecmd, DEVCMD_FUNCTIONS, DEVPRECMD_FUNCTIONS
import utool as ut
from utool.util_six import get_funcname
import utool
#from ibeis.algo.hots import smk
import plottool as pt
import ibeis
if __name__ == '__main__':
multiprocessing.freeze_support()
ibeis._preload()
#from ibeis.all_imports import * # NOQA
#utool.util_importer.dynamic_import(__name__, ('_devcmds_ibeis', None),
# developing=True)
from ibeis._devcmds_ibeis import * # NOQA
# IBEIS
from ibeis.init import main_helpers # NOQA
from ibeis.other import dbinfo # NOQA
from ibeis.expt import experiment_configs # NOQA
from ibeis.expt import harness # NOQA
from ibeis import params # NOQA
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[dev]')
#------------------
# DEV DEVELOPMENT
#------------------
# This is where you write all of the functions that will become pristine
# and then go in _devcmds_ibeis.py
"""
./dev.py -e print_results --db PZ_Master1 -a varysize_pzm:dper_name=[1,2],dsize=1500 -t candidacy_k:K=1 --intersect_hack
./dev.py -e draw_rank_cdf -t baseline -a baseline --show --db PZ_Master1
./dev.py -e get_dbinfo --db PZ_Master1 --aid_list=baseline
./dev.py -e get_dbinfo --db PZ_MTEST
./dev.py -e get_dbinfo --db PZ_Master1 --aid_list=baseline --hackshow-unixtime --show
./dev.py -e get_dbinfo --db PZ_Master1 --hackshow-unixtime --show
"""
# Quick interface into specific registered doctests
REGISTERED_DOCTEST_EXPERIMENTS = [
('ibeis.expt.experiment_drawing', 'draw_case_timedeltas', ['timedelta_hist', 'timedelta_pie']),
('ibeis.expt.experiment_drawing', 'draw_match_cases', ['draw_cases', 'cases']),
('ibeis.expt.experiment_drawing', 'draw_casetag_hist', ['taghist']),
('ibeis.expt.old_storage', 'draw_results'),
('ibeis.expt.experiment_drawing', 'draw_rank_cdf', ['rank_cdf']),
('ibeis.other.dbinfo', 'get_dbinfo'),
('ibeis.other.dbinfo', 'latex_dbstats'),
('ibeis.other.dbinfo', 'show_image_time_distributions', ['db_time_hist']),
('ibeis.expt.experiment_drawing', 'draw_rank_surface', ['rank_surface']),
('ibeis.expt.experiment_helpers', 'get_annotcfg_list', ['print_acfg']),
('ibeis.expt.experiment_printres', 'print_results', ['printres', 'print']),
('ibeis.expt.experiment_printres', 'print_latexsum', ['latexsum']),
('ibeis.dbio.export_subset', 'export_annots'),
('ibeis.expt.experiment_drawing', 'draw_annot_scoresep', ['scores', 'scores_good', 'scores_all']),
]
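# Each entry above is (module_name, function_name[, aliases]); the aliases,
# plus the function name itself, become precommand names registered by
# _register_doctest_precmds() below.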
def _exec_doctest_func(modname, funcname):
module = ut.import_modname(modname)
func = module.__dict__[funcname]
testsrc = ut.get_doctest_examples(func)[0][0]
exec(testsrc, globals(), locals())
def _register_doctest_precmds():
from functools import partial
for tup in REGISTERED_DOCTEST_EXPERIMENTS:
modname, funcname = tup[:2]
aliases = tup[2] if len(tup) == 3 else []
aliases += [funcname]
_doctest_func = partial(_exec_doctest_func, modname, funcname)
devprecmd(*aliases)(_doctest_func)
_register_doctest_precmds()
@devcmd('tune', 'autotune')
def tune_flann(ibs, qaid_list, daid_list=None):
r"""
CommandLine:
python dev.py -t tune --db PZ_MTEST
python dev.py -t tune --db GZ_ALL
python dev.py -t tune --db GIR_Tanya
python dev.py -t tune --db PZ_Master0
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis._devscript import * # NOQA
>>> # build test data
>>> # execute function
>>> result = func_wrapper()
>>> # verify results
>>> print(result)
"""
all_aids = ibs.get_valid_aids()
vecs = np.vstack(ibs.get_annot_vecs(all_aids))
    print('Tuning flann for species={species}:'.format(species=ibs.get_database_species(all_aids)))
tuned_params = vt.tune_flann(vecs,
target_precision=.98,
build_weight=0.05,
memory_weight=0.00,
sample_fraction=0.1)
tuned_params
#tuned_params2 = vt.tune_flann(vecs,
# target_precision=.90,
# build_weight=0.001,
# memory_weight=0.00,
# sample_fraction=0.5)
#tuned_params2
@devcmd('incremental', 'inc')
def incremental_test(ibs, qaid_list, daid_list=None):
"""
Adds / queries new images one at a time to a clean test database.
Tests the complete system.
Args:
        ibs (IBEISController) : IBEIS controller object
qaid_list (list) : list of annotation-ids to query
CommandLine:
python dev.py -t inc --db PZ_MTEST --qaid 1:30:3 --cmd
python dev.py --db PZ_MTEST --allgt --cmd
python dev.py --db PZ_MTEST --allgt -t inc
python dev.py -t inc --db PZ_MTEST --qaid 1:30:3 --cmd
python dev.py -t inc --db GZ_ALL --ninit 100 --noqcache
python dev.py -t inc --db PZ_MTEST --noqcache --interactive-after 40
python dev.py -t inc --db PZ_Master0 --noqcache --interactive-after 10000 --ninit 400
Example:
>>> from ibeis.all_imports import * # NOQA
>>> ibs = ibeis.opendb('PZ_MTEST')
>>> qaid_list = ibs.get_valid_aids()
>>> daid_list = None
"""
from ibeis.algo.hots import automated_matcher
ibs1 = ibs
num_initial = ut.get_argval('--ninit', type_=int, default=0)
return automated_matcher.incremental_test(ibs1, num_initial)
@devcmd('inspect')
def inspect_matches(ibs, qaid_list, daid_list):
print('<inspect_matches>')
from ibeis.gui import inspect_gui
return inspect_gui.test_review_widget(ibs, qaid_list, daid_list)
def get_ibslist(ibs):
print('[dev] get_ibslist')
ibs_GV = ibs
ibs_RI = ibs.clone_handle(nogravity_hack=True)
ibs_RIW = ibs.clone_handle(nogravity_hack=True, gravity_weighting=True)
ibs_list = [ibs_GV, ibs_RI, ibs_RIW]
return ibs_list
@devcmd('gv_scores')
def compgrav_draw_score_sep(ibs, qaid_list, daid_list):
print('[dev] compgrav_draw_score_sep')
ibs_list = get_ibslist(ibs)
for ibs_ in ibs_list:
draw_annot_scoresep(ibs_, qaid_list)
#--------------------
# RUN DEV EXPERIMENTS
#--------------------
#def run_registered_precmd(precmd_name):
# # Very hacky way to run just a single registered precmd
# for (func_aliases, func) in DEVPRECMD_FUNCTIONS:
# for aliases in func_aliases:
# ret = precmd_name in input_precmd_list
# if ret:
# func()
def run_devprecmds():
"""
Looks for pre-tests specified with the -t flag and runs them
"""
#input_precmd_list = params.args.tests[:]
input_precmd_list = ut.get_argval('-e', type_=list, default=[])
valid_precmd_list = []
def intest(*args, **kwargs):
for precmd_name in args:
valid_precmd_list.append(precmd_name)
ret = precmd_name in input_precmd_list
ret2 = precmd_name in params.unknown # Let unparsed args count towards tests
if ret or ret2:
if ret:
input_precmd_list.remove(precmd_name)
else:
ret = ret2
print('+===================')
print('| running precmd = %s' % (args,))
return ret
return False
ut.start_logging(appname='ibeis')
# Implicit (decorated) test functions
for (func_aliases, func) in DEVPRECMD_FUNCTIONS:
if intest(*func_aliases):
#with utool.Indenter('[dev.' + get_funcname(func) + ']'):
func()
print('Exiting after first precommand')
sys.exit(1)
if len(input_precmd_list) > 0:
raise AssertionError('Unhandled tests: ' + repr(input_precmd_list))
#@utool.indent_func('[dev]')
def run_devcmds(ibs, qaid_list, daid_list, acfg=None):
"""
This function runs tests passed in with the -t flag
"""
print('\n')
#print('[dev] run_devcmds')
print('==========================')
print('[DEV] RUN EXPERIMENTS %s' % ibs.get_dbname())
print('==========================')
input_test_list = params.args.tests[:]
print('input_test_list = %s' % (ut.list_str(input_test_list),))
# fnum = 1
valid_test_list = [] # build list for printing in case of failure
valid_test_helpstr_list = [] # for printing
def mark_test_handled(testname):
input_test_list.remove(testname)
def intest(*args, **kwargs):
helpstr = kwargs.get('help', '')
valid_test_helpstr_list.append(' -t ' + ', '.join(args) + helpstr)
for testname in args:
valid_test_list.append(testname)
ret = testname in input_test_list
ret2 = testname in params.unknown # Let unparsed args count towards tests
if ret or ret2:
if ret:
mark_test_handled(testname)
else:
ret = ret2
print('\n+===================')
print(' [dev] running testname = %s' % (args,))
print('+-------------------\n')
return ret
return False
valid_test_helpstr_list.append(' # --- Simple Tests ---')
# Explicit (simple) test functions
if intest('export'):
export(ibs)
if intest('dbinfo'):
dbinfo.get_dbinfo(ibs)
if intest('headers', 'schema'):
ibs.db.print_schema()
if intest('info'):
print(ibs.get_infostr())
if intest('printcfg'):
printcfg(ibs)
if intest('tables'):
ibs.print_tables()
if intest('imgtbl'):
ibs.print_image_table()
valid_test_helpstr_list.append(' # --- Decor Tests ---')
locals_ = locals()
# Implicit (decorated) test functions
for (func_aliases, func) in DEVCMD_FUNCTIONS:
if intest(*func_aliases):
funcname = get_funcname(func)
#with utool.Indenter('[dev.' + funcname + ']'):
with utool.Timer(funcname):
#print('[dev] qid_list=%r' % (qaid_list,))
# FIXME: , daid_list
if len(ut.get_func_argspec(func).args) == 0:
ret = func()
else:
ret = func(ibs, qaid_list, daid_list)
# Add variables returned by the function to the
# "local scope" (the exec scop)
if hasattr(ret, 'items'):
for key, val in ret.items():
if utool.is_valid_varname(key):
locals_[key] = val
valid_test_helpstr_list.append(' # --- Config Tests ---')
# ------
# RUNS EXPERIMENT HARNESS OVER VALID TESTNAMES SPECIFIED WITH -t
# ------
# Config driven test functions
# Allow any testcfg to be in tests like: vsone_1 or vsmany_3
test_cfg_name_list = []
for test_cfg_name in experiment_configs.TEST_NAMES:
if intest(test_cfg_name):
test_cfg_name_list.append(test_cfg_name)
# Hack to allow for very customized harness tests
for testname in input_test_list[:]:
if testname.startswith('custom:'):
test_cfg_name_list.append(testname)
mark_test_handled(testname)
if len(test_cfg_name_list):
fnum = pt.next_fnum()
# Run Experiments
# backwards compatibility yo
acfgstr_name_list = {'OVERRIDE_HACK': (qaid_list, daid_list)}
assert False, 'This way of running tests no longer works. It may be fixed in the future'
#acfg
harness.test_configurations(ibs, acfgstr_name_list, test_cfg_name_list)
valid_test_helpstr_list.append(' # --- Help ---')
if intest('help'):
print('valid tests are:')
print('\n'.join(valid_test_helpstr_list))
return locals_
if len(input_test_list) > 0:
print('valid tests are: \n')
print('\n'.join(valid_test_list))
raise Exception('Unknown tests: %r ' % input_test_list)
return locals_
#-------------------
# CUSTOM DEV FUNCS
#-------------------
#------------------
# DEV MAIN
#------------------
def dev_snippets(main_locals):
""" Common variables for convineince when interacting with IPython """
print('[dev] dev_snippets')
species = 'zebra_grevys'
quick = True
fnum = 1
# Get reference to IBEIS Controller
ibs = main_locals['ibs']
if 'back' in main_locals:
# Get reference to GUI Backend
back = main_locals['back']
if back is not None:
# Get reference to GUI Frontend
front = getattr(back, 'front', None)
ibswgt = front
view = ibswgt.views['images']
model = ibswgt.models['names_tree']
selection_model = view.selectionModel()
if ibs is not None:
#ibs.dump_tables()
annots = ibs.annots()
images = ibs.images()
aid_list = ibs.get_valid_aids()
gid_list = ibs.get_valid_gids()
#nid_list = ibs.get_valid_nids()
#valid_nid_list = ibs.get_annot_name_rowids(aid_list)
#valid_aid_names = ibs.get_annot_names(aid_list)
#valid_aid_gtrues = ibs.get_annot_groundtruth(aid_list)
return locals()
def get_sortbystr(str_list, key_list, strlbl=None, keylbl=None):
sortx = key_list.argsort()
ndigits = max(len(str(key_list.max())), 0 if keylbl is None else len(keylbl))
keyfmt = '%' + str(ndigits) + 'd'
if keylbl is not None:
header = keylbl + ' --- ' + strlbl
else:
header = None
sorted_strs = ([(keyfmt % key + ' --- ' + str_) for str_, key in zip(str_list[sortx], key_list[sortx])])
def boxjoin(list_, header=None):
topline = '+----------'
botline = 'L__________'
boxlines = []
boxlines.append(topline + '\n')
if header is not None:
boxlines.append(header + '\n')
boxlines.append(topline)
body = utool.indentjoin(list_, '\n | ')
boxlines.append(body + '\n ')
boxlines.append(botline + '\n')
return ''.join(boxlines)
return boxjoin(sorted_strs, header)
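# Illustrative call (str_list and key_list are assumed to be numpy arrays so
# that argsort/max and fancy indexing work):
#   get_sortbystr(np.array(['b', 'a']), np.array([2, 1]), 'str', 'key')
# returns a boxed listing of the strings sorted by their keys.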
@devcmd('test_feats')
def test_feats(ibs, qaid_list, daid_list=None):
"""
test_feats shows features using several different parameters
Args:
ibs (IBEISController):
        qaid_list (list): list of query annotation ids
CommandLine:
python dev.py -t test_feats --db PZ_MTEST --all --qindex 0 --show -w
Example:
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> qaid_list = [1]
"""
from ibeis import viz
from ibeis.expt import experiment_configs
import utool as ut
NUM_PASSES = 1 if not utool.get_argflag('--show') else 2
varyparams_list = [experiment_configs.featparams]
def test_featcfg_combo(ibs, aid, alldictcomb, count, nKpts_list, cfgstr_list):
for dict_ in ut.progiter(alldictcomb, lbl='FeatCFG Combo: '):
# Set ibs parameters to the current config
for key_, val_ in six.iteritems(dict_):
ibs.cfg.feat_cfg[key_] = val_
cfgstr_ = ibs.cfg.feat_cfg.get_cfgstr()
if count == 0:
# On first run just record info
kpts = ibs.get_annot_kpts(aid)
nKpts_list.append(len(kpts))
cfgstr_list.append(cfgstr_)
if count == 1:
kpts = ibs.get_annot_kpts(aid)
# If second run happens display info
cfgpackstr = utool.packstr(cfgstr_, textwidth=80,
breakchars=',', newline_prefix='',
break_words=False, wordsep=',')
title_suffix = (' len(kpts) = %r \n' % len(kpts)) + cfgpackstr
viz.show_chip(ibs, aid, fnum=pt.next_fnum(),
title_suffix=title_suffix, darken=.8,
ell_linewidth=2, ell_alpha=.6)
alldictcomb = utool.flatten(map(utool.all_dict_combinations, varyparams_list))
for count in range(NUM_PASSES):
nKpts_list = []
cfgstr_list = []
for aid in qaid_list:
test_featcfg_combo(ibs, aid, alldictcomb, count, nKpts_list, cfgstr_list)
#for dict_ in alldictcomb:
if count == 0:
nKpts_list = np.array(nKpts_list)
cfgstr_list = np.array(cfgstr_list)
print(get_sortbystr(cfgstr_list, nKpts_list, 'cfg', 'nKpts'))
def devfunc(ibs, qaid_list):
""" Function for developing something """
print('[dev] devfunc')
import ibeis # NOQA
from ibeis.algo import Config # NOQA
#from ibeis.algo.Config import * # NOQA
feat_cfg = Config.FeatureConfig()
#feat_cfg.printme3()
print('\ncfgstr..')
print(feat_cfg.get_cfgstr())
print(utool.dict_str(feat_cfg.get_hesaff_params()))
from ibeis import viz
aid = 1
ibs.cfg.feat_cfg.threshold = 16.0 / 3.0
kpts = ibs.get_annot_kpts(aid)
print('len(kpts) = %r' % len(kpts))
from ibeis.expt import experiment_configs
#varyparams_list = [
# #{
# # 'threshold': [16.0 / 3.0, 32.0 / 3.0], # 8.0 / 3.0
# # 'numberOfScales': [3, 2, 1],
# # 'maxIterations': [16, 32],
# # 'convergenceThreshold': [.05, .1],
# # 'initialSigma': [1.6, 1.2],
# #},
# {
# #'threshold': [16.0 / 3.0, 32.0 / 3.0], # 8.0 / 3.0
# 'numberOfScales': [1],
# #'maxIterations': [16, 32],
# #'convergenceThreshold': [.05, .1],
# #'initialSigma': [6.0, 3.0, 2.0, 1.6, 1.2, 1.1],
# 'initialSigma': [3.2, 1.6, 0.8],
# 'edgeEigenValueRatio': [10, 5, 3],
# },
#]
varyparams_list = [experiment_configs.featparams]
# low threshold = more keypoints
# low initialSigma = more keypoints
nKpts_list = []
cfgstr_list = []
    alldictcomb = utool.flatten([utool.util_dict.all_dict_combinations(varyparams) for varyparams in varyparams_list])
NUM_PASSES = 1 if not utool.get_argflag('--show') else 2
for count in range(NUM_PASSES):
for aid in qaid_list:
#for dict_ in utool.progiter(alldictcomb, lbl='feature param comb: ', total=len(alldictcomb)):
for dict_ in alldictcomb:
for key_, val_ in six.iteritems(dict_):
ibs.cfg.feat_cfg[key_] = val_
cfgstr_ = ibs.cfg.feat_cfg.get_cfgstr()
cfgstr = utool.packstr(cfgstr_, textwidth=80,
breakchars=',', newline_prefix='', break_words=False, wordsep=',')
if count == 0:
kpts = ibs.get_annot_kpts(aid)
#print('___________')
#print('len(kpts) = %r' % len(kpts))
#print(cfgstr)
nKpts_list.append(len(kpts))
cfgstr_list.append(cfgstr_)
if count == 1:
title_suffix = (' len(kpts) = %r \n' % len(kpts)) + cfgstr
viz.show_chip(ibs, aid, fnum=pt.next_fnum(),
title_suffix=title_suffix, darken=.4,
ell_linewidth=2, ell_alpha=.8)
if count == 0:
nKpts_list = np.array(nKpts_list)
cfgstr_list = np.array(cfgstr_list)
print(get_sortbystr(cfgstr_list, nKpts_list, 'cfg', 'nKpts'))
pt.present()
locals_ = locals()
return locals_
def run_dev(ibs):
"""
main developer command
CommandLine:
python dev.py --db PZ_Master0 --controlled --print-rankhist
"""
print('[dev] --- RUN DEV ---')
# Get reference to controller
if ibs is not None:
# Get aids marked as test cases
if not ut.get_argflag('--no-expanded-aids'):
ibs, qaid_list, daid_list = main_helpers.testdata_expanded_aids(ibs=ibs)
#qaid_list = main_helpers.get_test_qaids(ibs, default_qaids=[1])
#daid_list = main_helpers.get_test_daids(ibs, default_daids='all', qaid_list=qaid_list)
            print('[run_dev] Test Annotations:')
#print('[run_dev] * qaid_list = %s' % ut.packstr(qaid_list, 80, nlprefix='[run_dev] '))
else:
qaid_list = []
daid_list = []
try:
assert len(qaid_list) > 0, 'assert!'
assert len(daid_list) > 0, 'daid_list!'
except AssertionError as ex:
utool.printex(ex, 'len(qaid_list) = 0', iswarning=True)
utool.printex(ex, 'or len(daid_list) = 0', iswarning=True)
#qaid_list = ibs.get_valid_aids()[0]
if len(qaid_list) > 0 or True:
# Run the dev experiments
expt_locals = run_devcmds(ibs, qaid_list, daid_list)
# Add experiment locals to local namespace
execstr_locals = utool.execstr_dict(expt_locals, 'expt_locals')
exec(execstr_locals)
if ut.get_argflag('--devmode'):
# Execute the dev-func and add to local namespace
devfunc_locals = devfunc(ibs, qaid_list)
exec(utool.execstr_dict(devfunc_locals, 'devfunc_locals'))
return locals()
#-------------
# EXAMPLE TEXT
#-------------
EXAMPLE_TEXT = '''
### DOWNLOAD A TEST DATABASE (IF REQUIRED) ###
python dev.py --t mtest
python dev.py --t nauts
./resetdbs.sh # FIXME
python ibeis/dbio/ingest_database.py <- see module for usage
### LIST AVAILABLE DATABASES ###
python dev.py -t list_dbs
### CHOOSE A DATABASE ###
python dev.py --db PZ_Master0 --setdb
python dev.py --db GZ_ALL --setdb
python dev.py --db PZ_MTEST --setdb
python dev.py --db NAUT_test --setdb
python dev.py --db testdb1 --setdb
python dev.py --db seals2 --setdb
### DATABASE INFORMATION ###
python dev.py -t dbinfo
### EXPERIMENTS ###
python dev.py --allgt -t best
python dev.py --allgt -t vsone
python dev.py --allgt -t vsmany
python dev.py --allgt -t nsum
# Newstyle experiments
# commmand # annot settings # test settings
python -m ibeis.dev -a default:qaids=allgt -t best
### COMPARE TWO CONFIGS ###
python dev.py --allgt -t nsum vsmany vsone
python dev.py --allgt -t nsum vsmany
python dev.py --allgt -t nsum vsmany vsone smk
### VARY DATABASE SIZE
python -m ibeis.dev -a default:qaids=allgt,dsize=100,qper_name=1,qmin_per_name=1 -t default --db PZ_MTEST
python -m ibeis.dev -a candidacy:qsize=10,dsize=100 -t default --db PZ_MTEST --verbtd
### VIZ A SET OF MATCHES ###
python dev.py --db PZ_MTEST -t query --qaid 72 110 -w
#python dev.py --allgt -t vsone vsmany
#python dev.py --allgt -t vsone --vz --vh
### RUN A SMALL AMOUNT OF VSONE TESTS ###
python dev.py --allgt -t vsone --qindex 0:1 --vz --vh --vf --noqcache
python dev.py --allgt --qindex 0:20 --
### DUMP ANALYSIS FIGURES TO DISK ###
python dev.py --allgt -t best --vf --vz --fig-dname query_analysis_easy
python dev.py --allgt -t best --vf --vh --fig-dname query_analysis_hard
python dev.py --allgt -t best --vf --va --fig-dname query_analysis_all
python dev.py --db PZ_MTEST --set-aids-as-hard 27 28 44 49 50 51 53 54 66 72 89 97 110
python dev.py --hard -t best vsone nsum
>>>
'''
#L______________
#def run_devmain2():
# input_test_list = ut.get_argval(('--tests', '-t',), type_=list, default=[])[:]
# print('input_test_list = %s' % (ut.list_str(input_test_list),))
# # fnum = 1
# valid_test_list = [] # build list for printing in case of failure
# valid_test_helpstr_list = [] # for printing
# def mark_test_handled(testname):
# input_test_list.remove(testname)
# def intest(*args, **kwargs):
# helpstr = kwargs.get('help', '')
# valid_test_helpstr_list.append(' -t ' + ', '.join(args) + helpstr)
# for testname in args:
# valid_test_list.append(testname)
# ret = testname in input_test_list
# ret2 = testname in params.unknown # Let unparsed args count towards tests
# if ret or ret2:
# if ret:
# mark_test_handled(testname)
# else:
# ret = ret2
# print('\n+===================')
# print(' [dev2] running testname = %s' % (args,))
# print('+-------------------\n')
# return ret
# return False
# anynewhit = False
# # Implicit (decorated) test functions
# print('DEVCMD_FUNCTIONS2 = %r' % (DEVCMD_FUNCTIONS2,))
# for (func_aliases, func) in DEVCMD_FUNCTIONS2:
# if intest(*func_aliases):
# funcname = get_funcname(func)
# with utool.Timer(funcname):
# if len(ut.get_func_argspec(func).args) == 0:
# func()
# anynewhit = True
# else:
# func(ibs, qaid_list, daid_list)
# anynewhit = True
# if anynewhit:
# sys.exit(1)
def devmain():
"""
The Developer Script
A command line interface to almost everything
-w # wait / show the gui / figures are visible
--cmd # ipython shell to play with variables
-t # run list of tests
Examples:
"""
helpstr = ut.codeblock(
'''
Dev is meant to be run as an interactive script.
        The dev.py script runs any test you register with @devcmd in any combination
of configurations specified by a Config object.
Dev caches information in order to get quicker results. # FIXME: Provide quicker results # FIXME: len(line)
''')
INTRO_TITLE = 'The dev.py Script'
#INTRO_TEXT = ''.join((ut.bubbletext(INTRO_TITLE, font='cybermedium'), helpstr))
INTRO_TEXT = ut.bubbletext(INTRO_TITLE, font='cybermedium')
INTRO_STR = ut.msgblock('dev.py Intro', INTRO_TEXT)
EXAMPLE_STR = ut.msgblock('dev.py Examples', ut.codeblock(EXAMPLE_TEXT))
if ut.NOT_QUIET:
print(INTRO_STR)
if ut.get_argflag(('--help', '--verbose')):
print(EXAMPLE_STR)
CMD = ut.get_argflag('--cmd')
NOGUI = not ut.get_argflag('--gui')
if len(sys.argv) == 1:
print('Run dev.py with arguments!')
sys.exit(1)
# Run Precommands
run_devprecmds()
#
#
# Run IBEIS Main, create controller, and possibly gui
print('++dev')
main_locals = ibeis.main(gui=ut.get_argflag('--gui'))
#utool.set_process_title('IBEIS_dev')
#
#
# Load snippet variables
SNIPPITS = True and CMD
if SNIPPITS:
snippet_locals = dev_snippets(main_locals)
snippet_execstr = utool.execstr_dict(snippet_locals, 'snippet_locals')
exec(snippet_execstr)
#
#
# Development code
RUN_DEV = True # RUN_DEV = '__IPYTHON__' in vars()
if RUN_DEV:
dev_locals = run_dev(main_locals['ibs'])
dev_execstr = utool.execstr_dict(dev_locals, 'dev_locals')
exec(dev_execstr)
command = ut.get_argval('--eval', type_=str, default=None)
if command is not None:
result = eval(command, globals(), locals())
print('result = %r' % (result,))
#ibs.search_annot_notes('360')
#
#
# Main Loop (IPython interaction, or some exec loop)
#if '--nopresent' not in sys.argv or '--noshow' in sys.argv:
ut.show_if_requested()
if ut.get_argflag(('--show', '--wshow')):
pt.present()
main_execstr = ibeis.main_loop(main_locals, ipy=(NOGUI or CMD))
exec(main_execstr)
#
#
# Memory profile
if ut.get_argflag('--memprof'):
utool.print_resource_usage()
utool.memory_profile()
print('exiting dev')
if __name__ == '__main__':
multiprocessing.freeze_support() # for win32
    # HACK to run tests without specifying ibs first
#run_devmain2()
devmain()
r"""
CurrentExperiments:
# Full best settings run
./dev.py -t custom --db PZ_Master0 --allgt --species=zebra_plains
# Full best settings run without spatial verification
./dev.py -t custom:sv_on=False --db PZ_Master0 --allgt --species=zebra_plains
./dev.py -t custom --db PZ_Master0 --allgt --species=zebra_plains --hs
# Check to see if new spatial verification helps
./dev.py -t custom:full_homog_checks=False custom:full_homog_checks=True --db PZ_Master0 --allgt --species=zebra_plains
# Yay it does
# Look for how many false negatives are in the bottom batch
./dev.py -t custom --db PZ_MTEST --species=zebra_plains --print-rankhist
./dev.py -t custom --db PZ_MTEST --controlled --print-rankhist
./dev.py -t custom --db PZ_Master0 --controlled --print-rankhist
./dev.py -t \
custom \
custom:rotation_invariance=True,affine_invariance=False \
custom:rotation_invariance=True,augment_queryside_hack=True \
--db PZ_Master0 --controlled --print-rankhist --print-bestcfg
./dev.py -t \
custom:rotation_invariance=True,affine_invariance=False \
custom:rotation_invariance=True,augment_queryside_hack=True \
--db NNP_Master3 --controlled --print-rankhist --print-bestcfg
ElephantEarExperiments
--show --vh
./dev.py -t custom:affine_invariance=True --db Elephants_drop1_ears --allgt --print-rankhist
./dev.py -t custom:affine_invariance=False --db Elephants_drop1_ears --allgt --print-rankhist
./dev.py -t custom:affine_invariance=False,histeq=True --db Elephants_drop1_ears --allgt --print-rankhist
./dev.py -t custom:affine_invariance=False,adapteq=True --db Elephants_drop1_ears --allgt --print-rankhist
./dev.py -t custom:affine_invariance=False,fg_on=False --db Elephants_drop1_ears --allgt
./dev.py -t custom:affine_invariance=False,histeq=True,fg_on=False --db Elephants_drop1_ears --allgt
./dev.py -t custom:affine_invariance=False,adapteq=True,fg_on=False --db Elephants_drop1_ears --allgt
./dev.py -t elph --db Elephants_drop1_ears --allgt
Sift vs Siam Experiments
./dev.py -t custom:feat_type=hesaff+siam128,algorithm=linear custom:feat_type=hesaff+sift --db testdb1 --allgt
./dev.py -t custom:feat_type=hesaff+siam128,algorithm=linear custom:feat_type=hesaff+sift --db PZ_MTEST --allgt
./dev.py -t custom:feat_type=hesaff+siam128,lnbnn_on=False,fg_on=False,bar_l2_on=True custom:feat_type=hesaff+sift,fg_on=False --db PZ_MTEST --allgt
./dev.py -t custom:feat_type=hesaff+siam128 custom:feat_type=hesaff+sift --db PZ_MTEST --allgt --print-rankhist
./dev.py -t custom:feat_type=hesaff+siam128 --db PZ_MTEST --allgt --print-rankhist
./dev.py -t custom:feat_type=hesaff+sift --db PZ_MTEST --allgt --print-rankhist
./dev.py -t custom:feat_type=hesaff+siam128 custom:feat_type=hesaff+sift --db PZ_Master0 --allgt
./dev.py -t custom:feat_type=hesaff+siam128 --db testdb1 --allgt
Without SV:
agg rank histogram = {
(0, 1): 2276,
(1, 5): 126,
(5, 50): 99,
(50, 8624): 108,
(8624, 8625): 28,
}
With SV:
agg rank histogram = {
(0, 1): 2300,
(1, 5): 106,
(5, 50): 16,
(50, 8624): 0,
(8624, 8625): 215,
}
Guesses:
0 2 2 2 4 4 4 4 0 0
0 0 4 2 2 4 4 4 2 2
2 4 4 4 1 1 1 2 2 2
0 0 1 1 1 2 0 0 1
"""
| SU-ECE-17-7/ibeis | ibeis/dev.py | Python | apache-2.0 | 32,707 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user domain objects."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import user_domain
from core.tests import test_utils
import feconf
import utils
# This mock class will not be needed once the schema version is >=2 for the
# original class ModifiableUserData. Tests below using this class should also
# be modified then.
class MockModifiableUserData(user_domain.ModifiableUserData):
"""A mock ModifiableUserData class that adds a new attribute to the original
    class to create a new schema version, used for testing migration of an
    old-schema user data dict to the latest one.
"""
def __init__(
self, display_alias, pin, preferred_language_codes,
preferred_site_language_code, preferred_audio_language_code,
version, user_id=None, fake_field=None):
super(MockModifiableUserData, self).__init__(
display_alias, pin, preferred_language_codes,
preferred_site_language_code, preferred_audio_language_code,
version, user_id=None)
self.fake_field = fake_field
CURRENT_SCHEMA_VERSION = 2
    # Overriding method to add the new attribute named 'fake_field'.
@classmethod
def from_dict(cls, modifiable_user_data_dict):
return MockModifiableUserData(
modifiable_user_data_dict['display_alias'],
modifiable_user_data_dict['pin'],
modifiable_user_data_dict['preferred_language_codes'],
modifiable_user_data_dict['preferred_site_language_code'],
modifiable_user_data_dict['preferred_audio_language_code'],
modifiable_user_data_dict['schema_version'],
modifiable_user_data_dict['user_id'],
modifiable_user_data_dict['fake_field']
)
# Adding a new method to convert v1 schema data dict to v2.
@classmethod
def _convert_v1_dict_to_v2_dict(cls, user_data_dict):
"""Mock function to convert v1 dict to v2."""
user_data_dict['schema_version'] = 2
user_data_dict['fake_field'] = 'default_value'
return user_data_dict
    # Overriding method to first convert a raw user data dict to the latest version
# then returning a ModifiableUserData domain object.
@classmethod
def from_raw_dict(cls, raw_user_data_dict):
        initial_schema_version = raw_user_data_dict['schema_version']
        data_schema_version = initial_schema_version
user_data_dict = raw_user_data_dict
if data_schema_version == 1:
user_data_dict = cls._convert_v1_dict_to_v2_dict(user_data_dict)
return MockModifiableUserData.from_dict(user_data_dict)
class UserGlobalPrefsTests(test_utils.GenericTestBase):
"""Test domain object for user global email preferences."""
def test_initialization(self):
"""Testing init method."""
user_global_prefs = (user_domain.UserGlobalPrefs(
True, False, True, False))
self.assertTrue(user_global_prefs.can_receive_email_updates)
self.assertFalse(user_global_prefs.can_receive_editor_role_email)
self.assertTrue(user_global_prefs.can_receive_feedback_message_email)
self.assertFalse(user_global_prefs.can_receive_subscription_email)
def test_create_default_prefs(self):
"""Testing create_default_prefs."""
default_user_global_prefs = (
user_domain.UserGlobalPrefs.create_default_prefs())
self.assertEqual(
default_user_global_prefs.can_receive_email_updates,
feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE)
self.assertEqual(
default_user_global_prefs.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
default_user_global_prefs.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
default_user_global_prefs.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
class UserExplorationPrefsTests(test_utils.GenericTestBase):
"""Test domain object for user exploration email preferences."""
def test_initialization(self):
"""Testing init method."""
user_exp_prefs = (user_domain.UserExplorationPrefs(
False, True))
mute_feedback_notifications = (
user_exp_prefs.mute_feedback_notifications)
mute_suggestion_notifications = (
user_exp_prefs.mute_suggestion_notifications)
self.assertFalse(mute_feedback_notifications)
self.assertTrue(mute_suggestion_notifications)
def test_create_default_prefs(self):
"""Testing create_default_prefs."""
default_user_exp_prefs = (
user_domain.UserExplorationPrefs.create_default_prefs())
self.assertEqual(
default_user_exp_prefs.mute_feedback_notifications,
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
self.assertEqual(
default_user_exp_prefs.mute_suggestion_notifications,
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
def test_to_dict(self):
"""Testing to_dict."""
user_exp_prefs = (user_domain.UserExplorationPrefs(
False, True))
default_user_global_prefs = (
user_domain.UserExplorationPrefs.create_default_prefs())
test_dict = user_exp_prefs.to_dict()
default_dict = default_user_global_prefs.to_dict()
self.assertEqual(
test_dict,
{
'mute_feedback_notifications': False,
'mute_suggestion_notifications': True
}
)
self.assertEqual(
default_dict,
{
'mute_feedback_notifications':
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE,
'mute_suggestion_notifications':
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE
}
)
class ExpUserLastPlaythroughTests(test_utils.GenericTestBase):
"""Testing domain object for an exploration last playthrough model."""
def test_initialization(self):
"""Testing init method."""
exp_last_playthrough = (user_domain.ExpUserLastPlaythrough(
'user_id0', 'exp_id0', 0, 'last_updated', 'state0'))
self.assertEqual(
exp_last_playthrough.id, 'user_id0.exp_id0')
self.assertEqual(
exp_last_playthrough.user_id, 'user_id0')
self.assertEqual(
exp_last_playthrough.exploration_id, 'exp_id0')
self.assertEqual(
exp_last_playthrough.last_played_exp_version, 0)
self.assertEqual(
exp_last_playthrough.last_updated, 'last_updated')
self.assertEqual(
exp_last_playthrough.last_played_state_name, 'state0')
def test_update_last_played_information(self):
"""Testing update_last_played_information."""
exp_last_playthrough = (user_domain.ExpUserLastPlaythrough(
'user_id0', 'exp_id0', 0, 'last_updated', 'state0'))
self.assertEqual(
exp_last_playthrough.last_played_exp_version, 0)
self.assertEqual(
exp_last_playthrough.last_played_state_name, 'state0')
exp_last_playthrough.update_last_played_information(1, 'state1')
self.assertEqual(
exp_last_playthrough.last_played_exp_version, 1)
self.assertEqual(
exp_last_playthrough.last_played_state_name, 'state1')
class IncompleteActivitiesTests(test_utils.GenericTestBase):
"""Testing domain object for incomplete activities model."""
def test_initialization(self):
"""Testing init method."""
incomplete_activities = (user_domain.IncompleteActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertEqual(incomplete_activities.id, 'user_id0')
self.assertListEqual(
incomplete_activities.exploration_ids, ['exp_id0'])
self.assertListEqual(
incomplete_activities.collection_ids, ['collect_id0'])
def test_add_exploration_id(self):
"""Testing add_exploration_id."""
incomplete_activities = (user_domain.IncompleteActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
incomplete_activities.exploration_ids, ['exp_id0'])
incomplete_activities.add_exploration_id('exp_id1')
self.assertListEqual(
incomplete_activities.exploration_ids,
['exp_id0', 'exp_id1'])
def test_remove_exploration_id(self):
"""Testing remove_exploration_id."""
incomplete_activities = (user_domain.IncompleteActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
incomplete_activities.exploration_ids, ['exp_id0'])
incomplete_activities.remove_exploration_id('exp_id0')
self.assertListEqual(
incomplete_activities.exploration_ids, [])
def test_add_collection_id(self):
"""Testing add_collection_id."""
incomplete_activities = (user_domain.IncompleteActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
incomplete_activities.collection_ids, ['collect_id0'])
incomplete_activities.add_collection_id('collect_id1')
self.assertListEqual(
incomplete_activities.collection_ids,
['collect_id0', 'collect_id1'])
def test_remove_collection_id(self):
"""Testing remove_collection_id."""
incomplete_activities = (user_domain.IncompleteActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
incomplete_activities.collection_ids, ['collect_id0'])
incomplete_activities.remove_collection_id('collect_id0')
self.assertListEqual(
incomplete_activities.collection_ids, [])
class CompletedActivitiesTests(test_utils.GenericTestBase):
"""Testing domain object for the activities completed."""
def test_initialization(self):
"""Testing init method."""
completed_activities = (user_domain.CompletedActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertEqual('user_id0', completed_activities.id)
self.assertListEqual(
completed_activities.exploration_ids, ['exp_id0'])
self.assertListEqual(
completed_activities.collection_ids, ['collect_id0'])
def test_add_exploration_id(self):
"""Testing add_exploration_id."""
completed_activities = (user_domain.CompletedActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
completed_activities.exploration_ids, ['exp_id0'])
completed_activities.add_exploration_id('exp_id1')
self.assertListEqual(
completed_activities.exploration_ids,
['exp_id0', 'exp_id1'])
def test_remove_exploration_id(self):
"""Testing remove_exploration_id."""
completed_activities = (user_domain.CompletedActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
completed_activities.exploration_ids, ['exp_id0'])
completed_activities.remove_exploration_id('exp_id0')
self.assertListEqual(
completed_activities.exploration_ids, [])
def test_add_collection_id(self):
"""Testing add_collection_id."""
completed_activities = (user_domain.CompletedActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
completed_activities.collection_ids, ['collect_id0'])
completed_activities.add_collection_id('collect_id1')
self.assertListEqual(
completed_activities.collection_ids,
['collect_id0', 'collect_id1'])
def test_remove_collection_id(self):
"""Testing remove_collection_id."""
completed_activities = (user_domain.CompletedActivities(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
completed_activities.collection_ids, ['collect_id0'])
completed_activities.remove_collection_id('collect_id0')
self.assertListEqual(
completed_activities.collection_ids, [])
class LearnerPlaylistTests(test_utils.GenericTestBase):
"""Testing domain object for the learner playlist."""
def test_initialization(self):
"""Testing init method."""
learner_playlist = (user_domain.LearnerPlaylist(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertEqual(learner_playlist.id, 'user_id0')
self.assertListEqual(
learner_playlist.exploration_ids, ['exp_id0'])
self.assertListEqual(
learner_playlist.collection_ids, ['collect_id0'])
def test_insert_exploration_id_at_given_position(self):
"""Testing inserting the given exploration id at the given position."""
learner_playlist = (user_domain.LearnerPlaylist(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
learner_playlist.exploration_ids, ['exp_id0'])
learner_playlist.insert_exploration_id_at_given_position(
'exp_id1', 1)
learner_playlist.insert_exploration_id_at_given_position(
'exp_id2', 1)
self.assertListEqual(
learner_playlist.exploration_ids,
['exp_id0', 'exp_id2', 'exp_id1'])
def test_add_exploration_id_to_list(self):
"""Testing add_exploration_id_to_list."""
learner_playlist = (user_domain.LearnerPlaylist(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
learner_playlist.exploration_ids, ['exp_id0'])
learner_playlist.add_exploration_id_to_list('exp_id1')
self.assertListEqual(
learner_playlist.exploration_ids, ['exp_id0', 'exp_id1'])
def test_insert_collection_id_at_given_position(self):
"""Testing insert_exploration_id_at_given_position."""
learner_playlist = (user_domain.LearnerPlaylist(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
learner_playlist.collection_ids, ['collect_id0'])
learner_playlist.insert_collection_id_at_given_position(
'collect_id1', 1)
learner_playlist.insert_collection_id_at_given_position(
'collect_id2', 1)
self.assertListEqual(
learner_playlist.collection_ids,
['collect_id0', 'collect_id2', 'collect_id1'])
def test_add_collection_id_list(self):
"""Testing add_collection_id."""
learner_playlist = (user_domain.LearnerPlaylist(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
learner_playlist.collection_ids, ['collect_id0'])
learner_playlist.add_collection_id_to_list('collect_id1')
self.assertListEqual(
learner_playlist.collection_ids,
['collect_id0', 'collect_id1'])
def test_remove_exploration_id(self):
"""Testing remove_exploration_id."""
learner_playlist = (user_domain.LearnerPlaylist(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
learner_playlist.exploration_ids, ['exp_id0'])
learner_playlist.remove_exploration_id('exp_id0')
self.assertListEqual(
learner_playlist.exploration_ids, [])
def test_remove_collection_id(self):
"""Testing remove_collection_id."""
learner_playlist = (user_domain.LearnerPlaylist(
'user_id0', ['exp_id0'], ['collect_id0']))
self.assertListEqual(
learner_playlist.collection_ids, ['collect_id0'])
learner_playlist.remove_collection_id('collect_id0')
self.assertListEqual(
learner_playlist.collection_ids, [])
class UserContributionProficiencyTests(test_utils.GenericTestBase):
"""Testing domain object for user contribution scoring model."""
def setUp(self):
super(UserContributionProficiencyTests, self).setUp()
self.user_proficiency = user_domain.UserContributionProficiency(
'user_id0', 'category0', 0, False)
def test_initialization(self):
"""Testing init method."""
self.assertEqual(self.user_proficiency.user_id, 'user_id0')
self.assertEqual(
self.user_proficiency.score_category, 'category0')
self.assertEqual(self.user_proficiency.score, 0)
self.assertEqual(
self.user_proficiency.onboarding_email_sent, False)
def test_increment_score(self):
self.assertEqual(self.user_proficiency.score, 0)
self.user_proficiency.increment_score(4)
self.assertEqual(self.user_proficiency.score, 4)
self.user_proficiency.increment_score(-3)
self.assertEqual(self.user_proficiency.score, 1)
def test_can_user_review_category(self):
self.assertEqual(self.user_proficiency.score, 0)
self.assertFalse(self.user_proficiency.can_user_review_category())
self.user_proficiency.increment_score(
feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW)
self.assertTrue(self.user_proficiency.can_user_review_category())
def test_mark_onboarding_email_as_sent(self):
self.assertFalse(self.user_proficiency.onboarding_email_sent)
self.user_proficiency.mark_onboarding_email_as_sent()
self.assertTrue(self.user_proficiency.onboarding_email_sent)
class UserContributionRightsTests(test_utils.GenericTestBase):
"""Testing UserContributionRights domain object."""
def setUp(self):
super(UserContributionRightsTests, self).setUp()
self.user_contribution_rights = user_domain.UserContributionRights(
'user_id', ['hi'], [], True)
def test_initialization(self):
"""Testing init method."""
self.assertEqual(self.user_contribution_rights.id, 'user_id')
self.assertEqual(
self.user_contribution_rights
.can_review_translation_for_language_codes, ['hi'])
self.assertEqual(
self.user_contribution_rights
.can_review_voiceover_for_language_codes,
[])
self.assertEqual(
self.user_contribution_rights.can_review_questions, True)
def test_can_review_translation_for_language_codes_incorrect_type(self):
self.user_contribution_rights.can_review_translation_for_language_codes = 5 # pylint: disable=line-too-long
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected can_review_translation_for_language_codes to be a list'):
self.user_contribution_rights.validate()
def test_can_review_voiceover_for_language_codes_incorrect_type(self):
self.user_contribution_rights.can_review_voiceover_for_language_codes = 5 # pylint: disable=line-too-long
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected can_review_voiceover_for_language_codes to be a list'):
self.user_contribution_rights.validate()
def test_incorrect_language_code_for_voiceover_raise_error(self):
self.user_contribution_rights.can_review_voiceover_for_language_codes = [ # pylint: disable=line-too-long
'invalid_lang_code']
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid_lang_code'):
self.user_contribution_rights.validate()
def test_incorrect_language_code_for_translation_raise_error(self):
self.user_contribution_rights.can_review_translation_for_language_codes = [ # pylint: disable=line-too-long
'invalid_lang_code']
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid_lang_code'):
self.user_contribution_rights.validate()
def test_can_review_voiceover_for_language_codes_with_duplicate_values(
self):
self.user_contribution_rights.can_review_voiceover_for_language_codes = [ # pylint: disable=line-too-long
'hi']
self.user_contribution_rights.validate()
self.user_contribution_rights.can_review_voiceover_for_language_codes = [ # pylint: disable=line-too-long
'hi', 'hi']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected can_review_voiceover_for_language_codes list not to have '
'duplicate values'):
self.user_contribution_rights.validate()
def test_can_review_translation_for_language_codes_with_duplicate_values(
self):
self.user_contribution_rights.can_review_translation_for_language_codes = [ # pylint: disable=line-too-long
'hi']
self.user_contribution_rights.validate()
self.user_contribution_rights.can_review_translation_for_language_codes = [ # pylint: disable=line-too-long
'hi', 'hi']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected can_review_translation_for_language_codes list not to '
'have duplicate values'):
self.user_contribution_rights.validate()
def test_incorrect_type_for_can_review_questions_raise_error(self):
self.user_contribution_rights.can_review_questions = 5
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected can_review_questions to be a boolean value'):
self.user_contribution_rights.validate()
class ModifiableUserDataTests(test_utils.GenericTestBase):
"""Testing domain object for modifiable user data."""
def test_initialization_with_none_user_id_is_successful(self):
"""Testing init method user id set None."""
schema_version = 1
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '123',
'preferred_language_codes': 'preferred_language_codes',
'preferred_site_language_code': 'preferred_site_language_code',
'preferred_audio_language_code': 'preferred_audio_language_code',
'user_id': None,
}
modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict)
)
self.assertEqual(
modifiable_user_data.display_alias, 'display_alias')
self.assertEqual(modifiable_user_data.pin, '123')
self.assertEqual(
modifiable_user_data.preferred_language_codes,
'preferred_language_codes'
)
self.assertEqual(
modifiable_user_data.preferred_site_language_code,
'preferred_site_language_code'
)
self.assertEqual(
modifiable_user_data.preferred_audio_language_code,
'preferred_audio_language_code'
)
self.assertIsNone(modifiable_user_data.user_id)
self.assertEqual(modifiable_user_data.version, schema_version)
def test_initialization_with_valid_user_id_is_successful(self):
"""Testing init method with a valid user id set."""
schema_version = 1
user_data_dict = {
'schema_version': 1,
'display_alias': 'display_alias',
'pin': '123',
'preferred_language_codes': 'preferred_language_codes',
'preferred_site_language_code': 'preferred_site_language_code',
'preferred_audio_language_code': 'preferred_audio_language_code',
'user_id': 'user_id',
}
modifiable_user_data = (
user_domain.ModifiableUserData.from_raw_dict(user_data_dict)
)
self.assertEqual(
modifiable_user_data.display_alias, 'display_alias')
self.assertEqual(modifiable_user_data.pin, '123')
self.assertEqual(
modifiable_user_data.preferred_language_codes,
'preferred_language_codes'
)
self.assertEqual(
modifiable_user_data.preferred_site_language_code,
'preferred_site_language_code'
)
self.assertEqual(
modifiable_user_data.preferred_audio_language_code,
'preferred_audio_language_code'
)
self.assertEqual(modifiable_user_data.user_id, 'user_id')
self.assertEqual(modifiable_user_data.version, schema_version)
# This test should be modified to use the original class ModifiableUserData
# itself when the CURRENT_SCHEMA_VERSION has been updated to 2 or higher.
def test_mock_modifiable_user_data_class_with_all_attributes_given(self):
user_data_dict = {
'schema_version': 2,
'display_alias': 'name',
'pin': '123',
'preferred_language_codes': ['en', 'es'],
'preferred_site_language_code': 'es',
'preferred_audio_language_code': 'en',
'user_id': None,
'fake_field': 'set_value'
}
modifiable_user_data = (
MockModifiableUserData.from_raw_dict(user_data_dict))
self.assertEqual(modifiable_user_data.display_alias, 'name')
self.assertEqual(modifiable_user_data.pin, '123')
self.assertEqual(
modifiable_user_data.preferred_language_codes, ['en', 'es'])
self.assertEqual(
modifiable_user_data.preferred_site_language_code, 'es')
self.assertEqual(
modifiable_user_data.preferred_audio_language_code, 'en')
self.assertEqual(modifiable_user_data.fake_field, 'set_value')
self.assertEqual(modifiable_user_data.user_id, None)
self.assertEqual(modifiable_user_data.version, 2)
# This test should be modified to use the original class ModifiableUserData
# itself when the CURRENT_SCHEMA_VERSION has been updated to 2 or higher.
def test_mock_migration_from_old_version_to_new_works_correctly(self):
user_data_dict = {
'schema_version': 1,
'display_alias': 'name',
'pin': '123',
'preferred_language_codes': ['en', 'es'],
'preferred_site_language_code': 'es',
'preferred_audio_language_code': 'en',
'user_id': None
}
modifiable_user_data = MockModifiableUserData.from_raw_dict(
user_data_dict)
self.assertEqual(modifiable_user_data.display_alias, 'name')
self.assertEqual(modifiable_user_data.pin, '123')
self.assertEqual(
modifiable_user_data.preferred_language_codes, ['en', 'es'])
self.assertEqual(
modifiable_user_data.preferred_site_language_code, 'es')
self.assertEqual(
modifiable_user_data.preferred_audio_language_code, 'en')
self.assertEqual(modifiable_user_data.fake_field, 'default_value')
self.assertEqual(modifiable_user_data.user_id, None)
self.assertEqual(modifiable_user_data.version, 2)
| prasanna08/oppia | core/domain/user_domain_test.py | Python | apache-2.0 | 27,932 |
#!/usr/bin/env python
'''You can easily read off two sample,line coordinates from qview, but ISIS
crop wants one sample,line and then offsets. This just takes two coordinates,
does the math, and then calls crop.'''
# Copyright 2016, 2019, Ross A. Beyer ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The arguments to ISIS crop require a sample/line pair and then a set of offsets.
# I typically have two sample/line pairs read from qview, and got tired of always
# bringing up the calculator to compute the offsets.
import argparse
import subprocess
import sys
from pathlib import Path
def crop(fr, to, samp, line, nsamp, nline):
cmd = ('crop', f'from= {fr}', f'to= {to}',
f'samp= {samp}', f'line= {line}',
f'nsamp= {nsamp}', f'nline= {nline}')
return subprocess.run(cmd, check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
def calcoffset(first, second):
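    """Split two 'samp:line' strings and return (samp, line, nsamp, nline).
    Illustrative example (made-up coordinates, not from a real cube):
    >>> calcoffset('10:20', '110:220')
    ('10', '20', '100', '200')
    """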
(f_samp, f_line) = first.split(':')
(s_samp, s_line) = second.split(':')
nsamp = int(s_samp) - int(f_samp)
nline = int(s_line) - int(f_line)
    return (f_samp, f_line, str(nsamp), str(nline))
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-o', '--output', help="The output filename.")
parser.add_argument('-f', '--first',
help='The sample and line of the first point, '
'separated by a colon, like -f 3:10')
parser.add_argument('-s', '--second',
help='The sample and line of the second point, '
'separated by a colon.')
parser.add_argument('cube', help='Cube file(s) to crop.', nargs='+')
args = parser.parse_args()
for cub in args.cube:
in_p = Path(cub)
        if args.output:
out_p = Path(args.output)
else:
out_p = in_p.with_suffix('.crop.cub')
(samp, line, nsamp, nline) = calcoffset(args.first, args.second)
print(crop(in_p, out_p, samp, line, nsamp, nline).args)
        if args.output:
# If there's a specific output filename, only do one.
break
if __name__ == "__main__":
sys.exit(main())
| rbeyer/scriptorium | cropsl.py | Python | apache-2.0 | 2,797 |
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from config import config
import chartkick
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
app.jinja_env.add_extension("chartkick.ext.charts")
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .crawler import crawler as crawler_blueprint
app.register_blueprint(crawler_blueprint)
return app
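# Illustrative usage of the application factory above (the config key name is
# an assumption; see config.py for the names actually registered):
#   app = create_app('default')
#   app.run()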
| ASaiun/saiun_bysj | app/__init__.py | Python | apache-2.0 | 785 |
#!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
import re
from .py23 import BASE_STRING
class Filename(object):
def __init__(self, init):
if isinstance(init, Filename):
filename = init._filename
elif isinstance(init, BASE_STRING):
filename = self._from_string(init)
else:
raise ValueError("cannot make a {c} from {t} object {o!r}".format(
c=self.__class__.__name__,
t=type(init).__name__,
o=init))
self._filename = filename
@property
def filename(self):
return self._filename
def __str__(self):
return self._filename
def __repr__(self):
return "{0}(filename={1!r})".format(self.__class__.__name__, self._filename)
@classmethod
def _from_string(cls, value):
return value
class InputFilename(Filename):
pass
class OutputFilename(Filename):
pass
class Mode(object):
MODES = set()
DEFAULT_MODE = 'rb'
def __init__(self, mode=None):
if mode is None:
mode = self.DEFAULT_MODE
mode = mode.lower()
for m in self.MODES:
if set(mode) == set(m):
break
else:
raise ValueError("invalid {} {!r}: allowed modes are {}".format(
self.__class__.__name__,
mode,
', '.join(repr(m) for m in self.MODES)))
self.mode = mode
def __str__(self):
return self.mode
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, self.mode)
class InputMode(Mode):
MODES = {'r', 'rb'}
DEFAULT_MODE = 'rb'
class OutputMode(Mode):
MODES = {'w', 'wb', 'a', 'ab', 'w+b', 'r+b', 'a+b'}
DEFAULT_MODE = 'wb'
def is_append_mode(self):
return 'a' in self.mode
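# Illustrative usage (a sketch, not part of the original module; the file name is
# hypothetical):
#
#   fn = InputFilename("data.cube")      # str(fn) == "data.cube"
#   OutputMode("ab").is_append_mode()    # True
#   InputMode("w")                       # raises ValueError: only 'r'/'rb' are valid input modes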
| simone-campagna/rubik | rubik/filename.py | Python | apache-2.0 | 2,441 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import extended_volumes
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import volume
ALIAS = "os-extended-volumes"
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
authorize_attach = extensions.extension_authorizer('compute',
'v3:%s:attach' % ALIAS)
authorize_detach = extensions.extension_authorizer('compute',
'v3:%s:detach' % ALIAS)
authorize_swap = extensions.extension_authorizer('compute',
'v3:%s:swap' % ALIAS)
class ExtendedVolumesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedVolumesController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.volume_api = volume.API()
def _extend_server(self, context, server, instance):
bdms = self.compute_api.get_instance_bdms(context, instance)
volume_ids = [bdm['volume_id'] for bdm in bdms if bdm['volume_id']]
key = "%s:volumes_attached" % ExtendedVolumes.alias
server[key] = [{'id': volume_id} for volume_id in volume_ids]
@extensions.expected_errors((400, 404, 409))
@wsgi.action('swap_volume_attachment')
@validation.schema(extended_volumes.swap_volume_attachment)
def swap(self, req, id, body):
context = req.environ['nova.context']
authorize_swap(context)
old_volume_id = body['swap_volume_attachment']['old_volume_id']
new_volume_id = body['swap_volume_attachment']['new_volume_id']
try:
old_volume = self.volume_api.get(context, old_volume_id)
new_volume = self.volume_api.get(context, new_volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
try:
instance = self.compute_api.get(context, id,
want_objects=True)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = self.compute_api.get_instance_bdms(context, instance)
found = False
try:
for bdm in bdms:
if bdm['volume_id'] != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume')
if not found:
raise exc.HTTPNotFound("The volume was either invalid or not "
"attached to the instance.")
else:
return webob.Response(status_int=202)
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(context, server, db_instance)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('attach')
@validation.schema(extended_volumes.attach)
def attach(self, req, id, body):
server_id = id
context = req.environ['nova.context']
authorize_attach(context)
volume_id = body['attach']['volume_id']
device = body['attach'].get('device')
LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
"at %(device)s"),
{'volume_id': volume_id,
'device': device,
'server_id': server_id},
context=context)
try:
instance = self.compute_api.get(context, server_id)
self.compute_api.attach_volume(context, instance,
volume_id, device)
except (exception.InstanceNotFound, exception.VolumeNotFound) as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(
state_error, 'attach_volume')
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InvalidDevicePath as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('detach')
@validation.schema(extended_volumes.detach)
def detach(self, req, id, body):
server_id = id
context = req.environ['nova.context']
authorize_detach(context)
volume_id = body['detach']['volume_id']
LOG.audit(_("Detach volume %(volume_id)s from "
"instance %(server_id)s"),
{"volume_id": volume_id,
"server_id": id,
"context": context})
try:
instance = self.compute_api.get(context, server_id)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
try:
volume = self.volume_api.get(context, volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = self.compute_api.get_instance_bdms(context, instance)
if not bdms:
msg = _("Volume %(volume_id)s is not attached to the "
"instance %(server_id)s") % {'server_id': server_id,
'volume_id': volume_id}
LOG.debug(msg)
raise exc.HTTPNotFound(explanation=msg)
for bdm in bdms:
if bdm['volume_id'] != volume_id:
continue
try:
self.compute_api.detach_volume(context, instance, volume)
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(
state_error, 'detach_volume')
else:
msg = _("Volume %(volume_id)s is not attached to the "
"instance %(server_id)s") % {'server_id': server_id,
'volume_id': volume_id}
raise exc.HTTPNotFound(explanation=msg)
class ExtendedVolumes(extensions.V3APIExtensionBase):
"""Extended Volumes support."""
name = "ExtendedVolumes"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ExtendedVolumesController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
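# Illustrative request bodies for the actions defined above (a sketch, not part of
# the original module; the handlers are reached through the usual server action
# endpoint, typically POST .../servers/{server_id}/action):
#
#   {"swap_volume_attachment": {"old_volume_id": "<uuid>", "new_volume_id": "<uuid>"}}
#   {"attach": {"volume_id": "<uuid>", "device": "/dev/vdb"}}   # "device" is optional
#   {"detach": {"volume_id": "<uuid>"}}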
| OpenAcademy-OpenStack/nova-scheduler | nova/api/openstack/compute/plugins/v3/extended_volumes.py | Python | apache-2.0 | 9,702 |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START main_method]
def main():
return 'main method'
# [END main_method]
# [START not_main]
def not_main():
return 'not main'
# [END not_main]
# [START also_not_main]
def also_not_main():
return 'also_not main'
# [END also_not_main]
# [START untested_method]
def untested_method():
return 'untested!'
# [END untested_method]
| GoogleCloudPlatform/repo-automation-playground | xunit-autolabeler-v2/ast_parser/core/test_data/cli/additions/additions.py | Python | apache-2.0 | 926 |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
import re
urlpatterns = patterns(re.sub(r'[^.]*$', "views", __name__),
(r'^$', 'index'),
(r'^(?P<admin>admin)/(?P<user>.*?)/$', 'index'),
(r'^((?P<event_key>.*?)/)?edit/$', 'edit'),
(r'^(?P<ref_key>.*?)/((?P<event_key>.*?)/)?edit/event/$', 'editPureEvent'),
#(r'^(?P<location_key>\w+)/update/$', 'update'),
# Uncomment this for admin:
# (r'^admin/', include('django.contrib.admin.urls')),
)
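# Illustrative matches (a sketch, not part of the original module; the view prefix
# resolves to this package's views module, e.g. "astro.birth.urls" -> "astro.birth.views"):
#
#   /                     -> views.index(request)
#   /admin/alice/         -> views.index(request, admin='admin', user='alice')
#   /edit/                -> views.edit(request, event_key=None)
#   /abc123/edit/         -> views.edit(request, event_key='abc123')
#   /ref1/ev2/edit/event/ -> views.editPureEvent(request, ref_key='ref1', event_key='ev2')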
| sandeva/appspot | astro/birth/urls.py | Python | apache-2.0 | 1,038 |
# Standard Python packages
import math, cmath
import re
import itertools
import numbers
import random
# Special dependencies
import numpy, numpy.random # sudo apt-get install python-numpy
# import minuit # no package
# Augustus dependencies
from augustus.kernel.unitable import UniTable
# Cassius interdependencies
import mathtools
import utilities
import color
import containers
class ContainerException(Exception):
"""Run-time errors in container objects."""
pass
class AutoType:
def __repr__(self):
if self is Auto:
return "Auto"
else:
raise ContainerException, "There must only be one instance of Auto"
#: Symbol indicating that a frame argument should be
#: automatically-generated, if possible. Similar to `None` in that
#: there is only one instance (checked with `is`), but with a different
#: meaning.
#:
#: Example:
#: `xticks = None` means that no x-ticks are drawn
#:
#: `xticks = Auto` means that x-ticks are automatically generated
#:
#: `Auto` is the only instance of `AutoType`.
Auto = AutoType()
######################################################### Layout of the page, coordinate frames, overlays
# for arranging a grid of plots
class Layout:
"""Represents a regular grid of plots.
Signatures::
Layout(nrows, ncols, plot1[, plot2[, ...]])
Layout(plot1[, plot2[, ...]], nrows=value, ncols=value)
Arguments:
nrows (number): number of rows
ncols (number): number of columns
plots (list of `Frame` or other `Layout` objects): plots to
draw, organized in normal reading order (left to right, columns
before rows)
Public Members:
`nrows`, `ncols`, `plots`
Behavior:
It is possible to create an empty Layout (no plots).
For a Layout object named `layout`, `layout[i,j]` accesses a
plot in row `i` and column `j`, while `layout.plots[k]`
accesses a plot by a serial index (`layout.plots` is a normal
list).
Spaces containing `None` will be blank.
Layouts can be nested: e.g. `Layout(1, 2, top, Layout(2, 1,
bottomleft, bottomright))`.
"""
def __init__(self, *args, **kwds):
if "nrows" in kwds and "ncols" in kwds:
self.nrows, self.ncols = kwds["nrows"], kwds["ncols"]
self.plots = list(args)
if set(kwds.keys()) != set(["nrows", "ncols"]):
raise TypeError, "Unrecognized keyword argument"
elif len(args) >= 2 and isinstance(args[0], (numbers.Number, numpy.number)) and isinstance(args[1], (numbers.Number, numpy.number)):
self.nrows, self.ncols = args[0:2]
self.plots = list(args[2:])
if set(kwds.keys()) != set([]):
raise TypeError, "Unrecognized keyword argument"
else:
raise TypeError, "Missing nrows or ncols argument"
def index(self, i, j):
"""Convert a grid index (i,j) into a serial index."""
if i < 0 or j < 0 or i >= self.nrows or j >= self.ncols:
raise ContainerException, "Index (%d,%d) is beyond the %dx%d grid of plots" % (i, j, self.nrows, self.ncols)
return self.ncols*i + j
def __getitem__(self, ij):
i, j = ij
index = self.index(i, j)
if index < len(self.plots):
return self.plots[index]
else:
return None
def __setitem__(self, ij, value):
i, j = ij
index = self.index(i, j)
if index < len(self.plots):
self.plots[index] = value
else:
for k in range(len(self.plots), index):
self.plots.append(None)
self.plots.append(value)
def __delitem__(self, ij):
i, j = ij
if self.index(i, j) < len(self.plots):
self.plots[self.index(i, j)] = None
def __repr__(self):
return "<Layout %dx%d at 0x%x>" % (self.nrows, self.ncols, id(self))
# for representing a coordinate axis
class Frame:
"""Abstract superclass for all plots with drawable coordinate frames.
Frame arguments:
Any frame argument (axis labels, margins, etc.) can be passed
as a keyword in the constructor or later as member data. The
frame arguments are interpreted only by the backend and are
replaced with defaults if not present.
Public Members:
All frame arguments that have been set.
"""
_not_frameargs = []
def __init__(self, **frameargs):
self.__dict__.update(frameargs)
def __repr__(self):
return "<Frame %s at 0x%x>" % (str(self._frameargs()), id(self))
def _frameargs(self):
output = dict(self.__dict__)
for i in self._not_frameargs:
if i in output: del output[i]
for i in output.keys():
if i[0] == "_": del output[i]
return output
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
The abstract class, `Frame`, returns constant intervals (0, 1)
        (or (0.1, 1) for log scales).
"""
if xlog:
xmin, xmax = 0.1, 1.
else:
xmin, xmax = 0., 1.
if ylog:
ymin, ymax = 0.1, 1.
else:
ymin, ymax = 0., 1.
return xmin, ymin, xmax, ymax
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None): pass # get ready to be drawn
# for overlaying different containers' data in a single frame
class Overlay(Frame):
"""Represents an overlay of several plots in the same coordinate axis.
Signatures::
Overlay(frame, plot1[, plot2[, ...]], [framearg=value[, ...]])
Overlay(plot1[, plot2[, ...]], [frame=value[, framearg=value[, ...]]])
Arguments:
plots (`Frame` instances): plots to be overlaid
frame (index or `None`): which, if any, plot to use to set the
coordinate frame. If `frame=None`, then `frameargs` will be
taken from the `Overlay` instance and a data-space bounding box
will be derived from the union of all contents.
Public Members:
`plots`, `frame`
Behavior:
It is *not* possible to create an empty Overlay (no plots).
"""
_not_frameargs = ["plots", "frame"]
def __init__(self, first, *others, **frameargs):
if isinstance(first, (int, long)):
self.frame = first
self.plots = list(others)
else:
self.plots = [first] + list(others)
Frame.__init__(self, **frameargs)
def append(self, plot):
"""Append a plot to the end of `plots` (drawn last), keeping the `frame` pointer up-to-date."""
self.plots.append(plot)
if getattr(self, "frame", None) is not None and self.frame < 0:
self.frame -= 1
def prepend(self, plot):
"""Prepend a plot at the beginning of `plots` (drawn first), keeping the `frame` pointer up-to-date."""
self.plots.insert(0, plot)
if getattr(self, "frame", None) is not None and self.frame >= 0:
self.frame += 1
def __repr__(self):
if getattr(self, "frame", None) is not None:
return "<Overlay %d items (frame=%d) at 0x%x>" % (len(self.plots), self.frame, id(self))
else:
return "<Overlay %d items at 0x%x>" % (len(self.plots), id(self))
def _frameargs(self):
if getattr(self, "frame", None) is not None:
if self.frame >= len(self.plots):
raise ContainerException, "Overlay.frame points to a non-existent plot (%d <= %d)" % (self.frame, len(self.plots))
output = dict(self.plots[self.frame].__dict__)
output.update(self.__dict__)
else:
output = dict(self.__dict__)
for i in self._not_frameargs:
if i in output: del output[i]
for i in output.keys():
if i[0] == "_": del output[i]
return output
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box of all contents as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
if getattr(self, "frame", None) is not None:
if self.frame >= len(self.plots):
raise ContainerException, "Overlay.frame points to a non-existent plot (%d <= %d)" % (self.frame, len(self.plots))
return self.plots[self.frame].ranges(xlog, ylog)
xmins, ymins, xmaxs, ymaxs = [], [], [], []
for plot in self.plots:
xmin, ymin, xmax, ymax = plot.ranges(xlog, ylog)
xmins.append(xmin)
ymins.append(ymin)
xmaxs.append(xmax)
ymaxs.append(ymax)
return min(xmins), min(ymins), max(xmaxs), max(ymaxs)
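# Illustrative usage of Overlay (a sketch; hist and curve are hypothetical plots):
#
#   Overlay(hist, curve, frame=0)   # draw both; take frame arguments from hist
#   Overlay(hist, curve)            # frame arguments from the Overlay itself; ranges
#                                   # come from the union of both plots' contents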
######################################################### Histograms, bar charts, pie charts
class Stack(Frame):
"""Represents a stack of histograms.
Signature::
Stack(plot1[, plot2[, ...]] [linewidths=list,] [linestyles=list,] [linecolors=list,] [**frameargs])
Arguments:
plots (list of `HistogramAbstract` instances): histograms to be stacked
linewidths (list): list of linewidths with the same length as
the number of histograms
linestyles (list): list of styles
linecolors (list): list of colors
fillcolors (list): list of fill colors (most commonly used to
            distinguish between stacked histograms)
Public members:
`plots`, `linewidths`, `linestyles`, `linecolors`, `fillcolors`
Behavior:
It is *not* possible to create an empty Stack (no plots).
If `linewidths`, `linestyles`, `linecolors`, or `fillcolors`
are not specified, the input histograms' own styles will be
used.
"""
_not_frameargs = ["plots", "linewidths", "linestyles", "linecolors", "fillcolors"]
def __init__(self, first, *others, **frameargs):
self.plots = [first] + list(others)
Frame.__init__(self, **frameargs)
def __repr__(self):
return "<Stack %d at 0x%x>" % (len(self.plots), id(self))
def bins(self):
"""Returns a list of histogram (low, high) bin edges.
Exceptions:
Raises `ContainerException` if any of the histogram bins
differ (ignoring small numerical errors).
"""
bins = None
for hold in self.plots:
if bins is None:
bins = hold.bins[:]
else:
same = (len(hold.bins) == len(bins))
if same:
for oldbin, refbin in zip(hold.bins, bins):
if HistogramAbstract._numeric(hold, oldbin) and HistogramAbstract._numeric(hold, refbin):
xepsilon = mathtools.epsilon * abs(refbin[1] - refbin[0])
if abs(oldbin[0] - refbin[0]) > xepsilon or abs(oldbin[1] - refbin[1]) > xepsilon:
same = False
break
else:
if oldbin != refbin:
same = False
break
if not same:
raise ContainerException, "Bins in stacked histograms must be the same"
return bins
def stack(self):
"""Returns a list of new histograms, obtained by stacking the inputs.
Exceptions:
Raises `ContainerException` if any of the histogram bins
differ (ignoring small numerical errors).
"""
if len(self.plots) == 0:
raise ContainerException, "Stack must contain at least one histogram"
for styles in "linewidths", "linestyles", "linecolors", "fillcolors":
if getattr(self, styles, None) is not None:
if len(getattr(self, styles)) != len(self.plots):
raise ContainerException, "There must be as many %s as plots" % styles
bins = self.bins()
gap = max([i.gap for i in self.plots])
output = []
for i in xrange(len(self.plots)):
if getattr(self, "linewidths", None) is not None:
linewidth = self.linewidths[i]
else:
linewidth = self.plots[i].linewidth
if getattr(self, "linestyles", None) is not None:
linestyle = self.linestyles[i]
else:
linestyle = self.plots[i].linestyle
if getattr(self, "linecolors", None) is not None:
linecolor = self.linecolors[i]
else:
linecolor = self.plots[i].linecolor
if getattr(self, "fillcolors", None) is not None:
fillcolor = self.fillcolors[i]
else:
fillcolor = self.plots[i].fillcolor
if isinstance(self.plots[i], HistogramCategorical):
hnew = HistogramCategorical(bins, None, None, 0, linewidth, linestyle, linecolor, fillcolor, gap)
else:
hnew = HistogramAbstract(bins, 0, linewidth, linestyle, linecolor, fillcolor, gap)
for j in xrange(i+1):
for bin in xrange(len(hnew.values)):
hnew.values[bin] += self.plots[j].values[bin]
output.append(hnew)
return output
def overlay(self):
self._stack = self.stack()
self._stack.reverse()
self._overlay = Overlay(*self._stack, frame=0)
self._overlay.plots[0].__dict__.update(self._frameargs())
return self._overlay
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box of all contents as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
self.overlay()
if ylog:
ymin = min(filter(lambda y: y > 0., self._stack[-1].values))
ymax = max(filter(lambda y: y > 0., self._stack[0].values))
else:
ymin = min(list(self._stack[-1].values) + [0.])
ymax = max(self._stack[0].values)
if ymin == ymax:
if ylog:
ymin, ymax = ymin / 2., ymax * 2.
else:
ymin, ymax = ymin - 0.5, ymax + 0.5
return self.plots[0].low(), ymin, self.plots[0].high(), ymax
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
self.overlay()
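# Illustrative usage of Stack (a sketch; h1 and h2 are hypothetical histograms
# with identical binning):
#
#   s = Stack(h1, h2, fillcolors=["lightblue", "pink"])
#   s.stack()   # new histograms with cumulative bin values: h1, then h1 + h2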
class HistogramAbstract(Frame):
"""Abstract class for histograms: use concrete classes (Histogram, HistogramNonUniform, and HistogramCategorical) instead."""
_not_frameargs = ["bins", "storelimit", "entries", "linewidth", "linestyle", "linecolor", "fillcolor", "gap", "values", "underflow", "overflow", "inflow"]
def __init__(self, bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs):
self.bins, self.storelimit = bins, storelimit
self.entries = 0
self.linewidth, self.linestyle, self.linecolor, self.fillcolor, self.gap = linewidth, linestyle, linecolor, fillcolor, gap
self.values = numpy.zeros(len(self.bins), numpy.float)
self._sumx = numpy.zeros(len(self.bins), numpy.float)
self.underflow, self.overflow, self.inflow = 0., 0., 0.
if storelimit is None:
self._store = []
self._weights = []
self._lenstore = None
else:
self._store = numpy.empty(storelimit, numpy.float)
self._weights = numpy.empty(storelimit, numpy.float)
self._lenstore = 0
Frame.__init__(self, **frameargs)
def __repr__(self):
return "<HistogramAbstract at 0x%x>" % id(self)
def _numeric(self, bin):
return len(bin) == 2 and isinstance(bin[0], (numbers.Number, numpy.number)) and isinstance(bin[1], (numbers.Number, numpy.number))
def __str__(self):
output = []
output.append("%-30s %s" % ("bin", "value"))
output.append("="*40)
if self.underflow > 0: output.append("%-30s %g" % ("underflow", self.underflow))
for i in xrange(len(self.bins)):
if self._numeric(self.bins[i]):
category = "[%g, %g)" % self.bins[i]
else:
category = "\"%s\"" % self.bins[i]
output.append("%-30s %g" % (category, self.values[i]))
if self.overflow > 0: output.append("%-30s %g" % ("overflow", self.overflow))
if self.inflow > 0: output.append("%-30s %g" % ("inflow", self.inflow))
return "\n".join(output)
def binedges(self):
"""Return numerical values for the the edges of bins."""
categorical = False
for bin in self.bins:
if not self._numeric(bin):
categorical = True
break
if categorical:
lows = map(lambda x: x - 0.5, xrange(len(self.bins)))
highs = map(lambda x: x + 0.5, xrange(len(self.bins)))
return zip(lows, highs)
else:
return self.bins[:]
def center(self, i):
"""Return the center (x value) of bin `i`."""
if self._numeric(self.bins[i]):
return (self.bins[i][0] + self.bins[i][1])/2.
else:
return self.bins[i]
def centers(self):
"""Return the centers of all bins."""
return [self.center(i) for i in range(len(self.bins))]
def centroid(self, i):
"""Return the centroid (average data x value) of bin `i`."""
if self.values[i] == 0.:
return self.center(i)
else:
return self._sumx[i] / self.values[i]
def centroids(self):
"""Return the centroids of all bins."""
return [self.centroid(i) for i in range(len(self.bins))]
def mean(self, decimals=Auto, sigfigs=Auto, string=False):
"""Calculate the mean of the distribution, using bin contents.
Keyword arguments:
decimals (int or `Auto`): number of digits after the decimal
            point to round the result, if not `Auto`
sigfigs (int or `Auto`): number of significant digits to round
the result, if not `Auto`
string (bool): return output as a string (forces number of digits)
"""
numer = 0.
denom = 0.
for bin, value in zip(self.bins, self.values):
if self._numeric(bin):
width = bin[1] - bin[0]
center = (bin[0] + bin[1])/2.
else:
raise ContainerException, "The mean of a categorical histogram is not meaningful"
numer += width * value * center
denom += width * value
output = numer/denom
if decimals is not Auto:
if string:
return mathtools.str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not Auto:
if string:
return mathtools.str_sigfigs(output, sigfigs)
else:
return mathtools.round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
def rms(self, decimals=Auto, sigfigs=Auto, string=False):
"""Calculate the root-mean-square of the distribution, using bin contents.
Keyword arguments:
decimals (int or `Auto`): number of digits after the decimal
            point to round the result, if not `Auto`
sigfigs (int or `Auto`): number of significant digits to round
the result, if not `Auto`
string (bool): return output as a string (forces number of digits)
"""
numer = 0.
denom = 0.
for bin, value in zip(self.bins, self.values):
if self._numeric(bin):
width = bin[1] - bin[0]
center = (bin[0] + bin[1])/2.
else:
raise ContainerException, "The RMS of a categorical histogram is not meaningful"
numer += width * value * center**2
denom += width * value
output = math.sqrt(numer/denom)
if decimals is not Auto:
if string:
return mathtools.str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not Auto:
if string:
return mathtools.str_sigfigs(output, sigfigs)
else:
return mathtools.round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
def stdev(self, decimals=Auto, sigfigs=Auto, string=False):
"""Calculate the standard deviation of the distribution, using bin contents.
Keyword arguments:
decimals (int or `Auto`): number of digits after the decimal
            point to round the result, if not `Auto`
sigfigs (int or `Auto`): number of significant digits to round
the result, if not `Auto`
string (bool): return output as a string (forces number of digits)
"""
numer1 = 0.
numer2 = 0.
denom = 0.
for bin, value in zip(self.bins, self.values):
if self._numeric(bin):
width = bin[1] - bin[0]
center = (bin[0] + bin[1])/2.
else:
raise ContainerException, "The standard deviation of a categorical histogram is not meaningful"
numer1 += width * value * center
numer2 += width * value * center**2
denom += width * value
output = math.sqrt(numer2/denom - (numer1/denom)**2)
if decimals is not Auto:
if string:
return mathtools.str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not Auto:
if string:
return mathtools.str_sigfigs(output, sigfigs)
else:
return mathtools.round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
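    # Worked example for the weighted moments above (a sketch): with bins
    # [(0., 1.), (1., 2.)] and values [3., 1.], every bin has width 1, so
    #   mean  = (3*0.5 + 1*1.5) / (3 + 1)              = 0.75
    #   rms   = sqrt((3*0.5**2 + 1*1.5**2) / (3 + 1))  = sqrt(0.75)   ~ 0.866
    #   stdev = sqrt(0.75 - 0.75**2)                   = sqrt(0.1875) ~ 0.433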
def store(self):
"""Return a _copy_ of the histogram's stored values (if any)."""
if self._lenstore is None:
return self._store[:]
else:
return self._store[0:self._lenstore]
def weights(self):
"""Return a _copy_ of the histogram's stored weights (if any)."""
if self._lenstore is None:
return self._weights[:]
else:
return self._weights[0:self._lenstore]
def clearbins(self):
"""Clear all bin values, including `underflow`, `overflow`, and `inflow`, and set `entries` to zero."""
self.entries = 0
self.values = numpy.zeros(len(self.bins), self.values.dtype)
self._sumx = numpy.zeros(len(self.bins), self._sumx.dtype)
self.underflow, self.overflow, self.inflow = 0., 0., 0.
def clearstore(self):
"""Clear the histogram's stored values (if any)."""
if self._lenstore is None:
self._store = []
self._weights = []
else:
self._lenstore = 0
def refill(self):
"""Clear and refill all bin values using the stored values (if any)."""
self.clearbins()
self.fill(self._store, self._weights, self._lenstore, fillstore=False)
def support(self):
"""Return the widest interval of bin values with non-zero contents."""
all_numeric = True
for bin in self.bins:
if not self._numeric(bin):
all_numeric = False
break
xmin, xmax = None, None
output = []
for bin, value in zip(self.bins, self.values):
if value > 0.:
if all_numeric:
x1, x2 = bin
if xmin is None or x1 < xmin: xmin = x1
if xmax is None or x2 > xmax: xmax = x2
else:
output.append(bin)
if all_numeric: return xmin, xmax
else: return output
def scatter(self, centroids=False, poisson=False, **frameargs):
"""Return the bins and values of the histogram as a Scatter plot.
Arguments:
centroids (bool): if `False`, use bin centers; if `True`,
use centroids
poisson (bool): if `False`, do not create error bars; if
`True`, create error bars assuming the bin contents to
belong to Poisson distributions
Note:
Asymmetric Poisson tail-probability is used for error bars
on quantities up to 20 (using a pre-calculated table);
for 20 and above, a symmetric square root is used
(approximating Poisson(x) ~ Gaussian(x) for x >> 1).
"""
kwds = {"linewidth": self.linewidth,
"linestyle": self.linestyle,
"linecolor": self.linecolor}
kwds.update(frameargs)
def poisson_errorbars(value):
if value < 20:
return {0: (0, 1.1475924708896912),
1: (-1, 1.3593357241843194),
2: (-2, 1.5187126521158518),
3: (-2.1423687562878797, 1.7239415816257235),
4: (-2.2961052720689565, 1.9815257924746845),
5: (-2.4893042928478337, 2.2102901353154891),
6: (-2.6785495948620621, 2.418184093020642),
7: (-2.8588433484599989, 2.6100604797946687),
8: (-3.0300038654056323, 2.7891396571794473),
9: (-3.1927880092968906, 2.9576883353481378),
10: (-3.348085587280849, 3.1173735938098446),
11: (-3.4967228532132424, 3.2694639669834089),
12: (-3.639421017629985, 3.4149513337692667),
13: (-3.7767979638286704, 3.5546286916146812),
14: (-3.9093811537390764, 3.6891418894420838),
15: (-4.0376219573077776, 3.8190252444691453),
16: (-4.1619085382943979, 3.9447267851063259),
17: (-4.2825766762666433, 4.0666265902382577),
18: (-4.3999186228618044, 4.185050401352413),
19: (-4.5141902851535463, 4.3002799167131514)}[value]
else:
return -math.sqrt(value), math.sqrt(value)
if poisson: values = numpy.empty((len(self.bins), 4), dtype=numpy.float)
else: values = numpy.empty((len(self.bins), 2), dtype=numpy.float)
if centroids: values[:,0] = self.centroids()
else: values[:,0] = self.centers()
values[:,1] = self.values
if poisson:
for i in range(len(self.bins)):
values[i,2:4] = poisson_errorbars(self.values[i])
return Scatter(values=values, sig=("x", "y", "eyl", "ey"), **kwds)
else:
return Scatter(values=values, sig=("x", "y"), **kwds)
### to reproduce the table:
# from scipy.stats import poisson
# from scipy.optimize import bisect
# from math import sqrt
# def calculate_entry(value):
# def down(x):
# if x < 1e-5:
# return down(1e-5) - x
# else:
# if value in (0, 1, 2):
# return poisson.cdf(value, x) - 1. - 2.*0.3413
# else:
# return poisson.cdf(value, x) - poisson.cdf(value, value) - 0.3413
# def up(x):
# if x < 1e-5:
# return up(1e-5) - x
# else:
# if value in (0, 1, 2):
# return poisson.cdf(value, x) - 1. + 2.*0.3413
# else:
# return poisson.cdf(value, x) - poisson.cdf(value, value) + 0.3413
# table[value] = bisect(down, -100., 100.) - value, bisect(up, -100., 100.) - value
# if table[value][0] + value < 0.:
# table[value] = -value, table[value][1]
# table = {}
# for i in range(20):
# calculate_entry(i)
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
xmin, ymin, xmax, ymax = None, None, None, None
all_numeric = True
for bin, value in zip(self.bins, self.values):
if self._numeric(bin):
x1, x2 = bin
if (not xlog or x1 > 0.) and (xmin is None or x1 < xmin): xmin = x1
if (not xlog or x2 > 0.) and (xmax is None or x2 > xmax): xmax = x2
else:
all_numeric = False
if (not ylog or value > 0.) and (ymin is None or value < ymin): ymin = value
if (not ylog or value > 0.) and (ymax is None or value > ymax): ymax = value
if not all_numeric:
xmin, xmax = -0.5, len(self.bins) - 0.5
if xmin is None and xmax is None:
if xlog:
xmin, xmax = 0.1, 1.
else:
xmin, xmax = 0., 1.
if ymin is None and ymax is None:
if ylog:
ymin, ymax = 0.1, 1.
else:
ymin, ymax = 0., 1.
if xmin == xmax:
if xlog:
xmin, xmax = xmin/2., xmax*2.
else:
xmin, xmax = xmin - 0.5, xmax + 0.5
if ymin == ymax:
if ylog:
ymin, ymax = ymin/2., ymax*2.
else:
ymin, ymax = ymin - 0.5, ymax + 0.5
return xmin, ymin, xmax, ymax
class Histogram(HistogramAbstract):
"""Represent a 1-D histogram with uniform bins.
Arguments:
numbins (int): number of bins
low (float): low edge of first bin
high (float): high edge of last bin
storelimit (int or `None`): maximum number of values to store,
so that the histogram bins can be redrawn; `None` means no
limit
linewidth (float): scale factor for the line used to draw the
histogram border
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color of the boundary
line of the histogram area; no line if `None`
fillcolor (string, color, or `None`): fill color of the
histogram area; hollow if `None`
gap (float): space drawn between bins, as a fraction of the bin
width
`**frameargs`: keyword arguments for the coordinate frame
Public members:
bins (list of `(low, high)` pairs): bin intervals (x axis)
values (numpy array of floats): contents of each bin (y axis),
has the same length as `bins`
entries (int): unweighted number of entries accumulated so far
underflow (float): number of values encountered that are less
than all bin ranges
overflow (float): number of values encountered that are greater
than all bin ranges
`storelimit`, `linewidth`, `linestyle`, `linecolor`,
`fillcolor`, and frame arguments.
Behavior:
The histogram bins are initially fixed, but can be 'reshaped'
if `entries <= storelimit`.
After construction, do not set the bins directly; use `reshape`
instead.
Setting `linecolor = None` is the only proper way to direct the
graphics backend to not draw a line around the histogram
border.
"""
def __init__(self, numbins, low, high, data=None, weights=None, storelimit=0, linewidth=1., linestyle="solid", linecolor="black", fillcolor=None, gap=0, **frameargs):
self.reshape(numbins, low, high, refill=False, warnings=False)
HistogramAbstract.__init__(self, self.bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs)
if data is not None:
if weights is not None:
self.fill(data, weights)
else:
self.fill(data)
def low(self):
"""Return the low edge of the lowest bin."""
return self._low
def high(self):
"""Return the high edge of the highest bin."""
return self._high
def __repr__(self):
try:
xlabel = " \"%s\"" % self.xlabel
except AttributeError:
xlabel = ""
return "<Histogram %d %g %g%s at 0x%x>" % (len(self.bins), self.low(), self.high(), xlabel, id(self))
def reshape(self, numbins, low=None, high=None, refill=True, warnings=True):
"""Change the bin structure of the histogram and refill its contents.
Arguments:
numbins (int): new number of bins
low (float or `None`): new low edge, or `None` to keep the
old one
high (float or `None`): new high edge, or `None` to keep
the old one
refill (bool): call `refill` after setting the bins
warnings (bool): raise `ContainerException` if `storelimit
< entries`: that is, if the reshaping cannot be performed
without losing data
"""
if low is None: low = self.low()
if high is None: high = self.high()
if warnings:
if self._lenstore is None:
if len(self._store) < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
else:
if self._lenstore < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
self._low, self._high, self._factor = low, high, numbins/float(high - low)
self._binwidth = (high-low)/float(numbins)
lows = numpy.arange(low, high, self._binwidth)
highs = lows + self._binwidth
self.bins = zip(lows, highs)
if refill: self.refill()
def optimize(self, numbins=utilities.binning, ranges=utilities.calcrange_quartile):
"""Optimize the number of bins and/or range of the histogram.
Arguments:
numbins (function, int, or `None`): function that returns
an optimal number of bins, given a dataset, or a simple
number of bins, or `None` to leave the number of bins as it is
ranges (function, (low, high), or `None`): function that
returns an optimal low, high range, given a dataset, or an
explicit low, high tuple, or `None` to leave the ranges as
they are
"""
if numbins is Auto: numbins = utilities.binning
if ranges is Auto: ranges = utilities.calcrange_quartile
# first do the ranges
if ranges is None:
low, high = self.low(), self.high()
elif isinstance(ranges, (tuple, list)) and len(ranges) == 2 and isinstance(ranges[0], (numbers.Number, numpy.number)) and isinstance(ranges[1], (numbers.Number, numpy.number)):
low, high = ranges
elif callable(ranges):
if self._lenstore is None:
if len(self._store) < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
else:
if self._lenstore < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
low, high = ranges(self._store, self.__dict__.get("xlog", False))
else:
raise ContainerException, "The 'ranges' argument must be a function, (low, high), or `None`."
# then do the binning
if numbins is None:
numbins = len(self.bins)
elif isinstance(numbins, (int, long)):
pass
elif callable(numbins):
if self._lenstore is None:
if len(self._store) < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
storecopy = numpy.array(filter(lambda x: low <= x < high, self._store))
else:
if self._lenstore < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
storecopy = self._store[0:self._lenstore]
numbins = numbins(storecopy, low, high)
else:
raise ContainerException, "The 'numbins' argument must be a function, int, or `None`."
self.reshape(numbins, low, high)
def fill(self, values, weights=None, limit=None, fillstore=True):
"""Put one or many values into the histogram.
Arguments:
values (float or list of floats): value or values to put
into the histogram
weights (float, list of floats, or `None`): weights for
each value; all have equal weight if `weights = None`.
limit (int or `None`): maximum number of values, weights to
put into the histogram
fillstore (bool): also fill the histogram's store (if any)
Behavior:
`itertools.izip` is used to loop over values and weights,
filling the histogram. If values and weights have
different lengths, the filling operation would be truncated
to the shorter list.
Histogram weights are usually either 1 or 1/(value uncertainty)**2.
"""
# handle the case of being given only one value
if isinstance(values, (numbers.Number, numpy.number)):
values = [values]
if weights is None:
weights = numpy.ones(len(values), numpy.float)
for counter, (value, weight) in enumerate(itertools.izip(values, weights)):
if limit is not None and counter >= limit: break
if fillstore:
if self._lenstore is None:
self._store.append(value)
self._weights.append(weight)
elif self._lenstore < self.storelimit:
self._store[self._lenstore] = value
self._weights[self._lenstore] = weight
self._lenstore += 1
index = int(math.floor((value - self._low)*self._factor))
if index < 0:
self.underflow += weight
elif index >= len(self.bins):
self.overflow += weight
else:
self.values[index] += weight
self._sumx[index] += weight * value
self.entries += 1
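# Illustrative usage of Histogram (a sketch, not part of the original module):
#
#   h = Histogram(10, 0., 1., data=[0.05, 0.15, 0.95, 1.5], storelimit=None)
#   h.values      # one count each in bins 0, 1 and 9
#   h.overflow    # 1.0, because 1.5 lies above the last bin
#   h.reshape(20) # allowed here since storelimit=None keeps the full store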
class HistogramNonUniform(HistogramAbstract):
"""Represent a 1-D histogram with uniform bins.
Arguments:
bins (list of `(low, high)` pairs): user-defined bin intervals
storelimit (int or `None`): maximum number of values to store,
so that the histogram bins can be redrawn; `None` means no
limit
linewidth (float): scale factor for the line used to draw the
histogram border
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color of the boundary
line of the histogram area; no line if `None`
fillcolor (string, color, or `None`): fill color of the
histogram area; hollow if `None`
gap (float): space drawn between bins, as a fraction of the bin
width
`**frameargs`: keyword arguments for the coordinate frame
Public members:
values (numpy array of floats): contents of each bin (y axis),
has the same length as `bins`
entries (int): unweighted number of entries accumulated so far
underflow (float): number of values encountered that are less
than all bin ranges
overflow (float): number of values encountered that are greater
than all bin ranges
inflow (float): number of values encountered that are between
bins (if there are any gaps between user-defined bin intervals)
`bins`, `storelimit`, `linewidth`, `linestyle`, `linecolor`,
`fillcolor`, and frame arguments.
Behavior:
If any bin intervals overlap, values will be entered into the
first of the two overlapping bins.
After construction, do not set the bins directly; use `reshape`
instead.
Setting `linecolor = None` is the only proper way to direct the
graphics backend to not draw a line around the histogram
border.
"""
def __init__(self, bins, data=None, weights=None, storelimit=0, linewidth=1., linestyle="solid", linecolor="black", fillcolor=None, gap=0, **frameargs):
HistogramAbstract.__init__(self, bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs)
self._low, self._high = None, None
for low, high in self.bins:
if self._low is None or low < self._low:
self._low = low
if self._high is None or high > self._high:
self._high = high
if data is not None:
if weights is not None:
self.fill(data, weights)
else:
self.fill(data)
def low(self):
"""Return the low edge of the lowest bin."""
return self._low
def high(self):
"""Return the high edge of the highest bin."""
return self._high
def __repr__(self):
try:
xlabel = " \"%s\"" % self.xlabel
except AttributeError:
xlabel = ""
return "<HistogramNonUniform %d%s at 0x%x>" % (len(self.bins), xlabel, id(self))
def reshape(self, bins, refill=True, warnings=True):
"""Change the bin structure of the histogram and refill its contents.
Arguments:
bins (list of `(low, high)` pairs): user-defined bin intervals
refill (bool): call `refill` after setting the bins
warnings (bool): raise `ContainerException` if `storelimit
< entries`: that is, if the reshaping cannot be performed
without losing data
"""
if warnings:
if self._lenstore is None:
if len(self._store) < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
else:
if self._lenstore < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
self.bins = bins
if refill: self.refill()
def fill(self, values, weights=None, limit=None, fillstore=True):
"""Put one or many values into the histogram.
Arguments:
values (float or list of floats): value or values to put
into the histogram
weights (float, list of floats, or `None`): weights for
each value; all have equal weight if `weights = None`.
limit (int or `None`): maximum number of values, weights to
put into the histogram
fillstore (bool): also fill the histogram's store (if any)
Behavior:
`itertools.izip` is used to loop over values and weights,
filling the histogram. If values and weights have
            different lengths, the filling operation is truncated
to the shorter list.
Histogram weights are usually either 1 or 1/(value uncertainty)**2.
"""
# handle the case of being given only one value
if isinstance(values, (numbers.Number, numpy.number)):
values = [values]
if weights is None:
weights = numpy.ones(len(values), numpy.float)
for counter, (value, weight) in enumerate(itertools.izip(values, weights)):
if limit is not None and counter >= limit: break
if fillstore:
if self._lenstore is None:
self._store.append(value)
self._weights.append(weight)
elif self._lenstore < self.storelimit:
self._store[self._lenstore] = value
self._weights[self._lenstore] = weight
self._lenstore += 1
filled = False
less_than_all = True
greater_than_all = True
for i, (low, high) in enumerate(self.bins):
if low <= value < high:
self.values[i] += weight
self._sumx[i] += weight * value
filled = True
break
elif not (value < low): less_than_all = False
elif not (value >= high): greater_than_all = False
if not filled:
if less_than_all: self.underflow += weight
elif greater_than_all: self.overflow += weight
else: self.inflow += weight
self.entries += 1
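# Illustrative usage of HistogramNonUniform (a sketch): with explicit, possibly
# non-contiguous bins,
#
#   h = HistogramNonUniform([(0., 1.), (1., 5.), (10., 20.)], data=[0.5, 7.0])
#   h.values   # [1., 0., 0.]
#   h.inflow   # 1.0, because 7.0 falls in the gap between (1., 5.) and (10., 20.)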
class HistogramCategorical(HistogramAbstract):
"""Represent a 1-D histogram with categorical bins (a bar chart).
Arguments:
bins (list of strings): names of the categories
storelimit (int or `None`): maximum number of values to store,
so that the histogram bins can be redrawn; `None` means no
limit
linewidth (float): scale factor for the line used to draw the
histogram border
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color of the boundary
line of the histogram area; no line if `None`
fillcolor (string, color, or `None`): fill color of the
histogram area; hollow if `None`
gap (float): space drawn between bins, as a fraction of the bin
width
`**frameargs`: keyword arguments for the coordinate frame
Public members:
values (numpy array of floats): contents of each bin (y axis),
has the same length as `bins`
entries (int): unweighted number of entries accumulated so far
inflow (float): number of values encountered that do not belong
to any bins
`bins`, `storelimit`, `linewidth`, `linestyle`, `linecolor`,
`fillcolor`, and frame arguments.
Behavior:
After construction, never change the bins.
Setting `linecolor = None` is the only proper way to direct the
graphics backend to not draw a line around the histogram
border.
"""
def __init__(self, bins, data=None, weights=None, storelimit=0, linewidth=1., linestyle="solid", linecolor="black", fillcolor=None, gap=0.1, **frameargs):
self._catalog = dict(map(lambda (x, y): (y, x), enumerate(bins)))
HistogramAbstract.__init__(self, bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs)
if data is not None:
if weights is not None:
self.fill(data, weights)
else:
self.fill(data)
def __repr__(self):
try:
xlabel = " \"%s\"" % self.xlabel
except AttributeError:
xlabel = ""
return "<HistogramCategorical %d%s at 0x%x>" % (len(self.bins), xlabel, id(self))
def low(self):
"""Return the effective low edge, with all categories treated as integers (-0.5)."""
return -0.5
def high(self):
"""Return the effective low edge, with all categories treated as integers (numbins - 0.5)."""
return len(self.bins) - 0.5
def top(self, N):
"""Return a simplified histogram containing only the top N values (sorted)."""
pairs = zip(self.bins, self.values)
pairs.sort(lambda a, b: cmp(b[1], a[1]))
othervalue = sum([values for bins, values in pairs[N:]])
bins, values = zip(*pairs[:N])
h = HistogramCategorical(list(bins) + ["other"])
h.values = numpy.array(list(values) + [othervalue])
for name, value in self.__dict__.items():
if name not in ("bins", "values"):
h.__dict__[name] = value
return h
def binorder(self, *neworder):
"""Specify a new order for the bins with a list of string arguments (updating bin values).
All arguments must be the names of existing bins.
If a bin name is missing, it will be deleted!
"""
reverse = dict(map(lambda (x, y): (y, x), enumerate(self.bins)))
indicies = []
for name in neworder:
if name not in self.bins:
raise ContainerException, "Not a recognized bin name: \"%s\"." % name
indicies.append(reverse[name])
newinflow = 0.
for i, name in enumerate(self.bins):
if name not in neworder:
newinflow += self.values[i]
self.bins = [self.bins[i] for i in indicies]
indicies = numpy.array(indicies)
self.values = self.values[indicies]
self._sumx = self._sumx[indicies]
self.inflow += newinflow
def fill(self, values, weights=None, limit=None, fillstore=True):
"""Put one or many values into the histogram.
Arguments:
            values (string or list of strings): category name or names to put
into the histogram
weights (float, list of floats, or `None`): weights for
each value; all have equal weight if `weights = None`.
limit (int or `None`): maximum number of values, weights to
put into the histogram
fillstore (bool): also fill the histogram's store (if any)
Behavior:
`itertools.izip` is used to loop over values and weights,
filling the histogram. If values and weights have
            different lengths, the filling operation is truncated
to the shorter list.
Histogram weights are usually either 1 or 1/(value uncertainty)**2.
"""
# handle the case of being given only one value
if isinstance(values, basestring):
values = [values]
if weights is None:
weights = numpy.ones(len(values), numpy.float)
for counter, (value, weight) in enumerate(itertools.izip(values, weights)):
if limit is not None and counter >= limit: break
try:
value = self._catalog[value]
self.values[value] += weight
self._sumx[value] += weight * value
except KeyError:
value = -1
self.inflow += weight
self.entries += 1
if fillstore:
if self._lenstore is None:
self._store.append(value)
self._weights.append(weight)
elif self._lenstore < self.storelimit:
self._store[self._lenstore] = value
self._weights[self._lenstore] = weight
self._lenstore += 1
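# Illustrative usage of HistogramCategorical (a sketch):
#
#   h = HistogramCategorical(["red", "green", "blue"], data=["red", "red", "purple"])
#   h.values   # [2., 0., 0.]
#   h.inflow   # 1.0, because "purple" is not a known category
#   h.top(2)   # keep the two largest bins and merge the rest into "other"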
######################################################### Scatter plots, with and without error bars, and timeseries
class Scatter(Frame):
"""Represents a scatter of X-Y points, a line graph, and error bars.
Signatures::
Scatter(values, sig, ...)
Scatter(x, y, [ex,] [ey,] [exl,] [eyl,] ...)
Arguments for signature 1:
values (numpy array of N-dimensional points): X-Y points to
draw (with possible error bars)
sig (list of strings): how to interpret each N-dimensional
point, e.g. `('x', 'y', 'ey')` for triplets of x, y, and y
error bars
Arguments for signature 2:
x (list of floats): x values
y (list of floats): y values
ex (list of floats or `None`): symmetric or upper errors in x;
`None` for no x error bars
ey (list of floats or `None`): symmetric or upper errors in y
exl (list of floats or `None`): asymmetric lower errors in x
eyl (list of floats or `None`): asymmetric lower errors in y
Arguments for both signatures:
limit (int or `None`): maximum number of points to draw
(randomly selected if less than total number of points)
calcrange (function): a function that chooses a reasonable range
to plot, based on the data (overruled by `xmin`, `xmax`, etc.)
marker (string or `None`): symbol to draw at each point; `None`
for no markers (e.g. just lines)
markersize (float): scale factor to resize marker points
markercolor (string, color, or `None`): color of the marker
points; hollow markers if `None`
markeroutline (string, color, or `None`): color of the outline
of each marker; no outline if `None`
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color of a line
connecting all points; no line if `None`
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`values`, `sig`, `limit`, `calcrange`, `marker`, `markersize`,
`markercolor`, `markeroutline`, `lines`, `linewidth`,
`linestyle`, `linecolor`, and frame arguments.
Behavior:
Points are stored internally as an N-dimensional numpy array of
`values`, with meanings specified by `sig`.
Input points are _copied_, not set by reference, with both
input methods. The set-by-signature method is likely to be
faster for large datasets.
Setting `limit` to a value other than `None` restricts the
number of points to draw in the graphical backend, something
that may be necessary if the number of points is very large. A
random subset is selected when the scatter plot is drawn.
The numerical `limit` refers to the number of points drawn
*within a coordinate frame,* so zooming in will reveal more
points.
Since the input set of points is not guaranteed to be
monatonically increasing in x, a line connecting all points
might cross itself.
Setting `marker = None` is the only proper way to direct the
        graphics backend to not draw a marker at each visible point.
        Setting `linecolor = None` is the only proper way to direct the
        graphics backend to not draw a line connecting all visible points.
Exceptions:
At least `x` and `y` are required.
"""
_not_frameargs = ["sig", "values", "limit", "calcrange", "marker", "markersize", "markercolor", "markeroutline", "linewidth", "linestyle", "linecolor"]
def __init__(self, values=[], sig=None, x=None, y=None, ex=None, ey=None, exl=None, eyl=None, limit=None, calcrange=utilities.calcrange, marker="circle", markersize=1., markercolor="black", markeroutline=None, linewidth=1., linestyle="solid", linecolor=None, **frameargs):
self.limit, self.calcrange = limit, calcrange
self.marker, self.markersize, self.markercolor, self.markeroutline, self.linewidth, self.linestyle, self.linecolor = marker, markersize, markercolor, markeroutline, linewidth, linestyle, linecolor
if sig is None:
self.setvalues(x, y, ex, ey, exl, eyl)
else:
self.setbysig(values, sig)
Frame.__init__(self, **frameargs)
def __repr__(self):
if self.limit is None:
return "<Scatter %d (draw all) at 0x%x>" % (len(self.values), id(self))
else:
return "<Scatter %d (draw %d) at 0x%x>" % (len(self.values), self.limit, id(self))
def index(self):
"""Returns a dictionary of sig values ("x", "y", etc.) to `values` index.
Example usage::
scatter.values[0:1000,scatter.index()["ex"]]
returns the first thousand x error bars.
"""
return dict(zip(self.sig, range(len(self.sig))))
def sort(self, key="x"):
"""Sorts the data so that lines do not intersect themselves."""
self.values = self.values[self.values[:,self.index()[key]].argsort(),]
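    # Illustrative construction (a sketch; both forms describe the same points):
    #
    #   Scatter(x=[1., 2., 3.], y=[4., 5., 6.], ey=[0.1, 0.2, 0.3])
    #   Scatter(values=[(1., 4., 0.1), (2., 5., 0.2), (3., 6., 0.3)], sig=("x", "y", "ey"))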
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
if len(self.values) == 0:
self._xlimited_values = numpy.array([], dtype=numpy.float)
self._limited_values = numpy.array([], dtype=numpy.float)
return
index = self.index()
# select elements within the given ranges
mask = numpy.ones(len(self.values), dtype="bool")
x = self.values[:,index["x"]]
y = self.values[:,index["y"]]
if "ex" in index:
numpy.logical_and(mask, (x + abs(self.values[:,index["ex"]]) > xmin), mask)
else:
numpy.logical_and(mask, (x > xmin), mask)
if "exl" in index:
numpy.logical_and(mask, (x - abs(self.values[:,index["exl"]]) < xmax), mask)
elif "ex" in index:
numpy.logical_and(mask, (x - abs(self.values[:,index["ex"]]) < xmax), mask)
else:
numpy.logical_and(mask, (x < xmax), mask)
self._xlimited_values = self.values[mask]
if "ey" in index:
numpy.logical_and(mask, (y + abs(self.values[:,index["ey"]]) > ymin), mask)
else:
numpy.logical_and(mask, (y > ymin), mask)
if "eyl" in index:
numpy.logical_and(mask, (y - abs(self.values[:,index["eyl"]]) < ymax), mask)
elif "ey" in index:
numpy.logical_and(mask, (y - abs(self.values[:,index["ey"]]) < ymax), mask)
else:
numpy.logical_and(mask, (y < ymax), mask)
inrange = self.values[mask]
# select an unbiased subset
if self.limit is not None and self.limit < len(inrange):
# (an earlier approach based on numpy.random.random_integers + unique +
# resize was biased toward the low end of the range, so use random.sample)
self._limited_values = inrange[random.sample(xrange(len(inrange)), self.limit)]
else:
self._limited_values = inrange
def setbysig(self, values, sig=("x", "y")):
"""Sets the values using a signature.
Arguments:
values (numpy array of N-dimensional points): X-Y points to
draw (with possible error bars)
sig (list of strings): how to interpret each N-dimensional
point, e.g. `('x', 'y', 'ey')` for triplets of x, y, and y
error bars
Exceptions:
At least `x` and `y` are required.
"""
if "x" not in sig or "y" not in sig:
raise ContainerException, "Signature must contain \"x\" and \"y\""
self.sig = sig
self.values = numpy.array(values, dtype=numpy.float)
def setvalues(self, x=None, y=None, ex=None, ey=None, exl=None, eyl=None):
"""Sets the values with separate lists.
Arguments:
x (list of floats or strings): x values
y (list of floats or strings): y values
ex (list of floats or `None`): symmetric or upper errors in x;
`None` for no x error bars
ey (list of floats or `None`): symmetric or upper errors in y
exl (list of floats or `None`): asymmetric lower errors in x
eyl (list of floats or `None`): asymmetric lower errors in y
Exceptions:
At least `x` and `y` are required.
"""
if x is None or y is None:
raise ContainerException, "Both \"x\" and \"y\" must be provided"
longdim = 0
shortdim = 0
if x is not None:
longdim = max(longdim, len(x))
shortdim += 1
if y is not None:
longdim = max(longdim, len(y))
shortdim += 1
if ex is not None:
longdim = max(longdim, len(ex))
shortdim += 1
if ey is not None:
longdim = max(longdim, len(ey))
shortdim += 1
if exl is not None:
longdim = max(longdim, len(exl))
shortdim += 1
if eyl is not None:
longdim = max(longdim, len(eyl))
shortdim += 1
self.sig = []
self.values = numpy.empty((longdim, shortdim), dtype=numpy.float)
if x is not None:
x = numpy.array(x)
if x.dtype.char == "?":
x = numpy.array(x, dtype=numpy.string_)
if x.dtype.char in numpy.typecodes["Character"] + "Sa":
if len(x) > 0:
unique = numpy.unique(x)
self._xticks = dict(map(lambda (i, val): (float(i+1), val), enumerate(unique)))
strtoval = dict(map(lambda (i, val): (val, float(i+1)), enumerate(unique)))
x = numpy.apply_along_axis(numpy.vectorize(lambda s: strtoval[s]), 0, x)
else:
x = numpy.array([], dtype=numpy.float)
self.values[:,len(self.sig)] = x
self.sig.append("x")
if y is not None:
y = numpy.array(y)
if y.dtype.char == "?":
y = numpy.array(y, dtype=numpy.string_)
if y.dtype.char in numpy.typecodes["Character"] + "Sa":
if len(y) > 0:
unique = numpy.unique(y)
self._yticks = dict(map(lambda (i, val): (float(i+1), val), enumerate(unique)))
strtoval = dict(map(lambda (i, val): (val, float(i+1)), enumerate(unique)))
y = numpy.apply_along_axis(numpy.vectorize(lambda s: strtoval[s]), 0, y)
else:
y = numpy.array([], dtype=numpy.float)
self.values[:,len(self.sig)] = y
self.sig.append("y")
if ex is not None:
self.values[:,len(self.sig)] = ex
self.sig.append("ex")
if ey is not None:
self.values[:,len(self.sig)] = ey
self.sig.append("ey")
if exl is not None:
self.values[:,len(self.sig)] = exl
self.sig.append("exl")
if eyl is not None:
self.values[:,len(self.sig)] = eyl
self.sig.append("eyl")
def append(self, x, y, ex=None, ey=None, exl=None, eyl=None):
"""Append one point to the dataset.
Arguments:
x (float): x value
y (float): y value
ex (float or `None`): symmetric or upper error in x
ey (list of floats or `None`): symmetric or upper error in y
exl (list of floats or `None`): asymmetric lower error in x
eyl (list of floats or `None`): asymmetric lower error in y
Exceptions:
Input arguments must match the signature of the dataset
(`sig`).
Considerations:
This method is provided for convenience; it is more
efficient to input all points at once during
construction.
"""
index = self.index()
oldlen = self.values.shape[0]
oldwidth = self.values.shape[1]
for i in self.sig:
if eval(i) is None:
raise ContainerException, "This %s instance requires %s" % (self.__class__.__name__, i)
newvalues = [0.]*oldwidth
if x is not None: newvalues[index["x"]] = x
if y is not None: newvalues[index["y"]] = y
if ex is not None: newvalues[index["ex"]] = ex
if ey is not None: newvalues[index["ey"]] = ey
if exl is not None: newvalues[index["exl"]] = exl
if eyl is not None: newvalues[index["eyl"]] = eyl
self.values.resize((oldlen+1, oldwidth), refcheck=False)
self.values[oldlen,:] = newvalues
def _strip(self, which, limited=False):
try:
index = self.index()[which]
except KeyError:
raise ContainerException, "The signature doesn't have any \"%s\" variable" % which
if limited: return self._limited_values[:,index]
else: return self.values[:,index]
def x(self, limited=False):
"""Return a 1-D numpy array of x values.
Arguments:
limited (bool): if True, only return randomly selected
values (must be called after `_prepare()`)
"""
return self._strip("x", limited)
def y(self, limited=False):
"""Return a 1-D numpy array of y values.
Arguments:
limited (bool): if True, only return randomly selected
values (must be called after `_prepare()`)
"""
return self._strip("y", limited)
def ex(self, limited=False):
"""Return a 1-D numpy array of x error bars.
Arguments:
limited (bool): if True, only return randomly selected
values (must be called after `_prepare()`)
"""
return self._strip("ex", limited)
def ey(self, limited=False):
"""Return a 1-D numpy array of y error bars.
Arguments:
limited (bool): if True, only return randomly selected
values (must be called after `_prepare()`)
"""
return self._strip("ey", limited)
def exl(self, limited=False):
"""Return a 1-D numpy array of x lower error bars.
Arguments:
limited (bool): if True, only return randomly selected
values (must be called after `_prepare()`)
"""
return self._strip("exl", limited)
def eyl(self, limited=False):
"""Return a 1-D numpy array of y lower error bars.
Arguments:
limited (bool): if True, only return randomly selected
values (must be called after `_prepare()`)
"""
return self._strip("eyl", limited)
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
x = self.x()
y = self.y()
# if we're plotting logarithmically, only the positive values are relevant for ranges
if xlog or ylog:
mask = numpy.ones(len(self.values), dtype="bool")
if xlog:
numpy.logical_and(mask, (x > 0.), mask)
if ylog:
numpy.logical_and(mask, (y > 0.), mask)
x = x[mask]
y = y[mask]
if len(x) < 2:
if xlog:
xmin, xmax = 0.1, 1.
else:
xmin, xmax = 0., 1.
if ylog:
ymin, ymax = 0.1, 1.
else:
ymin, ymax = 0., 1.
elif callable(self.calcrange):
xmin, xmax = self.calcrange(x, xlog)
ymin, ymax = self.calcrange(y, ylog)
else:
raise ContainerException, "Scatter.calcrange must be a function."
if xmin == xmax:
if xlog:
xmin, xmax = xmin/2., xmax*2.
else:
xmin, xmax = xmin - 0.5, xmax + 0.5
if ymin == ymax:
if ylog:
ymin, ymax = ymin/2., ymax*2.
else:
ymin, ymax = ymin - 0.5, ymax + 0.5
return xmin, ymin, xmax, ymax
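# A minimal usage sketch for Scatter (illustrative only, kept as a comment so
# nothing runs on import; the data values are made up). It shows the two input
# modes described in the docstring: separate lists and an array plus signature.
#
#     xs = [0., 1., 2., 3.]
#     ys = [0., 1., 4., 9.]
#     s1 = Scatter(x=xs, y=ys, ey=[0.1, 0.1, 0.2, 0.3], marker="circle")
#     s1.append(4., 16., ey=0.4)          # sig is ("x", "y", "ey"), so ey is required
#     s2 = Scatter(values=zip(xs, ys), sig=("x", "y"), linecolor="black")
#     s2.sort("x")                        # keeps the connecting line from crossing itself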
class TimeSeries(Scatter):
"""A scatter-plot in which the x axis is interpreted as time strings.
Arguments:
informat (string or `None`): time formatting string for
interpreting x data (see `time documentation
<http://docs.python.org/library/time.html#time.strftime>`_)
outformat (string): time formatting string for plotting
subseconds (bool): if True, interpret ".xxx" at the end of
the string as fractions of a second
t0 (number or time-string): the time from which to start
counting; zero is equivalent to Jan 1, 1970
x (list of strings): time strings for the x axis
y (list of floats): y values
ex (list of floats or `None`): symmetric or upper errors in x
(in seconds); `None` for no x error bars
ey (list of floats or `None`): symmetric or upper errors in y
exl (list of floats or `None`): asymmetric lower errors in x
eyl (list of floats or `None`): asymmetric lower errors in y
limit (int or `None`): maximum number of points to draw
(randomly selected if less than total number of points)
sortbytime (bool): if True, sort the data in increasing
temporal order
calcrange (function): a function that chooses a reasonable range
to plot, based on the data (overruled by `xmin`, `xmax`, etc.)
marker (string or `None`): symbol to draw at each point; `None`
for no markers (e.g. just lines)
markersize (float): scale factor to resize marker points
markercolor (string, color, or `None`): color of the marker
points; hollow markers if `None`
markeroutline (string, color, or `None`): color of the outline
of each marker; no outline if `None`
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color of a line
connecting all points; no line if `None`
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`informat`, `outformat`, `values`, `sig`, `limit`, `calcrange`,
`marker`, `markersize`, `markercolor`, `markeroutline`,
`linewidth`, `linestyle`, `linecolor`, and frame arguments.
Behavior:
Points are stored internally as an N-dimensional numpy array of
`values`, with meanings specified by `sig`.
Input points are _copied_, not set by reference, with both
input methods. The set-by-signature method is likely to be
faster for large datasets.
Setting `limit` to a value other than `None` restricts the
number of points to draw in the graphical backend, something
that may be necessary if the number of points is very large. A
random subset is selected when the scatter plot is drawn.
The numerical `limit` refers to the number of points drawn
*within a coordinate frame,* so zooming in will reveal more
points.
Since the input set of points is not guaranteed to be
monotonically increasing in x, a line connecting all points
might cross itself.
Setting `marker = None` is the only proper way to direct the
graphics backend not to draw a marker at each visible point.
Setting `linecolor = None` is the only proper way to direct the
graphics backend not to draw a line connecting all visible points.
Exceptions:
At least `x` and `y` are required.
"""
_not_frameargs = Scatter._not_frameargs + ["informat", "outformat"]
def __init__(self, informat="%Y-%m-%d %H:%M:%S", outformat="%Y-%m-%d %H:%M:%S", subseconds=False, t0=0., x=None, y=None, ex=None, ey=None, exl=None, eyl=None, limit=None, sortbytime=True, calcrange=utilities.calcrange, marker=None, markersize=1., markercolor="black", markeroutline=None, linewidth=1., linestyle="solid", linecolor="black", **frameargs):
self.__dict__["informat"] = informat
self.__dict__["outformat"] = outformat
self._subseconds, self._t0 = subseconds, t0
Scatter.__init__(self, x=utilities.fromtimestring(x, informat, subseconds, t0), y=y, ex=ex, ey=ey, exl=exl, eyl=eyl, limit=limit, calcrange=calcrange, marker=marker, markersize=markersize, markercolor=markercolor, markeroutline=markeroutline, linewidth=linewidth, linestyle=linestyle, linecolor=linecolor, **frameargs)
if sortbytime: self.sort("x")
def __repr__(self):
if self.limit is None:
return "<TimeSeries %d (draw all) at 0x%x>" % (len(self.values), id(self))
else:
return "<TimeSeries %d (draw %d) at 0x%x>" % (len(self.values), self.limit, id(self))
def append(self, x, y, ex=None, ey=None, exl=None, eyl=None):
"""Append one point to the dataset.
Arguments:
x (string): x value (a time-string)
y (float): y value
ex (float or `None`): symmetric or upper error in x
ey (list of floats or `None`): symmetric or upper error in y
exl (list of floats or `None`): asymmetric lower error in x
eyl (list of floats or `None`): asymmetric lower error in y
Exceptions:
Input arguments must match the signature of the dataset
(`sig`).
Considerations:
This method is provided for convenience; it is more
efficient to input all points at once during
construction.
"""
Scatter.append(self, utilities.fromtimestring(x, self.informat, self._subseconds, self._t0), y, ex, ey, exl, eyl)
def totimestring(self, timenumbers):
"""Convert a number of seconds or a list of numbers into time string(s).
Arguments:
timenumbers (number or list of numbers): time(s) to be
converted
Behavior:
If only one `timenumbers` is passed, the return value is a
single string; if a list of strings is passed, the return value
is a list of strings.
Uses this timeseries's `outformat` and `t0` for the conversion.
"""
return utilities.totimestring(timenumbers, self.outformat, self._subseconds, self._t0)
def fromtimestring(self, timestrings):
"""Convert a time string or many time strings into a number(s) of seconds.
Arguments:
timestrings (string or list of strings): time string(s) to be
converted
Behavior:
If only one `timestring` is passed, the return value is a
single number; if a list of strings is passed, the return value
is a list of numbers.
Uses this timeseries's `informat` and `t0` for the
conversion.
"""
return utilities.fromtimestring(timestrings, self.informat, self._subseconds, self._t0)
def timeticks(self, major, minor, start=None):
"""Set x tick-marks to temporally meaningful values.
Arguments:
major (number): interval in seconds between major ticks (ticks
with labels); may be built from combinations of the SECOND,
MINUTE, HOUR, DAY, WEEK, MONTH, or YEAR constants
minor (number): same for minor ticks (shorter ticks without labels)
start (number, string, or `None`): a time to set the offset
of the tick-marks (use `t0` if `None`)
Behavior:
A "month" is taken to be exactly 31 days and a "year" is
taken to be exactly 365 days. Week markers will only line
up with month markers at `start`.
"""
if isinstance(start, basestring): start = self.fromtimestring(start)
return utilities.timeticks(major, minor, self.outformat, self._subseconds, self._t0, start)
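# A minimal usage sketch for TimeSeries (illustrative only, kept as a comment;
# the dates are made up, and WEEK/DAY are assumed to be the module-level time
# constants mentioned in the timeticks docstring).
#
#     ts = TimeSeries(x=["2009-01-01 00:00:00", "2009-01-02 00:00:00"],
#                     y=[1., 2.], linecolor="black")
#     ts.append("2009-01-03 00:00:00", 3.)
#     ticks = ts.timeticks(WEEK, DAY)     # weekly labeled ticks, daily minor ticks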
######################################################### Colorfield
class ColorField(Frame):
_not_frameargs = ["values", "zmin", "zmax", "zlog", "components", "tocolor", "smooth"]
def __init__(self, xbins, xmin, xmax, ybins, ymin, ymax, zmin=Auto, zmax=Auto, zlog=False, components=1, tocolor=color.gradients["rainbow"], smooth=False, **frameargs):
self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax, self.tocolor, self.smooth = xmin, xmax, ymin, ymax, zmin, zmax, tocolor, smooth
if components == 1:
self.values = numpy.zeros((xbins, ybins), numpy.float)
else:
self.values = numpy.zeros((xbins, ybins, components), numpy.float)
Frame.__init__(self, **frameargs)
def __repr__(self):
if self.components() == 1:
return "<ColorField [%d][%d] x=(%g, %g) y=(%g, %g) at 0x%x>" % (self.xbins(), self.ybins(), self.xmin, self.xmax, self.ymin, self.ymax, id(self))
else:
return "<ColorField [%d][%d][%d] x=(%g, %g) y=(%g, %g) at 0x%x>" % (self.xbins(), self.ybins(), self.components(), self.xmin, self.xmax, self.ymin, self.ymax, id(self))
def xbins(self):
return self.values.shape[0]
def ybins(self):
return self.values.shape[1]
def components(self):
if len(self.values.shape) > 2:
return self.values.shape[2]
else:
return 1
def index(self, x, y):
xindex = int(math.floor((x - self.xmin)*self.values.shape[0]/(self.xmax - self.xmin)))
if not (0 <= xindex < self.values.shape[0]):
raise ContainerException, "The value %g is not between xmin=%g and xmax=%g." % (x, self.xmin, self.xmax)
yindex = int(math.floor((y - self.ymin)*self.values.shape[1]/(self.ymax - self.ymin)))
if not (0 <= yindex < self.values.shape[1]):
raise ContainerException, "The value %g is not between ymin=%g and ymax=%g." % (y, self.ymin, self.ymax)
return xindex, yindex
def center(self, i, j):
x = (i + 0.5)*(self.xmax - self.xmin)/float(self.values.shape[0]) + self.xmin
if not (self.xmin <= x <= self.xmax):
raise ContainerException, "The index %d is not between 0 and xbins=%d" % (i, self.values.shape[0])
y = (j + 0.5)*(self.ymax - self.ymin)/float(self.values.shape[1]) + self.ymin
if not (self.ymin <= y <= self.ymax):
raise ContainerException, "The index %d is not between 0 and ybins=%d" % (j, self.values.shape[1])
return x, y
def map(self, func):
ybins = self.ybins()
for i in xrange(self.xbins()):
for j in xrange(ybins):
self.values[i,j] = func(*self.center(i, j))
def remap(self, func):
ybins = self.ybins()
for i in xrange(self.xbins()):
for j in xrange(ybins):
self.values[i,j] = func(*self.center(i, j), old=self.values[i,j])
def zranges(self):
ybins = self.ybins()
components = self.components()
if components == 1:
zmin, zmax = None, None
else:
zmin, zmax = [None]*self.components(), [None]*self.components()
for i in xrange(self.xbins()):
for j in xrange(ybins):
if components == 1:
if zmin is None or self.values[i,j] < zmin: zmin = self.values[i,j]
if zmax is None or self.values[i,j] > zmax: zmax = self.values[i,j]
else:
for k in xrange(components):
if zmin[k] is None or self.values[i,j,k] < zmin[k]: zmin[k] = self.values[i,j,k]
if zmax[k] is None or self.values[i,j,k] > zmax[k]: zmax[k] = self.values[i,j,k]
return zmin, zmax
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
return self.xmin, self.ymin, self.xmax, self.ymax
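# A minimal usage sketch for ColorField (illustrative only, kept as a comment):
# a regular grid of z values over an x-y rectangle, filled from bin centers.
#
#     field = ColorField(100, -1., 1., 100, -1., 1., zmin=0., zmax=2.)
#     field.map(lambda x, y: x**2 + y**2)   # z value from each bin's center coordinates
#     i, j = field.index(0.5, -0.25)        # bin indices covering the point (0.5, -0.25)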
######################################################### Subregions of the plane
class Region(Frame):
"""Represents an enclosed region of the plane.
Signature::
Region([command1[, command2[, command3[, ...]]]], [linewidth=width,] [linestyle=style,] [linecolor=color,] [fillcolor=color,] [**frameargs])
Arguments:
commands (list of RegionCommands): a list of `MoveTo`, `EdgeTo`,
or `ClosePolygon` commands; commands have the same structure as
SVG path data, but their arguments may be infinite values (to
enclose an unbounded region of the plane)
fillcolor (string or color): fill color of the enclosed region
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`commands`, `fillcolor`, and frame arguments.
"""
_not_frameargs = ["commands", "fillcolor"]
def __init__(self, *commands, **kwds):
self.commands = list(commands)
params = {"fillcolor": "lightblue"}
params.update(kwds)
Frame.__init__(self, **params)
def __repr__(self):
return "<Region (%s commands) at 0x%x>" % (len(self.commands), id(self))
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
xmin, ymin, xmax, ymax = None, None, None, None
for command in self.commands:
if not isinstance(command, RegionCommand):
raise ContainerException, "Commands passed to Region must all be RegionCommands (MoveTo, EdgeTo, ClosePolygon)"
for x, y in command.points():
if not isinstance(x, mathtools.InfiniteType) and (not xlog or x > 0.):
if xmin is None or x < xmin: xmin = x
if xmax is None or x > xmax: xmax = x
if not isinstance(y, mathtools.InfiniteType) and (not ylog or y > 0.):
if ymin is None or y < ymin: ymin = y
if ymax is None or y > ymax: ymax = y
if xmin is None:
if xlog:
xmin, xmax = 0.1, 1.
else:
xmin, xmax = 0., 1.
if ymin is None:
if ylog:
ymin, ymax = 0.1, 1.
else:
ymin, ymax = 0., 1.
if xmin == xmax:
if xlog:
xmin, xmax = xmin/2., xmax*2.
else:
xmin, xmax = xmin - 0.5, xmax + 0.5
if ymin == ymax:
if ylog:
ymin, ymax = ymin/2., ymax*2.
else:
ymin, ymax = ymin - 0.5, ymax + 0.5
return xmin, ymin, xmax, ymax
class RegionCommand:
def points(self): return []
class MoveTo(RegionCommand):
"""Represents a directive to move the pen to a specified point."""
def __init__(self, x, y):
self.x, self.y = x, y
def __repr__(self):
if isinstance(self.x, (numbers.Number, numpy.number)): x = "%g" % self.x
else: x = repr(self.x)
if isinstance(self.y, (numbers.Number, numpy.number)): y = "%g" % self.y
else: y = repr(self.y)
return "MoveTo(%s, %s)" % (x, y)
def points(self): return [(self.x, self.y)]
class EdgeTo(RegionCommand):
"""Represents a directive to draw an edge to a specified point."""
def __init__(self, x, y):
self.x, self.y = x, y
def __repr__(self):
if isinstance(self.x, (numbers.Number, numpy.number)): x = "%g" % self.x
else: x = repr(self.x)
if isinstance(self.y, (numbers.Number, numpy.number)): y = "%g" % self.y
else: y = repr(self.y)
return "EdgeTo(%s, %s)" % (x, y)
def points(self): return [(self.x, self.y)]
class ClosePolygon(RegionCommand):
"""Represents a directive to close the current polygon."""
def __repr__(self):
return "ClosePolygon()"
class RegionMap(Frame):
_not_frameargs = ["xbins", "ybins", "categories", "categorizer", "colors", "bordercolor"]
def __init__(self, xbins, xmin, xmax, ybins, ymin, ymax, categories, categorizer, colors=Auto, bordercolor=None, **frameargs):
self.xbins, self.xmin, self.xmax, self.ybins, self.ymin, self.ymax, self.categories, self.categorizer, self.colors, self.bordercolor = xbins, xmin, xmax, ybins, ymin, ymax, categories, categorizer, colors, bordercolor
Frame.__init__(self, **frameargs)
def __repr__(self):
return "<RegionMap [%d][%d] x=(%g, %g) y=(%g, %g) at 0x%x>" % (self.xbins, self.ybins, self.xmin, self.xmax, self.ymin, self.ymax, id(self))
def index(self, x, y):
xindex = int(math.floor((x - self.xmin)*self.xbins/(self.xmax - self.xmin)))
if not (0 <= xindex < self.xbins):
raise ContainerException, "The value %g is not between xmin=%g and xmax=%g." % (x, self.xmin, self.xmax)
yindex = int(math.floor((y - self.ymin)*self.ybins/(self.ymax - self.ymin)))
if not (0 <= yindex < self.ybins):
raise ContainerException, "The value %g is not between ymin=%g and ymax=%g." % (y, self.ymin, self.ymax)
return xindex, yindex
def center(self, i, j):
x = (i + 0.5)*(self.xmax - self.xmin)/float(self.xbins) + self.xmin
if not (self.xmin <= x <= self.xmax):
raise ContainerException, "The index %d is not between 0 and xbins=%d" % (i, self.xbins)
y = (j + 0.5)*(self.ymax - self.ymin)/float(self.ybins) + self.ymin
if not (self.ymin <= y <= self.ymax):
raise ContainerException, "The index %d is not between 0 and ybins=%d" % (j, self.ybins)
return x, y
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
return self.xmin, self.ymin, self.xmax, self.ymax
def _compile(self):
if isinstance(self.categorizer, numpy.ndarray) or callable(self.categorizer):
self._categorizer = self.categorizer
else:
self._categorizer = eval("lambda x, y: (%s)" % self.categorizer)
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=False, ylog=False):
self._compile()
if self.colors is Auto:
cols = color.lightseries(len(self.categories), alternating=False)
else:
cols = self.colors
self._colors = {}
ints = {}
counter = 0
for category, col in zip(self.categories, cols):
self._colors[category] = color.RGB(col).ints()
ints[category] = counter
counter += 1
if self.bordercolor is not None:
asarray = numpy.zeros((self.xbins, self.ybins), dtype=numpy.int)
self._values = []
for i in xrange(self.xbins):
row = []
for j in xrange(self.ybins):
if isinstance(self._categorizer, numpy.ndarray):
category = self.categories[self._categorizer[i,j]]
else:
category = self._categorizer(*self.center(i, j))
row.append(self._colors[category])
if self.bordercolor is not None:
asarray[i,j] = ints[category]
self._values.append(row)
if self.bordercolor is not None:
roll1 = numpy.roll(asarray, 1, 0)
roll2 = numpy.roll(asarray, -1, 0)
roll3 = numpy.roll(asarray, 1, 1)
roll4 = numpy.roll(asarray, -1, 1)
mask = numpy.equal(asarray, roll1)
numpy.logical_and(mask, numpy.equal(asarray, roll2), mask)
numpy.logical_and(mask, numpy.equal(asarray, roll3), mask)
numpy.logical_and(mask, numpy.equal(asarray, roll4), mask)
thecolor = color.RGB(self.bordercolor).ints()
for i in xrange(self.xbins):
for j in xrange(self.ybins):
if not mask[i,j]:
self._values[i][j] = thecolor
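# A minimal usage sketch for RegionMap (illustrative only, kept as a comment):
# each grid bin is colored by the category its center falls into; the
# categories and categorizer below are made up.
#
#     rm = RegionMap(100, -1., 1., 100, -1., 1.,
#                    categories=["inside", "outside"],
#                    categorizer=lambda x, y: "inside" if x**2 + y**2 < 1. else "outside",
#                    bordercolor="black")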
######################################################### Curves and functions
class Curve(Frame):
"""Represents a parameterized function.
Arguments:
func (function or string): the function to plot; if callable,
it should take one argument and accept parameters as keywords;
if a string, it should be valid Python code, accepting a
variable name specified by `var`, parameter names to be passed
through `parameters`, and any function in the `math` library
(`cmath` if complex).
xmin, xmax (numbers or `Auto`): nominal range of function input
parameters (dict): parameter name, value pairs to be passed
before plotting
var (string): name of the input variable (string `func` only)
namespace (module, dict, or `None`): names to be used by the
function; for example::
import scipy.special # (sudo apt-get install python-scipy)
curve = Curve("jn(4, x)", namespace=scipy.special)
draw(curve, xmin=-20., xmax=20., fileName="/tmp/tmp.svg")
form (built-in constant): if Curve.FUNCTION, `func` is expected
to input x and output y; if Curve.PARAMETRIC, `func` is expected
to input t and output the tuple (x, y); if Curve.COMPLEX, `func`
is expected to output a 2-D point as a complex number
samples (number or `Auto`): number of sample points or `Auto`
for dynamic sampling (_not yet copied over from SVGFig!_)
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color specification for
the curve
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`func`, `xmin`, `xmax`, `parameters`, `var`, `namespace`,
`form`, `samples`, `linewidth`, `linestyle`, `linecolor`, and
frame arguments.
Examples::
>>> c = Curve("sin(x + delta)", 0, 6.28)
>>> c
<Curve x -> sin(x + delta) from 0 to 6.28>
>>> c(0., delta=0.1)
0.099833416646828155
>>> c.parameters = {"delta": 0.1}
>>> draw(c, fileName="/tmp/tmp.svg")
>>> def f(x, delta=0.):
... return math.sin(x + delta)
...
>>> c = Curve(f, 0, 6.28)
>>> c
<Curve f from 0 to 6.28>
>>> c(0., delta=0.1)
0.099833416646828155
"""
_not_frameargs = ["func", "parameters", "var", "namespace", "form", "samples", "linewidth", "linestyle", "linecolor", "FUNCTION", "PARAMETRIC", "COMPLEX"]
class CurveType:
def __init__(self, name): self.name = "Curve." + name
def __repr__(self): return self.name
FUNCTION = CurveType("FUNCTION")
PARAMETRIC = CurveType("PARAMETRIC")
COMPLEX = CurveType("COMPLEX")
def __init__(self, func, xmin=Auto, xmax=Auto, parameters={}, var="x", namespace=None, form=FUNCTION, samples=1000, linewidth=1., linestyle="solid", linecolor="black", **frameargs):
self.func, self.xmin, self.xmax, self.parameters, self.var, self.namespace, self.form, self.samples, self.linewidth, self.linestyle, self.linecolor = func, xmin, xmax, parameters, var, namespace, form, samples, linewidth, linestyle, linecolor
Frame.__init__(self, **frameargs)
def _compile(self, parameters):
if callable(self.func):
self._func = lambda t: self.func(t, **parameters)
self._func.func_name = self.func.func_name
else:
if self.form is self.COMPLEX: g = dict(cmath.__dict__)
else: g = dict(math.__dict__)
# erf and erfc are not provided by math/cmath here, so take them from mathtools
g["erf"] = mathtools.erf
g["erfc"] = mathtools.erfc
if self.namespace is not None:
if isinstance(self.namespace, dict):
g.update(self.namespace)
else:
g.update(self.namespace.__dict__)
g.update(parameters)
self._func = eval("lambda (%s): (%s)" % (self.var, self.func), g)
self._func.func_name = "%s -> %s" % (self.var, self.func)
def __repr__(self):
if callable(self.func):
func_name = self.func.func_name
else:
func_name = "%s -> %s" % (self.var, self.func)
return "<Curve %s>" % func_name
def __call__(self, values, **parameters):
"""Call the function for a set of values and parameters.
Arguments:
values (number or list of numbers): input(s) to the function
parameters (keyword arguments): parameter values for this
set of evaluations
"""
self._compile(parameters)
if isinstance(values, (numbers.Number, numpy.number)):
singleton = True
values = [values]
else:
singleton = False
if self.form is self.FUNCTION:
output = numpy.empty(len(values), dtype=numpy.float)
elif self.form is self.PARAMETRIC:
output = numpy.empty((len(values), 2), dtype=numpy.float)
elif self.form is self.COMPLEX:
output = numpy.empty(len(values), dtype=numpy.complex)
else:
raise ContainerException, "Curve.form must be one of Curve.FUNCTION, Curve.PARAMETRIC, or Curve.COMPLEX."
try:
for i, value in enumerate(values):
output[i] = self._func(value)
except NameError, err:
raise NameError, "%s: are the Curve's parameters missing (or namespace not set)?" % err
if singleton: output = output[0]
return output
def derivative(self, values, epsilon=mathtools.epsilon, **parameters):
"""Numerically calculate derivative for a set of values and parameters.
Arguments:
values (number or list of numbers): input(s) to the function
parameters (keyword arguments): parameter values for this
set of evaluations
"""
self._compile(parameters)
if isinstance(values, (numbers.Number, numpy.number)):
singleton = True
values = [values]
else:
singleton = False
if self.form is self.FUNCTION:
output = numpy.empty(len(values), dtype=numpy.float)
elif self.form is self.PARAMETRIC:
output = numpy.empty((len(values), 2), dtype=numpy.float)
elif self.form is self.COMPLEX:
raise ContainerException, "Curve.derivative not implemented for COMPLEX functions."
else:
raise ContainerException, "Curve.form must be one of Curve.FUNCTION, Curve.PARAMETRIC, or Curve.COMPLEX."
for i, value in enumerate(values):
up = self._func(value + epsilon)
down = self._func(value - epsilon)
output[i] = (up - down)/(2. * epsilon)
if singleton: output = output[0]
return output
def scatter(self, low, high, samples=Auto, xlog=False, **parameters):
"""Create a `Scatter` object from the evaluated function.
Arguments:
samples (number or `Auto`): number of sample points
low, high (numbers): domain to sample
xlog (bool): if `form` == `FUNCTION`, distribute the sample
points logarithmically
parameters (keyword arguments): parameter values for this
set of evaluations
"""
tmp = dict(self.parameters)   # copy so that self.parameters is not mutated
tmp.update(parameters)
parameters = tmp
if samples is Auto: samples = self.samples
if self.form is self.FUNCTION:
points = numpy.empty((samples, 2), dtype=numpy.float)
if xlog:
step = (math.log(high) - math.log(low))/(samples - 1.)
points[:,0] = numpy.exp(numpy.arange(math.log(low), math.log(high) + 0.5*step, step))
else:
step = (high - low)/(samples - 1.)
points[:,0] = numpy.arange(low, high + 0.5*step, step)
points[:,1] = self(points[:,0], **parameters)
elif self.form is self.PARAMETRIC:
step = (high - low)/(samples - 1.)
points = self(numpy.arange(low, high + 0.5*step, step), **parameters)
elif self.form is self.COMPLEX:
step = (high - low)/(samples - 1.)
tmp = self(numpy.arange(low, high + 0.5*step, step), **parameters)
points = numpy.empty((samples, 2), dtype=numpy.float)
for i, value in enumerate(tmp):
points[i] = value.real, value.imag
else: raise ContainerException, "Curve.form must be one of Curve.FUNCTION, Curve.PARAMETRIC, or Curve.COMPLEX."
return Scatter(points, ("x", "y"), limit=None, calcrange=utilities.calcrange, marker=None, lines=True, linewidth=self.linewidth, linestyle=self.linestyle, linecolor=self.linecolor, **self._frameargs())
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=False, ylog=False):
if xmin in (None, Auto) and xmax in (None, Auto):
if xlog:
xmin, xmax = 0.1, 1.
else:
xmin, xmax = 0., 1.
elif xmin is None:
if xlog:
xmin = xmax / 2.
else:
xmin = xmax - 1.
elif xmax is None:
if xlog:
xmax = xmin * 2.
else:
xmax = xmin + 1.
self._scatter = self.scatter(xmin, xmax, self.samples, xlog, **self.parameters)
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
if getattr(self, "_scatter", None) is not None:
return self._scatter.ranges(xlog=xlog, ylog=ylog)
else:
self._prepare(xlog=xlog)
output = self._scatter.ranges(xlog=xlog, ylog=ylog)
self._scatter = None
return output
def objective(self, data, parnames, method=Auto, exclude=Auto, centroids=False):
"""Return an objective function whose minimum represents a
best fit to a given dataset.
Arguments:
data (`Histogram` or `Scatter`): the data to fit
parnames (list of strings): names of the parameters
method (function or `Auto`): a function that will be called
for each data point to calculate the final value of the
objective function; examples:
`lambda f, x, y: (f - y)**2` chi^2 for data without uncertainties
`lambda f, x, y, ey: (f - y)**2/ey**2` chi^2 with uncertainties
If `method` is `Auto`, an appropriate chi^2 function will
be used.
exclude (function, `Auto`, or `None`): a function that will
be called for each data point to determine whether to
exclude the point; `Auto` excludes only zero values and
`None` excludes nothing
centroids (bool): use centroids of histogram, rather than
centers
"""
if isinstance(data, Histogram):
if isinstance(data, HistogramCategorical):
raise ContainerException, "A fit to a categorical histogram is not meaningful."
if exclude is Auto and method is Auto:
exclude = lambda x, y: y == 0.
elif exclude is Auto or exclude is None:
exclude = lambda x, y: False
self._exclude = exclude
if method is Auto:
method = lambda f, x, y: (f - y)**2/abs(y)
values = numpy.empty((len(data.bins), 2), dtype=numpy.float)
if centroids: values[:,0] = data.centroids()
else: values[:,0] = data.centers()
values[:,1] = data.values
return eval("lambda %s: sum([method(f, x, y) for f, (x, y) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
elif isinstance(data, Scatter):
if "ey" in data.sig and "eyl" in data.sig:
if method is Auto:
method = lambda f, x, y, ey, eyl: ((f - y)**2/eyl**2 if f < y else (f - y)**2/ey**2)
if exclude is Auto:
exclude = lambda x, y, ey, eyl: eyl == 0. or ey == 0.
elif exclude is None:
exclude = lambda x, y, ey, eyl: False
elif "ey" in data.sig:
if method is Auto:
method = lambda f, x, y, ey: (f - y)**2/ey**2
if exclude is Auto:
exclude = lambda x, y, ey: ey == 0.
elif exclude is None:
exclude = lambda x, y, ey: False
else:
if method is Auto:
method = lambda f, x, y: (f - y)**2
if exclude is Auto or exclude is None:
exclude = lambda x, y: False
self._exclude = exclude
index = data.index()
if "ey" in data.sig and "eyl" in data.sig:
values = numpy.empty((len(data.values), 4))
values[:,0] = data.values[:,index["x"]]
values[:,1] = data.values[:,index["y"]]
values[:,2] = data.values[:,index["ey"]]
values[:,3] = data.values[:,index["eyl"]]
return eval("lambda %s: sum([method(f, x, y, ey, eyl) for f, (x, y, ey, eyl) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y, ey, eyl)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
elif "ey" in data.sig:
values = numpy.empty((len(data.values), 3))
values[:,0] = data.values[:,index["x"]]
values[:,1] = data.values[:,index["y"]]
values[:,2] = data.values[:,index["ey"]]
return eval("lambda %s: sum([method(f, x, y, ey) for f, (x, y, ey) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y, ey)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
else:
values = numpy.empty((len(data.values), 2))
values[:,0] = data.values[:,index["x"]]
values[:,1] = data.values[:,index["y"]]
return eval("lambda %s: sum([method(f, x, y) for f, (x, y) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
else:
raise ContainerException, "Data for Curve.objective must be a Histogram or a Scatter plot."
def fit(self, data, parameters=Auto, sequence=[("migrad",)], method=Auto, exclude=Auto, centroids=False, **fitter_arguments):
"""Fit this curve to a given dataset, updating its `parameters` and creating a `minimizer` member.
Arguments:
data (`Histogram` or `Scatter`): the data to fit
parameters (dict of strings -> values): the initial
parameters for the fit
sequence (list of (string, arg, arg)): sequence of Minuit
commands to call, with optional arguments
method (function or `Auto`): a function that will be called
for each data point to calculate the final value of the
objective function; examples:
`lambda f, x, y: (f - y)**2` chi^2 for data without uncertainties
`lambda f, x, y, ey: (f - y)**2/ey**2` chi^2 with uncertainties
If `method` is `Auto`, an appropriate chi^2 function will
be used.
exclude (function, `Auto`, or `None`): a function that will
be called for each data point to determine whether to
exclude the point; `Auto` excludes only zero values and
`None` excludes nothing
centroids (bool): use centroids of histogram, rather than
centers
Keyword arguments:
Keyword arguments will be passed to the Minuit object as member data.
"""
if parameters is Auto: parameters = self.parameters
self.minimizer = minuit.Minuit(self.objective(data, parameters.keys(), method=method, exclude=exclude, centroids=centroids))
for name, value in fitter_arguments.items():
exec("self.minimizer.%s = %s" % (name, str(value)))
self.minimizer.values = parameters
# this block is just to set ndf (with all exclusions applied)
ndf = 0
if isinstance(data, Histogram):
if isinstance(data, HistogramCategorical):
raise ContainerException, "A fit to a categorical histogram is not meaningful."
values = numpy.empty((len(data.bins), 2), dtype=numpy.float)
if centroids: values[:,0] = data.centroids()
else: values[:,0] = data.centers()
values[:,1] = data.values
for x, y in values:
if not self._exclude(x, y):
ndf += 1
elif isinstance(data, Scatter):
index = data.index()
if "ey" in data.sig and "eyl" in data.sig:
values = numpy.empty((len(data.values), 4))
values[:,0] = data.values[:,index["x"]]
values[:,1] = data.values[:,index["y"]]
values[:,2] = data.values[:,index["ey"]]
values[:,3] = data.values[:,index["eyl"]]
for x, y, ey, eyl in values:
if not self._exclude(x, y, ey, eyl):
ndf += 1
elif "ey" in data.sig:
values = numpy.empty((len(data.values), 3))
values[:,0] = data.values[:,index["x"]]
values[:,1] = data.values[:,index["y"]]
values[:,2] = data.values[:,index["ey"]]
for x, y, ey in values:
if not self._exclude(x, y, ey):
ndf += 1
else:
values = numpy.empty((len(data.values), 2))
values[:,0] = data.values[:,index["x"]]
values[:,1] = data.values[:,index["y"]]
for x, y in values:
if not self._exclude(x, y):
ndf += 1
else:
raise ContainerException, "Data for Curve.objective must be a Histogram or a Scatter plot."
ndf -= len(parameters)
# end block to set ndf
try:
for command in sequence:
name = command[0]
args = command[1:]
getattr(self.minimizer, name)(*args)
except Exception as tmp:
self.parameters = self.minimizer.values
self.chi2 = self.minimizer.fval
self.ndf = ndf
self.normalizedChi2 = (self.minimizer.fval / float(self.ndf) if self.ndf > 0 else -1.)
raise tmp
self.parameters = self.minimizer.values
self.chi2 = self.minimizer.fval
self.ndf = ndf
self.normalizedChi2 = (self.minimizer.fval / float(self.ndf) if self.ndf > 0 else -1.)
# reporting results after fitting
def round_errpair(self, parname, n=2):
"""Round a parameter and its uncertainty to n significant figures in
the uncertainty (default is two)."""
if getattr(self, "minimizer", None) is None:
raise ContainerException, "Curve.round_errpair can only be called after fitting."
return mathtools.round_errpair(self.minimizer.values[parname], self.minimizer.errors[parname], n=n)
def str_errpair(self, parname, n=2):
"""Round a number and its uncertainty to n significant figures in the
uncertainty (default is two) and return the result as a string."""
if getattr(self, "minimizer", None) is None:
raise ContainerException, "Curve.str_errpair can only be called after fitting."
return mathtools.str_errpair(self.minimizer.values[parname], self.minimizer.errors[parname], n=n)
def unicode_errpair(self, parname, n=2):
"""Round a number and its uncertainty to n significant figures in the
uncertainty (default is two) and return the result joined by a unicode
plus-minus sign."""
if getattr(self, "minimizer", None) is None:
raise ContainerException, "Curve.unicode_errpair can only be called after fitting."
return mathtools.unicode_errpair(self.minimizer.values[parname], self.minimizer.errors[parname], n=n)
def expr(self, varrepl=None, sigfigs=2):
"""Return the function string with fitted parameter values substituted for the parameter names."""
if callable(self.func):
raise ContainerException, "Curve.expr only works for string-based functions."
if getattr(self, "minimizer", None) is None:
raise ContainerException, "Curve.expr can only be called after fitting."
output = self.func[:]
for name, value in self.minimizer.values.items():
if sigfigs is None:
value = ("%g" % value)
else:
value = mathtools.str_sigfigs(value, sigfigs)
output = re.sub(r"\b%s\b" % name, value, output)
if varrepl is not None:
output = re.sub(r"\b%s\b" % self.var, varrepl, output)
return output
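# A minimal fitting sketch for Curve (illustrative only, kept as a comment):
# `h` stands for some Histogram to fit, and Curve.fit relies on the optional
# Minuit wrapper imported as `minuit` elsewhere in this module.
#
#     c = Curve("a + b*x", parameters={"a": 0., "b": 1.})
#     c.fit(h, sequence=[("migrad",)])
#     print c.unicode_errpair("b"), c.normalizedChi2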
######################################################### Grids, horiz/vert lines, annotations
class Line(Frame):
"""Represents a line drawn between two points (one of which may be at infinity).
Arguments:
x1, y1 (numbers): a point; either coordinate can be Infinity or
multiples of Infinity
x2, y2 (numbers): another point; either coordinate can be
Infinity or multiples of Infinity
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string or color): color specification for grid line(s)
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`x1`, `y1`, `x2`, `y2`, `linewidth`, `linestyle`, `linecolor`,
and frame arguments.
"""
_not_frameargs = ["x1", "y1", "x2", "y2", "linewidth", "linestyle", "linecolor"]
def __init__(self, x1, y1, x2, y2, linewidth=1., linestyle="solid", linecolor="black", **frameargs):
self.x1, self.y1, self.x2, self.y2, self.linewidth, self.linestyle, self.linecolor = x1, y1, x2, y2, linewidth, linestyle, linecolor
Frame.__init__(self, **frameargs)
def __repr__(self):
if isinstance(self.x1, mathtools.InfiniteType): x1 = repr(self.x1)
else: x1 = "%g" % self.x1
if isinstance(self.y1, mathtools.InfiniteType): y1 = repr(self.y1)
else: y1 = "%g" % self.y1
if isinstance(self.x2, mathtools.InfiniteType): x2 = repr(self.x2)
else: x2 = "%g" % self.x2
if isinstance(self.y2, mathtools.InfiniteType): y2 = repr(self.y2)
else: y2 = "%g" % self.y2
return "<Line %s %s %s %s at 0x%x>" % (x1, y1, x2, y2, id(self))
def ranges(self, xlog=False, ylog=False):
if (isinstance(self.x1, mathtools.InfiniteType) or isinstance(self.y1, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x1 <= 0.) or (getattr(self, "ylog", False) and self.y1 <= 0.)) and \
(isinstance(self.x2, mathtools.InfiniteType) or isinstance(self.y2, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x2 <= 0.) or (getattr(self, "ylog", False) and self.y2 <= 0.)):
if getattr(self, "xlog", False):
xmin, xmax = 0.1, 1.
else:
xmin, xmax = 0., 1.
if getattr(self, "ylog", False):
ymin, ymax = 0.1, 1.
else:
ymin, ymax = 0., 1.
return xmin, ymin, xmax, ymax
elif isinstance(self.x1, mathtools.InfiniteType) or isinstance(self.y1, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x1 <= 0.) or (getattr(self, "ylog", False) and self.y1 <= 0.):
singlepoint = (self.x2, self.y2)
elif isinstance(self.x2, mathtools.InfiniteType) or isinstance(self.y2, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x2 <= 0.) or (getattr(self, "ylog", False) and self.y2 <= 0.):
singlepoint = (self.x1, self.y1)
else:
return min(self.x1, self.x2), min(self.y1, self.y2), max(self.x1, self.x2), max(self.y1, self.y2)
# handle singlepoint
if getattr(self, "xlog", False):
xmin, xmax = singlepoint[0]/2., singlepoint[0]*2.
else:
xmin, xmax = singlepoint[0] - 1., singlepoint[0] + 1.
if getattr(self, "ylog", False):
ymin, ymax = singlepoint[1]/2., singlepoint[1]*2.
else:
ymin, ymax = singlepoint[1] - 1., singlepoint[1] + 1.
return xmin, ymin, xmax, ymax
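# A minimal usage sketch for Line (illustrative only, kept as a comment):
# a horizontal reference line at y=0 spanning the whole frame, assuming
# `Infinity` is the module's infinite value referred to in the docstring above.
#
#     baseline = Line(-Infinity, 0., Infinity, 0., linestyle="dashed")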
class Grid(Frame):
"""Represents one or more horizontal/vertical lines or a whole grid.
Arguments:
horiz (list of numbers, function, or `None`): a list of values
at which to draw horizontal lines, a function `f(a, b)` taking
an interval and providing such a list, or `None` for no
horizontal lines.
vert (list of numbers, function, or `None`): same for vertical
lines
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string or color): color specification for grid line(s)
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`horiz`, `vert`, `linewidth`, `linestyle`, `linecolor`, and
frame arguments.
Considerations:
The `regular` utility provides functions suitable for `horiz`
and `vert`.
"""
_not_frameargs = ["horiz", "vert", "linewidth", "linestyle", "linecolor"]
def __init__(self, horiz=None, vert=None, linewidth=1., linestyle="dotted", linecolor="grey", **frameargs):
self.horiz, self.vert, self.linewidth, self.linestyle, self.linecolor = horiz, vert, linewidth, linestyle, linecolor
Frame.__init__(self, **frameargs)
def __repr__(self):
return "<Grid %s %s at 0x%x>" % (repr(self.horiz), repr(self.vert), id(self))
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
try:
self._horiz = []
for i in self.horiz:
self._horiz.append(i)
except TypeError:
if callable(self.horiz):
try:
self._horiz = self.horiz(ymin, ymax)
except TypeError:
raise ContainerException, "If Grid.horiz is a function, it must take two endpoints and return a list of values"
elif self.horiz is None:
self._horiz = []
else:
raise ContainerException, "Grid.horiz must be None, a list of values, or a function returning a list of values (given endpoints)"
try:
self._vert = []
for i in self.vert:
self._vert.append(i)
except TypeError:
if callable(self.vert):
try:
self._vert = self.vert(xmin, xmax)
except TypeError:
raise ContainerException, "If Grid.vert is a function, it must take two endpoints and return a list of values"
elif self.vert is None:
self._vert = []
else:
raise ContainerException, "Grid.vert must be None, a list of values, or a function returning a list of values (given endpoints)"
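# A minimal usage sketch for Grid (illustrative only, kept as a comment):
# `plot` stands for some histogram or scatter plot, and Overlay is the
# combining container referred to in the Legend docstring below.
#
#     g = Grid(horiz=[0.], vert=[-1., 0., 1.], linestyle="dotted", linecolor="grey")
#     overlaid = Overlay(plot, g, frame=0)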
######################################################### User-defined plot legend
class Legend(Frame):
"""Represents a table of information to overlay on a plot.
Arguments:
fields (list of lists): table data; may include text, numbers,
and objects with line, fill, or marker styles
colwid (list of numbers): column widths as fractions of the
whole width (minus padding); e.g. [0.5, 0.25, 0.25]
justify (list of "l", "m", "r"): column justification: "l" for
left, "m" or "c" for middle, and "r" for right
x, y (numbers): position of the legend box (use with
`textanchor`) in units of frame width; e.g. (1, 1) is the
top-right corner, (0, 0) is the bottom-left corner
width (number): width of the legend box in units of frame width
height (number or `Auto`): height of the legend box in units of
frame width or `Auto` to calculate from the number of rows,
`baselineskip`, and `padding`
anchor (2-character string): placement of the legend box
relative to `x`, `y`; first character is "t" for top, "m" or
"c" for middle, and "b" for bottom, second character is
"l" for left, "m" or "c" for middle, and "r" for right
textscale (number): scale factor for text (1 is normal)
padding (number): extra space between the legend box and its
contents, as a fraction of the whole SVG document
baselineskip (number): space to skip between rows of the table,
as a fraction of the whole SVG document
linewidth (float): scale factor to resize legend box line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color of the boundary
around the legend box; no line if `None`
fillcolor (string, color, or `None`): fill color of the legend
box; hollow if `None`
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`fields`, `colwid`, `justify`, `x`, `y`, `width`, `height`,
`anchor`, `textscale`, `padding`, `baselineskip`, `linewidth`,
`linestyle`, `linecolor`, `fillcolor`, and frame arguments.
Considerations:
`Legend` is a drawable data container on its own, not attached
to any histogram or scatter plot. To overlay a `Legend` on
another plot, use the `Overlay` command, and be sure to point
`Overlay.frame` to the desired plot::
Overlay(plot, legend, frame=0)
Legends will always be drawn _above_ the frame (and therefore
also above all other plots in an overlay).
"""
_not_frameargs = ["colwid", "justify", "x", "y", "width", "height", "anchor", "textscale", "padding", "baselineskip", "linewidth", "linestyle", "linecolor", "fillcolor"]
def __init__(self, fields, colwid=Auto, justify="l", x=1., y=1., width=0.4, height=Auto, anchor="tr", textscale=1., padding=0.01, baselineskip=0.035, linewidth=1., linestyle="solid", linecolor="black", fillcolor="white"):
self.fields, self.colwid, self.justify, self.x, self.y, self.width, self.height, self.anchor, self.textscale, self.padding, self.baselineskip, self.linewidth, self.linestyle, self.linecolor, self.fillcolor = fields, colwid, justify, x, y, width, height, anchor, textscale, padding, baselineskip, linewidth, linestyle, linecolor, fillcolor
def __repr__(self):
return "<Legend %dx%d>" % self.dimensions()
def dimensions(self):
"""Determine the number of rows and columns in `fields`."""
rows = 1
columns = 1
if not isinstance(self.fields, basestring):
iterable = False
try:
iter(self.fields)
iterable = True
except TypeError: pass
if iterable:
rows -= 1
for line in self.fields:
if not isinstance(line, basestring):
length = 0
try:
for cell in line:
length += 1
except TypeError: pass
if length > columns: columns = length
rows += 1
return rows, columns
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
self._rows, self._columns = self.dimensions()
# make _fields a rectangular array with None in missing fields
self._fields = [[None for j in range(self._columns)] for i in range(self._rows)]
if isinstance(self.fields, basestring):
self._fields[0][0] = self.fields
else:
iterable = False
try:
iter(self.fields)
iterable = True
except TypeError: pass
if not iterable:
self._fields[0][0] = self.fields
else:
for i, line in enumerate(self.fields):
if isinstance(line, basestring):
self._fields[i][0] = line
else:
lineiterable = False
try:
iter(line)
lineiterable = True
except TypeError: pass
if not lineiterable:
self._fields[i][0] = line
else:
for j, cell in enumerate(line):
self._fields[i][j] = cell
# take user input if available, fill in what's remaining by evenly splitting the difference
if self.colwid is Auto:
self._colwid = [1./self._columns]*self._columns
else:
self._colwid = list(self.colwid[:self._columns])
if len(self._colwid) < self._columns:
if sum(self._colwid) < 1.:
width = (1. - sum(self._colwid)) / (self._columns - len(self._colwid))
self._colwid.extend([width]*(self._columns - len(self._colwid)))
else:
# or put in typical values if we have to normalize anyway
average = float(sum(self._colwid))/len(self._colwid)
self._colwid.extend([average]*(self._columns - len(self._colwid)))
# normalize: sum of colwid = 1
total = 1.*sum(self._colwid)
for i in range(len(self._colwid)):
self._colwid[i] /= total
# if we only get one directive, repeat for all self._columns
if self.justify is Auto or self.justify == "l":
self._justify = ["l"]*self._columns
elif self.justify == "m" or self.justify == "c":
self._justify = ["m"]*self._columns
elif self.justify == "r":
self._justify = ["r"]*self._columns
else:
# take all user input and fill in whatever's missing with "l"
self._justify = list(self.justify[:self._columns])
if len(self._justify) < self._columns:
self._justify.extend(["l"]*(self._columns - len(self._justify)))
self._anchor = [None, None]
if len(self.anchor) == 2:
if self.anchor[0] == "t": self._anchor[0] = "t"
if self.anchor[0] in ("m", "c"): self._anchor[0] = "m"
if self.anchor[0] == "b": self._anchor[0] = "b"
if self.anchor[1] == "l": self._anchor[1] = "l"
if self.anchor[1] in ("m", "c"): self._anchor[1] = "m"
if self.anchor[1] == "r": self._anchor[1] = "r"
# try the letters backward
if self._anchor[0] is None or self._anchor[1] is None:
self._anchor = [None, None]
if self.anchor[1] == "t": self._anchor[0] = "t"
if self.anchor[1] in ("m", "c"): self._anchor[0] = "m"
if self.anchor[1] == "b": self._anchor[0] = "b"
if self.anchor[0] == "l": self._anchor[1] = "l"
if self.anchor[0] in ("m", "c"): self._anchor[1] = "m"
if self.anchor[0] == "r": self._anchor[1] = "r"
if self._anchor[0] is None or self._anchor[1] is None:
raise ContainerException, "Legend.anchor not recognized: \"%s\"" % self.anchor
class Style:
"""Represents a line, fill, and marker style, but is not drawable.
Arguments:
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): stroke color
fillcolor (string, color, or `None`): fill color
marker (string or `None`): symbol at each point
markersize (float): scale factor to resize marker points
markercolor (string, color, or `None`): fill color for markers
markeroutline (string, color, or `None`): stroke color for markers
Public members:
`linewidth`, `linestyle`, `linecolor`, `fillcolor`, `marker`,
`markersize`, `markercolor`, and `markeroutline`.
Purpose:
Can be used in place of a real Histogram/Scatter/etc. in Legend.
"""
def __init__(self, linewidth=1., linestyle="solid", linecolor=None, fillcolor=None, marker=None, markersize=1., markercolor="black", markeroutline=None):
self.linewidth, self.linestyle, self.linecolor, self.fillcolor, self.marker, self.markersize, self.markercolor, self.markeroutline = linewidth, linestyle, linecolor, fillcolor, marker, markersize, markercolor, markeroutline
def __repr__(self):
attributes = [""]
if self.linecolor is not None:
attributes.append("linewidth=%g" % self.linewidth)
attributes.append("linestyle=%s" % str(self.linestyle))
attributes.append("linecolor=%s" % str(self.linecolor))
if self.fillcolor is not None:
attributes.append("fillcolor=%s" % str(self.fillcolor))
if self.marker is not None:
attributes.append("marker=%s" % str(self.marker))
attributes.append("markersize=%g" % self.markersize)
attributes.append("markercolor=%s" % str(self.markercolor))
return "<Style%s>" % " ".join(attributes)
######################################################### Interactive table for a PAW-style analysis
class InspectTable(UniTable):
"""Load, manipulate, and plot data quickly and interactively.
Class members:
cache_limit (int or `None`): a maximum number of preselected
subtables to cache
"""
cache_limit = 10
_comma = re.compile("\s*,\s*")
def __repr__(self):
return "<InspectTable %d keys %d rows>" % (len(self.keys()), len(self))
def _setup_cache(self):
if getattr(self, "_cache_subtables", None) is None:
self._cache_subtables = {}
self._cache_order = []
def __call__(self, expr, cuts=None, use_cache=True):
"""Select and return a subtable based on the expression and cuts.
Arguments:
expr (string): expression to evaluate in the namespace of
the table and plot
cuts (string): expression for filtering out unwanted data
use_cache (bool): if True, keep track of all preselected
subtables (it is likely that the user will want them again)
"""
if cuts is None or cuts == "":
subtable = self
else:
if use_cache:
self._setup_cache()
if cuts in self._cache_subtables and set(self.keys()) == set(self._cache_subtables[cuts].keys()):
subtable = self._cache_subtables[cuts]
self._cache_order = [cuts] + filter(lambda x: x != cuts, self._cache_order)
else:
subtable = self.compress(self.eval(cuts))
self._cache_subtables[cuts] = subtable
self._cache_order = [cuts] + filter(lambda x: x != cuts, self._cache_order)
if self.cache_limit is not None:
while len(self._cache_order) > self.cache_limit:
del self._cache_subtables[self._cache_order.pop()]
else:
subtable = self.compress(self.eval(cuts))
return subtable.eval(expr)
    def unique(self, expr=None, cuts=None, use_cache=True):
        """Return the set of unique values (or tuples of values) for the expression.
        Arguments:
            expr (string): comma-separated set of expressions (if `None`,
                use all fields)
            cuts (string): expression for filtering out unwanted data
            use_cache (bool): if True, keep track of all preselected
                subtables (it is likely that the user will want them again)
        """
if expr is None:
keys = self.keys()
expr = ",".join(keys)
subtable = self(expr, cuts, use_cache)
if isinstance(subtable, tuple):
# can't use numpy because the output may be heterogeneous
output = set()
for event in zip(*subtable):
output.add(event)
return output
else:
return set(numpy.unique(subtable))
def scan(self, expr=None, cuts=None, subset=slice(0, 10), use_cache=True, width=12):
"""Print a table or subtable of values on the screen.
Arguments:
expr (string): comma-separated set of expressions to print
(if `None`, print all fields)
cuts (string): expression for filtering out unwanted data
subset (slice): slice applied to all fields, so that the
output is manageable
            use_cache (bool): if True, keep track of all preselected
                subtables (it is likely that the user will want them again)
            width (int): width, in characters, of each printed column
        """
if expr is None:
keys = self.keys()
expr = ",".join(keys)
subtable = self(expr, cuts, use_cache)
fields = re.split(self._comma, expr)
format_fields = []
separator = []
format_line = []
typechar = []
for field, array in zip(fields, subtable):
format_fields.append("%%%d.%ds" % (width, width))
separator.append("=" * width)
if array.dtype.char in numpy.typecodes["Float"]:
format_line.append("%%%dg" % width)
typechar.append("f")
elif array.dtype.char in numpy.typecodes["AllInteger"]:
format_line.append("%%%dd" % width)
typechar.append("i")
elif array.dtype.char == "?":
format_line.append("%%%ds" % width)
typechar.append("?")
elif array.dtype.char in numpy.typecodes["Complex"]:
format_line.append("%%%dg+%%%dgj" % ((width-2)//2, (width-2)//2))
typechar.append("F")
elif array.dtype.char in numpy.typecodes["Character"] + "Sa":
format_line.append("%%%d.%ds" % (width, width))
typechar.append("S")
format_fields = " ".join(format_fields)
separator = "=".join(separator)
print format_fields % tuple(fields)
print separator
if isinstance(subtable, tuple):
for records in zip(*[i[subset] for i in subtable]):
for r, f, c in zip(records, format_line, typechar):
if c == "F":
print f % (r.real, r.imag),
elif c == "?":
if r: print f % "True",
else: print f % "False",
elif c == "S":
print f % ("'%s'" % r),
else:
print f % r,
print
else:
for record in subtable[subset]:
if typechar[0] == "F":
print format_line[0] % (record.real, record.imag)
elif typechar[0] == "?":
if record: print format_line[0] % "True"
else: print format_line[0] % "False"
elif typechar[0] == "S":
print format_line[0] % ("'%s'" % record)
else:
print format_line[0] % record
def histogram(self, expr, cuts=None, weights=None, numbins=utilities.binning, lowhigh=utilities.calcrange_quartile, use_cache=True, **kwds):
"""Draw and return a histogram based on the expression and cuts.
Arguments:
expr (string): expression to evaluate in the namespace of
the table and plot
cuts (string): expression for filtering out unwanted data
weights (string): optional expression for the weight of
each data entry
numbins (int or function): number of bins or a function
that returns an optimized number of bins, given data, low,
and high
lowhigh ((low, high) or function): range of the histogram or
a function that returns an optimized range given the data
use_cache (bool): if True, keep track of all preselected
subtables (it is likely that the user will want them again)
`**kwds`: any other arguments are passed to the Histogram
constructor
"""
if numbins is Auto: numbins = utilities.binning
if lowhigh is Auto: lowhigh = utilities.calcrange_quartile
data = self(expr, cuts)
if isinstance(data, tuple):
raise ContainerException, "The expr must return one-dimensional data (no commas!)"
if weights is not None:
dataweight = self(weights, cuts)
            if isinstance(dataweight, tuple):
raise ContainerException, "The weights must return one-dimensional data (no commas!)"
else:
dataweight = numpy.ones(len(data), numpy.float)
if len(data) > 0 and data.dtype.char in numpy.typecodes["Character"] + "SU":
bins = numpy.unique(data)
bins.sort()
kwds2 = {"xlabel": expr}
kwds2.update(kwds)
output = HistogramCategorical(bins, data, dataweight, **kwds2)
elif len(data) == 0 or data.dtype.char in numpy.typecodes["Float"] + numpy.typecodes["AllInteger"]:
if isinstance(lowhigh, (tuple, list)) and len(lowhigh) == 2 and isinstance(lowhigh[0], (numbers.Number, numpy.number)) and isinstance(lowhigh[1], (numbers.Number, numpy.number)):
low, high = lowhigh
elif callable(lowhigh):
low, high = lowhigh(data, kwds.get("xlog", False))
else:
raise ContainerException, "The 'lowhigh' argument must be a function or (low, high) tuple."
if isinstance(numbins, (int, long)):
pass
elif callable(numbins):
numbins = numbins(data, low, high)
else:
raise ContainerException, "The 'numbins' argument must be a function or an int."
if numbins < 1: numbins = 1
if low >= high: low, high = 0., 1.
kwds2 = {"xlabel": expr}
kwds2.update(kwds)
output = Histogram(numbins, low, high, data, dataweight, **kwds2)
else:
raise ContainerException, "Unrecognized data type: %s (%s)" % (data.dtype.name, data.dtype.char)
return output
def timeseries(self, expr, cuts=None, ex=None, ey=None, exl=None, eyl=None, limit=1000, use_cache=True, **kwds):
"""Draw and return a scatter-plot based on the expression and cuts.
Arguments:
expr (string): expression to evaluate in the namespace of
the table and plot
cuts (string): expression for filtering out unwanted data
ex (string): optional expression for x error bars (in seconds)
ey (string): optional expression for y error bars
exl (string): optional expression for x lower error bars (in seconds)
eyl (string): optional expression for y lower error bars
limit (int or `None`): set an upper limit on the number of
points that will be drawn
use_cache (bool): if True, keep track of all preselected
subtables (it is likely that the user will want them again)
`**kwds`: any other arguments are passed to the Scatter
constructor
"""
return self.scatter(expr, cuts, ex, ey, exl, eyl, limit=limit, timeseries=True, use_cache=use_cache, **kwds)
def scatter(self, expr, cuts=None, ex=None, ey=None, exl=None, eyl=None, limit=1000, timeseries=False, use_cache=True, **kwds):
"""Draw and return a scatter-plot based on the expression and cuts.
Arguments:
expr (string): expression to evaluate in the namespace of
the table and plot
cuts (string): expression for filtering out unwanted data
ex (string): optional expression for x error bars
ey (string): optional expression for y error bars
exl (string): optional expression for x lower error bars
eyl (string): optional expression for y lower error bars
limit (int or `None`): set an upper limit on the number of
points that will be drawn
timeseries (bool): if True, produce a TimeSeries, rather
than a Scatter
use_cache (bool): if True, keep track of all preselected
subtables (it is likely that the user will want them again)
`**kwds`: any other arguments are passed to the Scatter
constructor
"""
fields = re.split(self._comma, expr)
data = self(expr, cuts)
# convert one-dimensional complex data into two-dimensional real data
if not isinstance(data, tuple) and data.dtype.char in numpy.typecodes["Complex"]:
data = numpy.real(data), numpy.imag(data)
if not isinstance(data, tuple) or len(data) != 2:
raise ContainerException, "The expr must return two-dimensional data (include a comma!)"
xdata, ydata = data
if ex is not None:
ex = self(ex, cuts)
if isinstance(ex, tuple):
raise ContainerException, "The ex must return one-dimensional data"
if ey is not None:
ey = self(ey, cuts)
if isinstance(ey, tuple):
raise ContainerException, "The ey must return one-dimensional data"
if exl is not None:
exl = self(exl, cuts)
if isinstance(exl, tuple):
raise ContainerException, "The exl must return one-dimensional data"
if eyl is not None:
eyl = self(eyl, cuts)
if isinstance(eyl, tuple):
raise ContainerException, "The eyl must return one-dimensional data"
if timeseries:
if xdata.dtype.char in numpy.typecodes["Float"] + numpy.typecodes["AllInteger"]:
kwds2 = {"xlabel": fields[0], "ylabel": fields[1]}
kwds2.update(kwds)
output = TimeSeries(x=xdata, y=ydata, ex=ex, ey=ey, exl=exl, eyl=eyl, informat=None, limit=limit, **kwds2)
elif xdata.dtype.char in numpy.typecodes["Character"] + "Sa":
kwds2 = {"xlabel": fields[0], "ylabel": fields[1]}
kwds2.update(kwds)
output = TimeSeries(x=xdata, y=ydata, ex=ex, ey=ey, exl=exl, eyl=eyl, limit=limit, **kwds2)
else:
raise ContainerException, "Unsupported data type for x of TimeSeries: %s" % xdata.dtype.name
else:
kwds2 = {"xlabel": fields[0], "ylabel": fields[1]}
kwds2.update(kwds)
output = Scatter(x=xdata, y=ydata, ex=ex, ey=ey, exl=exl, eyl=eyl, limit=limit, **kwds2)
return output
def inspect(*files, **kwds):
    """Load one or more data files into a single InspectTable.
    Arguments:
        `*files`: names of the files to load
        `**kwds`: any other arguments are passed to InspectTable.load for each file
    """
output = InspectTable()
first = True
for f in files:
if first:
output.load(f, **kwds)
first = False
else:
output.extend(InspectTable().load(f, **kwds))
return output
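# A minimal usage sketch for InspectTable and inspect(). The file name and field
# names below ("tracks.csv", "pt", "eta", "charge") are placeholders, and the set of
# formats understood by load() is not shown in this excerpt, so treat this as an
# outline of a typical interactive session rather than a runnable script:
#
#     table = inspect("tracks.csv")                         # load one or more files
#     table.scan("pt, eta", cuts="charge > 0")              # print the first few rows
#     h = table.histogram("pt", cuts="charge > 0", lowhigh=(0., 100.))
#     s = table.scatter("eta, pt", cuts="charge > 0", limit=500)
#
# Because both plots use the same cuts, the later calls reuse the cached subtable
# built by __call__, up to InspectTable.cache_limit cached selections.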
| opendatagroup/cassius | tags/cassius-0_1_0_0/cassius/containers.py | Python | apache-2.0 | 141,242 |
# Copyright 2013 IBM Corp
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import testtools
from tempest.lib import base as test
from tempest.lib import decorators
from tempest.tests.lib import base
class TestSkipBecauseDecorator(base.TestCase):
def _test_skip_because_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@decorators.skip_because(**decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException, t.test_bar)
else:
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
def test_skip_because_bug(self):
self._test_skip_because_helper(bug='12345')
def test_skip_because_bug_and_condition_true(self):
self._test_skip_because_helper(bug='12348', condition=True)
def test_skip_because_bug_and_condition_false(self):
self._test_skip_because_helper(expected_to_skip=False,
bug='12349', condition=False)
def test_skip_because_bug_without_bug_never_skips(self):
"""Never skip without a bug parameter."""
self._test_skip_because_helper(expected_to_skip=False,
condition=True)
self._test_skip_because_helper(expected_to_skip=False)
def test_skip_because_invalid_bug_number(self):
"""Raise ValueError if with an invalid bug number"""
self.assertRaises(ValueError, self._test_skip_because_helper,
bug='critical_bug')
class TestIdempotentIdDecorator(base.TestCase):
def _test_helper(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
"""Docstring"""
pass
return foo
def _test_helper_without_doc(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
pass
return foo
def test_positive(self):
_id = str(uuid.uuid4())
foo = self._test_helper(_id)
self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_positive_without_doc(self):
_id = str(uuid.uuid4())
foo = self._test_helper_without_doc(_id)
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_idempotent_id_not_str(self):
_id = 42
self.assertRaises(TypeError, self._test_helper, _id)
def test_idempotent_id_not_valid_uuid(self):
_id = '42'
self.assertRaises(ValueError, self._test_helper, _id)
class TestSkipUnlessAttrDecorator(base.TestCase):
def _test_skip_unless_attr(self, attr, expected_to_skip=True):
class TestFoo(test.BaseTestCase):
expected_attr = not expected_to_skip
@decorators.skip_unless_attr(attr)
def test_foo(self):
pass
t = TestFoo('test_foo')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException,
                              t.test_foo)
else:
try:
t.test_foo()
except Exception:
raise testtools.TestCase.failureException()
def test_skip_attr_does_not_exist(self):
self._test_skip_unless_attr('unexpected_attr')
def test_skip_attr_false(self):
self._test_skip_unless_attr('expected_attr')
def test_no_skip_for_attr_exist_and_true(self):
self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
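# A hedged sketch of how the decorators exercised above combine in an ordinary
# tempest-style test class. The UUID and bug number are placeholders chosen only for
# illustration, and the methods are deliberately not named test_* so that test
# discovery ignores this sketch.
class _ExampleDecoratedUsage(test.BaseTestCase):
    @decorators.idempotent_id('2eac62a8-7d68-4a8c-89d2-3b2a6de37b3e')
    def example_tagged_with_idempotent_id(self):
        # The decorator records 'id-<uuid>' in __testtools_attrs and prefixes the
        # docstring, as asserted by TestIdempotentIdDecorator above.
        pass
    @decorators.skip_because(bug='12345', condition=True)
    def example_skipped_when_condition_holds(self):
        # Invoking this raises skipException, as asserted by TestSkipBecauseDecorator.
        pass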
| nuagenetworks/tempest | tempest/tests/lib/test_decorators.py | Python | apache-2.0 | 4,381 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Selection strategies for training with multiple adversarial representations.
A selector can select one representation for training at each step, and
maintain its internal state for subsequent selections. The state can also be
updated once every K epochs when the model is evaluated on the validation set.
"""
import gin
import tensorflow.compat.v2 as tf
class SelectionStrategy(tf.Module):
"""Base class for strategies to select representations.
This base class handles sample and update frequencies, as well as logging
selection statistics to TensorBoard. Sub-classes should implement _select()
and _update().
"""
def __init__(self, representation_names, sample_freq, update_freq):
"""Constructs a SelectionStrategy object.
Args:
representation_names: A list of representations names for tf.summary.
sample_freq: Frequency to draw a new selection (in steps).
update_freq: Frequency to update the selector's state (in epochs).
"""
self.num_representations = len(representation_names)
self.representation_names = representation_names
self.sample_freq = sample_freq
self.update_freq = update_freq
# index of the selected representation
self.current_selection = tf.Variable(0, trainable=False)
self.last_selection_step = tf.Variable(-1, trainable=False)
self.last_update_epoch = tf.Variable(0, trainable=False)
self.selection_counter = tf.Variable([0] * self.num_representations)
def select(self, step):
"""Returns the index of the selected representation for a training step."""
if step - self.last_selection_step >= self.sample_freq:
self.current_selection.assign(self._select())
self.last_selection_step.assign(step)
# Increment the counter for the newly selected item.
self.selection_counter.scatter_add(
tf.IndexedSlices(1, self.current_selection))
return self.current_selection.numpy()
def should_update(self, epoch):
"""Returns whether the strategy should update its state at this epoch."""
return epoch - self.last_update_epoch >= self.update_freq
def update(self, epoch, validation_losses):
"""Updates the strategy's state based on current validation losses.
Args:
epoch: Current epoch number.
validation_losses: A list of numbers, one for each representation.
"""
self._update(epoch, validation_losses)
self.last_update_epoch.assign(epoch)
# Log the counts since last update to the summary and reset the counts.
for i in range(self.num_representations):
tf.summary.scalar(
f"representations/selected/{self.representation_names[i]}",
self.selection_counter[i],
step=epoch)
self.selection_counter.assign([0] * self.num_representations)
def _select(self):
raise NotImplementedError
def _update(self, epoch, validation_losses):
"""See update()."""
raise NotImplementedError
class GreedyStrategy(SelectionStrategy):
"""Greedy strategy which selects the one with the highest validation loss."""
def _select(self):
    # No need to reselect since this strategy is deterministic.
return self.current_selection.numpy()
def _update(self, epoch, validation_losses):
del epoch # unused
self.current_selection.assign(
tf.cast(tf.argmax(validation_losses), self.current_selection.dtype))
class RoundRobinStrategy(SelectionStrategy):
"""Round-robin strategy which selects each representation sequentially."""
def _select(self):
return (self.current_selection + 1) % self.num_representations
def _update(self, epoch, validation_losses):
pass
@gin.configurable
def eta_scheduler(epoch, values=(0.1,), breakpoints=()):
"""Piecewise constant schedule for eta (selector weight learning rate)."""
idx = sum(1 if epoch > b else 0 for b in breakpoints)
return values[idx]
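# Worked example of the schedule above (the values and breakpoints are illustrative,
# not defaults from any experiment): with values=(0.1, 0.01) and breakpoints=(5,),
# eta_scheduler(3, ...) returns 0.1 because epoch 3 has not passed the breakpoint,
# while eta_scheduler(6, ...) returns 0.01 because exactly one breakpoint (5) has
# been exceeded. In general the i-th value applies once `epoch` is greater than the
# first i breakpoints.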
class MultiplicativeWeightStrategy(SelectionStrategy):
"""Multiplicative-weight strategy which samples representations adaptively."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Weights of each representation. Each selection is a sample drawn
# proportionally to the weights.
# TODO(csferng): Store the weights in logit space.
self.weights = tf.Variable(tf.ones(self.num_representations))
self.current_selection.assign(self._select())
def _select(self):
logits = tf.math.log(self.weights / tf.reduce_sum(self.weights))
return tf.random.categorical(tf.reshape(logits, [1, -1]), 1)[0][0].numpy()
def _update(self, epoch, validation_losses):
validation_losses = tf.convert_to_tensor(validation_losses)
eta = eta_scheduler(epoch)
self.weights.assign(self.weights * tf.math.exp(eta * validation_losses))
for i in range(self.num_representations):
tf.summary.scalar(
f"representations/weight/{self.representation_names[i]}",
self.weights[i],
step=epoch)
STRATEGY_CLASSES = {
"greedy": GreedyStrategy,
"roundrobin": RoundRobinStrategy,
"multiweight": MultiplicativeWeightStrategy,
}
@gin.configurable
def construct_representation_selector(representation_names,
selection_strategy="multiweight",
sample_freq=351, # in number of steps
update_freq=5): # in number of epochs
return STRATEGY_CLASSES[selection_strategy](representation_names, sample_freq,
update_freq)
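if __name__ == "__main__":
  # Minimal smoke-test sketch of the selection API defined above. The representation
  # names, frequencies, and loss values are made up for illustration; in real training
  # they come from gin bindings (e.g. binding
  # construct_representation_selector.selection_strategy in a .gin file) and from
  # evaluating each representation on a validation set.
  demo_selector = RoundRobinStrategy(
      ["clean", "adversarial"], sample_freq=1, update_freq=1)
  for demo_step in range(4):
    # With sample_freq=1 a new index is drawn every step, cycling 1, 0, 1, 0.
    print(demo_step, demo_selector.select(demo_step))
  if demo_selector.should_update(epoch=1):
    # Without a default tf.summary writer the scalar logging inside update() is a no-op.
    demo_selector.update(1, validation_losses=[0.5, 0.7])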
| tensorflow/neural-structured-learning | research/multi_representation_adversary/multi_representation_adversary/selectors.py | Python | apache-2.0 | 6,095 |
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating revision chains in the database."""
import d1_common.types.exceptions
import d1_gmn.app
import d1_gmn.app.did
import d1_gmn.app.model_util
import d1_gmn.app.models
def create_or_update_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
chain_model = _get_chain_by_pid(pid)
if chain_model:
_set_chain_sid(chain_model, sid)
else:
_add_sciobj(pid, sid, obsoletes_pid, obsoleted_by_pid)
_update_sid_to_last_existing_pid_map(pid)
def delete_chain(pid):
pid_to_chain_model = d1_gmn.app.models.ChainMember.objects.get(pid__did=pid)
chain_model = pid_to_chain_model.chain
pid_to_chain_model.delete()
if not d1_gmn.app.models.ChainMember.objects.filter(chain=chain_model).exists():
if chain_model.sid:
# Cascades back to chain_model.
d1_gmn.app.models.IdNamespace.objects.filter(
did=chain_model.sid.did
).delete()
else:
chain_model.delete()
def cut_from_chain(sciobj_model):
"""Remove an object from a revision chain.
The object can be at any location in the chain, including the head or tail.
Preconditions:
    - The object with the pid is verified to exist and to be a member of a
revision chain. E.g., with:
d1_gmn.app.views.asserts.is_existing_object(pid)
d1_gmn.app.views.asserts.is_in_revision_chain(pid)
Postconditions:
- The given object is a standalone object with empty obsoletes, obsoletedBy and
seriesId fields.
- The previously adjacent objects in the chain are adjusted to close any gap that
was created or remove dangling reference at the head or tail.
- If the object was the last object in the chain and the chain has a SID, the SID
reference is shifted over to the new last object in the chain.
"""
if _is_head(sciobj_model):
old_pid = sciobj_model.obsoletes.did
_cut_head_from_chain(sciobj_model)
elif _is_tail(sciobj_model):
old_pid = sciobj_model.obsoleted_by.did
_cut_tail_from_chain(sciobj_model)
else:
old_pid = sciobj_model.obsoleted_by.did
_cut_embedded_from_chain(sciobj_model)
_update_sid_to_last_existing_pid_map(old_pid)
def get_all_pid_by_sid(sid):
return [c.pid.did for c in _get_all_chain_member_queryset_by_sid(sid)]
# def set_revision(pid, obsoletes_pid=None, obsoleted_by_pid=None):
# sciobj_model = d1_gmn.app.util.get_sci_model(pid)
# set_revision_links(sciobj_model, obsoletes_pid, obsoleted_by_pid)
# sciobj_model.save()
def resolve_sid(sid):
"""Get the PID to which the ``sid`` currently maps.
Preconditions:
- ``sid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_sid().
"""
return d1_gmn.app.models.Chain.objects.get(sid__did=sid).head_pid.did
def get_sid_by_pid(pid):
"""Given the ``pid`` of the object in a chain, return the SID for the chain.
Return None if there is no SID for the chain. This operation is also valid
for standalone objects which may or may not have a SID.
This is the reverse of resolve.
All known PIDs are associated with a chain.
Preconditions:
- ``pid`` is verified to exist. E.g., with
d1_gmn.app.views.asserts.is_existing_object().
"""
return d1_gmn.app.did.get_did_by_foreign_key(_get_chain_by_pid(pid).sid)
def set_revision_links(sciobj_model, obsoletes_pid=None, obsoleted_by_pid=None):
if obsoletes_pid:
sciobj_model.obsoletes = d1_gmn.app.did.get_or_create_did(obsoletes_pid)
_set_revision_reverse(sciobj_model.pid.did, obsoletes_pid, is_obsoletes=False)
if obsoleted_by_pid:
sciobj_model.obsoleted_by = d1_gmn.app.did.get_or_create_did(obsoleted_by_pid)
_set_revision_reverse(sciobj_model.pid.did, obsoleted_by_pid, is_obsoletes=True)
sciobj_model.save()
def is_obsoletes_pid(pid):
"""Return True if ``pid`` is referenced in the obsoletes field of any object.
This will return True even if the PID is in the obsoletes field of an object that
does not exist on the local MN, such as replica that is in an incomplete chain.
"""
return d1_gmn.app.models.ScienceObject.objects.filter(obsoletes__did=pid).exists()
def is_obsoleted_by_pid(pid):
"""Return True if ``pid`` is referenced in the obsoletedBy field of any object.
    This will return True even if the PID is in the obsoletedBy field of an object that
    does not exist on the local MN, such as a replica that is in an incomplete chain.
"""
return d1_gmn.app.models.ScienceObject.objects.filter(
obsoleted_by__did=pid
).exists()
def is_revision(pid):
"""Return True if ``pid`` is referenced in the obsoletes or obsoletedBy field of any
object.
    This will return True even if the PID is in the obsoletes or obsoletedBy field of
    an object that does not exist on the local MN, such as a replica that is in an
    incomplete chain.
"""
return is_obsoletes_pid(pid) or is_obsoleted_by_pid(pid)
def _add_sciobj(pid, sid, obsoletes_pid, obsoleted_by_pid):
is_added = _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid)
if not is_added:
# if not obsoletes_pid and not obsoleted_by_pid:
_add_standalone(pid, sid)
# else:
def _add_standalone(pid, sid):
# assert_sid_unused(sid)
_create_chain(pid, sid)
def _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
_assert_sid_is_in_chain(sid, obsoletes_pid)
_assert_sid_is_in_chain(sid, obsoleted_by_pid)
obsoletes_chain_model = _get_chain_by_pid(obsoletes_pid)
obsoleted_by_chain_model = _get_chain_by_pid(obsoleted_by_pid)
sid_chain_model = _get_chain_by_sid(sid) if sid else None
chain_model = obsoletes_chain_model or obsoleted_by_chain_model or sid_chain_model
if not chain_model:
return False
if obsoletes_chain_model and obsoletes_chain_model != chain_model:
_merge_chains(chain_model, obsoletes_chain_model)
if obsoleted_by_chain_model and obsoleted_by_chain_model != chain_model:
_merge_chains(chain_model, obsoleted_by_chain_model)
_add_pid_to_chain(chain_model, pid)
_set_chain_sid(chain_model, sid)
return True
def _merge_chains(chain_model_a, chain_model_b):
"""Merge two chains.
    For use when it becomes known that two chains that were created separately are
    actually separate sections of the same chain.
E.g.:
- A obsoleted by X is created. A has no SID. X does not exist yet. A chain is
created for A.
- B obsoleting Y is created. B has SID. Y does not exist yet. A chain is created
for B.
- C obsoleting X, obsoleted by Y is created. C tells us that X and Y are in the
same chain, which means that A and B are in the same chain. At this point, the
two chains need to be merged. Merging the chains causes A to take on the SID of
B.
"""
_set_chain_sid(
chain_model_a, d1_gmn.app.did.get_did_by_foreign_key(chain_model_b.sid)
)
for member_model in _get_all_chain_member_queryset_by_chain(chain_model_b):
member_model.chain = chain_model_a
member_model.save()
chain_model_b.delete()
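def _example_merge_scenario():  # pragma: no cover
    """Illustrative sketch only, never called: the create/merge sequence described in
    the _merge_chains() docstring, expressed with the public create_or_update_chain()
    helper above. The PIDs and SID are placeholders, and actually running this requires
    a configured GMN Django environment in which these objects are being created.
    """
    # A is obsoleted by X; X does not exist yet, so a chain is created for A alone.
    create_or_update_chain("pid-A", None, None, "pid-X")
    # B obsoletes Y and carries a SID; Y does not exist yet, so a second chain holds B.
    create_or_update_chain("pid-B", "sid-1", "pid-Y", None)
    # C obsoletes X and is obsoleted by Y, proving both chains are sections of one
    # chain; they are merged and "sid-1" now covers A, C and B.
    create_or_update_chain("pid-C", None, "pid-X", "pid-Y")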
def _add_pid_to_chain(chain_model, pid):
chain_member_model = d1_gmn.app.models.ChainMember(
chain=chain_model, pid=d1_gmn.app.did.get_or_create_did(pid)
)
chain_member_model.save()
def _set_chain_sid(chain_model, sid):
"""Set or update SID for chain.
If the chain already has a SID, ``sid`` must either be None or match the existing
SID.
"""
if not sid:
return
if chain_model.sid and chain_model.sid.did != sid:
raise d1_common.types.exceptions.ServiceFailure(
0,
"Attempted to modify existing SID. "
'existing_sid="{}", new_sid="{}"'.format(chain_model.sid.did, sid),
)
chain_model.sid = d1_gmn.app.did.get_or_create_did(sid)
chain_model.save()
def _assert_sid_is_in_chain(sid, pid):
if not sid or not pid:
return
chain_model = _get_chain_by_pid(pid)
if not chain_model or not chain_model.sid:
return
if chain_model.sid.did != sid:
raise d1_common.types.exceptions.ServiceFailure(
0,
"Attempted to create object in chain with non-matching SID. "
'existing_sid="{}", new_sid="{}"'.format(chain_model.sid.did, sid),
)
def _find_head_or_latest_connected(pid, last_pid=None):
"""Find latest existing sciobj that can be reached by walking towards the head from
``pid``
If ``pid`` does not exist, return None. If chain is connected all the way to head
and head exists, return the head. If chain ends in a dangling obsoletedBy, return
the last existing object.
"""
try:
sci_model = d1_gmn.app.model_util.get_sci_model(pid)
except d1_gmn.app.models.ScienceObject.DoesNotExist:
return last_pid
if sci_model.obsoleted_by is None:
return pid
return _find_head_or_latest_connected(sci_model.obsoleted_by.did, pid)
def _get_chain_by_pid(pid):
"""Find chain by pid.
Return None if not found.
"""
try:
return d1_gmn.app.models.ChainMember.objects.get(pid__did=pid).chain
except d1_gmn.app.models.ChainMember.DoesNotExist:
pass
def _get_chain_by_sid(sid):
"""Return None if not found."""
try:
return d1_gmn.app.models.Chain.objects.get(sid__did=sid)
except d1_gmn.app.models.Chain.DoesNotExist:
pass
def _update_sid_to_last_existing_pid_map(pid):
"""Set chain head PID to the last existing object in the chain to which ``pid``
belongs. If SID has been set for chain, it resolves to chain head PID.
Intended to be called in MNStorage.delete() and other chain manipulation.
Preconditions:
- ``pid`` must exist and be verified to be a PID.
d1_gmn.app.views.asserts.is_existing_object()
"""
last_pid = _find_head_or_latest_connected(pid)
chain_model = _get_chain_by_pid(last_pid)
if not chain_model:
return
chain_model.head_pid = d1_gmn.app.did.get_or_create_did(last_pid)
chain_model.save()
def _create_chain(pid, sid):
"""Create the initial chain structure for a new standalone object. Intended to be
called in MNStorage.create().
Preconditions:
- ``sid`` must be verified to be available to be assigned to a new standalone
object. E.g., with is_valid_sid_for_new_standalone().
"""
chain_model = d1_gmn.app.models.Chain(
# sid=d1_gmn.app.models.did(sid) if sid else None,
head_pid=d1_gmn.app.did.get_or_create_did(pid)
)
chain_model.save()
_add_pid_to_chain(chain_model, pid)
_set_chain_sid(chain_model, sid)
return chain_model
# def _get_or_create_chain_for_pid(pid):
# try:
# return d1_gmn.app.models.ChainMember.objects.get(pid__did=pid).chain
# except d1_gmn.app.models.ChainMember.DoesNotExist:
# return _create_chain(pid, None)
def _map_sid_to_pid(chain_model, sid, pid):
if sid is not None:
chain_model.sid = d1_gmn.app.did.get_or_create_did(sid)
chain_model.head_pid = d1_gmn.app.did.get_or_create_did(pid)
chain_model.save()
def _get_all_chain_member_queryset_by_sid(sid):
return d1_gmn.app.models.ChainMember.objects.filter(
chain=d1_gmn.app.models.Chain.objects.get(sid__did=sid)
)
def _get_all_chain_member_queryset_by_chain(chain_model):
return d1_gmn.app.models.ChainMember.objects.filter(chain=chain_model)
def _cut_head_from_chain(sciobj_model):
new_head_model = d1_gmn.app.model_util.get_sci_model(sciobj_model.obsoletes.did)
new_head_model.obsoleted_by = None
sciobj_model.obsoletes = None
sciobj_model.save()
new_head_model.save()
def _cut_tail_from_chain(sciobj_model):
new_tail_model = d1_gmn.app.model_util.get_sci_model(sciobj_model.obsoleted_by.did)
new_tail_model.obsoletes = None
sciobj_model.obsoleted_by = None
sciobj_model.save()
new_tail_model.save()
def _cut_embedded_from_chain(sciobj_model):
prev_model = d1_gmn.app.model_util.get_sci_model(sciobj_model.obsoletes.did)
next_model = d1_gmn.app.model_util.get_sci_model(sciobj_model.obsoleted_by.did)
prev_model.obsoleted_by = next_model.pid
next_model.obsoletes = prev_model.pid
sciobj_model.obsoletes = None
sciobj_model.obsoleted_by = None
sciobj_model.save()
prev_model.save()
next_model.save()
def _is_head(sciobj_model):
return sciobj_model.obsoletes and not sciobj_model.obsoleted_by
def _is_tail(sciobj_model):
return sciobj_model.obsoleted_by and not sciobj_model.obsoletes
def _set_revision_reverse(to_pid, from_pid, is_obsoletes):
try:
sciobj_model = d1_gmn.app.model_util.get_sci_model(from_pid)
except d1_gmn.app.models.ScienceObject.DoesNotExist:
return
if not d1_gmn.app.did.is_existing_object(to_pid):
return
did_model = d1_gmn.app.did.get_or_create_did(to_pid)
if is_obsoletes:
sciobj_model.obsoletes = did_model
else:
sciobj_model.obsoleted_by = did_model
sciobj_model.save()
# def assert_sid_unused(sid):
# if not sid:
# return
# if find_chain_by_sid(sid):
# raise d1_common.types.exceptions.ServiceFailure(
# 0, u'Attempted to create standalone object with SID already in use. '
# 'sid="{}"'.format(sid)
# )
# def upd_sid_resolve(pid, sid=None, obsoletes_pid=None, obsoleted_by_pid=None):
# """Set SID to resolve to the newest object that exists locally for a chain"""
#
# last_pid = find_head_or_latest_connected(pid)
# def has_chain(pid):
# return d1_gmn.app.models.ChainMember.objects.filter(pid__did=pid).exists()
# def create_chain(sid, pid):
# """Create the initial chain structure for a new standalone object. Intended to
# be called in MNStorage.create().
#
# Preconditions:
# - ``sid`` must either be None or be previously unused.
# d1_gmn.app.views.asserts.is_unused()
# - ``pid`` must exist and be verified to be a PID.
# d1_gmn.app.views.asserts.is_pid()
# """
# chain_model = _get_or_create_chain_for_pid(pid)
# _map_sid_to_pid(chain_model, sid, pid)
# def add_pid_to_chain(sid, old_pid, new_pid):
# """Add a new revision ``new_pid`` to the chain that ``old_pid`` belongs to and
# update any SID to resolve to the new PID. Intended to be called in
# MNStorage.update().
#
# Preconditions:
# - ``sid`` must either be None or match the SID already assigned to the chain.
# - Both ``old_pid`` and ``new_pid`` must exist and be verified to be PIDs
# d1_gmn.app.views.asserts.is_pid()
# """
# chain_model = _get_or_create_chain_for_pid(old_pid)
# _add_pid_to_chain(chain_model, new_pid)
# _map_sid_to_pid(chain_model, sid, new_pid)
# def is_sid_in_revision_chain(sid, pid):
# """Determine if ``sid`` resolves to an object in the revision chain to which
# ``pid`` belongs.
#
# Preconditions:
# - ``sid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_sid().
# """
# chain_pid_list = get_pids_in_revision_chain(pid)
# resolved_pid = resolve_sid(sid)
# return resolved_pid in chain_pid_list
# def update_or_create_sid_to_pid_map(sid, pid):
# """Update existing or create a new ``sid`` to ``pid`` association. Then create
# or update the ``sid`` to resolve to the ``pid``.
#
# Preconditions:
# - ``sid`` is verified to be unused if creating a standalone object (that may later become
# the first object in a chain).
# - ``sid`` is verified to belong to the given chain updating.
# - ``pid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_pid().
# """
# d1_gmn.app.models.sid_to_pid(sid, pid)
# d1_gmn.app.models.sid_to_head_pid(sid, pid)
# def get_sid_by_pid(pid):
# """Get the SID to which the ``pid`` maps.
# Return None if there is no SID maps to ``pid``.
# """
# try:
# return d1_gmn.app.models.SeriesIdToPersistentId.objects.get(
# pid__did=pid
# ).sid.did
# except d1_gmn.app.models.SeriesIdToPersistentId.DoesNotExist:
# return None
# def move_sid_to_last_object_in_chain(pid):
# """Move SID to the last object in a chain to which ``pid`` belongs.
#
# - If the chain does not have a SID, do nothing.
# - If the SID already maps to the last object in the chain, do nothing.
#
# A SID always resolves to the last object in its chain. So System Metadata XML
# docs are used for introducing SIDs and setting initial mappings, but the
# database maintains the current mapping going forward.
#
# Preconditions:
# - PID is verified to exist. E.g., with d1_gmn.app.views.asserts.is_pid().
#
# Postconditions:
# - The SID maps to the last object in the chain.
# """
# sid = sysmeta_db.get_sid_by_pid(pid)
# if sid:
# chain_pid_list = sysmeta_db.get_pids_in_revision_chain(pid)
# update_sid(sid, chain_pid_list[-1])
# def update_revision_chain(pid, obsoletes_pid, obsoleted_by_pid, sid):
# with sysmeta_file.SysMetaFile(pid) as sysmeta_pyxb:
# sysmeta_file.update_revision_chain(
# sysmeta_pyxb, obsoletes_pid, obsoleted_by_pid, sid
# )
# sysmeta_db.update_revision_chain(sysmeta_pyxb)
# if sysmeta.obsoletes is not None:
# chain_pid_list = [pid]
# sci_obj = mn.models.ScienceObject.objects.get(pid__did=pid)
# while sci_obj.obsoletes:
# obsoletes_pid = sysmeta_pyxb.obsoletes.value()
# chain_pid_list.append(obsoletes_pid)
# sci_obj = mn.models.ScienceObject.objects.get(pid__did=obsoletes_pid)
# sci_obj = mn.models.ScienceObject.objects.get(pid__did=pid)
# while sci_obj.obsoleted_by:
# obsoleted_by_pid = sysmeta_pyxb.obsoleted_by.value()
# chain_pid_list.append(obsoleted_by_pid)
# sci_obj = mn.models.ScienceObject.objects.get(pid__did=obsoleted_by_pid)
# return chain_pid_list
| DataONEorg/d1_python | gmn/src/d1_gmn/app/revision.py | Python | apache-2.0 | 18,605 |
# Copyright 2013, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
import uuid
import six
from glance.common import exception
from glance.common import store_utils
from glance.openstack.common import units
import glance.quota
from glance.tests.unit import utils as unit_test_utils
from glance.tests import utils as test_utils
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
class FakeContext(object):
owner = 'someone'
is_admin = False
class FakeImage(object):
size = None
image_id = 'someid'
locations = [{'url': 'file:///not/a/path', 'metadata': {}}]
tags = set([])
def set_data(self, data, size=None):
self.size = 0
for d in data:
self.size += len(d)
def __init__(self, **kwargs):
self.extra_properties = kwargs.get('extra_properties', {})
class TestImageQuota(test_utils.BaseTestCase):
def setUp(self):
super(TestImageQuota, self).setUp()
def tearDown(self):
super(TestImageQuota, self).tearDown()
def _get_image(self, location_count=1, image_size=10):
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = 'xyz'
base_image.size = image_size
image = glance.quota.ImageProxy(base_image, context, db_api, store)
locations = []
for i in range(location_count):
locations.append({'url': 'file:///g/there/it/is%d' % i,
'metadata': {}, 'status': 'active'})
image_values = {'id': 'xyz', 'owner': context.owner,
'status': 'active', 'size': image_size,
'locations': locations}
db_api.image_create(context, image_values)
return image
def test_quota_allowed(self):
quota = 10
self.config(user_storage_quota=str(quota))
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = 'id'
image = glance.quota.ImageProxy(base_image, context, db_api, store)
data = '*' * quota
base_image.set_data(data, size=None)
image.set_data(data)
self.assertEqual(quota, base_image.size)
def _test_quota_allowed_unit(self, data_length, config_quota):
self.config(user_storage_quota=config_quota)
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = 'id'
image = glance.quota.ImageProxy(base_image, context, db_api, store)
data = '*' * data_length
base_image.set_data(data, size=None)
image.set_data(data)
self.assertEqual(data_length, base_image.size)
def test_quota_allowed_unit_b(self):
self._test_quota_allowed_unit(10, '10B')
def test_quota_allowed_unit_kb(self):
self._test_quota_allowed_unit(10, '1KB')
def test_quota_allowed_unit_mb(self):
self._test_quota_allowed_unit(10, '1MB')
def test_quota_allowed_unit_gb(self):
self._test_quota_allowed_unit(10, '1GB')
def test_quota_allowed_unit_tb(self):
self._test_quota_allowed_unit(10, '1TB')
def _quota_exceeded_size(self, quota, data,
deleted=True, size=None):
self.config(user_storage_quota=quota)
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = 'id'
image = glance.quota.ImageProxy(base_image, context, db_api, store)
if deleted:
with patch.object(store_utils, 'safe_delete_from_backend'):
store_utils.safe_delete_from_backend(
context,
image.image_id,
base_image.locations[0])
self.assertRaises(exception.StorageQuotaFull,
image.set_data,
data,
size=size)
def test_quota_exceeded_no_size(self):
quota = 10
data = '*' * (quota + 1)
#NOTE(jbresnah) When the image size is None it means that it is
# not known. In this case the only time we will raise an
# exception is when there is no room left at all, thus we know
# it will not fit.
# That's why 'get_remaining_quota' is mocked with return_value = 0.
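        # For instance (the numbers are only illustrative): with a 10-byte quota and
        # 10 bytes already stored, the remaining quota is 0, so an upload of unknown
        # size is rejected up front instead of being streamed and measured first.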
with patch.object(glance.api.common, 'get_remaining_quota',
return_value=0):
self._quota_exceeded_size(str(quota), data)
def test_quota_exceeded_with_right_size(self):
quota = 10
data = '*' * (quota + 1)
self._quota_exceeded_size(str(quota), data, size=len(data),
deleted=False)
def test_quota_exceeded_with_right_size_b(self):
quota = 10
data = '*' * (quota + 1)
self._quota_exceeded_size('10B', data, size=len(data),
deleted=False)
def test_quota_exceeded_with_right_size_kb(self):
quota = units.Ki
data = '*' * (quota + 1)
self._quota_exceeded_size('1KB', data, size=len(data),
deleted=False)
def test_quota_exceeded_with_lie_size(self):
quota = 10
data = '*' * (quota + 1)
self._quota_exceeded_size(str(quota), data, deleted=False,
size=quota - 1)
def test_append_location(self):
new_location = {'url': 'file:///a/path', 'metadata': {},
'status': 'active'}
image = self._get_image()
pre_add_locations = image.locations[:]
image.locations.append(new_location)
pre_add_locations.append(new_location)
self.assertEqual(image.locations, pre_add_locations)
def test_insert_location(self):
new_location = {'url': 'file:///a/path', 'metadata': {},
'status': 'active'}
image = self._get_image()
pre_add_locations = image.locations[:]
image.locations.insert(0, new_location)
pre_add_locations.insert(0, new_location)
self.assertEqual(image.locations, pre_add_locations)
def test_extend_location(self):
new_location = {'url': 'file:///a/path', 'metadata': {},
'status': 'active'}
image = self._get_image()
pre_add_locations = image.locations[:]
image.locations.extend([new_location])
pre_add_locations.extend([new_location])
self.assertEqual(image.locations, pre_add_locations)
def test_iadd_location(self):
new_location = {'url': 'file:///a/path', 'metadata': {},
'status': 'active'}
image = self._get_image()
pre_add_locations = image.locations[:]
image.locations += [new_location]
pre_add_locations += [new_location]
self.assertEqual(image.locations, pre_add_locations)
def test_set_location(self):
new_location = {'url': 'file:///a/path', 'metadata': {},
'status': 'active'}
image = self._get_image()
image.locations = [new_location]
self.assertEqual(image.locations, [new_location])
def _make_image_with_quota(self, image_size=10, location_count=2):
quota = image_size * location_count
self.config(user_storage_quota=str(quota))
return self._get_image(image_size=image_size,
location_count=location_count)
def test_exceed_append_location(self):
image = self._make_image_with_quota()
self.assertRaises(exception.StorageQuotaFull,
image.locations.append,
{'url': 'file:///a/path', 'metadata': {},
'status': 'active'})
def test_exceed_insert_location(self):
image = self._make_image_with_quota()
self.assertRaises(exception.StorageQuotaFull,
image.locations.insert,
0,
{'url': 'file:///a/path', 'metadata': {},
'status': 'active'})
def test_exceed_extend_location(self):
image = self._make_image_with_quota()
self.assertRaises(exception.StorageQuotaFull,
image.locations.extend,
[{'url': 'file:///a/path', 'metadata': {},
'status': 'active'}])
def test_set_location_under(self):
image = self._make_image_with_quota(location_count=1)
image.locations = [{'url': 'file:///a/path', 'metadata': {},
'status': 'active'}]
def test_set_location_exceed(self):
image = self._make_image_with_quota(location_count=1)
try:
image.locations = [{'url': 'file:///a/path', 'metadata': {},
'status': 'active'},
{'url': 'file:///a/path2', 'metadata': {},
'status': 'active'}]
self.fail('Should have raised the quota exception')
except exception.StorageQuotaFull:
pass
def test_iadd_location_exceed(self):
image = self._make_image_with_quota(location_count=1)
try:
image.locations += [{'url': 'file:///a/path', 'metadata': {},
'status': 'active'}]
self.fail('Should have raised the quota exception')
except exception.StorageQuotaFull:
pass
def test_append_location_for_queued_image(self):
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = str(uuid.uuid4())
image = glance.quota.ImageProxy(base_image, context, db_api, store)
self.assertIsNone(image.size)
self.stubs.Set(store_api, 'get_size_from_backend',
unit_test_utils.fake_get_size_from_backend)
image.locations.append({'url': 'file:///fake.img.tar.gz',
'metadata': {}})
self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
image.locations)
def test_insert_location_for_queued_image(self):
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = str(uuid.uuid4())
image = glance.quota.ImageProxy(base_image, context, db_api, store)
self.assertIsNone(image.size)
self.stubs.Set(store_api, 'get_size_from_backend',
unit_test_utils.fake_get_size_from_backend)
image.locations.insert(0,
{'url': 'file:///fake.img.tar.gz',
'metadata': {}})
self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
image.locations)
def test_set_location_for_queued_image(self):
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = str(uuid.uuid4())
image = glance.quota.ImageProxy(base_image, context, db_api, store)
self.assertIsNone(image.size)
self.stubs.Set(store_api, 'get_size_from_backend',
unit_test_utils.fake_get_size_from_backend)
image.locations = [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
self.assertEqual([{'url': 'file:///fake.img.tar.gz', 'metadata': {}}],
image.locations)
def test_iadd_location_for_queued_image(self):
context = FakeContext()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
base_image = FakeImage()
base_image.image_id = str(uuid.uuid4())
image = glance.quota.ImageProxy(base_image, context, db_api, store)
self.assertIsNone(image.size)
self.stubs.Set(store_api, 'get_size_from_backend',
unit_test_utils.fake_get_size_from_backend)
image.locations += [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
image.locations)
class TestImagePropertyQuotas(test_utils.BaseTestCase):
def setUp(self):
super(TestImagePropertyQuotas, self).setUp()
self.base_image = FakeImage()
self.image = glance.quota.ImageProxy(self.base_image,
mock.Mock(),
mock.Mock(),
mock.Mock())
self.image_repo_mock = mock.Mock()
self.image_repo_mock.add.return_value = self.base_image
self.image_repo_mock.save.return_value = self.base_image
self.image_repo_proxy = glance.quota.ImageRepoProxy(
self.image_repo_mock,
mock.Mock(),
mock.Mock(),
mock.Mock())
def test_save_image_with_image_property(self):
self.config(image_property_quota=1)
self.image.extra_properties = {'foo': 'bar'}
self.image_repo_proxy.save(self.image)
self.image_repo_mock.save.assert_called_once_with(self.base_image,
from_state=None)
def test_save_image_too_many_image_properties(self):
self.config(image_property_quota=1)
self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
self.image_repo_proxy.save, self.image)
self.assertIn("Attempted: 2, Maximum: 1", six.text_type(exc))
def test_save_image_unlimited_image_properties(self):
self.config(image_property_quota=-1)
self.image.extra_properties = {'foo': 'bar'}
self.image_repo_proxy.save(self.image)
self.image_repo_mock.save.assert_called_once_with(self.base_image,
from_state=None)
def test_add_image_with_image_property(self):
self.config(image_property_quota=1)
self.image.extra_properties = {'foo': 'bar'}
self.image_repo_proxy.add(self.image)
self.image_repo_mock.add.assert_called_once_with(self.base_image)
def test_add_image_too_many_image_properties(self):
self.config(image_property_quota=1)
self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
self.image_repo_proxy.add, self.image)
self.assertIn("Attempted: 2, Maximum: 1", six.text_type(exc))
def test_add_image_unlimited_image_properties(self):
self.config(image_property_quota=-1)
self.image.extra_properties = {'foo': 'bar'}
self.image_repo_proxy.add(self.image)
self.image_repo_mock.add.assert_called_once_with(self.base_image)
def _quota_exceed_setup(self):
self.config(image_property_quota=2)
self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham'}
self.image = glance.quota.ImageProxy(self.base_image,
mock.Mock(),
mock.Mock(),
mock.Mock())
def test_modify_image_properties_when_quota_exceeded(self):
self._quota_exceed_setup()
self.config(image_property_quota=1)
self.image.extra_properties = {'foo': 'frob', 'spam': 'eggs'}
self.image_repo_proxy.save(self.image)
self.image_repo_mock.save.assert_called_once_with(self.base_image,
from_state=None)
self.assertEqual('frob', self.base_image.extra_properties['foo'])
self.assertEqual('eggs', self.base_image.extra_properties['spam'])
def test_delete_image_properties_when_quota_exceeded(self):
self._quota_exceed_setup()
self.config(image_property_quota=1)
del self.image.extra_properties['foo']
self.image_repo_proxy.save(self.image)
self.image_repo_mock.save.assert_called_once_with(self.base_image,
from_state=None)
self.assertNotIn('foo', self.base_image.extra_properties)
self.assertEqual('ham', self.base_image.extra_properties['spam'])
def test_exceed_quota_during_patch_operation(self):
self._quota_exceed_setup()
self.image.extra_properties['frob'] = 'baz'
self.image.extra_properties['lorem'] = 'ipsum'
self.assertEqual('bar', self.base_image.extra_properties['foo'])
self.assertEqual('ham', self.base_image.extra_properties['spam'])
self.assertEqual('baz', self.base_image.extra_properties['frob'])
self.assertEqual('ipsum', self.base_image.extra_properties['lorem'])
del self.image.extra_properties['frob']
del self.image.extra_properties['lorem']
self.image_repo_proxy.save(self.image)
call_args = mock.call(self.base_image, from_state=None)
self.assertEqual(call_args, self.image_repo_mock.save.call_args)
self.assertEqual('bar', self.base_image.extra_properties['foo'])
self.assertEqual('ham', self.base_image.extra_properties['spam'])
self.assertNotIn('frob', self.base_image.extra_properties)
self.assertNotIn('lorem', self.base_image.extra_properties)
def test_quota_exceeded_after_delete_image_properties(self):
self.config(image_property_quota=3)
self.base_image.extra_properties = {'foo': 'bar',
'spam': 'ham',
'frob': 'baz'}
self.image = glance.quota.ImageProxy(self.base_image,
mock.Mock(),
mock.Mock(),
mock.Mock())
self.config(image_property_quota=1)
del self.image.extra_properties['foo']
self.image_repo_proxy.save(self.image)
self.image_repo_mock.save.assert_called_once_with(self.base_image,
from_state=None)
self.assertNotIn('foo', self.base_image.extra_properties)
self.assertEqual('ham', self.base_image.extra_properties['spam'])
self.assertEqual('baz', self.base_image.extra_properties['frob'])
class TestImageTagQuotas(test_utils.BaseTestCase):
def setUp(self):
super(TestImageTagQuotas, self).setUp()
self.base_image = mock.Mock()
self.base_image.tags = set([])
self.base_image.extra_properties = {}
self.image = glance.quota.ImageProxy(self.base_image,
mock.Mock(),
mock.Mock(),
mock.Mock())
self.image_repo_mock = mock.Mock()
self.image_repo_proxy = glance.quota.ImageRepoProxy(
self.image_repo_mock,
mock.Mock(),
mock.Mock(),
mock.Mock())
def test_replace_image_tag(self):
self.config(image_tag_quota=1)
self.image.tags = ['foo']
self.assertEqual(len(self.image.tags), 1)
def test_replace_too_many_image_tags(self):
self.config(image_tag_quota=0)
exc = self.assertRaises(exception.ImageTagLimitExceeded,
setattr, self.image, 'tags', ['foo', 'bar'])
self.assertIn('Attempted: 2, Maximum: 0', six.text_type(exc))
self.assertEqual(len(self.image.tags), 0)
def test_replace_unlimited_image_tags(self):
self.config(image_tag_quota=-1)
self.image.tags = ['foo']
self.assertEqual(len(self.image.tags), 1)
def test_add_image_tag(self):
self.config(image_tag_quota=1)
self.image.tags.add('foo')
self.assertEqual(len(self.image.tags), 1)
def test_add_too_many_image_tags(self):
self.config(image_tag_quota=1)
self.image.tags.add('foo')
exc = self.assertRaises(exception.ImageTagLimitExceeded,
self.image.tags.add, 'bar')
self.assertIn('Attempted: 2, Maximum: 1', six.text_type(exc))
def test_add_unlimited_image_tags(self):
self.config(image_tag_quota=-1)
self.image.tags.add('foo')
self.assertEqual(len(self.image.tags), 1)
def test_remove_image_tag_while_over_quota(self):
self.config(image_tag_quota=1)
self.image.tags.add('foo')
self.assertEqual(len(self.image.tags), 1)
self.config(image_tag_quota=0)
self.image.tags.remove('foo')
self.assertEqual(len(self.image.tags), 0)
class TestQuotaImageTagsProxy(test_utils.BaseTestCase):
def setUp(self):
super(TestQuotaImageTagsProxy, self).setUp()
def test_add(self):
proxy = glance.quota.QuotaImageTagsProxy(set([]))
proxy.add('foo')
self.assertIn('foo', proxy)
def test_add_too_many_tags(self):
self.config(image_tag_quota=0)
proxy = glance.quota.QuotaImageTagsProxy(set([]))
exc = self.assertRaises(exception.ImageTagLimitExceeded,
proxy.add, 'bar')
self.assertIn('Attempted: 1, Maximum: 0', six.text_type(exc))
def test_equals(self):
proxy = glance.quota.QuotaImageTagsProxy(set([]))
self.assertEqual(set([]), proxy)
def test_contains(self):
proxy = glance.quota.QuotaImageTagsProxy(set(['foo']))
self.assertIn('foo', proxy)
def test_len(self):
proxy = glance.quota.QuotaImageTagsProxy(set(['foo',
'bar',
'baz',
'niz']))
self.assertEqual(len(proxy), 4)
def test_iter(self):
items = set(['foo', 'bar', 'baz', 'niz'])
proxy = glance.quota.QuotaImageTagsProxy(items.copy())
self.assertEqual(len(items), 4)
for item in proxy:
items.remove(item)
self.assertEqual(len(items), 0)
class TestImageMemberQuotas(test_utils.BaseTestCase):
def setUp(self):
super(TestImageMemberQuotas, self).setUp()
db_api = unit_test_utils.FakeDB()
store_api = unit_test_utils.FakeStoreAPI()
store = unit_test_utils.FakeStoreUtils(store_api)
context = FakeContext()
self.image = mock.Mock()
self.base_image_member_factory = mock.Mock()
self.image_member_factory = glance.quota.ImageMemberFactoryProxy(
self.base_image_member_factory, context,
db_api, store)
def test_new_image_member(self):
self.config(image_member_quota=1)
self.image_member_factory.new_image_member(self.image,
'fake_id')
self.base_image_member_factory.new_image_member\
.assert_called_once_with(self.image.base, 'fake_id')
def test_new_image_member_unlimited_members(self):
self.config(image_member_quota=-1)
self.image_member_factory.new_image_member(self.image,
'fake_id')
self.base_image_member_factory.new_image_member\
.assert_called_once_with(self.image.base, 'fake_id')
def test_new_image_member_too_many_members(self):
self.config(image_member_quota=0)
self.assertRaises(exception.ImageMemberLimitExceeded,
self.image_member_factory.new_image_member,
self.image, 'fake_id')
class TestImageLocationQuotas(test_utils.BaseTestCase):
def setUp(self):
super(TestImageLocationQuotas, self).setUp()
self.base_image = mock.Mock()
self.base_image.locations = []
self.base_image.size = 1
self.base_image.extra_properties = {}
self.image = glance.quota.ImageProxy(self.base_image,
mock.Mock(),
mock.Mock(),
mock.Mock())
self.image_repo_mock = mock.Mock()
self.image_repo_proxy = glance.quota.ImageRepoProxy(
self.image_repo_mock,
mock.Mock(),
mock.Mock(),
mock.Mock())
def test_replace_image_location(self):
self.config(image_location_quota=1)
self.image.locations = [{"url": "file:///fake.img.tar.gz",
"metadata": {}
}]
self.assertEqual(len(self.image.locations), 1)
def test_replace_too_many_image_locations(self):
self.config(image_location_quota=1)
self.image.locations = [{"url": "file:///fake.img.tar.gz",
"metadata": {}}
]
locations = [
{"url": "file:///fake1.img.tar.gz", "metadata": {}},
{"url": "file:///fake2.img.tar.gz", "metadata": {}},
{"url": "file:///fake3.img.tar.gz", "metadata": {}}
]
exc = self.assertRaises(exception.ImageLocationLimitExceeded,
setattr, self.image, 'locations', locations)
self.assertIn('Attempted: 3, Maximum: 1', six.text_type(exc))
self.assertEqual(len(self.image.locations), 1)
def test_replace_unlimited_image_locations(self):
self.config(image_location_quota=-1)
self.image.locations = [{"url": "file:///fake.img.tar.gz",
"metadata": {}}
]
self.assertEqual(len(self.image.locations), 1)
def test_add_image_location(self):
self.config(image_location_quota=1)
location = {"url": "file:///fake.img.tar.gz", "metadata": {}}
self.image.locations.append(location)
self.assertEqual(len(self.image.locations), 1)
def test_add_too_many_image_locations(self):
self.config(image_location_quota=1)
location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
self.image.locations.append(location1)
location2 = {"url": "file:///fake2.img.tar.gz", "metadata": {}}
exc = self.assertRaises(exception.ImageLocationLimitExceeded,
self.image.locations.append, location2)
self.assertIn('Attempted: 2, Maximum: 1', six.text_type(exc))
def test_add_unlimited_image_locations(self):
self.config(image_location_quota=-1)
location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
self.image.locations.append(location1)
self.assertEqual(len(self.image.locations), 1)
def test_remove_image_location_while_over_quota(self):
self.config(image_location_quota=1)
location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
self.image.locations.append(location1)
self.assertEqual(len(self.image.locations), 1)
self.config(image_location_quota=0)
self.image.locations.remove(location1)
self.assertEqual(len(self.image.locations), 0)
| redhat-openstack/glance | glance/tests/unit/test_quota.py | Python | apache-2.0 | 28,856 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: bigip_snmp_trap
short_description: Manipulate SNMP trap information on a BIG-IP
description:
- Manipulate SNMP trap information on a BIG-IP.
version_added: 2.4
options:
name:
description:
- Name of the SNMP configuration endpoint.
required: True
snmp_version:
description:
- Specifies to which Simple Network Management Protocol (SNMP) version
the trap destination applies.
choices:
- 1
- 2c
community:
description:
- Specifies the community name for the trap destination.
destination:
description:
- Specifies the address for the trap destination. This can be either an
IP address or a hostname.
port:
description:
- Specifies the port for the trap destination.
network:
description:
- Specifies the name of the trap network. This option is not supported in
versions of BIG-IP < 12.1.0. If used on versions < 12.1.0, it will simply
be ignored.
choices:
- other
- management
- default
state:
description:
      - When C(present), ensures that the SNMP trap exists. When
        C(absent), ensures that the SNMP trap does not exist.
default: present
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- This module only supports version v1 and v2c of SNMP.
- The C(network) option is not supported on versions of BIG-IP < 12.1.0 because
the platform did not support that option until 12.1.0. If used on versions
< 12.1.0, it will simply be ignored.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.2.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create snmp v1 trap
bigip_snmp_trap:
community: "general"
destination: "1.2.3.4"
name: "my-trap1"
network: "management"
port: "9000"
snmp_version: "1"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
delegate_to: localhost
- name: Create snmp v2 trap
bigip_snmp_trap:
community: "general"
destination: "5.6.7.8"
name: "my-trap2"
network: "default"
port: "7000"
snmp_version: "2c"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
delegate_to: localhost
'''
RETURN = '''
snmp_version:
description: The new C(snmp_version) configured on the remote device.
returned: changed and success
type: string
sample: "2c"
community:
description: The new C(community) name for the trap destination.
returned: changed and success
type: list
sample: "secret"
destination:
description: The new address for the trap destination in either IP or hostname form.
returned: changed and success
type: string
sample: "1.2.3.4"
port:
description: The new C(port) of the trap destination.
returned: changed and success
type: string
sample: "900"
network:
description: The new name of the network the SNMP trap is on.
returned: changed and success
type: string
sample: "management"
'''
from distutils.version import LooseVersion
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'version': 'snmp_version',
'community': 'community',
'host': 'destination'
}
@property
def snmp_version(self):
if self._values['snmp_version'] is None:
return None
return str(self._values['snmp_version'])
@property
def port(self):
if self._values['port'] is None:
return None
return int(self._values['port'])
def to_return(self):
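        """Collect the values of every attribute listed in returnables.

        The result is passed through _filter_params so that unset values
        are not reported back to Ansible.
        """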
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
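        """Build the keyword arguments that are sent to the BIG-IP API.

        Keys are the API attribute names; values are looked up through
        api_map where a mapping exists, and unset values are filtered
        out.
        """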
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class NetworkedParameters(Parameters):
updatables = [
'snmp_version', 'community', 'destination', 'port', 'network'
]
returnables = [
'snmp_version', 'community', 'destination', 'port', 'network'
]
api_attributes = [
'version', 'community', 'host', 'port', 'network'
]
@property
def network(self):
if self._values['network'] is None:
return None
network = str(self._values['network'])
if network == 'management':
return 'mgmt'
elif network == 'default':
return ''
else:
return network
class NonNetworkedParameters(Parameters):
updatables = [
'snmp_version', 'community', 'destination', 'port'
]
returnables = [
'snmp_version', 'community', 'destination', 'port'
]
api_attributes = [
'version', 'community', 'host', 'port'
]
@property
def network(self):
return None
class ModuleManager(object):
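    """Dispatches to a networked or non-networked manager.

    The choice is based on the TMOS version of the target device, since
    older versions do not support the network parameter.
    """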
def __init__(self, client):
self.client = client
def exec_module(self):
if self.is_version_non_networked():
manager = NonNetworkedManager(self.client)
else:
manager = NetworkedManager(self.client)
return manager.exec_module()
def is_version_non_networked(self):
"""Checks to see if the TMOS version is less than 13
Anything less than BIG-IP 13.x does not support users
on different partitions.
:return: Bool
"""
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('12.1.0'):
return True
else:
return False
class BaseManager(object):
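    """Shared present/absent workflow for SNMP trap resources.

    Subclasses supply the concrete Parameters classes and the list of
    required resources; this class implements the create, update and
    remove logic against the device API.
    """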
def __init__(self, client):
self.client = client
self.have = None
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def exists(self):
result = self.client.api.tm.sys.snmp.traps_s.trap.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
if all(getattr(self.want, v) is None for v in self.required_resources):
raise F5ModuleError(
"You must specify at least one of "
', '.join(self.required_resources)
)
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.sys.snmp.traps_s.trap.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the snmp trap")
return True
def remove_from_device(self):
result = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class NetworkedManager(BaseManager):
def __init__(self, client):
super(NetworkedManager, self).__init__(client)
self.required_resources = [
'version', 'community', 'destination', 'port', 'network'
]
self.want = NetworkedParameters(self.client.module.params)
self.changes = NetworkedParameters()
def _set_changed_options(self):
changed = {}
for key in NetworkedParameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = NetworkedParameters(changed)
def _update_changed_options(self):
changed = {}
for key in NetworkedParameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = NetworkedParameters(changed)
return True
return False
def read_current_from_device(self):
resource = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
self._ensure_network(result)
return NetworkedParameters(result)
def _ensure_network(self, result):
        # BIG-IP's value for "default" is that the key does not
        # exist. This conflicts with our convention that a missing key
        # means "I don't want to change that". Therefore, if we load
        # the information from BIG-IP and find that there is no
        # 'network' key, that is BIG-IP's way of saying that the
        # network value is "default".
if 'network' not in result:
result['network'] = 'default'
class NonNetworkedManager(BaseManager):
def __init__(self, client):
super(NonNetworkedManager, self).__init__(client)
self.required_resources = [
'version', 'community', 'destination', 'port'
]
self.want = NonNetworkedParameters(self.client.module.params)
self.changes = NonNetworkedParameters()
def _set_changed_options(self):
changed = {}
for key in NonNetworkedParameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = NonNetworkedParameters(changed)
def _update_changed_options(self):
changed = {}
for key in NonNetworkedParameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = NonNetworkedParameters(changed)
return True
return False
def read_current_from_device(self):
resource = self.client.api.tm.sys.snmp.traps_s.trap.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return NonNetworkedParameters(result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(
required=True
),
snmp_version=dict(
choices=['1', '2c']
),
community=dict(),
destination=dict(),
port=dict(),
network=dict(
choices=['other', 'management', 'default']
),
state=dict(
default='present',
choices=['absent', 'present']
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| mcgonagle/ansible_f5 | library/bigip_snmp_trap.py | Python | apache-2.0 | 13,740 |
# -*- coding: utf-8 -*-
import logging
from flask import request
from flask import render_template
from relay import app
from relay.decorators import jsonify
from relay.decorators import session_required
from relay.decorators import sanitize_user
from relay.models.relays import add_relay_model
from relay.models.relays import get_relay
from relay.models.relays import get_relays
from relay.models.relays import get_relays_for_recipient
from relay.models.relays import get_sent_relay
from relay.models.relays import get_sent_relays_for_user
from relay.models.relays import add_comment
from relay.models.relays import delete_comment
from relay.models.relays import add_like
from relay.models.relays import unlike
from relay.util import extract_url
from relay.util import make_relay_map
from relay.util import make_sent_relay_map
# remove the direct models from these files, but babysteps
from google.appengine.api import taskqueue
@app.route('/relays/preview')
@jsonify
def relay_preview():
# standardize the url so that we maximize our caching
url = extract_url(request.args.get('url'))
if not url:
return {}
relay = get_relay(url)
if not relay:
relay = add_relay_model(url)
relay.put()
return make_relay_map(relay)
@app.route('/relays/<user_id>/archive', methods=['POST'])
@jsonify
@sanitize_user
@session_required
def archive_relay(user_id, user=None):
sent_relay_id = long(request.form['relay_id'])
sent_relay = get_sent_relay(sent_relay_id)
sent_relay.not_archived.remove(user_id)
sent_relay.archived.append(user_id)
result = sent_relay.put()
logging.info('archiving sent_relay %s'%(str(sent_relay)))
return {'success': result is not None}
@app.route('/relays/like', methods=['POST'])
@jsonify
@session_required
def post_like(user=None):
sent_relay_id = long(request.form['relay_id'])
result = add_like(sent_relay_id, user.key.id())
return {'success': result}
@app.route('/relays/comment', methods=['POST'])
@jsonify
@session_required
def post_comment(user=None):
sent_relay_id = long(request.form['relay_id'])
message = request.form['message']
result = add_comment(sent_relay_id, user.key.id(), message)
return {'success': result}
@app.route('/relays/like/delete', methods=['POST'])
@jsonify
@session_required
def remove_like(user=None):
like_id = long(request.form['like_id'])
  result = unlike(like_id, user.key.id())
return {'success': result}
@app.route('/relays/comment/delete', methods=['POST'])
@jsonify
@session_required
def remove_comment(user=None):
comment_id = long(request.form['comment_id'])
result = delete_comment(comment_id, user.key.id())
return {'success': result}
@app.route('/relays', methods=['GET', 'POST'])
@app.route('/relays/<int:sent_relay_id>')
@jsonify
def reelay(sent_relay_id=None):
if request.method == 'GET':
offset = int(request.args.get('offset', 0))
return {'relays': get_relays(sent_relay_id, offset)}
elif request.method == 'POST':
success = queue_relay(
request.form['url'],
request.form['sender'],
request.form['recipients'],
)
return {'success': success}
@app.route('/a')
def test_relay_html():
relays = get_relays(None, 0)
return render_template('template.html', relays=relays)
def queue_relay(url, sender, recipients):
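  """Hand the relay off to the App Engine task queue.

  The task posts back to the /post_relay_queue handler (defined
  elsewhere), which is expected to perform the actual delivery to the
  recipients. Returns True if the task was enqueued.
  """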
task = taskqueue.add(
url='/post_relay_queue',
params={
'url': url,
'sender': sender,
'recipients': recipients,
}
)
return task.was_enqueued
@app.route('/relays/<user_id>/delete', methods=['POST'])
@jsonify
@sanitize_user
@session_required
def delete_relay(user_id, user=None):
sent_relay_id = long(request.form['relay_id'])
sent_relay = get_sent_relay(sent_relay_id)
recipients = sent_relay.recipients
success = False
# validate this
if user_id == sent_relay.sender:
sent_relay.key.delete()
success = True
if user_id in recipients:
recipients.remove(user_id)
sent_relay.put()
success = True
return {'success': success}
@app.route('/relays/from/<user_id>')
@jsonify
@sanitize_user
@session_required
def get_relays_from_user(user_id=None, user=None):
offset = int(request.args.get('offset', 0))
limit = int(request.args.get('limit', 10))
sent_relays = []
sent_relay_items = get_sent_relays_for_user(user_id, offset=offset, limit=limit)
for sent_relay_item in sent_relay_items:
item_map = make_sent_relay_map(sent_relay_item)
item_map.pop('sender', None)
item_map['recipients'] = sent_relay_item.recipients
sent_relays.append(item_map)
return {'relays': sent_relays}
@app.route('/relays/to/<user_id>')
@jsonify
@sanitize_user
@session_required
def get_relay_to_user(user_id=None, user=None, archived=False):
archived = bool(int(request.args.get('archived', 0)))
return _get_relay_to_user(user_id, user, archived)
def _get_relay_to_user(user_id=None, user=None, archived=False):
offset = int(request.args.get('offset', 0))
relays = get_relays_for_recipient(user_id, offset, archived=archived)
return {
'relays' : [
make_sent_relay_map(r) for r in relays
]
}
| Magicjarvis/relay-backend | relay/views/relays.py | Python | apache-2.0 | 5,107 |
# Copyright 2015-2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from tests.cisco import enable, create_interface_vlan, configuring, configuring_interface_vlan, \
assert_interface_configuration, remove_vlan, create_vlan, set_interface_on_vlan, configuring_interface, \
revert_switchport_mode_access, create_port_channel_interface, configuring_port_channel
from tests.util.protocol_util import SshTester, TelnetTester, with_protocol, ProtocolTest
class TestCiscoSwitchProtocol(ProtocolTest):
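    """Protocol-level tests against the fake Cisco switch.

    __test__ is False, so the suite is only run through concrete
    subclasses that bind a transport; the SshTester/TelnetTester imports
    above suggest SSH and Telnet variants are defined elsewhere in this
    file.
    """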
__test__ = False
test_switch = "cisco"
@with_protocol
def test_enable_command_requires_a_password(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible(t.conf["extra"]["password"])
t.read("my_switch#")
@with_protocol
def test_wrong_password(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible("hello_world")
t.readln("% Access denied")
t.readln("")
t.read("my_switch>")
@with_protocol
def test_no_password_works_for_legacy_reasons(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible("")
t.read("my_switch#")
@with_protocol
def test_exiting_loses_the_connection(self, t):
t.write("enable")
t.read("Password: ")
t.write_invisible(t.conf["extra"]["password"])
t.read("my_switch#")
t.write("exit")
t.read_eof()
@with_protocol
def test_no_such_command_return_to_prompt(self, t):
enable(t)
t.write("shizzle")
t.readln("No such command : shizzle")
t.read("my_switch#")
@with_protocol
@mock.patch("fake_switches.adapters.tftp_reader.read_tftp")
def test_command_copy_failing(self, t, read_tftp):
read_tftp.side_effect = Exception("Stuff")
enable(t)
t.write("copy tftp://1.2.3.4/my-file system:/running-config")
t.read("Destination filename [running-config]? ")
t.write("gneh")
t.readln("Accessing tftp://1.2.3.4/my-file...")
t.readln("Error opening tftp://1.2.3.4/my-file (Timed out)")
t.read("my_switch#")
read_tftp.assert_called_with("1.2.3.4", "my-file")
@with_protocol
@mock.patch("fake_switches.adapters.tftp_reader.read_tftp")
def test_command_copy_success(self, t, read_tftp):
enable(t)
t.write("copy tftp://1.2.3.4/my-file system:/running-config")
t.read("Destination filename [running-config]? ")
t.write_raw("\r")
t.wait_for("\r\n")
t.readln("Accessing tftp://1.2.3.4/my-file...")
t.readln("Done (or some official message...)")
t.read("my_switch#")
read_tftp.assert_called_with("1.2.3.4", "my-file")
@with_protocol
def test_command_show_run_int_vlan_empty(self, t):
enable(t)
t.write("terminal length 0")
t.read("my_switch#")
t.write("show run vlan 120")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("end")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_command_add_vlan(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("vlan 123")
t.read("my_switch(config-vlan)#")
t.write("name shizzle")
t.read("my_switch(config-vlan)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
t.write("show run vlan 123")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("!")
t.readln("vlan 123")
t.readln(" name shizzle")
t.readln("end")
t.readln("")
t.read("my_switch#")
remove_vlan(t, "123")
t.write("show running-config vlan 123")
t.readln("Building configuration...")
t.readln("")
t.readln("Current configuration:")
t.readln("end")
t.read("")
@with_protocol
def test_command_assign_access_vlan_to_port(self, t):
enable(t)
create_vlan(t, "123")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
" switchport access vlan 123",
" switchport mode access",
"end"])
configuring_interface(t, "FastEthernet0/1", do="no switchport access vlan")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
" switchport mode access",
"end"])
configuring_interface(t, "FastEthernet0/1", do="no switchport mode access")
assert_interface_configuration(t, "Fa0/1", [
"interface FastEthernet0/1",
"end"])
remove_vlan(t, "123")
@with_protocol
def test_show_vlan_brief(self, t):
enable(t)
create_vlan(t, "123")
create_vlan(t, "3333", "some-name")
create_vlan(t, "2222", "your-name-is-way-too-long-for-this-pretty-printed-interface-man")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
t.write("show vlan brief")
t.readln("")
t.readln("VLAN Name Status Ports")
t.readln("---- -------------------------------- --------- -------------------------------")
t.readln("1 default active Fa0/2, Fa0/3, Fa0/4, Fa0/5")
t.readln(" Fa0/6, Fa0/7, Fa0/8, Fa0/9")
t.readln(" Fa0/10, Fa0/11, Fa0/12")
t.readln("123 VLAN123 active Fa0/1")
t.readln("2222 your-name-is-way-too-long-for-th active")
t.readln("3333 some-name active")
t.read("my_switch#")
revert_switchport_mode_access(t, "FastEthernet0/1")
remove_vlan(t, "123")
remove_vlan(t, "2222")
remove_vlan(t, "3333")
@with_protocol
def test_show_vlan(self, t):
enable(t)
create_vlan(t, "123")
create_vlan(t, "3333", "some-name")
create_vlan(t, "2222", "your-name-is-way-too-long-for-this-pretty-printed-interface-man")
set_interface_on_vlan(t, "FastEthernet0/1", "123")
t.write("show vlan")
t.readln("")
t.readln("VLAN Name Status Ports")
t.readln("---- -------------------------------- --------- -------------------------------")
t.readln("1 default active Fa0/2, Fa0/3, Fa0/4, Fa0/5")
t.readln(" Fa0/6, Fa0/7, Fa0/8, Fa0/9")
t.readln(" Fa0/10, Fa0/11, Fa0/12")
t.readln("123 VLAN123 active Fa0/1")
t.readln("2222 your-name-is-way-too-long-for-th active")
t.readln("3333 some-name active")
t.readln("")
t.readln("VLAN Type SAID MTU Parent RingNo BridgeNo Stp BrdgMode Trans1 Trans2")
t.readln("---- ----- ---------- ----- ------ ------ -------- ---- -------- ------ ------")
t.readln("1 enet 100001 1500 - - - - - 0 0")
t.readln("123 enet 100123 1500 - - - - - 0 0")
t.readln("2222 enet 102222 1500 - - - - - 0 0")
t.readln("3333 enet 103333 1500 - - - - - 0 0")
t.readln("")
t.readln("Remote SPAN VLANs")
t.readln("------------------------------------------------------------------------------")
t.readln("")
t.readln("")
t.readln("Primary Secondary Type Ports")
t.readln("------- --------- ----------------- ------------------------------------------")
t.readln("")
t.read("my_switch#")
revert_switchport_mode_access(t, "FastEthernet0/1")
remove_vlan(t, "123")
remove_vlan(t, "2222")
remove_vlan(t, "3333")
@with_protocol
def test_shutting_down(self, t):
enable(t)
configuring_interface(t, "FastEthernet 0/3", do="shutdown")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" shutdown",
"end"])
configuring_interface(t, "FastEthernet 0/3", do="no shutdown")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
@with_protocol
def test_configure_trunk_port(self, t):
enable(t)
configuring_interface(t, "Fa0/3", do="switchport mode trunk")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
# not really added because all vlan are in trunk by default on cisco
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan add 123")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan none")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan none",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan add 123")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 123",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan add 124,126-128")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 123,124,126-128",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan remove 123-124,127")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 126,128",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan all")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="switchport trunk allowed vlan 123-124,127")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport trunk allowed vlan 123,124,127",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="no switchport trunk allowed vlan")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" switchport mode trunk",
"end"])
configuring_interface(t, "Fa0/3", do="no switchport mode")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
@with_protocol
def test_configure_native_vlan(self, t):
enable(t)
configuring_interface(t, "FastEthernet0/2", do="switchport trunk native vlan 555")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
" switchport trunk native vlan 555",
"end"])
configuring_interface(t, "FastEthernet0/2", do="no switchport trunk native vlan")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
"end"])
@with_protocol
def test_setup_an_interface(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do="description hey ho")
configuring_interface_vlan(t, "2999", do="ip address 1.1.1.2 255.255.255.0")
configuring_interface_vlan(t, "2999", do="standby 1 ip 1.1.1.1")
configuring_interface_vlan(t, "2999", do='standby 1 timers 5 15')
configuring_interface_vlan(t, "2999", do='standby 1 priority 110')
configuring_interface_vlan(t, "2999", do='standby 1 preempt delay minimum 60')
configuring_interface_vlan(t, "2999", do='standby 1 authentication VLAN2999')
configuring_interface_vlan(t, "2999", do='standby 1 track 10 decrement 50')
configuring_interface_vlan(t, "2999", do='standby 1 track 20 decrement 50')
configuring_interface_vlan(t, "2999", do='no ip proxy-arp')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 1.1.1.2 255.255.255.0",
" no ip proxy-arp",
" standby 1 ip 1.1.1.1",
" standby 1 timers 5 15",
" standby 1 priority 110",
" standby 1 preempt delay minimum 60",
" standby 1 authentication VLAN2999",
" standby 1 track 10 decrement 50",
" standby 1 track 20 decrement 50",
"end"])
configuring_interface_vlan(t, "2999", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "2999", do="standby 1 ip 2.2.2.1")
configuring_interface_vlan(t, "2999", do="standby 1 ip 2.2.2.3 secondary")
configuring_interface_vlan(t, "2999", do="no standby 1 authentication")
configuring_interface_vlan(t, "2999", do="standby 1 preempt delay minimum 42")
configuring_interface_vlan(t, "2999", do="no standby 1 priority")
configuring_interface_vlan(t, "2999", do="no standby 1 timers")
configuring_interface_vlan(t, "2999", do="no standby 1 track 10")
configuring_interface_vlan(t, "2999", do="ip proxy-arp")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.2 255.255.255.0",
" standby 1 ip 2.2.2.1",
" standby 1 ip 2.2.2.3 secondary",
" standby 1 preempt delay minimum 42",
" standby 1 track 20 decrement 50",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 ip 2.2.2.3")
configuring_interface_vlan(t, "2999", do="no standby 1 preempt delay")
configuring_interface_vlan(t, "2999", do="no standby 1 track 20")
configuring_interface_vlan(t, "2999", do="")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.2 255.255.255.0",
" standby 1 ip 2.2.2.1",
" standby 1 preempt",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 ip 2.2.2.1")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.2 255.255.255.0",
" standby 1 preempt",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1")
configuring_interface_vlan(t, "2999", do="no description")
configuring_interface_vlan(t, "2999", do="")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" ip address 2.2.2.2 255.255.255.0",
"end"])
configuring(t, do="no interface vlan 2999")
t.write("show run int vlan 2999")
t.readln("\s*\^", regex=True)
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch#")
remove_vlan(t, "2999")
@with_protocol
def test_partial_standby_properties(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do='standby 1 timers 5 15')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 timers 5 15",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 timers")
configuring_interface_vlan(t, "2999", do='standby 1 priority 110')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 priority 110",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 priority")
configuring_interface_vlan(t, "2999", do='standby 1 preempt delay minimum 60')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 preempt delay minimum 60",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 preempt")
configuring_interface_vlan(t, "2999", do='standby 1 authentication VLAN2999')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 authentication VLAN2999",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 authentication")
configuring_interface_vlan(t, "2999", do='standby 1 track 10 decrement 50')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 track 10 decrement 50",
"end"])
configuring_interface_vlan(t, "2999", do="no standby 1 track 10")
configuring(t, do="no interface vlan 2999")
remove_vlan(t, "2999")
@with_protocol
def test_partial_standby_ip_definition(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do='standby 1 ip')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 ip",
"end"])
configuring_interface_vlan(t, "2999", do='no standby 1 ip')
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 2999")
t.read("my_switch(config-if)#")
t.write("standby 1 ip 1..1.1")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("standby 1 ip 1.1.1.1")
t.readln("% Warning: address is not within a subnet on this interface")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do="ip address 1.1.1.2 255.255.255.0")
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 2999")
t.read("my_switch(config-if)#")
t.write("standby 1 ip 2.1.1.1")
t.readln("% Warning: address is not within a subnet on this interface")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring_interface_vlan(t, "2999", do='standby 1 ip 1.1.1.1')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" ip address 1.1.1.2 255.255.255.0",
" standby 1 ip 1.1.1.1",
"end"])
configuring_interface_vlan(t, "2999", do='standby 1 ip')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" ip address 1.1.1.2 255.255.255.0",
" standby 1 ip 1.1.1.1",
"end"])
configuring_interface_vlan(t, "2999", do="no ip address 1.1.1.2 255.255.255.0")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby 1 ip 1.1.1.1",
"end"])
configuring_interface_vlan(t, "2999", do='no standby 1 ip 1.1.1.1')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
remove_vlan(t, "2999")
@with_protocol
def test_creating_a_port_channel(self, t):
enable(t)
create_port_channel_interface(t, '1')
configuring_port_channel(t, '1', 'description HELLO')
configuring_port_channel(t, '1', 'switchport trunk encapsulation dot1q')
configuring_port_channel(t, '1', 'switchport trunk native vlan 998')
configuring_port_channel(t, '1', 'switchport trunk allowed vlan 6,4087-4089,4091,4093')
configuring_port_channel(t, '1', 'switchport mode trunk')
assert_interface_configuration(t, 'Port-channel1', [
"interface Port-channel1",
" description HELLO",
" switchport trunk encapsulation dot1q",
" switchport trunk native vlan 998",
" switchport trunk allowed vlan 6,4087-4089,4091,4093",
" switchport mode trunk",
"end"
])
t.write("show etherchannel summary")
t.readln("Flags: D - down P - bundled in port-channel")
t.readln(" I - stand-alone s - suspended")
t.readln(" H - Hot-standby (LACP only)")
t.readln(" R - Layer3 S - Layer2")
t.readln(" U - in use f - failed to allocate aggregator")
t.readln("")
t.readln(" M - not in use, minimum links not met")
t.readln(" u - unsuitable for bundling")
t.readln(" w - waiting to be aggregated")
t.readln(" d - default port")
t.readln("")
t.readln("")
t.readln("Number of channel-groups in use: 1")
t.readln("Number of aggregators: 1")
t.readln("")
t.readln("Group Port-channel Protocol Ports")
t.readln("------+-------------+-----------+-----------------------------------------------")
t.readln("1 Po1(S) LACP ")
t.readln("")
t.read("my_switch#")
configuring(t, do="no interface port-channel 1")
t.write("show run int po1")
t.readln("\s*\^", regex=True)
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_port_channel_is_automatically_created_when_adding_a_port_to_it(self, t):
enable(t)
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface FastEthernet0/1")
t.read("my_switch(config-if)#")
t.write("channel-group 2 mode active")
t.readln("Creating a port-channel interface Port-channel 2")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, 'fa0/1', [
"interface FastEthernet0/1",
" channel-group 2 mode active",
"end"
])
assert_interface_configuration(t, 'po2', [
"interface Port-channel2",
"end"
])
t.write("show etherchannel summary")
t.readln("Flags: D - down P - bundled in port-channel")
t.readln(" I - stand-alone s - suspended")
t.readln(" H - Hot-standby (LACP only)")
t.readln(" R - Layer3 S - Layer2")
t.readln(" U - in use f - failed to allocate aggregator")
t.readln("")
t.readln(" M - not in use, minimum links not met")
t.readln(" u - unsuitable for bundling")
t.readln(" w - waiting to be aggregated")
t.readln(" d - default port")
t.readln("")
t.readln("")
t.readln("Number of channel-groups in use: 1")
t.readln("Number of aggregators: 1")
t.readln("")
t.readln("Group Port-channel Protocol Ports")
t.readln("------+-------------+-----------+-----------------------------------------------")
t.readln("2 Po2(SU) LACP Fa0/1(P)")
t.readln("")
t.read("my_switch#")
configuring(t, do="no interface port-channel 2")
configuring_interface(t, interface="fa0/1", do="no channel-group 2 mode on")
assert_interface_configuration(t, "fa0/1", [
"interface FastEthernet0/1",
"end"
])
@with_protocol
def test_port_channel_is_not_automatically_created_when_adding_a_port_to_it_if_its_already_created(self, t):
enable(t)
create_port_channel_interface(t, '14')
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface FastEthernet0/1")
t.read("my_switch(config-if)#")
t.write("channel-group 14 mode active")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "fa0/1", [
"interface FastEthernet0/1",
" channel-group 14 mode active",
"end"
])
configuring_interface(t, interface="fa0/1", do="no channel-group 14 mode on")
assert_interface_configuration(t, "fa0/1", [
"interface FastEthernet0/1",
"end"
])
configuring(t, do="no interface port-channel 14")
@with_protocol
def test_setting_secondary_ips(self, t):
enable(t)
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do="description hey ho")
configuring_interface_vlan(t, "2999", do="no ip redirects")
configuring_interface_vlan(t, "2999", do="ip address 1.1.1.1 255.255.255.0")
configuring_interface_vlan(t, "2999", do="ip address 2.2.2.1 255.255.255.0 secondary")
configuring_interface_vlan(t, "2999", do="ip address 4.4.4.1 255.255.255.0 secondary")
configuring_interface_vlan(t, "2999", do="ip address 3.3.3.1 255.255.255.0 secondary")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" ip address 2.2.2.1 255.255.255.0 secondary",
" ip address 4.4.4.1 255.255.255.0 secondary",
" ip address 3.3.3.1 255.255.255.0 secondary",
" ip address 1.1.1.1 255.255.255.0",
" no ip redirects",
"end"])
configuring_interface_vlan(t, "2999", do="no ip address")
configuring_interface_vlan(t, "2999", do="ip redirects")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" description hey ho",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
@with_protocol
def test_setting_access_group(self, t):
enable(t)
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do="ip access-group SHNITZLE in")
configuring_interface_vlan(t, "2999", do="ip access-group WHIZZLE out")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" ip access-group SHNITZLE in",
" ip access-group WHIZZLE out",
"end"])
configuring_interface_vlan(t, "2999", do="no ip access-group in")
configuring_interface_vlan(t, "2999", do="no ip access-group WHIZZLE out")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
@with_protocol
def test_removing_ip_address(self, t):
enable(t)
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan2999")
t.read("my_switch(config-if)#")
t.write("ip address 1.1.1.1 255.255.255.0")
t.read("my_switch(config-if)#")
t.write("ip address 2.2.2.2 255.255.255.0 secondary")
t.read("my_switch(config-if)#")
t.write("no ip address 1.1.1.1 255.255.255.0")
t.readln("Must delete secondary before deleting primary")
t.read("my_switch(config-if)#")
t.write("no ip address 2.2.2.2 255.255.255.0 secondary")
t.read("my_switch(config-if)#")
t.write("no ip address 1.1.1.1 255.255.255.0")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
@with_protocol
def test_show_ip_interface(self, t):
enable(t)
create_vlan(t, "1000")
create_interface_vlan(t, "1000")
create_vlan(t, "2000")
create_vlan(t, "3000")
create_interface_vlan(t, "3000")
configuring_interface_vlan(t, "3000", do="ip address 1.1.1.1 255.255.255.0")
create_interface_vlan(t, "4000")
configuring_interface_vlan(t, "4000", do="ip vrf forwarding DEFAULT-LAN")
configuring_interface_vlan(t, "4000", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "4000", do="ip address 4.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.128 secondary")
configuring_interface_vlan(t, "4000", do="ip access-group shizzle in")
configuring_interface_vlan(t, "4000", do="ip access-group whizzle out")
t.write("show ip interface")
t.readln("Vlan1000 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("Vlan3000 is down, line protocol is down")
t.readln(" Internet address is 1.1.1.1/24")
t.readln(" Outgoing access list is not set")
t.readln(" Inbound access list is not set")
t.readln("Vlan4000 is down, line protocol is down")
t.readln(" Internet address is 2.2.2.2/24")
t.readln(" Secondary address 4.2.2.2/24")
t.readln(" Secondary address 3.2.2.2/25")
t.readln(" Outgoing access list is whizzle")
t.readln(" Inbound access list is shizzle")
t.readln(" VPN Routing/Forwarding \"DEFAULT-LAN\"")
t.readln("FastEthernet0/1 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/2 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/3 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/4 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/5 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/6 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/7 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/8 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/9 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/10 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/11 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.readln("FastEthernet0/12 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.read("my_switch#")
t.write("show ip interface vlan 4000")
t.readln("Vlan4000 is down, line protocol is down")
t.readln(" Internet address is 2.2.2.2/24")
t.readln(" Secondary address 4.2.2.2/24")
t.readln(" Secondary address 3.2.2.2/25")
t.readln(" Outgoing access list is whizzle")
t.readln(" Inbound access list is shizzle")
t.readln(" VPN Routing/Forwarding \"DEFAULT-LAN\"")
t.read("my_switch#")
t.write("show ip interface vlan1000")
t.readln("Vlan1000 is down, line protocol is down")
t.readln(" Internet protocol processing disabled")
t.read("my_switch#")
configuring(t, do="no interface vlan 1000")
configuring(t, do="no interface vlan 3000")
configuring(t, do="no interface vlan 4000")
remove_vlan(t, "1000")
remove_vlan(t, "2000")
remove_vlan(t, "3000")
@with_protocol
def test_assigning_a_secondary_ip_as_the_primary_removes_it_from_secondary_and_removes_the_primary(self, t):
enable(t)
create_interface_vlan(t, "4000")
configuring_interface_vlan(t, "4000", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "4000", do="ip address 4.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.128")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" ip address 4.2.2.2 255.255.255.0 secondary",
" ip address 3.2.2.2 255.255.255.128",
"end"])
configuring(t, do="no interface vlan 4000")
@with_protocol
def test_overlapping_ips(self, t):
enable(t)
create_vlan(t, "1000")
create_interface_vlan(t, "1000")
create_vlan(t, "2000")
create_interface_vlan(t, "2000")
configuring_interface_vlan(t, "1000", do="ip address 2.2.2.2 255.255.255.0")
configuring_interface_vlan(t, "1000", do="ip address 3.3.3.3 255.255.255.0 secondary")
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan2000")
t.read("my_switch(config-if)#")
t.write("ip address 2.2.2.75 255.255.255.128")
t.readln("% 2.2.2.0 overlaps with secondary address on Vlan1000")
t.read("my_switch(config-if)#")
t.write("ip address 3.3.3.4 255.255.255.128")
t.readln("% 3.3.3.0 is assigned as a secondary address on Vlan1000")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring(t, do="no interface vlan 2000")
remove_vlan(t, "2000")
configuring(t, do="no interface vlan 1000")
remove_vlan(t, "1000")
@with_protocol
def test_unknown_ip_interface(self, t):
enable(t)
t.write("show ip interface Vlan2345")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_removing_ip_needs_to_compare_objects_better(self, t):
enable(t)
create_vlan(t, "1000")
create_interface_vlan(t, "1000")
configuring_interface_vlan(t, "1000", do="ip address 1.1.1.1 255.255.255.0")
configuring_interface_vlan(t, "1000", do="ip address 1.1.1.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "1000", do="ip address 1.1.1.3 255.255.255.0 secondary")
configuring_interface_vlan(t, "1000", do="no ip address 1.1.1.3 255.255.255.0 secondary")
t.write("show ip interface vlan 1000")
t.readln("Vlan1000 is down, line protocol is down")
t.readln(" Internet address is 1.1.1.1/24")
t.readln(" Secondary address 1.1.1.2/24")
t.readln(" Outgoing access list is not set")
t.readln(" Inbound access list is not set")
t.read("my_switch#")
configuring(t, do="no interface vlan 1000")
remove_vlan(t, "1000")
@with_protocol
def test_extreme_vlan_range(self, t):
enable(t)
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("vlan -1")
t.readln("Command rejected: Bad VLAN list - character #1 ('-') delimits a VLAN number")
t.readln(" which is out of the range 1..4094.")
t.read("my_switch(config)#")
t.write("vlan 0")
t.readln("Command rejected: Bad VLAN list - character #X (EOL) delimits a VLAN")
t.readln("number which is out of the range 1..4094.")
t.read("my_switch(config)#")
t.write("vlan 1")
t.read("my_switch(config-vlan)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("vlan 4094")
t.read("my_switch(config-vlan)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("no vlan 4094")
t.read("my_switch(config)#")
t.write("vlan 4095")
t.readln("Command rejected: Bad VLAN list - character #X (EOL) delimits a VLAN")
t.readln("number which is out of the range 1..4094.")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
@with_protocol
def test_full_running_config_and_pipe_begin_support(self, t):
enable(t)
create_vlan(t, "1000", name="hello")
create_interface_vlan(t, "1000")
configuring_interface(t, "Fa0/2", do="switchport mode trunk")
configuring_interface(t, "Fa0/2", do="switchport trunk allowed vlan 125")
t.write("show running | beg vlan")
t.readln("vlan 1")
t.readln("!")
t.readln("vlan 1000")
t.readln(" name hello")
t.readln("!")
t.readln("interface FastEthernet0/1")
t.readln("!")
t.readln("interface FastEthernet0/2")
t.readln(" switchport trunk allowed vlan 125")
t.readln(" switchport mode trunk")
t.readln("!")
t.readln("interface FastEthernet0/3")
t.readln("!")
t.readln("interface FastEthernet0/4")
t.readln("!")
t.readln("interface FastEthernet0/5")
t.readln("!")
t.readln("interface FastEthernet0/6")
t.readln("!")
t.readln("interface FastEthernet0/7")
t.readln("!")
t.readln("interface FastEthernet0/8")
t.readln("!")
t.readln("interface FastEthernet0/9")
t.readln("!")
t.readln("interface FastEthernet0/10")
t.readln("!")
t.readln("interface FastEthernet0/11")
t.readln("!")
t.readln("interface FastEthernet0/12")
t.readln("!")
t.readln("interface Vlan1000")
t.readln(" no ip address")
t.readln("!")
t.readln("end")
t.readln("")
t.read("my_switch#")
configuring_interface(t, "Fa0/2", do="no switchport mode trunk")
configuring_interface(t, "Fa0/2", do="no switchport trunk allowed vlan")
configuring(t, do="no interface vlan 1000")
remove_vlan(t, "1000")
@with_protocol
def test_pipe_inc_support(self, t):
enable(t)
create_vlan(t, "1000", name="hello")
t.write("show running | inc vlan")
t.readln("vlan 1")
t.readln("vlan 1000")
t.read("my_switch#")
remove_vlan(t, "1000")
@with_protocol
def test_ip_vrf(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("ip vrf SOME-LAN")
t.read("my_switch(config-vrf)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("no ip vrf SOME-LAN")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
@with_protocol
def test_ip_vrf_forwarding(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("ip vrf SOME-LAN")
t.read("my_switch(config-vrf)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("interface Fa0/2")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding NOT-DEFAULT-LAN")
t.readln("% VRF NOT-DEFAULT-LAN not configured.")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding SOME-LAN")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
" ip vrf forwarding SOME-LAN",
"end"])
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("no ip vrf SOME-LAN")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
"end"])
@with_protocol
def test_ip_vrf_default_lan(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface Fa0/2")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding DEFAULT-LAN")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
" ip vrf forwarding DEFAULT-LAN",
"end"])
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface Fa0/2")
t.read("my_switch(config-if)#")
t.write("no ip vrf forwarding")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
"end"])
@with_protocol
def test_ip_setting_vrf_forwarding_wipes_ip_addresses(self, t):
enable(t)
create_vlan(t, "4000")
create_interface_vlan(t, "4000")
configuring_interface_vlan(t, "4000", do="ip address 10.10.0.10 255.255.255.0")
configuring_interface_vlan(t, "4000", do="ip address 10.10.1.10 255.255.255.0 secondary")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" ip address 10.10.1.10 255.255.255.0 secondary",
" ip address 10.10.0.10 255.255.255.0",
"end"])
configuring_interface_vlan(t, "4000", do="ip vrf forwarding DEFAULT-LAN")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" ip vrf forwarding DEFAULT-LAN",
" no ip address",
"end"])
configuring(t, do="no interface vlan 4000")
remove_vlan(t, "4000")
@with_protocol
def test_ip_helper(self, t):
enable(t)
create_interface_vlan(t, "4000")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
"end"])
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 4000")
t.read("my_switch(config-if)#")
t.write("ip helper-address")
t.readln("% Incomplete command.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address 1.1.1")
t.readln("% Incomplete command.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address 1.a.1")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.") # not incomplete
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address invalid.ip")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("ip helper-address 10.10.0.1 EXTRA INFO")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.1")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
" ip helper-address 10.10.10.1",
"end"])
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.1")
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.2")
configuring_interface_vlan(t, "4000", do="ip helper-address 10.10.10.3")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
" ip helper-address 10.10.10.1",
" ip helper-address 10.10.10.2",
" ip helper-address 10.10.10.3",
"end"])
configuring_interface_vlan(t, "4000", do="no ip helper-address 10.10.10.1")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
" ip helper-address 10.10.10.2",
" ip helper-address 10.10.10.3",
"end"])
configuring_interface_vlan(t, "4000", do="no ip helper-address 10.10.10.1")
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 4000")
t.read("my_switch(config-if)#")
t.write("no ip helper-address 10.10.0.1 EXTRA INFO")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
configuring_interface_vlan(t, "4000", do="no ip helper-address")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" no ip address",
"end"])
configuring(t, do="no interface vlan 4000")
@with_protocol
def test_ip_route(self, t):
enable(t)
configuring(t, do="ip route 1.1.1.0 255.255.255.0 2.2.2.2")
t.write("show ip route static | inc 2.2.2.2")
t.readln("S 1.1.1.0 [x/y] via 2.2.2.2")
t.read("my_switch#")
t.write("show running | inc 2.2.2.2")
t.readln("ip route 1.1.1.0 255.255.255.0 2.2.2.2")
t.read("my_switch#")
configuring(t, do="no ip route 1.1.1.0 255.255.255.0 2.2.2.2")
t.write("show ip route static")
t.readln("")
t.read("my_switch#")
t.write("exit")
@with_protocol
def test_write_memory(self, t):
enable(t)
t.write("write memory")
t.readln("Building configuration...")
t.readln("OK")
t.read("my_switch#")
@with_protocol
def test_show_version(self, t):
enable(t)
t.write("show version")
t.readln("Cisco IOS Software, C3750 Software (C3750-IPSERVICESK9-M), Version 12.2(58)SE2, RELEASE SOFTWARE (fc1)")
t.readln("Technical Support: http://www.cisco.com/techsupport")
t.readln("Copyright (c) 1986-2011 by Cisco Systems, Inc.")
t.readln("Compiled Thu 21-Jul-11 01:53 by prod_rel_team")
t.readln("")
t.readln("ROM: Bootstrap program is C3750 boot loader")
t.readln("BOOTLDR: C3750 Boot Loader (C3750-HBOOT-M) Version 12.2(44)SE5, RELEASE SOFTWARE (fc1)")
t.readln("")
t.readln("my_switch uptime is 1 year, 18 weeks, 5 days, 1 hour, 11 minutes")
t.readln("System returned to ROM by power-on")
t.readln("System image file is \"flash:c3750-ipservicesk9-mz.122-58.SE2.bin\"")
t.readln("")
t.readln("")
t.readln("This product contains cryptographic features and is subject to United")
t.readln("States and local country laws governing import, export, transfer and")
t.readln("use. Delivery of Cisco cryptographic products does not imply")
t.readln("third-party authority to import, export, distribute or use encryption.")
t.readln("Importers, exporters, distributors and users are responsible for")
t.readln("compliance with U.S. and local country laws. By using this product you")
t.readln("agree to comply with applicable laws and regulations. If you are unable")
t.readln("to comply with U.S. and local laws, return this product immediately.")
t.readln("")
t.readln("A summary of U.S. laws governing Cisco cryptographic products may be found at:")
t.readln("http://www.cisco.com/wwl/export/crypto/tool/stqrg.html")
t.readln("")
t.readln("If you require further assistance please contact us by sending email to")
t.readln("[email protected].")
t.readln("")
t.readln("cisco WS-C3750G-24TS-1U (PowerPC405) processor (revision H0) with 131072K bytes of memory.")
t.readln("Processor board ID FOC1530X2F7")
t.readln("Last reset from power-on")
t.readln("0 Virtual Ethernet interfaces")
t.readln("12 Gigabit Ethernet interfaces")
t.readln("The password-recovery mechanism is enabled.")
t.readln("")
t.readln("512K bytes of flash-simulated non-volatile configuration memory.")
t.readln("Base ethernet MAC Address : 00:00:00:00:00:00")
t.readln("Motherboard assembly number : 73-10219-09")
t.readln("Power supply part number : 341-0098-02")
t.readln("Motherboard serial number : FOC153019Z6")
t.readln("Power supply serial number : ALD153000BB")
t.readln("Model revision number : H0")
t.readln("Motherboard revision number : A0")
t.readln("Model number : WS-C3750G-24TS-S1U")
t.readln("System serial number : FOC1530X2F7")
t.readln("Top Assembly Part Number : 800-26859-03")
t.readln("Top Assembly Revision Number : C0")
t.readln("Version ID : V05")
t.readln("CLEI Code Number : COMB600BRA")
t.readln("Hardware Board Revision Number : 0x09")
t.readln("")
t.readln("")
t.readln("Switch Ports Model SW Version SW Image")
t.readln("------ ----- ----- ---------- ----------")
t.readln("* 1 12 WS-C3750G-24TS-1U 12.2(58)SE2 C3750-IPSERVICESK9-M")
t.readln("")
t.readln("")
t.readln("Configuration register is 0xF")
t.readln("")
t.read("my_switch#")
@with_protocol
def test_reset_port(self, t):
enable(t)
configuring_interface(t, "FastEthernet0/3", do="description shizzle the whizzle and drizzle with lizzle")
configuring_interface(t, "FastEthernet0/3", do="shutdown")
set_interface_on_vlan(t, "FastEthernet0/3", "123")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" description shizzle the whizzle and drizzle with lizzle",
" switchport access vlan 123",
" switchport mode access",
" shutdown",
"end"])
configuring(t, "default interface FastEthernet0/3")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
@with_protocol
def test_reset_port_invalid_interface_fails(self, t):
enable(t)
configuring_interface(t, "FastEthernet0/3", do="description shizzle the whizzle and drizzle with lizzle")
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("default interface WrongInterfaceName0/3")
t.readln("\s*\^", regex=True)
t.readln("% Invalid input detected at '^' marker (not such interface)")
t.readln("")
t.read("my_switch(config)#")
configuring(t, "default interface FastEthernet0/3")
@with_protocol
def test_standby_version(self, t):
enable(t)
create_vlan(t, "2999")
create_interface_vlan(t, "2999")
configuring_interface_vlan(t, "2999", do='standby version 2')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
" standby version 2",
"end"])
configuring_interface_vlan(t, "2999", do='no standby version 2')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring_interface_vlan(t, "2999", do='standby version 2')
configuring_interface_vlan(t, "2999", do='standby version 1')
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
t.write("configure terminal")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("interface vlan 2999")
t.read("my_switch(config-if)#")
t.write("standby version")
t.readln("% Incomplete command.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("standby version 3")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("standby version 2 2")
t.readln(" ^")
t.readln("% Invalid input detected at '^' marker.")
t.readln("")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Vlan2999", [
"interface Vlan2999",
" no ip address",
"end"])
configuring(t, do="no interface vlan 2999")
remove_vlan(t, "2999")
@with_protocol
def test_disable_ntp(self, t):
enable(t)
configuring_interface(t, "FastEthernet 0/3", do="ntp disable")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
" ntp disable",
"end"])
configuring_interface(t, "FastEthernet 0/3", do="no ntp disable")
assert_interface_configuration(t, "FastEthernet0/3", [
"interface FastEthernet0/3",
"end"])
class TestCiscoSwitchProtocolSSH(TestCiscoSwitchProtocol):
__test__ = True
tester_class = SshTester
class TestCiscoSwitchProtocolTelnet(TestCiscoSwitchProtocol):
__test__ = True
tester_class = TelnetTester
| internap/fake-switches | tests/cisco/test_cisco_switch_protocol.py | Python | apache-2.0 | 58,284 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from twisted.enterprise import adbapi
from calvin.runtime.south.async import async
from calvin.utilities.calvinlogger import get_logger
from calvin.runtime.south.calvinsys import base_calvinsys_object
_log = get_logger(__name__)
class PersistentBuffer(base_calvinsys_object.BaseCalvinsysObject):
"""
    Asynchronous (using twisted adbapi) SQLite-based implementation of persistent queue
Based on the following (from sqlite.org):
1) If no ROWID is specified on the insert [...] then an appropriate ROWID is created automatically.
2) The usual algorithm is to give the newly created row a ROWID that is one larger than the largest
ROWID in the table prior to the insert.
3) If the table is initially empty, then a ROWID of 1 is used.
4) If the largest ROWID is equal to the largest possible integer (9223372036854775807) then the
database engine starts picking positive candidate ROWIDs at random until it finds one
that is not previously used.
5) The normal ROWID selection [...] will generate monotonically increasing unique ROWIDs as long
as you never use the maximum ROWID value and you never delete the entry in the table with the largest ROWID.
Since we are implementing a FIFO queue, 1) should ensure there is a row id, 2) & 5) that the ordering is correct
and 3) that the rowid is reset whenever the queue is emptied, so 4) should never happen.
"""
init_schema = {
"type": "object",
"properties": {
"buffer_id": {
"description": "Buffer identifier, should be unique - will be used as part of filename",
"type": "string",
"pattern": "^[a-zA-Z0-9]+"
},
"reporting": {
"description": "Log some statistics on buffer at given interval (in seconds)",
"type": "number"
}
},
"required": ["buffer_id"],
"description": "Initialize buffer"
}
can_write_schema = {
"description": "Returns True if buffer ready for write, otherwise False",
"type": "boolean"
}
write_schema = {
"description": "Push data to buffer; always a list of json serializable items",
"type": "array"
}
can_read_schema = {
"description": "Returns True if data can be read, otherwise False",
"type": "boolean"
}
read_schema = {
"description": "Pop data from buffer, always a list",
"type": "array"
}
def init(self, buffer_id, reporting=None, *args, **kwargs):
self.db_name = buffer_id
self.db_path = os.path.join(os.path.abspath(os.path.curdir), self.db_name + ".sq3")
self.db = adbapi.ConnectionPool('sqlite3', self.db_path, check_same_thread=False)
self._pushed_values = 0
self._popped_values = 0
self._latest_timestamp = 0
self._value = None
self._changed = None
self._statlogging = None
def ready(length):
def log_stats():
_log.info("{} : pushed {}, popped {} (latest timestamp: {}) ".format(self.db_name, self._pushed_values, self._popped_values, self._latest_timestamp))
self._statlogging.reset()
self._changed = True # Something has changed, need to check if readable
# install timer to report on pushing/popping
if reporting:
                self._statlogging = async.DelayedCall(reporting, log_stats)
self.scheduler_wakeup()
def create(db):
            # Create simple queue table; values are stored as JSON strings in a BLOB column.
db.execute("CREATE TABLE IF NOT EXISTS queue (value BLOB)")
def error(e):
_log.error("Error initializing queue {}: {}".format(self.db_name, e))
q = self.db.runInteraction(create)
q.addCallback(ready)
q.addErrback(error)
def can_write(self):
# Can always write after init, meaning changed is no longer None
return self._changed is not None
def write(self, value):
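        # Each call stores the whole `value` list as one JSON-encoded row; read()
        # later decodes popped rows and flattens the batches back into a single list.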
def error(e):
_log.warning("Error during write: {}".format(e))
done() # Call done to wake scheduler, not sure this is a good idea
def done(unused=None):
self._changed = True # Let can_read know there may be something new to read
self.scheduler_wakeup()
self._pushed_values += len(value)
try:
value = json.dumps(value) # Convert to string for sqlite
except TypeError:
_log.error("Value is not json serializable")
else:
q = self.db.runOperation("INSERT INTO queue (value) VALUES (?)", (value, ))
q.addCallback(done)
q.addErrback(error)
def can_read(self):
def error(e):
_log.warning("Error during read: {}".format(e))
done()
def done(value=None):
if value:
self._changed = True # alert can_read that the database has changed
self._value = value
self.scheduler_wakeup()
def pop(db):
limit = 2 # <- Not empirically/theoretically tested
db.execute("SELECT value FROM queue ORDER BY rowid LIMIT (?)", (limit,))
value = db.fetchall() # a list of (value, ) tuples, or None
if value:
# pop values (i.e. delete rows with len(value) lowest row ids)
db.execute("DELETE FROM queue WHERE rowid in (SELECT rowid FROM queue ORDER BY rowid LIMIT (?))",
(len(value),))
return value
        if self._value:
            # There is a value to read
            return True
        elif self._changed:
# Something has changed, try to pop a value
self._changed = False
q = self.db.runInteraction(pop)
q.addCallback(done)
q.addErrback(error)
# Nothing to do
return False
def read(self):
value = []
while self._value:
# get an item from list of replies
dbtuple = self._value.pop(0)
# the item is a tuple, get the first value
dbvalue = dbtuple[0]
# convert value from string and return it
try:
value.extend(json.loads(dbvalue))
except ValueError:
_log.error("No value decoded - possibly corrupt file")
self._popped_values += len(value)
return value
def close(self):
if self._statlogging:
self._statlogging.cancel()
def done(response):
# A count response; [(cnt,)]
if response[0][0] == 0:
try:
os.remove(self.db_path)
except:
# Failed for some reason
_log.warning("Could not remove db file {}".format(self._dbpath))
q = self.db.runQuery("SELECT COUNT(*) from queue")
q.addCallback(done)
self.db.close()
| EricssonResearch/calvin-base | calvinextras/calvinsys/data/buffer/PersistentBuffer.py | Python | apache-2.0 | 7,704 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import operator
import sys
import uuid
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from multiprocessing.pool import ThreadPool
from pyspark import keyword_only, since, SparkContext
from pyspark.ml import Estimator, Predictor, PredictionModel, Model
from pyspark.ml.param.shared import HasRawPredictionCol, HasProbabilityCol, HasThresholds, \
HasRegParam, HasMaxIter, HasFitIntercept, HasTol, HasStandardization, HasWeightCol, \
HasAggregationDepth, HasThreshold, HasBlockSize, HasMaxBlockSizeInMB, Param, Params, \
TypeConverters, HasElasticNetParam, HasSeed, HasStepSize, HasSolver, HasParallelism
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _RandomForestParams, _GBTParams, \
_HasVarianceImpurity, _TreeClassifierParams
from pyspark.ml.regression import _FactorizationMachinesParams, DecisionTreeRegressionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.util import DefaultParamsReader, DefaultParamsWriter, \
JavaMLReadable, JavaMLReader, JavaMLWritable, JavaMLWriter, \
MLReader, MLReadable, MLWriter, MLWritable, HasTrainingSummary
from pyspark.ml.wrapper import JavaParams, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
__all__ = ['LinearSVC', 'LinearSVCModel',
'LinearSVCSummary', 'LinearSVCTrainingSummary',
'LogisticRegression', 'LogisticRegressionModel',
'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
'GBTClassifier', 'GBTClassificationModel',
'RandomForestClassifier', 'RandomForestClassificationModel',
'RandomForestClassificationSummary', 'RandomForestClassificationTrainingSummary',
'BinaryRandomForestClassificationSummary',
'BinaryRandomForestClassificationTrainingSummary',
'NaiveBayes', 'NaiveBayesModel',
'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
'MultilayerPerceptronClassificationSummary',
'MultilayerPerceptronClassificationTrainingSummary',
'OneVsRest', 'OneVsRestModel',
'FMClassifier', 'FMClassificationModel', 'FMClassificationSummary',
'FMClassificationTrainingSummary']
class _ClassifierParams(HasRawPredictionCol, _PredictorParams):
"""
Classifier Params for classification tasks.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class Classifier(Predictor, _ClassifierParams, metaclass=ABCMeta):
"""
Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class ClassificationModel(PredictionModel, _ClassifierParams, metaclass=ABCMeta):
"""
Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@abstractproperty
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
raise NotImplementedError()
@abstractmethod
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
raise NotImplementedError()
class _ProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, _ClassifierParams):
"""
Params for :py:class:`ProbabilisticClassifier` and
:py:class:`ProbabilisticClassificationModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class ProbabilisticClassifier(Classifier, _ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Probabilistic Classifier for classification tasks.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@inherit_doc
class ProbabilisticClassificationModel(ClassificationModel,
_ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@abstractmethod
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
raise NotImplementedError()
@inherit_doc
class _JavaClassifier(Classifier, JavaPredictor, metaclass=ABCMeta):
"""
Java Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class _JavaClassificationModel(ClassificationModel, JavaPredictionModel):
"""
Java Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
To be mixed in with :class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
return self._call_java("numClasses")
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
return self._call_java("predictRaw", value)
@inherit_doc
class _JavaProbabilisticClassifier(ProbabilisticClassifier, _JavaClassifier,
metaclass=ABCMeta):
"""
Java Probabilistic Classifier for classification tasks.
"""
pass
@inherit_doc
class _JavaProbabilisticClassificationModel(ProbabilisticClassificationModel,
_JavaClassificationModel):
"""
Java Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class _ClassificationSummary(JavaWrapper):
"""
Abstraction for multiclass classification results for a given model.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def predictions(self):
"""
Dataframe outputted by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("3.1.0")
def predictionCol(self):
"""
Field in "predictions" which gives the prediction of each class.
"""
return self._call_java("predictionCol")
@property
@since("3.1.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("3.1.0")
def weightCol(self):
"""
Field in "predictions" which gives the weight of each instance
as a vector.
"""
return self._call_java("weightCol")
@property
def labels(self):
"""
Returns the sequence of labels in ascending order. This order matches the order used
in metrics which are specified as arrays over labels, e.g., truePositiveRateByLabel.
.. versionadded:: 3.1.0
Notes
-----
        In most cases, it will be values {0.0, 1.0, ..., numClasses-1}. However, if the
training set is missing a label, then all of the arrays over labels
(e.g., from truePositiveRateByLabel) will be of length numClasses-1 instead of the
expected numClasses.
"""
return self._call_java("labels")
@property
@since("3.1.0")
def truePositiveRateByLabel(self):
"""
Returns true positive rate for each label (category).
"""
return self._call_java("truePositiveRateByLabel")
@property
@since("3.1.0")
def falsePositiveRateByLabel(self):
"""
Returns false positive rate for each label (category).
"""
return self._call_java("falsePositiveRateByLabel")
@property
@since("3.1.0")
def precisionByLabel(self):
"""
Returns precision for each label (category).
"""
return self._call_java("precisionByLabel")
@property
@since("3.1.0")
def recallByLabel(self):
"""
Returns recall for each label (category).
"""
return self._call_java("recallByLabel")
@since("3.1.0")
def fMeasureByLabel(self, beta=1.0):
"""
Returns f-measure for each label (category).
"""
return self._call_java("fMeasureByLabel", beta)
@property
@since("3.1.0")
def accuracy(self):
"""
Returns accuracy.
        (the number of correctly classified instances
        divided by the total number of instances.)
"""
return self._call_java("accuracy")
@property
@since("3.1.0")
def weightedTruePositiveRate(self):
"""
Returns weighted true positive rate.
        (equals precision, recall and f-measure)
"""
return self._call_java("weightedTruePositiveRate")
@property
@since("3.1.0")
def weightedFalsePositiveRate(self):
"""
Returns weighted false positive rate.
"""
return self._call_java("weightedFalsePositiveRate")
@property
@since("3.1.0")
def weightedRecall(self):
"""
Returns weighted averaged recall.
        (equals precision, recall and f-measure)
"""
return self._call_java("weightedRecall")
@property
@since("3.1.0")
def weightedPrecision(self):
"""
Returns weighted averaged precision.
"""
return self._call_java("weightedPrecision")
@since("3.1.0")
def weightedFMeasure(self, beta=1.0):
"""
Returns weighted averaged f-measure.
"""
return self._call_java("weightedFMeasure", beta)
@inherit_doc
class _TrainingSummary(JavaWrapper):
"""
Abstraction for Training results.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
iteration. It contains one more element, the initial state,
        than the number of iterations.
"""
return self._call_java("objectiveHistory")
@property
@since("3.1.0")
def totalIterations(self):
"""
Number of training iterations until termination.
"""
return self._call_java("totalIterations")
@inherit_doc
class _BinaryClassificationSummary(_ClassificationSummary):
"""
Binary classification results for a given model.
.. versionadded:: 3.1.0
"""
@property
@since("3.1.0")
def scoreCol(self):
"""
Field in "predictions" which gives the probability or raw prediction
of each class as a vector.
"""
return self._call_java("scoreCol")
@property
def roc(self):
"""
Returns the receiver operating characteristic (ROC) curve,
which is a Dataframe having two fields (FPR, TPR) with
(0.0, 0.0) prepended and (1.0, 1.0) appended to it.
.. versionadded:: 3.1.0
Notes
-----
`Wikipedia reference <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
"""
return self._call_java("roc")
@property
@since("3.1.0")
def areaUnderROC(self):
"""
Computes the area under the receiver operating characteristic
(ROC) curve.
"""
return self._call_java("areaUnderROC")
@property
@since("3.1.0")
def pr(self):
"""
Returns the precision-recall curve, which is a Dataframe
containing two fields recall, precision with (0.0, 1.0) prepended
to it.
"""
return self._call_java("pr")
@property
@since("3.1.0")
def fMeasureByThreshold(self):
"""
Returns a dataframe with two fields (threshold, F-Measure) curve
with beta = 1.0.
"""
return self._call_java("fMeasureByThreshold")
@property
@since("3.1.0")
def precisionByThreshold(self):
"""
Returns a dataframe with two fields (threshold, precision) curve.
Every possible probability obtained in transforming the dataset
        is used as a threshold when calculating the precision.
"""
return self._call_java("precisionByThreshold")
@property
@since("3.1.0")
def recallByThreshold(self):
"""
Returns a dataframe with two fields (threshold, recall) curve.
Every possible probability obtained in transforming the dataset
are used as thresholds used in calculating the recall.
"""
return self._call_java("recallByThreshold")
class _LinearSVCParams(_ClassifierParams, HasRegParam, HasMaxIter, HasFitIntercept, HasTol,
HasStandardization, HasWeightCol, HasAggregationDepth, HasThreshold,
HasMaxBlockSizeInMB):
"""
Params for :py:class:`LinearSVC` and :py:class:`LinearSVCModel`.
.. versionadded:: 3.0.0
"""
threshold = Param(Params._dummy(), "threshold",
"The threshold in binary classification applied to the linear model"
" prediction. This threshold can be any real number, where Inf will make"
" all predictions 0.0 and -Inf will make all predictions 1.0.",
typeConverter=TypeConverters.toFloat)
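    # Illustrative decision rule implied by the description above (the actual
    # prediction happens on the JVM side): predict 1.0 when the raw margin
    # (rawPrediction[1]) is greater than `threshold`, otherwise 0.0.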
def __init__(self, *args):
super(_LinearSVCParams, self).__init__(*args)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, threshold=0.0, aggregationDepth=2,
maxBlockSizeInMB=0.0)
@inherit_doc
class LinearSVC(_JavaClassifier, _LinearSVCParams, JavaMLWritable, JavaMLReadable):
"""
This binary classifier optimizes the Hinge Loss using the OWLQN optimizer.
Only supports L2 regularization currently.
.. versionadded:: 2.2.0
Notes
-----
`Linear SVM Classifier <https://en.wikipedia.org/wiki/Support_vector_machine#Linear_SVM>`_
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = sc.parallelize([
... Row(label=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(label=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> svm = LinearSVC()
>>> svm.getMaxIter()
100
>>> svm.setMaxIter(5)
LinearSVC...
>>> svm.getMaxIter()
5
>>> svm.getRegParam()
0.0
>>> svm.setRegParam(0.01)
LinearSVC...
>>> svm.getRegParam()
0.01
>>> model = svm.fit(df)
>>> model.setPredictionCol("newPrediction")
LinearSVCModel...
>>> model.getPredictionCol()
'newPrediction'
>>> model.setThreshold(0.5)
LinearSVCModel...
>>> model.getThreshold()
0.5
>>> model.getMaxBlockSizeInMB()
0.0
>>> model.coefficients
DenseVector([0.0, -0.2792, -0.1833])
>>> model.intercept
1.0206118982229047
>>> model.numClasses
2
>>> model.numFeatures
3
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF()
>>> model.predict(test0.head().features)
1.0
>>> model.predictRaw(test0.head().features)
DenseVector([-1.4831, 1.4831])
>>> result = model.transform(test0).head()
>>> result.newPrediction
1.0
>>> result.rawPrediction
DenseVector([-1.4831, 1.4831])
>>> svm_path = temp_path + "/svm"
>>> svm.save(svm_path)
>>> svm2 = LinearSVC.load(svm_path)
>>> svm2.getMaxIter()
5
>>> model_path = temp_path + "/svm_model"
>>> model.save(model_path)
>>> model2 = LinearSVCModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
super(LinearSVC, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LinearSVC", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, maxBlockSizeInMB=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, maxBlockSizeInMB=0.0):
Sets params for Linear SVM Classifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearSVCModel(java_model)
@since("2.2.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.2.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
@since("2.2.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.2.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("2.2.0")
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
@since("2.2.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@since("2.2.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.2.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
@since("3.1.0")
def setMaxBlockSizeInMB(self, value):
"""
Sets the value of :py:attr:`maxBlockSizeInMB`.
"""
return self._set(maxBlockSizeInMB=value)
class LinearSVCModel(_JavaClassificationModel, _LinearSVCParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by LinearSVC.
.. versionadded:: 2.2.0
"""
@since("3.0.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@property
@since("2.2.0")
def coefficients(self):
"""
Model coefficients of Linear SVM Classifier.
"""
return self._call_java("coefficients")
@property
@since("2.2.0")
def intercept(self):
"""
Model intercept of Linear SVM Classifier.
"""
return self._call_java("intercept")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return LinearSVCTrainingSummary(super(LinearSVCModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_lsvc_summary = self._call_java("evaluate", dataset)
return LinearSVCSummary(java_lsvc_summary)
class LinearSVCSummary(_BinaryClassificationSummary):
"""
Abstraction for LinearSVC Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class LinearSVCTrainingSummary(LinearSVCSummary, _TrainingSummary):
"""
Abstraction for LinearSVC Training results.
.. versionadded:: 3.1.0
"""
pass
class _LogisticRegressionParams(_ProbabilisticClassifierParams, HasRegParam,
HasElasticNetParam, HasMaxIter, HasFitIntercept, HasTol,
HasStandardization, HasWeightCol, HasAggregationDepth,
HasThreshold, HasMaxBlockSizeInMB):
"""
Params for :py:class:`LogisticRegression` and :py:class:`LogisticRegressionModel`.
.. versionadded:: 3.0.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
lowerBoundsOnCoefficients = Param(Params._dummy(), "lowerBoundsOnCoefficients",
"The lower bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
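    # Shape example (illustrative, using pyspark.ml.linalg.Matrices): a binomial
    # model with 3 features needs a 1 x 3 bound matrix, e.g.
    # Matrices.dense(1, 3, [0.0, 0.0, 0.0]); a 4-class multinomial model with
    # 3 features needs a 4 x 3 matrix.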
upperBoundsOnCoefficients = Param(Params._dummy(), "upperBoundsOnCoefficients",
"The upper bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
lowerBoundsOnIntercepts = Param(Params._dummy(), "lowerBoundsOnIntercepts",
"The lower bounds on intercepts if fitting under bound "
"constrained optimization. The bounds vector size must be"
"equal with 1 for binomial regression, or the number of"
"lasses for multinomial regression.",
typeConverter=TypeConverters.toVector)
upperBoundsOnIntercepts = Param(Params._dummy(), "upperBoundsOnIntercepts",
"The upper bounds on intercepts if fitting under bound "
"constrained optimization. The bound vector size must be "
"equal with 1 for binomial regression, or the number of "
"classes for multinomial regression.",
typeConverter=TypeConverters.toVector)
def __init__(self, *args):
super(_LogisticRegressionParams, self).__init__(*args)
self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto",
maxBlockSizeInMB=0.0)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self.clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self.clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
        If neither is set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
if self.isSet(self.threshold) and self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: {0}".format(str(ts)))
t = 1.0/(1.0 + ts[0]/ts[1])
t2 = self.getOrDefault(self.threshold)
if abs(t2 - t) >= 1E-5:
raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
" threshold (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
@since("2.3.0")
def getLowerBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`lowerBoundsOnCoefficients`
"""
return self.getOrDefault(self.lowerBoundsOnCoefficients)
@since("2.3.0")
def getUpperBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`upperBoundsOnCoefficients`
"""
return self.getOrDefault(self.upperBoundsOnCoefficients)
@since("2.3.0")
def getLowerBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`lowerBoundsOnIntercepts`
"""
return self.getOrDefault(self.lowerBoundsOnIntercepts)
@since("2.3.0")
def getUpperBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`upperBoundsOnIntercepts`
"""
return self.getOrDefault(self.upperBoundsOnIntercepts)
@inherit_doc
class LogisticRegression(_JavaProbabilisticClassifier, _LogisticRegressionParams, JavaMLWritable,
JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
.. versionadded:: 1.3.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)),
... Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)),
... Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)),
... Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF()
>>> blor = LogisticRegression(weightCol="weight")
>>> blor.getRegParam()
0.0
>>> blor.setRegParam(0.01)
LogisticRegression...
>>> blor.getRegParam()
0.01
>>> blor.setMaxIter(10)
LogisticRegression...
>>> blor.getMaxIter()
10
>>> blor.clear(blor.maxIter)
>>> blorModel = blor.fit(bdf)
>>> blorModel.setFeaturesCol("features")
LogisticRegressionModel...
>>> blorModel.setProbabilityCol("newProbability")
LogisticRegressionModel...
>>> blorModel.getProbabilityCol()
'newProbability'
>>> blorModel.getMaxBlockSizeInMB()
0.0
>>> blorModel.setThreshold(0.1)
LogisticRegressionModel...
>>> blorModel.getThreshold()
0.1
>>> blorModel.coefficients
DenseVector([-1.080..., -0.646...])
>>> blorModel.intercept
3.112...
>>> blorModel.evaluate(bdf).accuracy == blorModel.summary.accuracy
True
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> mdf = spark.read.format("libsvm").load(data_path)
>>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> mlorModel.coefficientMatrix
SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1)
>>> mlorModel.interceptVector
DenseVector([0.04..., -0.42..., 0.37...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF()
>>> blorModel.predict(test0.head().features)
1.0
>>> blorModel.predictRaw(test0.head().features)
DenseVector([-3.54..., 3.54...])
>>> blorModel.predictProbability(test0.head().features)
DenseVector([0.028, 0.972])
>>> result = blorModel.transform(test0).head()
>>> result.prediction
1.0
>>> result.newProbability
DenseVector([0.02..., 0.97...])
>>> result.rawPrediction
DenseVector([-3.54..., 3.54...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> blor.save(lr_path)
>>> lr2 = LogisticRegression.load(lr_path)
>>> lr2.getRegParam()
0.01
>>> model_path = temp_path + "/lr_model"
>>> blorModel.save(model_path)
>>> model2 = LogisticRegressionModel.load(model_path)
>>> blorModel.coefficients[0] == model2.coefficients[0]
True
>>> blorModel.intercept == model2.intercept
True
>>> model2
LogisticRegressionModel: uid=..., numClasses=2, numFeatures=2
>>> blorModel.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto",
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
maxBlockSizeInMB=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto", \
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
maxBlockSizeInMB=0.0):
If the threshold and thresholds Params are both set, they must be equivalent.
"""
super(LogisticRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LogisticRegression", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
self._checkThresholdConsistency()
@keyword_only
@since("1.3.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto",
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
maxBlockSizeInMB=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto", \
lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
maxBlockSizeInMB=0.0):
Sets params for logistic regression.
If the threshold and thresholds Params are both set, they must be equivalent.
"""
kwargs = self._input_kwargs
self._set(**kwargs)
self._checkThresholdConsistency()
return self
def _create_model(self, java_model):
return LogisticRegressionModel(java_model)
@since("2.1.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.3.0")
def setLowerBoundsOnCoefficients(self, value):
"""
Sets the value of :py:attr:`lowerBoundsOnCoefficients`
"""
return self._set(lowerBoundsOnCoefficients=value)
@since("2.3.0")
def setUpperBoundsOnCoefficients(self, value):
"""
Sets the value of :py:attr:`upperBoundsOnCoefficients`
"""
return self._set(upperBoundsOnCoefficients=value)
@since("2.3.0")
def setLowerBoundsOnIntercepts(self, value):
"""
Sets the value of :py:attr:`lowerBoundsOnIntercepts`
"""
return self._set(lowerBoundsOnIntercepts=value)
@since("2.3.0")
def setUpperBoundsOnIntercepts(self, value):
"""
Sets the value of :py:attr:`upperBoundsOnIntercepts`
"""
return self._set(upperBoundsOnIntercepts=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
def setElasticNetParam(self, value):
"""
Sets the value of :py:attr:`elasticNetParam`.
"""
return self._set(elasticNetParam=value)
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
@since("3.1.0")
def setMaxBlockSizeInMB(self, value):
"""
Sets the value of :py:attr:`maxBlockSizeInMB`.
"""
return self._set(maxBlockSizeInMB=value)
class LogisticRegressionModel(_JavaProbabilisticClassificationModel, _LogisticRegressionParams,
JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by LogisticRegression.
.. versionadded:: 1.3.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("intercept")
@property
@since("2.1.0")
def coefficientMatrix(self):
"""
Model coefficients.
"""
return self._call_java("coefficientMatrix")
@property
@since("2.1.0")
def interceptVector(self):
"""
Model intercept.
"""
return self._call_java("interceptVector")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
if self.numClasses <= 2:
return BinaryLogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
return LogisticRegressionTrainingSummary(super(LogisticRegressionModel,
self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_blr_summary = self._call_java("evaluate", dataset)
if self.numClasses <= 2:
return BinaryLogisticRegressionSummary(java_blr_summary)
else:
return LogisticRegressionSummary(java_blr_summary)
class LogisticRegressionSummary(_ClassificationSummary):
"""
Abstraction for Logistic Regression Results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def probabilityCol(self):
"""
Field in "predictions" which gives the probability
of each class as a vector.
"""
return self._call_java("probabilityCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary, _TrainingSummary):
"""
Abstraction for multinomial Logistic Regression Training results.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class BinaryLogisticRegressionSummary(_BinaryClassificationSummary,
LogisticRegressionSummary):
"""
Binary Logistic regression results for a given model.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
LogisticRegressionTrainingSummary):
"""
Binary Logistic regression training results for a given model.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class _DecisionTreeClassifierParams(_DecisionTreeParams, _TreeClassifierParams):
"""
Params for :py:class:`DecisionTreeClassifier` and :py:class:`DecisionTreeClassificationModel`.
"""
def __init__(self, *args):
super(_DecisionTreeClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", leafCol="", minWeightFractionPerNode=0.0)
@inherit_doc
class DecisionTreeClassifier(_JavaProbabilisticClassifier, _DecisionTreeClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed", leafCol="leafId")
>>> model = dt.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
DecisionTreeClassificationModel...
>>> model.numNodes
3
>>> model.depth
1
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> model.numClasses
2
>>> print(model.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([1.0, 0.0])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([1.0, 0.0])
>>> result.rawPrediction
DenseVector([1.0, 0.0])
>>> result.leafId
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtc_path = temp_path + "/dtc"
>>> dt.save(dtc_path)
>>> dt2 = DecisionTreeClassifier.load(dtc_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtc_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> df3 = spark.createDataFrame([
... (1.0, 0.2, Vectors.dense(1.0)),
... (1.0, 0.8, Vectors.dense(1.0)),
... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> si3 = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model3 = si3.fit(df3)
>>> td3 = si_model3.transform(df3)
>>> dt3 = DecisionTreeClassifier(maxDepth=2, weightCol="weight", labelCol="indexed")
>>> model3 = dt3.fit(td3)
>>> print(model3.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
"""
super(DecisionTreeClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", seed=None, weightCol=None, leafCol="",
minWeightFractionPerNode=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
Sets params for the DecisionTreeClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@inherit_doc
class DecisionTreeClassificationModel(_DecisionTreeModel, _JavaProbabilisticClassificationModel,
_DecisionTreeClassifierParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by DecisionTreeClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
where gain is scaled by the number of instances passing through node
- Normalize importances for tree to sum to 1.
.. versionadded:: 2.0.0
Notes
-----
Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
@inherit_doc
class _RandomForestClassifierParams(_RandomForestParams, _TreeClassifierParams):
"""
Params for :py:class:`RandomForestClassifier` and :py:class:`RandomForestClassificationModel`.
"""
def __init__(self, *args):
super(_RandomForestClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", numTrees=20, featureSubsetStrategy="auto",
subsamplingRate=1.0, leafCol="", minWeightFractionPerNode=0.0,
bootstrap=True)
@inherit_doc
class RandomForestClassifier(_JavaProbabilisticClassifier, _RandomForestClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
.. versionadded:: 1.4.0
Examples
--------
>>> import numpy
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42,
... leafCol="leafId")
>>> rf.getMinWeightFractionPerNode()
0.0
>>> model = rf.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
RandomForestClassificationModel...
>>> model.setRawPredictionCol("newRawPrediction")
RandomForestClassificationModel...
>>> model.getBootstrap()
True
>>> model.getRawPredictionCol()
'newRawPrediction'
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([2.0, 0.0])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> numpy.argmax(result.probability)
0
>>> numpy.argmax(result.newRawPrediction)
0
>>> result.leafId
DenseVector([0.0, 0.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.trees
[DecisionTreeClassificationModel...depth=..., DecisionTreeClassificationModel...]
>>> rfc_path = temp_path + "/rfc"
>>> rf.save(rfc_path)
>>> rf2 = RandomForestClassifier.load(rfc_path)
>>> rf2.getNumTrees()
3
>>> model_path = temp_path + "/rfc_model"
>>> model.save(model_path)
>>> model2 = RandomForestClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0, \
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
"""
super(RandomForestClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
"""
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0, \
leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
        Sets params for RandomForestClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setNumTrees(self, value):
"""
Sets the value of :py:attr:`numTrees`.
"""
return self._set(numTrees=value)
@since("3.0.0")
def setBootstrap(self, value):
"""
Sets the value of :py:attr:`bootstrap`.
"""
return self._set(bootstrap=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class RandomForestClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
_RandomForestClassifierParams, JavaMLWritable,
JavaMLReadable, HasTrainingSummary):
"""
Model fitted by RandomForestClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. versionadded:: 2.0.0
See Also
--------
DecisionTreeClassificationModel.featureImportances
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]
@property
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
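            # Binary problems (numClasses <= 2) get a summary exposing binary metrics such as ROC;
            # multiclass problems get the generic classification training summary.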
if self.numClasses <= 2:
return BinaryRandomForestClassificationTrainingSummary(
super(RandomForestClassificationModel, self).summary)
else:
return RandomForestClassificationTrainingSummary(
super(RandomForestClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_rf_summary = self._call_java("evaluate", dataset)
if self.numClasses <= 2:
return BinaryRandomForestClassificationSummary(java_rf_summary)
else:
return RandomForestClassificationSummary(java_rf_summary)
class RandomForestClassificationSummary(_ClassificationSummary):
"""
Abstraction for RandomForestClassification Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class RandomForestClassificationTrainingSummary(RandomForestClassificationSummary,
_TrainingSummary):
"""
Abstraction for RandomForestClassificationTraining Training results.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class BinaryRandomForestClassificationSummary(_BinaryClassificationSummary):
"""
BinaryRandomForestClassification results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class BinaryRandomForestClassificationTrainingSummary(BinaryRandomForestClassificationSummary,
RandomForestClassificationTrainingSummary):
"""
BinaryRandomForestClassification training results for a given model.
.. versionadded:: 3.1.0
"""
pass
class _GBTClassifierParams(_GBTParams, _HasVarianceImpurity):
"""
Params for :py:class:`GBTClassifier` and :py:class:`GBTClassifierModel`.
.. versionadded:: 3.0.0
"""
supportedLossTypes = ["logistic"]
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(supportedLossTypes),
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_GBTClassifierParams, self).__init__(*args)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
leafCol="", minWeightFractionPerNode=0.0)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
@inherit_doc
class GBTClassifier(_JavaProbabilisticClassifier, _GBTClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for classification.
It supports binary labels, as well as both continuous and categorical features.
.. versionadded:: 1.4.0
Notes
-----
Multiclass labels are not currently supported.
The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
Gradient Boosting vs. TreeBoost:
- This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
- Both algorithms learn tree ensembles by minimizing loss functions.
- TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
based on the loss function, whereas the original gradient boosting method does not.
- We expect to implement TreeBoost in the future:
`SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_
Examples
--------
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42,
... leafCol="leafId")
>>> gbt.setMaxIter(5)
GBTClassifier...
>>> gbt.setMinWeightFractionPerNode(0.049)
GBTClassifier...
>>> gbt.getMaxIter()
5
>>> gbt.getFeatureSubsetStrategy()
'all'
>>> model = gbt.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
GBTClassificationModel...
>>> model.setThresholds([0.3, 0.7])
GBTClassificationModel...
>>> model.getThresholds()
[0.3, 0.7]
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([1.1697, -1.1697])
>>> model.predictProbability(test0.head().features)
DenseVector([0.9121, 0.0879])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.leafId
DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.totalNumNodes
15
>>> print(model.toDebugString)
GBTClassificationModel...numTrees=5...
>>> gbtc_path = temp_path + "gbtc"
>>> gbt.save(gbtc_path)
>>> gbt2 = GBTClassifier.load(gbtc_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "gbtc_model"
>>> model.save(model_path)
>>> model2 = GBTClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> model.trees
[DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
>>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0),)],
... ["indexed", "features"])
>>> model.evaluateEachIteration(validation)
[0.25..., 0.23..., 0.21..., 0.19..., 0.18...]
>>> model.numClasses
2
>>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
>>> gbt.getValidationIndicatorCol()
'validationIndicator'
>>> gbt.getValidationTol()
0.01
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, impurity="variance",
featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None,
leafCol="", minWeightFractionPerNode=0.0, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
"""
super(GBTClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.GBTClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0,
impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
weightCol=None)
Sets params for Gradient Boosted Tree Classification.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTClassificationModel(java_model)
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
@since("3.0.0")
def setValidationIndicatorCol(self, value):
"""
Sets the value of :py:attr:`validationIndicatorCol`.
"""
return self._set(validationIndicatorCol=value)
@since("1.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("1.4.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.4.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
"""
Sets the value of :py:attr:`minWeightFractionPerNode`.
"""
return self._set(minWeightFractionPerNode=value)
class GBTClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
_GBTClassifierParams, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GBTClassifier.
.. versionadded:: 1.4.0
"""
@property
def featureImportances(self):
"""
Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. versionadded:: 2.0.0
See Also
--------
DecisionTreeClassificationModel.featureImportances
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
def evaluateEachIteration(self, dataset):
"""
Method to compute error or loss for every iteration of gradient boosting.
.. versionadded:: 2.4.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
return self._call_java("evaluateEachIteration", dataset)
class _NaiveBayesParams(_PredictorParams, HasWeightCol):
"""
Params for :py:class:`NaiveBayes` and :py:class:`NaiveBayesModel`.
.. versionadded:: 3.0.0
"""
smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
"default is 1.0", typeConverter=TypeConverters.toFloat)
    modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
                      "(case-sensitive). Supported options: multinomial (default), complement, " +
                      "bernoulli and gaussian.",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_NaiveBayesParams, self).__init__(*args)
self._setDefault(smoothing=1.0, modelType="multinomial")
@since("1.5.0")
def getSmoothing(self):
"""
Gets the value of smoothing or its default value.
"""
return self.getOrDefault(self.smoothing)
@since("1.5.0")
def getModelType(self):
"""
Gets the value of modelType or its default value.
"""
return self.getOrDefault(self.modelType)
@inherit_doc
class NaiveBayes(_JavaProbabilisticClassifier, _NaiveBayesParams, HasThresholds, HasWeightCol,
JavaMLWritable, JavaMLReadable):
"""
Naive Bayes Classifiers.
It supports both Multinomial and Bernoulli NB. `Multinomial NB \
<http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
can handle finitely supported discrete data. For example, by converting documents into
    TF-IDF vectors, it can be used for document classification. By converting every vector
    to binary (0/1) data, it can also be used as `Bernoulli NB \
<http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
The input feature values for Multinomial NB and Bernoulli NB must be nonnegative.
Since 3.0.0, it supports Complement NB which is an adaptation of the Multinomial NB.
Specifically, Complement NB uses statistics from the complement of each class to compute
the model's coefficients. The inventors of Complement NB show empirically that the parameter
estimates for CNB are more stable than those for Multinomial NB. Like Multinomial NB, the
input feature values for Complement NB must be nonnegative.
Since 3.0.0, it also supports `Gaussian NB \
    <https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Gaussian_naive_Bayes>`_,
    which can handle continuous data.
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
... Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
... Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
>>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
>>> model = nb.fit(df)
>>> model.setFeaturesCol("features")
NaiveBayesModel...
>>> model.getSmoothing()
1.0
>>> model.pi
DenseVector([-0.81..., -0.58...])
>>> model.theta
DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
>>> model.sigma
DenseMatrix(0, 0, [...], ...)
>>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
>>> model.predict(test0.head().features)
1.0
>>> model.predictRaw(test0.head().features)
DenseVector([-1.72..., -0.99...])
>>> model.predictProbability(test0.head().features)
DenseVector([0.32..., 0.67...])
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.32..., 0.67...])
>>> result.rawPrediction
DenseVector([-1.72..., -0.99...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
1.0
>>> nb_path = temp_path + "/nb"
>>> nb.save(nb_path)
>>> nb2 = NaiveBayes.load(nb_path)
>>> nb2.getSmoothing()
1.0
>>> model_path = temp_path + "/nb_model"
>>> model.save(model_path)
>>> model2 = NaiveBayesModel.load(model_path)
>>> model.pi == model2.pi
True
>>> model.theta == model2.theta
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> nb = nb.setThresholds([0.01, 10.00])
>>> model3 = nb.fit(df)
>>> result = model3.transform(test0).head()
>>> result.prediction
0.0
>>> nb3 = NaiveBayes().setModelType("gaussian")
>>> model4 = nb3.fit(df)
>>> model4.getModelType()
'gaussian'
>>> model4.sigma
DenseMatrix(2, 2, [0.0, 0.25, 0.0, 0.0], 1)
>>> nb5 = NaiveBayes(smoothing=1.0, modelType="complement", weightCol="weight")
>>> model5 = nb5.fit(df)
>>> model5.getModelType()
'complement'
>>> model5.theta
DenseMatrix(2, 2, [...], 1)
>>> model5.sigma
DenseMatrix(0, 0, [...], ...)
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
"""
super(NaiveBayes, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.NaiveBayes", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
Sets params for Naive Bayes.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return NaiveBayesModel(java_model)
@since("1.5.0")
def setSmoothing(self, value):
"""
Sets the value of :py:attr:`smoothing`.
"""
return self._set(smoothing=value)
@since("1.5.0")
def setModelType(self, value):
"""
Sets the value of :py:attr:`modelType`.
"""
return self._set(modelType=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
class NaiveBayesModel(_JavaProbabilisticClassificationModel, _NaiveBayesParams, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by NaiveBayes.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pi(self):
"""
log of class priors.
"""
return self._call_java("pi")
@property
@since("2.0.0")
def theta(self):
"""
log of class conditional probabilities.
"""
return self._call_java("theta")
@property
@since("3.0.0")
def sigma(self):
"""
variance of each feature.
"""
return self._call_java("sigma")
class _MultilayerPerceptronParams(_ProbabilisticClassifierParams, HasSeed, HasMaxIter,
HasTol, HasStepSize, HasSolver, HasBlockSize):
"""
Params for :py:class:`MultilayerPerceptronClassifier`.
.. versionadded:: 3.0.0
"""
    layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer. " +
"E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
"neurons and output layer of 10 neurons.",
typeConverter=TypeConverters.toListInt)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
typeConverter=TypeConverters.toVector)
def __init__(self, *args):
super(_MultilayerPerceptronParams, self).__init__(*args)
self._setDefault(maxIter=100, tol=1E-6, blockSize=128, stepSize=0.03, solver="l-bfgs")
@since("1.6.0")
def getLayers(self):
"""
Gets the value of layers or its default value.
"""
return self.getOrDefault(self.layers)
@since("2.0.0")
def getInitialWeights(self):
"""
Gets the value of initialWeights or its default value.
"""
return self.getOrDefault(self.initialWeights)
@inherit_doc
class MultilayerPerceptronClassifier(_JavaProbabilisticClassifier, _MultilayerPerceptronParams,
JavaMLWritable, JavaMLReadable):
"""
Classifier trainer based on the Multilayer Perceptron.
    Each layer has a sigmoid activation function; the output layer has softmax.
Number of inputs has to be equal to the size of feature vectors.
Number of outputs has to be equal to the total number of labels.
.. versionadded:: 1.6.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (0.0, Vectors.dense([0.0, 0.0])),
... (1.0, Vectors.dense([0.0, 1.0])),
... (1.0, Vectors.dense([1.0, 0.0])),
... (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
>>> mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
>>> mlp.setMaxIter(100)
MultilayerPerceptronClassifier...
>>> mlp.getMaxIter()
100
>>> mlp.getBlockSize()
128
>>> mlp.setBlockSize(1)
MultilayerPerceptronClassifier...
>>> mlp.getBlockSize()
1
>>> model = mlp.fit(df)
>>> model.setFeaturesCol("features")
MultilayerPerceptronClassificationModel...
>>> model.getMaxIter()
100
>>> model.getLayers()
[2, 2, 2]
>>> model.weights.size
12
>>> testDF = spark.createDataFrame([
... (Vectors.dense([1.0, 0.0]),),
... (Vectors.dense([0.0, 0.0]),)], ["features"])
>>> model.predict(testDF.head().features)
1.0
>>> model.predictRaw(testDF.head().features)
DenseVector([-16.208, 16.344])
>>> model.predictProbability(testDF.head().features)
DenseVector([0.0, 1.0])
>>> model.transform(testDF).select("features", "prediction").show()
+---------+----------+
| features|prediction|
+---------+----------+
|[1.0,0.0]| 1.0|
|[0.0,0.0]| 0.0|
+---------+----------+
...
>>> mlp_path = temp_path + "/mlp"
>>> mlp.save(mlp_path)
>>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
>>> mlp2.getBlockSize()
1
>>> model_path = temp_path + "/mlp_model"
>>> model.save(model_path)
>>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
>>> model.getLayers() == model2.getLayers()
True
>>> model.weights == model2.weights
True
>>> model.transform(testDF).take(1) == model2.transform(testDF).take(1)
True
>>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
>>> model3 = mlp2.fit(df)
>>> model3.weights != model2.weights
True
>>> model3.getLayers() == model.getLayers()
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None, probabilityCol="probability",
rawPredictionCol="rawPrediction"):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction")
"""
super(MultilayerPerceptronClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None, probabilityCol="probability",
rawPredictionCol="rawPrediction"):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction"):
Sets params for MultilayerPerceptronClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MultilayerPerceptronClassificationModel(java_model)
@since("1.6.0")
def setLayers(self, value):
"""
Sets the value of :py:attr:`layers`.
"""
return self._set(layers=value)
@since("1.6.0")
def setBlockSize(self, value):
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
@since("2.0.0")
def setInitialWeights(self, value):
"""
Sets the value of :py:attr:`initialWeights`.
"""
return self._set(initialWeights=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
class MultilayerPerceptronClassificationModel(_JavaProbabilisticClassificationModel,
_MultilayerPerceptronParams, JavaMLWritable,
JavaMLReadable, HasTrainingSummary):
"""
Model fitted by MultilayerPerceptronClassifier.
.. versionadded:: 1.6.0
"""
@property
@since("2.0.0")
def weights(self):
"""
the weights of layers.
"""
return self._call_java("weights")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return MultilayerPerceptronClassificationTrainingSummary(
super(MultilayerPerceptronClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_mlp_summary = self._call_java("evaluate", dataset)
return MultilayerPerceptronClassificationSummary(java_mlp_summary)
class MultilayerPerceptronClassificationSummary(_ClassificationSummary):
"""
Abstraction for MultilayerPerceptronClassifier Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class MultilayerPerceptronClassificationTrainingSummary(MultilayerPerceptronClassificationSummary,
_TrainingSummary):
"""
Abstraction for MultilayerPerceptronClassifier Training results.
.. versionadded:: 3.1.0
"""
pass
class _OneVsRestParams(_ClassifierParams, HasWeightCol):
"""
    Params for :py:class:`OneVsRest` and :py:class:`OneVsRestModel`.
"""
classifier = Param(Params._dummy(), "classifier", "base binary classifier")
@since("2.0.0")
def getClassifier(self):
"""
Gets the value of classifier or its default value.
"""
return self.getOrDefault(self.classifier)
@inherit_doc
class OneVsRest(Estimator, _OneVsRestParams, HasParallelism, MLReadable, MLWritable):
"""
Reduction of Multiclass Classification to Binary Classification.
    Performs reduction using the one-against-all strategy.
For a multiclass classification with k classes, train k models (one per class).
Each example is scored against all k models and the model with highest score
is picked to label the example.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> df = spark.read.format("libsvm").load(data_path)
>>> lr = LogisticRegression(regParam=0.01)
>>> ovr = OneVsRest(classifier=lr)
>>> ovr.getRawPredictionCol()
'rawPrediction'
>>> ovr.setPredictionCol("newPrediction")
OneVsRest...
>>> model = ovr.fit(df)
>>> model.models[0].coefficients
DenseVector([0.5..., -1.0..., 3.4..., 4.2...])
>>> model.models[1].coefficients
DenseVector([-2.1..., 3.1..., -2.6..., -2.3...])
>>> model.models[2].coefficients
DenseVector([0.3..., -3.4..., 1.0..., -1.1...])
>>> [x.intercept for x in model.models]
[-2.7..., -2.5..., -1.3...]
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0, 1.0, 1.0))]).toDF()
>>> model.transform(test0).head().newPrediction
0.0
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(4, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().newPrediction
2.0
>>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4, 0.3, 0.2))]).toDF()
>>> model.transform(test2).head().newPrediction
0.0
>>> model_path = temp_path + "/ovr_model"
>>> model.save(model_path)
>>> model2 = OneVsRestModel.load(model_path)
>>> model2.transform(test0).head().newPrediction
0.0
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> model.transform(test2).columns
['features', 'rawPrediction', 'newPrediction']
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
super(OneVsRest, self).__init__()
self._setDefault(parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
Sets params for OneVsRest.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setClassifier(self, value):
"""
Sets the value of :py:attr:`classifier`.
"""
return self._set(classifier=value)
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def _fit(self, dataset):
labelCol = self.getLabelCol()
featuresCol = self.getFeaturesCol()
predictionCol = self.getPredictionCol()
classifier = self.getClassifier()
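        # Labels are assumed to be indexed 0.0, 1.0, ..., k-1, so the class count is max(label) + 1.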
numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
weightCol = None
if (self.isDefined(self.weightCol) and self.getWeightCol()):
if isinstance(classifier, HasWeightCol):
weightCol = self.getWeightCol()
else:
warnings.warn("weightCol is ignored, "
"as it is not supported by {} now.".format(classifier))
if weightCol:
multiclassLabeled = dataset.select(labelCol, featuresCol, weightCol)
else:
multiclassLabeled = dataset.select(labelCol, featuresCol)
# persist if underlying dataset is not persistent.
handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
if handlePersistence:
multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)
def trainSingleClass(index):
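            # Build a one-vs-rest training set for this class: rows of class `index` get label 1.0
            # and all other rows get 0.0, then the base classifier is fit with the remapped columns.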
binaryLabelCol = "mc2b$" + str(index)
trainingDataset = multiclassLabeled.withColumn(
binaryLabelCol,
when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
paramMap = dict([(classifier.labelCol, binaryLabelCol),
(classifier.featuresCol, featuresCol),
(classifier.predictionCol, predictionCol)])
if weightCol:
paramMap[classifier.weightCol] = weightCol
return classifier.fit(trainingDataset, paramMap)
pool = ThreadPool(processes=min(self.getParallelism(), numClasses))
models = pool.map(trainSingleClass, range(numClasses))
if handlePersistence:
multiclassLabeled.unpersist()
return self._copyValues(OneVsRestModel(models=models))
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
.. versionadded:: 2.0.0
        Parameters
        ----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`OneVsRest`
Copy of this instance
"""
if extra is None:
extra = dict()
newOvr = Params.copy(self, extra)
if self.isSet(self.classifier):
newOvr.setClassifier(self.getClassifier().copy(extra))
return newOvr
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRest, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
rawPredictionCol = java_stage.getRawPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
parallelism = java_stage.getParallelism()
py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
rawPredictionCol=rawPredictionCol, classifier=classifier,
parallelism=parallelism)
if java_stage.isDefined(java_stage.getParam("weightCol")):
py_stage.setWeightCol(java_stage.getWeightCol())
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRest. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
self.uid)
_java_obj.setClassifier(self.getClassifier()._to_java())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setFeaturesCol(self.getFeaturesCol())
_java_obj.setLabelCol(self.getLabelCol())
_java_obj.setPredictionCol(self.getPredictionCol())
if (self.isDefined(self.weightCol) and self.getWeightCol()):
_java_obj.setWeightCol(self.getWeightCol())
_java_obj.setRawPredictionCol(self.getRawPredictionCol())
return _java_obj
@classmethod
def read(cls):
return OneVsRestReader(cls)
def write(self):
if isinstance(self.getClassifier(), JavaMLWritable):
return JavaMLWriter(self)
else:
return OneVsRestWriter(self)
class _OneVsRestSharedReadWrite:
@staticmethod
def saveImpl(instance, sc, path, extraMetadata=None):
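        # Persist the OneVsRest(Model) params as JSON metadata, excluding the nested classifier,
        # which is saved separately under the 'classifier' subdirectory.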
skipParams = ['classifier']
jsonParams = DefaultParamsWriter.extractJsonParams(instance, skipParams)
DefaultParamsWriter.saveMetadata(instance, path, sc, paramMap=jsonParams,
extraMetadata=extraMetadata)
classifierPath = os.path.join(path, 'classifier')
instance.getClassifier().save(classifierPath)
@staticmethod
def loadClassifier(path, sc):
classifierPath = os.path.join(path, 'classifier')
return DefaultParamsReader.loadParamsInstance(classifierPath, sc)
@staticmethod
def validateParams(instance):
elems_to_check = [instance.getClassifier()]
if isinstance(instance, OneVsRestModel):
elems_to_check.extend(instance.models)
for elem in elems_to_check:
if not isinstance(elem, MLWritable):
raise ValueError(f'OneVsRest write will fail because it contains {elem.uid} '
f'which is not writable.')
@inherit_doc
class OneVsRestReader(MLReader):
def __init__(self, cls):
super(OneVsRestReader, self).__init__()
self.cls = cls
def load(self, path):
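        # Java-backed instances are delegated to JavaMLReader; Python-only instances are
        # rebuilt from the saved classifier plus the remaining params in the metadata.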
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
ova = OneVsRest(classifier=classifier)._resetUid(metadata['uid'])
DefaultParamsReader.getAndSetParams(ova, metadata, skipParams=['classifier'])
return ova
@inherit_doc
class OneVsRestWriter(MLWriter):
def __init__(self, instance):
super(OneVsRestWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
_OneVsRestSharedReadWrite.validateParams(self.instance)
_OneVsRestSharedReadWrite.saveImpl(self.instance, self.sc, path)
class OneVsRestModel(Model, _OneVsRestParams, MLReadable, MLWritable):
"""
Model fitted by OneVsRest.
This stores the models resulting from training k binary classifiers: one for each class.
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
.. versionadded:: 2.0.0
"""
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
def __init__(self, models):
super(OneVsRestModel, self).__init__()
self.models = models
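        # If the sub-models are not Java-backed (e.g. pure-Python classifiers), skip building the
        # mirrored Java OneVsRestModel; persistence then falls back to the Python writer.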
if not isinstance(models[0], JavaMLWritable):
return
# set java instance
java_models = [model._to_java() for model in self.models]
sc = SparkContext._active_spark_context
java_models_array = JavaWrapper._new_java_array(java_models,
sc._gateway.jvm.org.apache.spark.ml
.classification.ClassificationModel)
# TODO: need to set metadata
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
self._java_obj = \
JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
def _transform(self, dataset):
# determine the input columns: these need to be passed through
origCols = dataset.columns
# add an accumulator column to store predictions of all the models
accColName = "mbc$acc" + str(uuid.uuid4())
initUDF = udf(lambda _: [], ArrayType(DoubleType()))
newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
# persist if underlying dataset is not persistent.
handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
if handlePersistence:
newDataset.persist(StorageLevel.MEMORY_AND_DISK)
# update the accumulator column with the result of prediction of models
aggregatedDataset = newDataset
for index, model in enumerate(self.models):
rawPredictionCol = self.getRawPredictionCol()
columns = origCols + [rawPredictionCol, accColName]
# add temporary column to store intermediate scores and update
tmpColName = "mbc$tmp" + str(uuid.uuid4())
updateUDF = udf(
lambda predictions, prediction: predictions + [prediction.tolist()[1]],
ArrayType(DoubleType()))
transformedDataset = model.transform(aggregatedDataset).select(*columns)
updatedDataset = transformedDataset.withColumn(
tmpColName,
updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
newColumns = origCols + [tmpColName]
# switch out the intermediate column with the accumulator column
aggregatedDataset = updatedDataset\
.select(*newColumns).withColumnRenamed(tmpColName, accColName)
if handlePersistence:
newDataset.unpersist()
if self.getRawPredictionCol():
def func(predictions):
predArray = []
for x in predictions:
predArray.append(x)
return Vectors.dense(predArray)
rawPredictionUDF = udf(func, VectorUDT())
aggregatedDataset = aggregatedDataset.withColumn(
self.getRawPredictionCol(), rawPredictionUDF(aggregatedDataset[accColName]))
if self.getPredictionCol():
# output the index of the classifier with highest confidence as prediction
labelUDF = udf(lambda predictions: float(max(enumerate(predictions),
key=operator.itemgetter(1))[0]), DoubleType())
aggregatedDataset = aggregatedDataset.withColumn(
self.getPredictionCol(), labelUDF(aggregatedDataset[accColName]))
return aggregatedDataset.drop(accColName)
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
.. versionadded:: 2.0.0
Parameters
----------
extra : dict, optional
Extra parameters to copy to the new instance
Returns
-------
:py:class:`OneVsRestModel`
Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol)\
.setFeaturesCol(featuresCol)
py_stage._set(labelCol=labelCol)
if java_stage.isDefined(java_stage.getParam("weightCol")):
py_stage._set(weightCol=java_stage.getWeightCol())
py_stage._set(classifier=classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
Returns
-------
py4j.java_gateway.JavaObject
Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
if (self.isDefined(self.weightCol) and self.getWeightCol()):
_java_obj.set("weightCol", self.getWeightCol())
return _java_obj
@classmethod
def read(cls):
return OneVsRestModelReader(cls)
def write(self):
if all(map(lambda elem: isinstance(elem, JavaMLWritable),
[self.getClassifier()] + self.models)):
return JavaMLWriter(self)
else:
return OneVsRestModelWriter(self)
@inherit_doc
class OneVsRestModelReader(MLReader):
def __init__(self, cls):
super(OneVsRestModelReader, self).__init__()
self.cls = cls
def load(self, path):
metadata = DefaultParamsReader.loadMetadata(path, self.sc)
if not DefaultParamsReader.isPythonParamsInstance(metadata):
return JavaMLReader(self.cls).load(path)
else:
classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
numClasses = metadata['numClasses']
subModels = [None] * numClasses
for idx in range(numClasses):
subModelPath = os.path.join(path, f'model_{idx}')
subModels[idx] = DefaultParamsReader.loadParamsInstance(subModelPath, self.sc)
ovaModel = OneVsRestModel(subModels)._resetUid(metadata['uid'])
ovaModel.set(ovaModel.classifier, classifier)
DefaultParamsReader.getAndSetParams(ovaModel, metadata, skipParams=['classifier'])
return ovaModel
@inherit_doc
class OneVsRestModelWriter(MLWriter):
def __init__(self, instance):
super(OneVsRestModelWriter, self).__init__()
self.instance = instance
def saveImpl(self, path):
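        # Record the number of classes in the extra metadata and save each trained sub-model
        # under its own 'model_<idx>' subdirectory.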
_OneVsRestSharedReadWrite.validateParams(self.instance)
instance = self.instance
numClasses = len(instance.models)
extraMetadata = {'numClasses': numClasses}
_OneVsRestSharedReadWrite.saveImpl(instance, self.sc, path, extraMetadata=extraMetadata)
for idx in range(numClasses):
subModelPath = os.path.join(path, f'model_{idx}')
instance.models[idx].save(subModelPath)
@inherit_doc
class FMClassifier(_JavaProbabilisticClassifier, _FactorizationMachinesParams, JavaMLWritable,
JavaMLReadable):
"""
Factorization Machines learning algorithm for classification.
Solver supports:
* gd (normal mini-batch gradient descent)
* adamW (default)
.. versionadded:: 3.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.classification import FMClassifier
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> fm = FMClassifier(factorSize=2)
>>> fm.setSeed(11)
FMClassifier...
>>> model = fm.fit(df)
>>> model.getMaxIter()
100
>>> test0 = spark.createDataFrame([
... (Vectors.dense(-1.0),),
... (Vectors.dense(0.5),),
... (Vectors.dense(1.0),),
... (Vectors.dense(2.0),)], ["features"])
>>> model.predictRaw(test0.head().features)
DenseVector([22.13..., -22.13...])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> model.transform(test0).select("features", "probability").show(10, False)
+--------+------------------------------------------+
|features|probability |
+--------+------------------------------------------+
|[-1.0] |[0.9999999997574736,2.425264676902229E-10]|
|[0.5] |[0.47627851732981163,0.5237214826701884] |
|[1.0] |[5.491554426243495E-4,0.9994508445573757] |
|[2.0] |[2.005766663870645E-10,0.9999999997994233]|
+--------+------------------------------------------+
...
>>> model.intercept
-7.316665276826291
>>> model.linear
DenseVector([14.8232])
>>> model.factors
DenseMatrix(1, 2, [0.0163, -0.0051], 1)
>>> model_path = temp_path + "/fm_model"
>>> model.save(model_path)
>>> model2 = FMClassificationModel.load(model_path)
>>> model2.intercept
-7.316665276826291
>>> model2.linear
DenseVector([14.8232])
>>> model2.factors
DenseMatrix(1, 2, [0.0163, -0.0051], 1)
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", thresholds=None, seed=None):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", thresholds=None, seed=None)
"""
super(FMClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.FMClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.0.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
tol=1e-6, solver="adamW", thresholds=None, seed=None):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
tol=1e-6, solver="adamW", thresholds=None, seed=None)
Sets Params for FMClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return FMClassificationModel(java_model)
@since("3.0.0")
def setFactorSize(self, value):
"""
Sets the value of :py:attr:`factorSize`.
"""
return self._set(factorSize=value)
@since("3.0.0")
def setFitLinear(self, value):
"""
Sets the value of :py:attr:`fitLinear`.
"""
return self._set(fitLinear=value)
@since("3.0.0")
def setMiniBatchFraction(self, value):
"""
Sets the value of :py:attr:`miniBatchFraction`.
"""
return self._set(miniBatchFraction=value)
@since("3.0.0")
def setInitStd(self, value):
"""
Sets the value of :py:attr:`initStd`.
"""
return self._set(initStd=value)
@since("3.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("3.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("3.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("3.0.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
class FMClassificationModel(_JavaProbabilisticClassificationModel, _FactorizationMachinesParams,
JavaMLWritable, JavaMLReadable, HasTrainingSummary):
"""
Model fitted by :class:`FMClassifier`.
.. versionadded:: 3.0.0
"""
@property
@since("3.0.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("3.0.0")
def linear(self):
"""
Model linear term.
"""
return self._call_java("linear")
@property
@since("3.0.0")
def factors(self):
"""
Model factor term.
"""
return self._call_java("factors")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return FMClassificationTrainingSummary(super(FMClassificationModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
.. versionadded:: 3.1.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
Test dataset to evaluate model on.
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_fm_summary = self._call_java("evaluate", dataset)
return FMClassificationSummary(java_fm_summary)
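# Illustrative note (not part of the original module): `summary` above covers
# only the training data, while `evaluate` computes the same kind of summary
# for any labelled DataFrame. A hedged sketch, assuming `train_df`/`test_df`
# are DataFrames with "features" and "label" columns:
#
#     model = FMClassifier(factorSize=2).fit(train_df)
#     test_metrics = model.evaluate(test_df)   # FMClassificationSummary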
class FMClassificationSummary(_BinaryClassificationSummary):
"""
Abstraction for FMClassifier Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class FMClassificationTrainingSummary(FMClassificationSummary, _TrainingSummary):
"""
Abstraction for FMClassifier Training results.
.. versionadded:: 3.1.0
"""
pass
if __name__ == "__main__":
import doctest
import pyspark.ml.classification
from pyspark.sql import SparkSession
globs = pyspark.ml.classification.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.classification tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| BryanCutler/spark | python/pyspark/ml/classification.py | Python | apache-2.0 | 126,641 |
from django.conf.urls import url
from externals.views import (
PartnerVendorNumberAPIView,
PartnerExternalDetailsAPIView,
PartnerBasicInfoAPIView,
)
urlpatterns = [
url(r'^vendor-number/partner/$', PartnerVendorNumberAPIView.as_view(), name="vendor-number-create"),
url(r'^vendor-number/partner/(?P<pk>\d+)/$', PartnerVendorNumberAPIView.as_view(), name="vendor-number-details"),
url(
r'^partner-details/(?P<agency_id>\d+)/(?P<partner_id>\d+)/$',
PartnerExternalDetailsAPIView.as_view(),
name="partner-external-details"
),
url(
r'^partner-basic-info/$',
PartnerBasicInfoAPIView.as_view(),
name="partner-basic-info"
),
]
| unicef/un-partner-portal | backend/unpp_api/apps/externals/urls.py | Python | apache-2.0 | 707 |
"""Tests for init functions."""
from datetime import timedelta
from zoneminder.zm import ZoneMinder
from homeassistant import config_entries
from homeassistant.components.zoneminder import const
from homeassistant.components.zoneminder.common import is_client_in_data
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import (
ATTR_ID,
ATTR_NAME,
CONF_HOST,
CONF_PASSWORD,
CONF_PATH,
CONF_SOURCE,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import MagicMock, patch
from tests.common import async_fire_time_changed
async def test_no_yaml_config(hass: HomeAssistant) -> None:
"""Test empty yaml config."""
with patch(
"homeassistant.components.zoneminder.common.ZoneMinder", autospec=ZoneMinder
) as zoneminder_mock:
zm_client: ZoneMinder = MagicMock(spec=ZoneMinder)
zm_client.get_zms_url.return_value = "http://host1/path_zms1"
zm_client.login.return_value = True
zm_client.get_monitors.return_value = []
zoneminder_mock.return_value = zm_client
hass_config = {const.DOMAIN: []}
await async_setup_component(hass, const.DOMAIN, hass_config)
await hass.async_block_till_done()
assert not hass.services.has_service(const.DOMAIN, const.SERVICE_SET_RUN_STATE)
async def test_yaml_config_import(hass: HomeAssistant) -> None:
"""Test yaml config import."""
with patch(
"homeassistant.components.zoneminder.common.ZoneMinder", autospec=ZoneMinder
) as zoneminder_mock:
zm_client: ZoneMinder = MagicMock(spec=ZoneMinder)
zm_client.get_zms_url.return_value = "http://host1/path_zms1"
zm_client.login.return_value = True
zm_client.get_monitors.return_value = []
zoneminder_mock.return_value = zm_client
hass_config = {const.DOMAIN: [{CONF_HOST: "host1"}]}
await async_setup_component(hass, const.DOMAIN, hass_config)
await hass.async_block_till_done()
assert hass.services.has_service(const.DOMAIN, const.SERVICE_SET_RUN_STATE)
async def test_load_call_service_and_unload(hass: HomeAssistant) -> None:
"""Test config entry load/unload and calling of service."""
with patch(
"homeassistant.components.zoneminder.common.ZoneMinder", autospec=ZoneMinder
) as zoneminder_mock:
zm_client: ZoneMinder = MagicMock(spec=ZoneMinder)
zm_client.get_zms_url.return_value = "http://host1/path_zms1"
zm_client.login.side_effect = [True, True, False, True]
zm_client.get_monitors.return_value = []
zm_client.is_available.return_value = True
zoneminder_mock.return_value = zm_client
await hass.config_entries.flow.async_init(
const.DOMAIN,
context={CONF_SOURCE: config_entries.SOURCE_USER},
data={
CONF_HOST: "host1",
CONF_USERNAME: "username1",
CONF_PASSWORD: "password1",
CONF_PATH: "path1",
const.CONF_PATH_ZMS: "path_zms1",
CONF_SSL: False,
CONF_VERIFY_SSL: True,
},
)
await hass.async_block_till_done()
config_entry = next(iter(hass.config_entries.async_entries(const.DOMAIN)), None)
assert config_entry
assert config_entry.state == ENTRY_STATE_SETUP_RETRY
assert not is_client_in_data(hass, "host1")
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert config_entry.state == ENTRY_STATE_LOADED
assert is_client_in_data(hass, "host1")
assert hass.services.has_service(const.DOMAIN, const.SERVICE_SET_RUN_STATE)
await hass.services.async_call(
const.DOMAIN,
const.SERVICE_SET_RUN_STATE,
{ATTR_ID: "host1", ATTR_NAME: "away"},
)
await hass.async_block_till_done()
zm_client.set_active_state.assert_called_with("away")
await config_entry.async_unload(hass)
await hass.async_block_till_done()
assert config_entry.state == ENTRY_STATE_NOT_LOADED
assert not is_client_in_data(hass, "host1")
assert not hass.services.has_service(const.DOMAIN, const.SERVICE_SET_RUN_STATE)
| tchellomello/home-assistant | tests/components/zoneminder/test_init.py | Python | apache-2.0 | 4,539 |
"""Resource manager for using the add-apt-repository command (part of the
python-software-properties package).
"""
# Common stdlib imports
import sys
import os
import os.path
import re
import glob
# fix path if necessary (if running from source or running as test)
try:
import engage.utils
except:
sys.exc_clear()
dir_to_add_to_python_path = os.path.abspath((os.path.join(os.path.dirname(__file__), "../../..")))
sys.path.append(dir_to_add_to_python_path)
import engage.drivers.resource_manager as resource_manager
import engage.drivers.utils
# Drivers compose *actions* to implement their methods.
from engage.drivers.action import *
from engage.drivers.password_repo_mixin import PasswordRepoMixin
from engage.drivers.genforma.aptget import update
import engage_utils.process as procutils
# setup errors
from engage.utils.user_error import UserError, EngageErrInf
import gettext
_ = gettext.gettext
errors = { }
def define_error(error_code, msg):
global errors
error_info = EngageErrInf(__name__, error_code, msg)
errors[error_info.error_code] = error_info
# error codes
# FILL IN
ERR_TBD = 0
define_error(ERR_TBD,
_("Replace this with your error codes"))
# setup logging
from engage.utils.log_setup import setup_engage_logger
logger = setup_engage_logger(__name__)
# this is used by the package manager to locate the packages.json
# file associated with the driver
def get_packages_filename():
return engage.drivers.utils.get_packages_filename(__file__)
def make_context(resource_json, sudo_password_fn, dry_run=False):
"""Create a Context object (defined in engage.utils.action). This contains
the resource's metadata in ctx.props, references to the logger and sudo
password function, and various helper functions. The context object is used
by individual actions.
If your resource does not need the sudo password, you can just pass in
None for sudo_password_fn.
"""
ctx = Context(resource_json, logger, __file__,
sudo_password_fn=sudo_password_fn,
dry_run=dry_run)
ctx.check_port('input_ports.host',
sudo_password=unicode)
ctx.check_port('input_ports.add_rep_exe_info',
add_apt_repository_exe=unicode)
ctx.check_port('output_ports.repository',
repo_name=unicode)
if hasattr(ctx.props.output_ports.repository, 'repo_url'):
ctx.add('repo_url', ctx.props.output_ports.repository.repo_url)
else:
ctx.add('repo_url', None)
# add any extra computed properties here using the ctx.add() method.
return ctx
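# Illustrative note (not part of the original driver): the check_port() calls
# above imply resource metadata shaped roughly like the following; the concrete
# values are assumptions for the sketch, not taken from a real install.
#
#     {"input_ports": {"host": {"sudo_password": "..."},
#                      "add_rep_exe_info": {"add_apt_repository_exe":
#                                           "/usr/bin/add-apt-repository"}},
#      "output_ports": {"repository": {"repo_name": "ppa:example/ppa",
#                                      "repo_url": "http://ppa.launchpad.net/example/ppa/ubuntu"}}}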
ADD_APT_REPO_COMMAND="/usr/bin/add-apt-repository"
@make_action
def run_add_apt_repository(self, repository_name):
procutils.run_sudo_program([ADD_APT_REPO_COMMAND, '-y', repository_name],
self.ctx._get_sudo_password(self),
self.ctx.logger)
def search_for_repository(repo_url):
"""Look in the all the repository files for the specified
repository url. If it is found, then we have already added the
repository.
"""
r = re.compile(re.escape('deb %s ' % repo_url) + r'\w+\ \w+')
def find_url_in_file(fname):
if not os.path.exists(fname):
return False
with open(fname) as f:
for line in f:
line = line.rstrip()
if r.match(line)!=None:
return True
return False
filelist = glob.glob('/etc/apt/sources.list.d/*.list')
filelist.append('/etc/apt/sources.list')
for fpath in filelist:
if find_url_in_file(fpath):
return True # found it
return False # didn't find repo in any of the files
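# Illustrative note (not part of the original driver): the regex above matches
# one-line apt source entries of the form 'deb <repo_url> <distribution> <component>',
# e.g. for repo_url='http://ppa.launchpad.net/example/ppa/ubuntu' it matches
#     deb http://ppa.launchpad.net/example/ppa/ubuntu precise main
# (the URL and suite names here are made up for the example).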
#
# Now, define the main resource manager class for the driver.
# If this driver is a service, inherit from service_manager.Manager.
# If the driver is just a resource, it should inherit from
# resource_manager.Manager. If you need the sudo password, add
# PasswordRepoMixin to the inheritance list.
#
class Manager(resource_manager.Manager, PasswordRepoMixin):
REQUIRES_ROOT_ACCESS = True
def __init__(self, metadata, dry_run=False):
package_name = "%s %s" % (metadata.key["name"],
metadata.key["version"])
resource_manager.Manager.__init__(self, metadata, package_name)
self.ctx = make_context(metadata.to_json(),
self._get_sudo_password,
dry_run=dry_run)
self._is_installed = False # fallback on this flag if repo_url isn't specified
def validate_pre_install(self):
pass
def is_installed(self):
p = self.ctx.props
if p.repo_url and (not self._is_installed):
self._is_installed = search_for_repository(p.repo_url)
if self._is_installed:
logger.info("Repository %s already installed" %
p.output_ports.repository.repo_name)
return self._is_installed
def install(self, package):
p = self.ctx.props
r = self.ctx.r
r(check_file_exists, ADD_APT_REPO_COMMAND)
r(run_add_apt_repository,
p.output_ports.repository.repo_name)
r(update)
self._is_installed = True
def validate_post_install(self):
pass
| quaddra/engage | python_pkg/engage/drivers/genforma/add_apt_repository.py | Python | apache-2.0 | 5,376 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the resnet model.
Adapted from
https://github.com/tensorflow/models/tree/master/official/vision/image_classification/resnet.
The following code is based on its v1 version.
"""
import tensorflow.compat.v1 as tf
_BATCH_NORM_DECAY = 0.9
_BATCH_NORM_EPSILON = 1e-5
DEFAULT_VERSION = 2
DEFAULT_DTYPE = tf.float32
CASTABLE_TYPES = (tf.float16,)
ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES
NUM_CLASSES = 10
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format, name=''):
"""Performs a batch normalization using a standard set of parameters."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
return tf.compat.v1.layers.batch_normalization(
inputs=inputs,
axis=1 if data_format == 'channels_first' else 3,
momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON,
center=True,
scale=True,
training=training,
fused=True,
name=name)
# add name later if necessary
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
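# Illustrative note (not part of the original module): the padding above depends
# only on kernel_size, e.g. kernel_size=3 gives pad_beg=1, pad_end=1 and
# kernel_size=7 gives pad_beg=3, pad_end=3, regardless of the input's spatial size.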
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format,
name):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.compat.v1.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
reuse=tf.AUTO_REUSE,
kernel_initializer=tf.compat.v1.variance_scaling_initializer(),
data_format=data_format,
name=name)
################################################################################
# ResNet block definitions.
################################################################################
def _building_block_v2(inputs, filters, training, projection_shortcut, strides,
data_format, name):
"""A single block for ResNet v2, without a bottleneck.
Batch normalization then ReLu then convolution as described by:
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference mode.
Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
name: Block name.
Returns:
The output tensor of the block; shape should match inputs.
"""
shortcut = inputs
first_name = name + 'first'
inputs = batch_norm(
inputs, training, data_format, name=first_name + 'batch_norm')
inputs = tf.nn.relu(inputs, name=first_name + 'relu')
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs, name=first_name + 'proj')
second_name = name + 'second'
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
data_format=data_format,
name=second_name + 'input')
inputs = batch_norm(
inputs, training, data_format, name=second_name + 'batch_norm')
inputs = tf.nn.relu(inputs, name=second_name + 'relu')
third_name = name + 'third'
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
data_format=data_format,
name=third_name + 'input')
return inputs + shortcut
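# Illustrative sketch (not part of the original module): a minimal shape check
# for the pre-activation block above, assuming TF1-style graph construction as
# used in the rest of this file. With no projection shortcut and stride 1, the
# output keeps the input's spatial size and channel count.
def _example_building_block_v2_shapes():
  graph = tf.Graph()
  with graph.as_default():
    # Dummy NHWC batch: 2 images of 32x32 with 16 channels.
    images = tf.zeros([2, 32, 32, 16])
    outputs = _building_block_v2(
        images, filters=16, training=False, projection_shortcut=None,
        strides=1, data_format='channels_last', name='demo_block')
  return outputs.shape  # TensorShape([2, 32, 32, 16])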
def block_layer(inputs,
filters,
bottleneck,
block_fn,
blocks,
strides,
training,
name,
data_format,
shortcut=True):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or [batch,
height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
bottleneck: Is the block created a bottleneck block.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
greater than 1, this layer will ultimately downsample the input.
training: Either True or False, whether we are currently training the model.
Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
shortcut: Whether to use projection shortcut in the first block.
Returns:
The output tensor of the block layer.
"""
# Bottleneck blocks end with 4x the number of filters as they start with
filters_out = filters * 4 if bottleneck else filters
def projection_shortcut(inputs, name):
return conv2d_fixed_padding(
inputs=inputs,
filters=filters_out,
kernel_size=1,
strides=strides,
data_format=data_format,
name=name)
# Only the first block per block_layer uses projection_shortcut and strides.
# Skip the projection shortcut in the first block layer.
shortcut_fn = projection_shortcut if shortcut else None
inputs = block_fn(
inputs,
filters,
training,
shortcut_fn,
strides,
data_format,
name=name + 'input')
for j in range(1, blocks):
inputs = block_fn(
inputs,
filters,
training,
None,
1,
data_format,
name=name + 'block' + str(j))
return tf.identity(inputs, name)
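# Illustrative note (not part of the original module): only the first block of a
# block_layer may downsample and change the channel count (via the projection
# shortcut and `strides`); the remaining `blocks - 1` blocks always use stride 1,
# so e.g. filters=128, strides=2 on a 32x32x64 input yields a 16x16x128 output.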
class Model(object):
"""Base class for building the Resnet Model."""
def __init__(self,
resnet_size,
bottleneck,
num_classes,
num_filters,
kernel_size,
conv_stride,
first_pool_size,
first_pool_stride,
block_sizes,
block_strides,
resnet_version=DEFAULT_VERSION,
data_format=None,
dtype=DEFAULT_DTYPE):
"""Creates a model for classifying an image.
Args:
resnet_size: A single integer for the size of the ResNet model.
bottleneck: Use regular blocks or bottleneck blocks.
num_classes: The number of classes used as labels.
num_filters: The number of filters to use for the first block layer of the
model. This number is then doubled for each subsequent block layer.
kernel_size: The kernel size to use for convolution.
conv_stride: stride size for the initial convolutional layer
first_pool_size: Pool size to be used for the first pooling layer. If
none, the first pooling layer is skipped.
first_pool_stride: stride size for the first pooling layer. Not used if
first_pool_size is None.
block_sizes: A list containing n values, where n is the number of sets of
block layers desired. Each value should be the number of blocks in the
i-th set.
block_strides: List of integers representing the desired stride size for
each of the sets of block layers. Should be same length as block_sizes.
resnet_version: Integer representing which version of the ResNet network
to use. See README for details. Valid values: [1, 2]
data_format: Input format ('channels_last', 'channels_first', or None). If
set to None, the format is dependent on whether a GPU is available.
dtype: The TensorFlow dtype to use for calculations. If not specified
tf.float32 is used.
Raises:
ValueError: if invalid version is selected.
"""
self.resnet_size = resnet_size
if not data_format:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
self.resnet_version = resnet_version
if resnet_version not in (1, 2):
raise ValueError(
'Resnet version should be 1 or 2. See README for citations.')
self.bottleneck = bottleneck
self.block_fn = _building_block_v2
if dtype not in ALLOWED_TYPES:
raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES))
self.data_format = data_format
self.num_classes = num_classes
self.num_filters = num_filters
self.kernel_size = kernel_size
self.conv_stride = conv_stride
self.first_pool_size = first_pool_size
self.first_pool_stride = first_pool_stride
self.block_sizes = block_sizes
self.block_strides = block_strides
self.dtype = dtype
self.pre_activation = resnet_version == 2
def _custom_dtype_getter(self, # pylint: disable=keyword-arg-before-vararg
getter,
name,
shape=None,
dtype=DEFAULT_DTYPE,
*args,
**kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
dtype: The dtype of the variable to get. Note that if this is a low
precision dtype, the variable will be created as a tf.float32 variable,
then cast to the appropriate dtype
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
if dtype in CASTABLE_TYPES:
var = getter(name, shape, tf.float32, *args, **kwargs)
return tf.cast(var, dtype=dtype, name=name + '_cast')
else:
return getter(name, shape, dtype, *args, **kwargs)
def _model_variable_scope(self):
"""Returns a variable scope that the model should be created under.
If self.dtype is a castable type, model variable will be created in fp32
then cast to self.dtype before being used.
Returns:
A variable scope for the model.
"""
return tf.compat.v1.variable_scope(
'resnet_model',
custom_getter=self._custom_dtype_getter,
reuse=tf.AUTO_REUSE)
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, self.num_classes].
"""
with self._model_variable_scope():
if self.data_format == 'channels_first':
# Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
# This provides a large performance boost on GPU. See
# https://www.tensorflow.org/performance/performance_guide#data_formats
inputs = tf.transpose(a=inputs, perm=[0, 3, 1, 2])
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=self.num_filters,
kernel_size=self.kernel_size,
strides=self.conv_stride,
data_format=self.data_format,
name='initial_input')
inputs = tf.identity(inputs, 'initial_conv')
# We do not include batch normalization or activation functions in V2
# for the initial conv1 because the first ResNet unit will perform these
# for both the shortcut and non-shortcut paths as part of the first
# block's projection. Cf. Appendix of [2].
if self.resnet_version == 1:
inputs = batch_norm(inputs, training, self.data_format)
inputs = tf.nn.relu(inputs)
if self.first_pool_size:
inputs = tf.compat.v1.layers.max_pooling2d(
inputs=inputs,
pool_size=self.first_pool_size,
strides=self.first_pool_stride,
padding='SAME',
data_format=self.data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
for i, num_blocks in enumerate(self.block_sizes):
# We now have 4 block layers, but the last does not
# double the number of filters.
# We also skip the projection shortcut in the first block layer.
num_filters = self.num_filters * min((2**i), 4)
shortcut = i != 0
inputs = block_layer(
inputs=inputs,
filters=num_filters,
bottleneck=self.bottleneck,
block_fn=self.block_fn,
blocks=num_blocks,
strides=self.block_strides[i],
training=training,
name='block_layer{}'.format(i + 1),
data_format=self.data_format,
shortcut=shortcut)
# Skip the last BN+relu.
# Only apply the BN and ReLU for model that does pre_activation in each
# building/bottleneck block, eg resnet V2.
# if self.pre_activation:
# inputs = batch_norm(inputs, training, self.data_format,
# name='pre_act'+'batch_norm')
# inputs = tf.nn.relu(inputs,name='pre_act'+'relu')
# The current top layer has shape
# `batch_size x pool_size x pool_size x final_size`.
# ResNet does an Average Pooling layer over pool_size,
# but that is the same as doing a reduce_mean. We do a reduce_mean
# here because it performs better than AveragePooling2D.
# Also perform max-pooling, and concat results.
axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
avg_pooled = tf.reduce_mean(input_tensor=inputs, axis=axes, keepdims=True)
avg_pooled = tf.squeeze(avg_pooled, axes)
max_pooled = tf.reduce_max(input_tensor=inputs, axis=axes, keepdims=True)
max_pooled = tf.squeeze(max_pooled, axes)
inputs = tf.concat([avg_pooled, max_pooled], axis=1)
inputs = tf.identity(inputs, 'final_pooling')
inputs = tf.compat.v1.layers.dense(
inputs=inputs, units=self.num_classes, reuse=tf.AUTO_REUSE)
inputs = tf.identity(inputs, 'final_dense')
return inputs
###############################################################################
# Running the model
###############################################################################
class FastCifar10Model(Model):
"""Model class with appropriate defaults for CIFAR-10 data."""
def __init__(self,
resnet_size,
data_format=None,
num_classes=NUM_CLASSES,
resnet_version=DEFAULT_VERSION,
dtype=DEFAULT_DTYPE):
"""These are the parameters that work for CIFAR-10 data.
Args:
resnet_size: The number of convolutional layers needed in the model.
data_format: Either 'channels_first' or 'channels_last', specifying which
data format to use when setting up the model.
num_classes: The number of output classes needed from the model. This
enables users to extend the same model to their own datasets.
resnet_version: Integer representing which version of the ResNet network
to use. See README for details. Valid values: [1, 2]
dtype: The TensorFlow dtype to use for calculations.
Raises:
ValueError: if invalid resnet_size is chosen
"""
# 4 block layers, so change to 8n+2.
if resnet_size % 8 != 2:
raise ValueError('resnet_size must be 8n + 2:', resnet_size)
num_blocks = (resnet_size - 2) // 8
# Switch to 4 block layers. Use 64, 128, 256, 256 filters.
super(FastCifar10Model, self).__init__(
resnet_size=resnet_size,
bottleneck=False,
num_classes=num_classes,
num_filters=64,
kernel_size=3,
conv_stride=1,
first_pool_size=None,
first_pool_stride=None,
block_sizes=[num_blocks] * 4,
block_strides=[1, 2, 2, 2],
resnet_version=resnet_version,
data_format=data_format,
dtype=dtype)
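# Illustrative sketch (not part of the original module): builds the CIFAR-10
# variant defined above and checks the logits shape. resnet_size=10 satisfies
# the 8n + 2 constraint (n=1); the batch of zeros stands in for real images.
def _example_fast_cifar10_logits():
  graph = tf.Graph()
  with graph.as_default():
    model = FastCifar10Model(resnet_size=10, data_format='channels_last')
    images = tf.zeros([8, 32, 32, 3])  # dummy NHWC CIFAR-10 batch
    logits = model(images, training=False)
  return logits.shape  # TensorShape([8, NUM_CLASSES])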
| google-research/google-research | adaptive_learning_rate_tuner/resnet_model_fast.py | Python | apache-2.0 | 19,574 |
import json
import re
from behave import given, when, then
from behave import use_step_matcher
use_step_matcher("re")
# implicitly used
import sure # noqa
# We use this instead of validator from json_schema_generator
# because its error reports are far better
from jsonschema import validate
from _lazy_request import LazyRequest
# supress requests logging
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
# (?:xx) changes priority but will not be captured as args
@given('(.+) are users')
def step_impl(context, user_names_str):
if not hasattr(context, 'users'):
context.users = {}
user_names = [name.strip() for name in re.split('and|,', user_names_str)]
for user_name in user_names:
token = 'fake_token_' + user_name
user_id = context.helpers.create_test_user(user_name, token)
context.users[user_name] = {'token': token, 'id': user_id}
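# Illustrative note (not part of the original steps): the "are users" step
# accepts names joined by commas and/or "and", e.g.
#     [n.strip() for n in re.split('and|,', 'alice, bob and carol')]
# yields ['alice', 'bob', 'carol'].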
@given('(\w+) (?:is|am|are) logged in')
def step_impl(context, user_name):
context.token = context.users[user_name]['token']
@given('(\w+) received (\d+) postcards')
def step_impl(context, user_name, count):
context.helpers.load_postcards(user_name, count)
@when('GET "(\S+)"')
def step_impl(context, rel_url):
context.request = LazyRequest(
'GET', context.helpers.url(rel_url), context.token)
@when('POST "(\S+)"')
def step_impl(context, rel_url):
context.request = LazyRequest(
'POST', context.helpers.url(rel_url), context.token)
@when('with file "(\S+)" as (\w+)')
def step_impl(context, name, field):
context.request.add_file(context.helpers.file_path(name), field)
@when('with data')
def step_impl(context):
data = json.loads(context.text)
receiver_name = re.match(r"\<(\w+)'s id\>", data['receiver']).group(1)
data['receiver'] = context.users[receiver_name]['id']
context.request.add_data(data)
@then('request will (\w+) for (\d+)')
def step_impl(context, state, code):
context.response = context.request.send()
context.response.status_code.should.equal(int(code))
@then('return (\d+) items')
def step_impl(context, count):
cnt = len(context.response.json())
cnt.should.equal(int(count))
@then('has structure')
def step_impl(context):
validate(context.response.json(), json.loads(context.text))
| sunForest/AviPost | e2e/features/steps/crud.py | Python | apache-2.0 | 2,306 |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Runs an Herald XMPP framework
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.4
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald constants
import herald.transports.xmpp
# Pelix
from pelix.ipopo.constants import use_waiting_list
import pelix.framework
# Standard library
import argparse
import logging
# ------------------------------------------------------------------------------
def main(xmpp_server, xmpp_port, peer_name, node_name, app_id,
xmpp_jid=None, xmpp_password=None):
"""
Runs the framework
:param xmpp_server: Address of the XMPP server
:param xmpp_port: Port of the XMPP server
:param peer_name: Name of the peer
:param node_name: Name (also, UID) of the node hosting the peer
:param app_id: Application ID
:param xmpp_jid: XMPP JID, None for Anonymous login
:param xmpp_password: XMPP account password
"""
# Create the framework
framework = pelix.framework.create_framework(
('pelix.ipopo.core',
'pelix.ipopo.waiting',
'pelix.shell.core',
'pelix.shell.ipopo',
'pelix.shell.console',
# Herald core
'herald.core',
'herald.directory',
'herald.shell',
# Herald XMPP
'herald.transports.xmpp.directory',
'herald.transports.xmpp.transport',
# RPC
'pelix.remote.dispatcher',
'pelix.remote.registry',
'herald.remote.discovery',
'herald.remote.herald_xmlrpc',),
{herald.FWPROP_NODE_UID: node_name,
herald.FWPROP_NODE_NAME: node_name,
herald.FWPROP_PEER_NAME: peer_name,
herald.FWPROP_APPLICATION_ID: app_id})
context = framework.get_bundle_context()
# Start everything
framework.start()
# Instantiate components
with use_waiting_list(context) as ipopo:
# ... XMPP Transport
ipopo.add(herald.transports.xmpp.FACTORY_TRANSPORT,
"herald-xmpp-transport",
{herald.transports.xmpp.PROP_XMPP_SERVER: xmpp_server,
herald.transports.xmpp.PROP_XMPP_PORT: xmpp_port,
herald.transports.xmpp.PROP_XMPP_JID: xmpp_jid,
herald.transports.xmpp.PROP_XMPP_PASSWORD: xmpp_password})
# Start the framework and wait for it to stop
framework.wait_for_stop()
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description="Pelix Herald demo")
# XMPP server
group = parser.add_argument_group("XMPP Configuration",
"Configuration of the XMPP transport")
group.add_argument("-s", "--server", action="store", default="localhost",
dest="xmpp_server", help="Host of the XMPP server")
group.add_argument("-p", "--port", action="store", type=int, default=5222,
dest="xmpp_port", help="Port of the XMPP server")
# XMPP login
group.add_argument("-j", "--jid", action="store", default=None,
dest="xmpp_jid", help="JID to login with")
group.add_argument("--password", action="store", default=None,
dest="xmpp_password", help="Password for authentication")
group.add_argument("--ask-password", action="store_true", default=False,
dest="ask_password",
help="Ask password for authentication")
# Peer info
group = parser.add_argument_group("Peer Configuration",
"Identity of the Peer")
group.add_argument("-n", "--name", action="store", default=None,
dest="name", help="Peer name")
group.add_argument("--node", action="store", default=None,
dest="node", help="Node name")
group.add_argument("-a", "--app", action="store",
default=herald.DEFAULT_APPLICATION_ID,
dest="app_id", help="Application ID")
# Parse arguments
args = parser.parse_args()
# Configure the logging package
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('herald').setLevel(logging.DEBUG)
if args.xmpp_jid and args.ask_password:
import getpass
password = getpass.getpass("Password for {0}: ".format(args.xmpp_jid))
else:
password = args.xmpp_password
# Run the framework
main(args.xmpp_server, args.xmpp_port, args.name, args.node, args.app_id,
args.xmpp_jid, password)
| ahmadshahwan/cohorte-herald | python/run_xmpp.py | Python | apache-2.0 | 5,512 |
"""
Functions for converting numbers with SI units to integers
"""
import copy
import re
si_multipliers = {
None: 1,
'': 1,
'k': 1000 ** 1,
'm': 1000 ** 2,
'g': 1000 ** 3,
't': 1000 ** 4,
'p': 1000 ** 5,
'e': 1000 ** 6,
'z': 1000 ** 7,
'y': 1000 ** 8,
'ki': 1024 ** 1,
'mi': 1024 ** 2,
'gi': 1024 ** 3,
'ti': 1024 ** 4,
'pi': 1024 ** 5,
'ei': 1024 ** 6,
'zi': 1024 ** 7,
'yi': 1024 ** 8
}
si_regex = re.compile('^(-?[0-9]+(\.[0-9]+)?)\s*([kmgtpezy][i]?)?$')
def si_as_number(text):
"""Convert a string containing an SI value to an integer or return an
integer if that is what was passed in."""
if type(text) == int:
return text
if type(text) not in [str, unicode]:
raise ValueError("Source value must be string or integer")
matches = si_regex.search(text.lower(), 0)
if matches is None:
raise ValueError("Invalid SI value '" + text + "'")
number = int(matches.group(1)) if matches.group(2) is None \
else float(matches.group(1))
unit = matches.group(3)
multiplier = 1 if unit is None else si_multipliers.get(unit.lower(), '')
return number * multiplier
def number_as_si(number, places=2, base=10):
"""Convert a number to the largest possible SI
representation of that number"""
    # Cast any input to a float so the division below
    # can produce decimal places.
number = float(number)
if base not in [2, 10]:
raise ValueError("base must be either 2 or 10")
# Ensure we get all the things in the correct order
sorted_si = sorted(si_multipliers.items(),
key=lambda x: x[1], reverse=True)
number_format = "{0:.%sf}" % places
for unit, value in sorted_si:
# Make string ops later happy
if unit is None:
unit = ""
# Kind of hacky, depending on what base we're in
# we need to skip either all the base 2 or base 10 entries
if base == 10 and unit.endswith("i"):
continue
if base == 2 and not unit.endswith("i"):
continue
if number > value:
return number_format.format(number / value) + unit.title()
# no matches, must be less than anything so just return number
return number_format.format(number / value)
def si_range(value, default_lower=0):
"""Convert a range of SI numbers to a range of integers.
The 'value' is a dict containing members 'lower' and 'upper', each
being an integer or string suitable for si_as_number(). If
'value' is not a dict, it will be passed directly to
si_as_number() and treated as a non-range (see below). If there
is no 'lower' member and 'default_lower' has been provided, that
value will be used for the lower number.
    Returns a dict containing members 'lower' and 'upper', both
integers. For non-ranges, both will be identical.
Raises ValueError if something doesn't make sense.
"""
if type(value) in [int, str, unicode]:
result = si_as_number(value)
return {"lower": result, "upper": result}
if type(default_lower) != int:
raise ValueError("Default lower value must be integer")
# TODO: Complain about anything else in the input?
result = {}
if "lower" not in value:
# Copy this because altering it would clobber the original (not cool)
vrange = copy.copy(value)
vrange["lower"] = default_lower
value = vrange
for member in ["lower", "upper"]:
try:
result[member] = si_as_number(value[member])
except KeyError:
raise ValueError("Missing '%s' in input" % member)
if result['lower'] > result['upper']:
raise ValueError("Lower value must be less than upper value")
return result
#
# Test
#
if __name__ == "__main__":
# These should convert
print "Simple:"
for value in [
"1234",
"1234K",
"-1234ki",
"5g", "5G", "-5Gi",
"2y",
"12.34",
"123.4K",
"106.9m",
"3.1415P"
]:
integer = si_as_number(value)
print value, integer
# These should not.
print
print "Simple, should throw exceptions:"
for value in [
"ki",
"Steak",
"123e1",
3.1415
]:
try:
integer = si_as_number(value)
print value, integer
except ValueError:
print value, "-> ValueError"
print
print "Ranges:"
for value in [
15,
"16ki",
{"upper": 1000},
{"lower": 1000, "upper": 2000},
{"lower": 1000, "upper": "2k"},
{"lower": "1k", "upper": 2000},
{"lower": "1k", "upper": "2k"},
{"lower": "2k", "upper": "1k"}
]:
try:
returned = si_range(value, default_lower=0)
print value, "->", returned
except Exception as ex:
print value, "-> Exception:", ex
# Convert to SI
print
print "Convert from number to SI representation:"
for value in [
1000,
1000 ** 3,
1234567890,
"9.8",
0
]:
result = number_as_si(value)
print "%s -> %s (base 10)" % (value, result)
result = number_as_si(value, base=2)
print "%s -> %s (base 2)" % (value, result)
result = number_as_si(value, places=3)
print "%s -> %s (3 places)" % (value, result)
| mfeit-internet2/pscheduler-dev | python-pscheduler/pscheduler/pscheduler/sinumber.py | Python | apache-2.0 | 5,585 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import st2tests
from st2common.bootstrap.policiesregistrar import register_policy_types, register_policies
from st2common.models.api.action import ActionAPI, RunnerTypeAPI
from st2common.models.api.policy import PolicyTypeAPI, PolicyAPI
from st2common.persistence.action import Action
from st2common.persistence.policy import PolicyType, Policy
from st2common.persistence.runner import RunnerType
from st2common.policies import ResourcePolicyApplicator, get_driver
from st2tests import DbTestCase, fixturesloader
TEST_FIXTURES = {
'runners': [
'testrunner1.yaml'
],
'actions': [
'action1.yaml'
],
'policytypes': [
'fake_policy_type_1.yaml',
'fake_policy_type_2.yaml'
],
'policies': [
'policy_1.yaml',
'policy_2.yaml'
]
}
PACK = 'generic'
LOADER = fixturesloader.FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
class PolicyTest(DbTestCase):
@classmethod
def setUpClass(cls):
super(PolicyTest, cls).setUpClass()
for _, fixture in six.iteritems(FIXTURES['runners']):
instance = RunnerTypeAPI(**fixture)
RunnerType.add_or_update(RunnerTypeAPI.to_model(instance))
for _, fixture in six.iteritems(FIXTURES['actions']):
instance = ActionAPI(**fixture)
Action.add_or_update(ActionAPI.to_model(instance))
for _, fixture in six.iteritems(FIXTURES['policytypes']):
instance = PolicyTypeAPI(**fixture)
PolicyType.add_or_update(PolicyTypeAPI.to_model(instance))
for _, fixture in six.iteritems(FIXTURES['policies']):
instance = PolicyAPI(**fixture)
Policy.add_or_update(PolicyAPI.to_model(instance))
def test_get_by_ref(self):
policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
self.assertIsNotNone(policy_db)
self.assertEqual(policy_db.pack, 'wolfpack')
self.assertEqual(policy_db.name, 'action-1.concurrency')
policy_type_db = PolicyType.get_by_ref(policy_db.policy_type)
self.assertIsNotNone(policy_type_db)
self.assertEqual(policy_type_db.resource_type, 'action')
self.assertEqual(policy_type_db.name, 'concurrency')
def test_get_driver(self):
policy_db = Policy.get_by_ref('wolfpack.action-1.concurrency')
policy = get_driver(policy_db.ref, policy_db.policy_type, **policy_db.parameters)
self.assertIsInstance(policy, ResourcePolicyApplicator)
self.assertEqual(policy._policy_ref, policy_db.ref)
self.assertEqual(policy._policy_type, policy_db.policy_type)
self.assertTrue(hasattr(policy, 'threshold'))
self.assertEqual(policy.threshold, 3)
class PolicyBootstrapTest(DbTestCase):
def test_register_policy_types(self):
self.assertEqual(register_policy_types(st2tests), 2)
type1 = PolicyType.get_by_ref('action.concurrency')
self.assertEqual(type1.name, 'concurrency')
self.assertEqual(type1.resource_type, 'action')
type2 = PolicyType.get_by_ref('action.mock_policy_error')
self.assertEqual(type2.name, 'mock_policy_error')
self.assertEqual(type2.resource_type, 'action')
def test_register_policies(self):
pack_dir = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
self.assertEqual(register_policies(pack_dir=pack_dir), 2)
p1 = Policy.get_by_ref('dummy_pack_1.test_policy_1')
self.assertEqual(p1.name, 'test_policy_1')
self.assertEqual(p1.pack, 'dummy_pack_1')
self.assertEqual(p1.resource_ref, 'dummy_pack_1.local')
self.assertEqual(p1.policy_type, 'action.concurrency')
p2 = Policy.get_by_ref('dummy_pack_1.test_policy_2')
self.assertEqual(p2.name, 'test_policy_2')
self.assertEqual(p2.pack, 'dummy_pack_1')
self.assertEqual(p2.resource_ref, 'dummy_pack_1.local')
self.assertEqual(p2.policy_type, 'action.mock_policy_error')
self.assertEqual(p2.resource_ref, 'dummy_pack_1.local')
| alfasin/st2 | st2common/tests/unit/test_policies.py | Python | apache-2.0 | 4,902 |
#!/usr/bin/env python
# vim: set filetype=python expandtab tabstop=4 shiftwidth=4 autoindent smartindent:
#from distutils.core import setup
import os
import logging
import setuptools
import distutils.cmd
import subprocess
# TODO add check to make sure required libs are present (eg sqlite3)
_PACKAGE_NAME = 'python-ibank'  # note: a trailing comma here would turn this into a tuple
_PACKAGE_VERSION = '0.0.1'
_LIB_DIR = 'lib'
_BIN_DIR = 'bin'
_DESCRIPTION = ''
_HOME_DIR = os.path.expanduser('~')
print "home:",_HOME_DIR
_HOME_DIR = '/Users/csebenik'
# TODO verify _HOME_DIR makes sense
# TODO ugh.... this isnt working :/
#_VIRTUALENV_DIR = os.path.join(_HOME_DIR, '.virtualenvs', _PACKAGE_NAME)
_VIRTUALENV_DIR = os.path.join('.virtualenv')
# TODO remove all of the print statements and replace with logging
_LOGGER_NAME = __name__
logger = logging.getLogger(_LOGGER_NAME)
class VirtualEnv(distutils.cmd.Command):
"""Setup virtualenv."""
description = "Setup virtualenv."
user_options = []
def __init__(self, dist=None):
self.announce('Init virtualenv.', level=3)
self.finalized = None
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""Create the virtualenv."""
# TODO finish automating virtualenv... manual is ok for now
self.announce('Running virtualenv.', level=2)
print "Creating virtual env in:", _VIRTUALENV_DIR
# TODO check to make sure parent dir exists
# TODO what do to if already exists? re-create? destroy and create?
cmd = 'virtualenv --no-site-packages {0}'.format(_VIRTUALENV_DIR)
print "cmd:",cmd
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
returnCode = proc.returncode
print "stdout:",stdout
print "stderr:",stderr
print "return code:", returnCode
# TODO need to symlink bin/* into .virtualenv/bin/
setuptools.setup(
name = _PACKAGE_NAME,
version = _PACKAGE_VERSION,
description = 'Python libraries for reading iBank files.',
author = 'Craig Sebenik',
author_email = '[email protected]',
url = 'https://github.com/craig5/python-ibank',
scripts = [os.path.join(_BIN_DIR,cur) for cur in os.listdir(_BIN_DIR)
if not cur.endswith('.swp')],
package_dir = {'': _LIB_DIR},
packages = setuptools.find_packages(_LIB_DIR),
cmdclass = {'virtualenv':VirtualEnv}
)
# End of file.
| craig5/python-ibank | setup.py | Python | apache-2.0 | 2,638 |
#!/usr/bin/env python
"""A flow to run checks for a host."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.proto import flows_pb2
class CheckFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.CheckFlowArgs
class CheckRunner(flow.GRRFlow):
"""This flow runs checks on a host.
CheckRunner:
- Identifies what checks should be run for a host.
- Identifies the artifacts that need to be collected to perform those checks.
- Orchestrates collection of the host data.
- Routes host data to the relevant checks.
- Returns check data ready for reporting.
"""
friendly_name = "Run Checks"
category = "/Checks/"
behaviours = flow.GRRFlow.behaviours + "BASIC"
@flow.StateHandler(next_state=["MapArtifactData"])
def Start(self):
"""."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
self.state.Register("knowledge_base",
client.Get(client.Schema.KNOWLEDGE_BASE))
self.state.Register("labels", client.GetLabels())
self.state.Register("artifacts_wanted", set())
self.state.Register("artifacts_fetched", set())
self.state.Register("checks_run", [])
self.state.Register("checks_with_findings", [])
self.state.Register("results_store", None)
self.state.Register("host_data", {})
self.CallState(next_state="MapArtifactData")
@flow.StateHandler(next_state=["AddResponses", "RunChecks"])
def MapArtifactData(self, responses):
"""Get processed data, mapped to artifacts."""
self.state.artifacts_wanted = checks.CheckRegistry.SelectArtifacts(
os_type=self.state.knowledge_base.os)
# Fetch Artifacts and map results to the artifacts that generated them.
# This is an inefficient collection, but necessary because results need to
# be mapped to the originating artifact. An alternative would be to have
# rdfvalues labeled with originating artifact ids.
for artifact_id in self.state.artifacts_wanted:
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_id],
request_data={"artifact_id": artifact_id},
next_state="AddResponses")
self.CallState(next_state="RunChecks")
@flow.StateHandler()
def AddResponses(self, responses):
artifact_id = responses.request_data["artifact_id"]
# TODO(user): Check whether artifact collection succeeded.
self.state.host_data[artifact_id] = list(responses)
@flow.StateHandler(next_state=["Done"])
def RunChecks(self, responses):
if not responses.success:
raise RuntimeError("Checks did not run successfully.")
# Hand host data across to checks. Do this after all data has been collected
# in case some checks require multiple artifacts/results.
for finding in checks.CheckHost(self.state.host_data,
os_type=self.state.knowledge_base.os):
self.state.checks_run.append(finding.check_id)
if finding.anomaly:
self.state.checks_with_findings.append(finding.check_id)
self.SendReply(finding)
| wandec/grr | lib/flows/general/checks.py | Python | apache-2.0 | 3,098 |
# -*- coding: utf-8 -*-
class Config(object):
def __init__(self, bucket, root):
self.bucket = bucket
self.root = root
| lyst/shovel | src/shovel/config.py | Python | apache-2.0 | 138 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from st2client.client import Client
from st2client.models import KeyValuePair
from st2common.services.access import create_token
from st2common.util.api import get_full_public_api_url
from st2common.util.date import get_datetime_utc_now
from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE
class DatastoreService(object):
"""
Class provides public methods for accessing datastore items.
"""
DATASTORE_NAME_SEPARATOR = DATASTORE_KEY_SEPARATOR
def __init__(self, logger, pack_name, class_name, api_username):
self._api_username = api_username
self._pack_name = pack_name
self._class_name = class_name
self._logger = logger
self._client = None
self._token_expire = get_datetime_utc_now()
##################################
# Methods for datastore management
##################################
def list_values(self, local=True, prefix=None):
"""
Retrieve all the datastores items.
:param local: List values from a namespace local to this pack/class. Defaults to True.
:type: local: ``bool``
:param prefix: Optional key name prefix / startswith filter.
:type prefix: ``str``
:rtype: ``list`` of :class:`KeyValuePair`
"""
client = self._get_api_client()
self._logger.audit('Retrieving all the value from the datastore')
key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)
kvps = client.keys.get_all(prefix=key_prefix)
return kvps
def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
"""
Retrieve a value from the datastore for the provided key.
By default, value is retrieved from the namespace local to the pack/class. If you want to
retrieve a global value from a datastore, pass local=False to this method.
:param name: Key name.
:type name: ``str``
:param local: Retrieve value from a namespace local to the pack/class. Defaults to True.
:type: local: ``bool``
:param scope: Scope under which item is saved. Defaults to system scope.
:type: local: ``str``
:param decrypt: Return the decrypted value. Defaults to False.
:type: local: ``bool``
:rtype: ``str`` or ``None``
"""
if scope != SYSTEM_SCOPE:
raise ValueError('Scope %s is unsupported.' % scope)
name = self._get_full_key_name(name=name, local=local)
client = self._get_api_client()
self._logger.audit('Retrieving value from the datastore (name=%s)', name)
try:
params = {'decrypt': str(decrypt).lower(), 'scope': scope}
kvp = client.keys.get_by_id(id=name, params=params)
except Exception as e:
self._logger.exception(
'Exception retrieving value from datastore (name=%s): %s',
name,
e
)
return None
if kvp:
return kvp.value
return None
def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
"""
Set a value for the provided key.
By default, value is set in a namespace local to the pack/class. If you want to
set a global value, pass local=False to this method.
:param name: Key name.
:type name: ``str``
:param value: Key value.
:type value: ``str``
:param ttl: Optional TTL (in seconds).
:type ttl: ``int``
:param local: Set value in a namespace local to the pack/class. Defaults to True.
:type: local: ``bool``
:param scope: Scope under which to place the item. Defaults to system scope.
:type: local: ``str``
:param encrypt: Encrypt the value when saving. Defaults to False.
:type: local: ``bool``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
if scope != SYSTEM_SCOPE:
            raise ValueError('Scope %s is unsupported.' % scope)
name = self._get_full_key_name(name=name, local=local)
value = str(value)
client = self._get_api_client()
self._logger.audit('Setting value in the datastore (name=%s)', name)
instance = KeyValuePair()
instance.id = name
instance.name = name
instance.value = value
instance.scope = scope
if encrypt:
instance.secret = True
if ttl:
instance.ttl = ttl
client.keys.update(instance=instance)
return True
def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
"""
Delete the provided key.
By default, value is deleted from a namespace local to the pack/class. If you want to
delete a global value, pass local=False to this method.
:param name: Name of the key to delete.
:type name: ``str``
:param local: Delete a value in a namespace local to the pack/class. Defaults to True.
:type: local: ``bool``
:param scope: Scope under which item is saved. Defaults to system scope.
:type: local: ``str``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
if scope != SYSTEM_SCOPE:
            raise ValueError('Scope %s is unsupported.' % scope)
name = self._get_full_key_name(name=name, local=local)
client = self._get_api_client()
instance = KeyValuePair()
instance.id = name
instance.name = name
self._logger.audit('Deleting value from the datastore (name=%s)', name)
try:
params = {'scope': scope}
client.keys.delete(instance=instance, params=params)
except Exception as e:
self._logger.exception(
'Exception deleting value from datastore (name=%s): %s',
name,
e
)
return False
return True
def _get_api_client(self):
"""
Retrieve API client instance.
"""
token_expire = self._token_expire <= get_datetime_utc_now()
if not self._client or token_expire:
self._logger.audit('Creating new Client object.')
ttl = (24 * 60 * 60)
self._token_expire = get_datetime_utc_now() + timedelta(seconds=ttl)
temporary_token = create_token(username=self._api_username, ttl=ttl)
api_url = get_full_public_api_url()
self._client = Client(api_url=api_url, token=temporary_token.token)
return self._client
def _get_full_key_name(self, name, local):
"""
Retrieve a full key name.
:rtype: ``str``
"""
if local:
name = self._get_key_name_with_prefix(name=name)
return name
def _get_full_key_prefix(self, local, prefix=None):
if local:
key_prefix = self._get_local_key_name_prefix()
if prefix:
key_prefix += prefix
else:
key_prefix = prefix
return key_prefix
def _get_local_key_name_prefix(self):
"""
Retrieve key prefix which is local to this pack/class.
"""
key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR
return key_prefix
def _get_key_name_with_prefix(self, name):
"""
Retrieve a full key name which is local to the current pack/class.
:param name: Base datastore key name.
:type name: ``str``
:rtype: ``str``
"""
prefix = self._get_datastore_key_prefix()
full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name
return full_name
def _get_datastore_key_prefix(self):
prefix = '%s.%s' % (self._pack_name, self._class_name)
return prefix
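    # Example (derived from the helpers above): with _pack_name == 'mypack' and
    # _class_name == 'MySensor', a local key 'count' is stored as
    # 'mypack.MySensor' + DATASTORE_NAME_SEPARATOR + 'count'.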
| tonybaloney/st2 | st2common/st2common/services/datastore.py | Python | apache-2.0 | 8,753 |
# stdlib
from collections import defaultdict
import sys
from typing import Any as TypeAny
from typing import Callable
from typing import Dict
from typing import KeysView
from typing import List as TypeList
from typing import Set
# third party
from cachetools import cached
from cachetools.keys import hashkey
# relative
from ...ast import add_classes
from ...ast import add_methods
from ...ast import add_modules
from ...ast import globals
from ...logger import traceback_and_raise
from .union import lazy_pairing
def get_cache() -> Dict:
return dict()
@cached(cache=get_cache(), key=lambda path, lib_ast: hashkey(path))
def solve_ast_type_functions(path: str, lib_ast: globals.Globals) -> KeysView:
root = lib_ast
for path_element in path.split("."):
root = getattr(root, path_element)
return root.attrs.keys()
def get_allowed_functions(
lib_ast: globals.Globals, union_types: TypeList[str]
) -> Dict[str, bool]:
"""
This function generates a set of functions that can go into a union type.
A function has to meet the following requirements to be present on a union type:
1. If it's present on all Class attributes associated with the union types
on the ast, add it.
    2. If it's not present on all Class attributes associated with the union
    types, check whether it exists on the original type's function list. If it
    does, drop it; if not, add it.
Args:
lib_ast (Globals): the AST on which we want to generate the union pointer.
union_types (List[str]): the qualnames of the types on which we want a union.
Returns:
allowed_functions (dict): The keys of the dict are function names (str)
and the values are Bool (if they are allowed or not).
"""
allowed_functions: Dict[str, bool] = defaultdict(lambda: True)
def solve_real_type_functions(path: str) -> Set[str]:
parts = path.split(".")
klass_name = parts[-1]
        # TODO: find a better way. Look at https://github.com/OpenMined/PySyft/issues/5249
        # This works around the fact that we can't `import torch.return_types`
        # and fetch it from `sys.modules`.
if parts[-2] == "return_types":
modu = getattr(sys.modules["torch"], "return_types")
else:
modu = sys.modules[".".join(parts[:-1])]
return set(dir(getattr(modu, klass_name)))
for union_type in union_types:
real_type_function_set = solve_real_type_functions(union_type)
ast_type_function_set = solve_ast_type_functions(union_type, lib_ast)
rejected_function_set = real_type_function_set - ast_type_function_set
for accepted_function in ast_type_function_set:
allowed_functions[accepted_function] &= True
for rejected_function in rejected_function_set:
allowed_functions[rejected_function] = False
return allowed_functions
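# Worked example (illustrative): for a function name `f` and union types A and B,
# `f` stays allowed only if, for each type, it is either exposed on that type's
# AST attrs or absent from the real type altogether; if any real type has `f`
# but its AST does not, `f` is rejected for the whole union.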
def create_union_ast(
lib_ast: globals.Globals, client: TypeAny = None
) -> globals.Globals:
ast = globals.Globals(client)
modules = ["syft", "syft.lib", "syft.lib.misc", "syft.lib.misc.union"]
classes = []
methods = []
for klass in lazy_pairing.keys():
classes.append(
(
f"syft.lib.misc.union.{klass.__name__}",
f"syft.lib.misc.union.{klass.__name__}",
klass,
)
)
union_types = lazy_pairing[klass]
allowed_functions = get_allowed_functions(lib_ast, union_types)
for target_method, allowed in allowed_functions.items():
if not allowed:
continue
def generate_func(target_method: str) -> Callable:
def func(self: TypeAny, *args: TypeAny, **kwargs: TypeAny) -> TypeAny:
func = getattr(self, target_method, None)
if func:
return func(*args, **kwargs)
else:
traceback_and_raise(
ValueError(
f"Can't call {target_method} on {klass} with the instance type of {type(self)}"
)
)
return func
def generate_attribute(target_attribute: str) -> TypeAny:
def prop_get(self: TypeAny) -> TypeAny:
prop = getattr(self, target_attribute, None)
if prop is not None:
return prop
else:
                        traceback_and_raise(
                            ValueError(
                                f"Can't call {target_attribute} on {klass} with the instance type of {type(self)}"
                            )
                        )
def prop_set(self: TypeAny, value: TypeAny) -> TypeAny:
setattr(self, target_attribute, value)
return property(prop_get, prop_set)
# TODO: Support dynamic properties for types in AST
# torch.Tensor.grad and torch.Tensor.data are not in the class
# Issue: https://github.com/OpenMined/PySyft/issues/5338
if target_method == "grad" and "Tensor" in klass.__name__:
setattr(klass, target_method, generate_attribute(target_method))
methods.append(
(
f"syft.lib.misc.union.{klass.__name__}.{target_method}",
"torch.Tensor",
)
)
continue
elif target_method == "data" and "Tensor" in klass.__name__:
setattr(klass, target_method, generate_attribute(target_method))
else:
setattr(klass, target_method, generate_func(target_method))
methods.append(
(
f"syft.lib.misc.union.{klass.__name__}.{target_method}",
"syft.lib.python.Any",
)
)
add_modules(ast, modules)
add_classes(ast, classes)
add_methods(ast, methods)
for ast_klass in ast.classes:
ast_klass.create_pointer_class()
ast_klass.create_send_method()
ast_klass.create_storable_object_attr_convenience_methods()
return ast
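# Minimal usage sketch (hypothetical; assumes a fully loaded package-level AST
# such as `syft.lib_ast` and is not executed as part of this module):
#
#     import syft as sy
#     union_ast = create_union_ast(sy.lib_ast)
#
# The returned Globals exposes the syft.lib.misc.union classes with pointer
# classes, send methods, and storable-object convenience methods attached.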
| OpenMined/PySyft | packages/syft/src/syft/lib/misc/__init__.py | Python | apache-2.0 | 6,217 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import os
import mock
from oslo.config import cfg
from testtools import matchers
import webob.exc
import neutron
from neutron.api import api_common
from neutron.api.extensions import PluginAwareExtensionManager
from neutron.api.v2 import attributes
from neutron.api.v2.attributes import ATTR_NOT_SPECIFIED
from neutron.api.v2.router import APIRouter
from neutron.common import config
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common.test_lib import test_config
from neutron import context
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2
from neutron.manager import NeutronManager
from neutron.openstack.common import importutils
from neutron.tests import base
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def optional_ctx(obj, fallback):
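    """Return a context manager that yields ``obj`` when it is provided.
    Falls back to ``fallback()`` (e.g. ``self.subnet``) when ``obj`` is falsy,
    so test helpers can accept an existing resource or create their own.
    """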
if not obj:
return fallback()
@contextlib.contextmanager
def context_wrapper():
yield obj
return context_wrapper()
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def _fake_get_pagination_helper(self, request):
return api_common.PaginationEmulatedHelper(request, self._primary_key)
def _fake_get_sorting_helper(self, request):
return api_common.SortingEmulatedHelper(request, self._attr_info)
class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
fmt = 'json'
resource_prefix_map = {}
def setUp(self, plugin=None, service_plugins=None,
ext_mgr=None):
super(NeutronDbPluginV2TestCase, self).setUp()
cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
        # Make sure the extensions for the plugin under test are loaded for each test
PluginAwareExtensionManager._instance = None
# Save the attributes map in case the plugin will alter it
# loading extensions
# Note(salvatore-orlando): shallow copy is not good enough in
# this case, but copy.deepcopy does not seem to work, since it
# causes test failures
self._attribute_map_bk = {}
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
self._attribute_map_bk[item] = (attributes.
RESOURCE_ATTRIBUTE_MAP[item].
copy())
self._tenant_id = 'test-tenant'
if not plugin:
plugin = DB_PLUGIN_KLASS
# Create the default configurations
args = ['--config-file', etcdir('neutron.conf.test')]
# If test_config specifies some config-file, use it, as well
for config_file in test_config.get('config_files', []):
args.extend(['--config-file', config_file])
config.parse(args=args)
# Update the plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override(
'service_plugins',
[test_config.get(key, default)
for key, default in (service_plugins or {}).iteritems()]
)
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
cfg.CONF.set_override('max_dns_nameservers', 2)
cfg.CONF.set_override('max_subnet_host_routes', 2)
cfg.CONF.set_override('allow_pagination', True)
cfg.CONF.set_override('allow_sorting', True)
self.api = APIRouter()
        # Set the default status
self.net_create_status = 'ACTIVE'
self.port_create_status = 'ACTIVE'
def _is_native_bulk_supported():
plugin_obj = NeutronManager.get_plugin()
native_bulk_attr_name = ("_%s__native_bulk_support"
% plugin_obj.__class__.__name__)
return getattr(plugin_obj, native_bulk_attr_name, False)
self._skip_native_bulk = not _is_native_bulk_supported()
def _is_native_pagination_support():
native_pagination_attr_name = (
"_%s__native_pagination_support" %
NeutronManager.get_plugin().__class__.__name__)
return (cfg.CONF.allow_pagination and
getattr(NeutronManager.get_plugin(),
native_pagination_attr_name, False))
self._skip_native_pagination = not _is_native_pagination_support()
def _is_native_sorting_support():
native_sorting_attr_name = (
"_%s__native_sorting_support" %
NeutronManager.get_plugin().__class__.__name__)
return (cfg.CONF.allow_sorting and
getattr(NeutronManager.get_plugin(),
native_sorting_attr_name, False))
self._skip_native_sorting = not _is_native_sorting_support()
if ext_mgr:
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def tearDown(self):
self.api = None
self._deserializers = None
self._skip_native_bulk = None
self._skip_native_pagination = None
        self._skip_native_sorting = None
self.ext_api = None
# NOTE(jkoelker) for a 'pluggable' framework, Neutron sure
# doesn't like when the plugin changes ;)
db.clear_db()
cfg.CONF.reset()
# Restore the original attribute map
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk
super(NeutronDbPluginV2TestCase, self).tearDown()
def _req(self, method, resource, data=None, fmt=None, id=None, params=None,
action=None, subresource=None, sub_id=None):
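        """Build a test request for ``resource``.
        Illustrative example: with ``self.fmt == 'json'``,
        ``_req('GET', 'networks', id='abc')`` targets the path
        ``/networks/abc.json``.
        """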
fmt = fmt or self.fmt
path = '/%s.%s' % (
'/'.join(p for p in
(resource, id, subresource, sub_id, action) if p),
fmt
)
prefix = self.resource_prefix_map.get(resource)
if prefix:
path = prefix + path
content_type = 'application/%s' % fmt
body = None
if data is not None: # empty dict is valid
body = self.serialize(data)
return testlib_api.create_request(path, body, content_type, method,
query_string=params)
def new_create_request(self, resource, data, fmt=None, id=None,
subresource=None):
return self._req('POST', resource, data, fmt, id=id,
subresource=subresource)
def new_list_request(self, resource, fmt=None, params=None,
subresource=None):
return self._req(
'GET', resource, None, fmt, params=params, subresource=subresource
)
def new_show_request(self, resource, id, fmt=None,
subresource=None, fields=None):
if fields:
params = "&".join(["fields=%s" % x for x in fields])
else:
params = None
return self._req('GET', resource, None, fmt, id=id,
params=params, subresource=subresource)
def new_delete_request(self, resource, id, fmt=None, subresource=None,
sub_id=None):
return self._req(
'DELETE',
resource,
None,
fmt,
id=id,
subresource=subresource,
sub_id=sub_id
)
def new_update_request(self, resource, data, id, fmt=None,
subresource=None):
return self._req(
'PUT', resource, data, fmt, id=id, subresource=subresource
)
def new_action_request(self, resource, data, id, action, fmt=None,
subresource=None):
return self._req(
'PUT',
resource,
data,
fmt,
id=id,
action=action,
subresource=subresource
)
def deserialize(self, content_type, response):
ctype = 'application/%s' % content_type
data = self._deserializers[ctype].deserialize(response.body)['body']
return data
def _create_bulk_from_list(self, fmt, resource, objects, **kwargs):
"""Creates a bulk request from a list of objects."""
collection = "%ss" % resource
req_data = {collection: objects}
req = self.new_create_request(collection, req_data, fmt)
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
elif 'context' in kwargs:
req.environ['neutron.context'] = kwargs['context']
return req.get_response(self.api)
def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs):
"""Creates a bulk request for any kind of resource."""
objects = []
collection = "%ss" % resource
for i in range(number):
obj = copy.deepcopy(data)
obj[resource]['name'] = "%s_%s" % (name, i)
if 'override' in kwargs and i in kwargs['override']:
obj[resource].update(kwargs['override'][i])
objects.append(obj)
req_data = {collection: objects}
req = self.new_create_request(collection, req_data, fmt)
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
elif 'context' in kwargs:
req.environ['neutron.context'] = kwargs['context']
return req.get_response(self.api)
def _create_network(self, fmt, name, admin_state_up,
arg_list=None, **kwargs):
data = {'network': {'name': name,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
for arg in (('admin_state_up', 'tenant_id', 'shared') +
(arg_list or ())):
# Arg must be present
if arg in kwargs:
data['network'][arg] = kwargs[arg]
network_req = self.new_create_request('networks', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
network_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
return network_req.get_response(self.api)
def _create_network_bulk(self, fmt, number, name,
admin_state_up, **kwargs):
base_data = {'network': {'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
return self._create_bulk(fmt, number, 'network', base_data, **kwargs)
def _create_subnet(self, fmt, net_id, cidr,
expected_res_status=None, **kwargs):
data = {'subnet': {'network_id': net_id,
'cidr': cidr,
'ip_version': 4,
'tenant_id': self._tenant_id}}
for arg in ('ip_version', 'tenant_id',
'enable_dhcp', 'allocation_pools',
'dns_nameservers', 'host_routes',
'shared'):
# Arg must be present and not null (but can be false)
if arg in kwargs and kwargs[arg] is not None:
data['subnet'][arg] = kwargs[arg]
if ('gateway_ip' in kwargs and
kwargs['gateway_ip'] is not ATTR_NOT_SPECIFIED):
data['subnet']['gateway_ip'] = kwargs['gateway_ip']
subnet_req = self.new_create_request('subnets', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
subnet_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
subnet_res = subnet_req.get_response(self.api)
if expected_res_status:
self.assertEqual(subnet_res.status_int, expected_res_status)
return subnet_res
def _create_subnet_bulk(self, fmt, number, net_id, name,
ip_version=4, **kwargs):
base_data = {'subnet': {'network_id': net_id,
'ip_version': ip_version,
'tenant_id': self._tenant_id}}
# auto-generate cidrs as they should not overlap
overrides = dict((k, v)
for (k, v) in zip(range(number),
[{'cidr': "10.0.%s.0/24" % num}
for num in range(number)]))
kwargs.update({'override': overrides})
return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)
def _create_port(self, fmt, net_id, expected_res_status=None,
arg_list=None, **kwargs):
data = {'port': {'network_id': net_id,
'tenant_id': self._tenant_id}}
for arg in (('admin_state_up', 'device_id',
'mac_address', 'name', 'fixed_ips',
'tenant_id', 'device_owner', 'security_groups') +
(arg_list or ())):
# Arg must be present
if arg in kwargs:
data['port'][arg] = kwargs[arg]
port_req = self.new_create_request('ports', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
port_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(port_res.status_int, expected_res_status)
return port_res
def _list_ports(self, fmt, expected_res_status=None,
net_id=None, **kwargs):
query_params = None
if net_id:
query_params = "network_id=%s" % net_id
port_req = self.new_list_request('ports', fmt, query_params)
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
port_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(port_res.status_int, expected_res_status)
return port_res
def _create_port_bulk(self, fmt, number, net_id, name,
admin_state_up, **kwargs):
base_data = {'port': {'network_id': net_id,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
return self._create_bulk(fmt, number, 'port', base_data, **kwargs)
def _make_network(self, fmt, name, admin_state_up, **kwargs):
res = self._create_network(fmt, name, admin_state_up, **kwargs)
# TODO(salvatore-orlando): do exception handling in this test module
# in a uniform way (we do it differently for ports, subnets, and nets
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_subnet(self, fmt, network, gateway, cidr,
allocation_pools=None, ip_version=4, enable_dhcp=True,
dns_nameservers=None, host_routes=None, shared=None):
res = self._create_subnet(fmt,
net_id=network['network']['id'],
cidr=cidr,
gateway_ip=gateway,
tenant_id=network['network']['tenant_id'],
allocation_pools=allocation_pools,
ip_version=ip_version,
enable_dhcp=enable_dhcp,
dns_nameservers=dns_nameservers,
host_routes=host_routes,
shared=shared)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs):
res = self._create_port(fmt, net_id, expected_res_status, **kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _api_for_resource(self, resource):
if resource in ['networks', 'subnets', 'ports']:
return self.api
else:
return self.ext_api
def _delete(self, collection, id,
expected_code=webob.exc.HTTPNoContent.code,
neutron_context=None):
req = self.new_delete_request(collection, id)
if neutron_context:
# create a specific auth context for this request
req.environ['neutron.context'] = neutron_context
res = req.get_response(self._api_for_resource(collection))
self.assertEqual(res.status_int, expected_code)
def _show(self, resource, id,
expected_code=webob.exc.HTTPOk.code,
neutron_context=None):
req = self.new_show_request(resource, id)
if neutron_context:
# create a specific auth context for this request
req.environ['neutron.context'] = neutron_context
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(res.status_int, expected_code)
return self.deserialize(self.fmt, res)
def _update(self, resource, id, new_data,
expected_code=webob.exc.HTTPOk.code,
neutron_context=None):
req = self.new_update_request(resource, new_data, id)
if neutron_context:
# create a specific auth context for this request
req.environ['neutron.context'] = neutron_context
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(res.status_int, expected_code)
return self.deserialize(self.fmt, res)
def _list(self, resource, fmt=None, neutron_context=None,
query_params=None):
fmt = fmt or self.fmt
req = self.new_list_request(resource, fmt, query_params)
if neutron_context:
req.environ['neutron.context'] = neutron_context
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
return self.deserialize(fmt, res)
def _fail_second_call(self, patched_plugin, orig, *args, **kwargs):
"""Invoked by test cases for injecting failures in plugin."""
def second_call(*args, **kwargs):
raise n_exc.NeutronException()
patched_plugin.side_effect = second_call
return orig(*args, **kwargs)
def _validate_behavior_on_bulk_failure(
self, res, collection,
errcode=webob.exc.HTTPClientError.code):
self.assertEqual(res.status_int, errcode)
req = self.new_list_request(collection)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
items = self.deserialize(self.fmt, res)
self.assertEqual(len(items[collection]), 0)
def _validate_behavior_on_bulk_success(self, res, collection,
names=['test_0', 'test_1']):
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
items = self.deserialize(self.fmt, res)[collection]
self.assertEqual(len(items), 2)
self.assertEqual(items[0]['name'], 'test_0')
self.assertEqual(items[1]['name'], 'test_1')
def _test_list_resources(self, resource, items, neutron_context=None,
query_params=None):
res = self._list('%ss' % resource,
neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertEqual(sorted([i['id'] for i in res['%ss' % resource]]),
sorted([i[resource]['id'] for i in items]))
@contextlib.contextmanager
def network(self, name='net1',
admin_state_up=True,
fmt=None,
do_delete=True,
**kwargs):
network = self._make_network(fmt or self.fmt, name,
admin_state_up, **kwargs)
try:
yield network
finally:
if do_delete:
# The do_delete parameter allows you to control whether the
# created network is immediately deleted again. Therefore, this
# function is also usable in tests, which require the creation
# of many networks.
self._delete('networks', network['network']['id'])
@contextlib.contextmanager
def subnet(self, network=None,
gateway_ip=ATTR_NOT_SPECIFIED,
cidr='10.0.0.0/24',
fmt=None,
ip_version=4,
allocation_pools=None,
enable_dhcp=True,
dns_nameservers=None,
host_routes=None,
shared=None,
do_delete=True):
with optional_ctx(network, self.network) as network_to_use:
subnet = self._make_subnet(fmt or self.fmt,
network_to_use,
gateway_ip,
cidr,
allocation_pools,
ip_version,
enable_dhcp,
dns_nameservers,
host_routes,
shared=shared)
try:
yield subnet
finally:
if do_delete:
self._delete('subnets', subnet['subnet']['id'])
@contextlib.contextmanager
def port(self, subnet=None, fmt=None, no_delete=False,
**kwargs):
with optional_ctx(subnet, self.subnet) as subnet_to_use:
net_id = subnet_to_use['subnet']['network_id']
port = self._make_port(fmt or self.fmt, net_id, **kwargs)
try:
yield port
finally:
if not no_delete:
self._delete('ports', port['port']['id'])
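    # Typical fixture nesting used by the tests below:
    #
    #     with self.subnet(cidr='10.0.0.0/24') as subnet:
    #         with self.port(subnet=subnet) as port:
    #             ...
    #
    # Each context manager creates the resource and deletes it on exit unless
    # do_delete=False (networks/subnets) or no_delete=True (ports) is passed.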
def _test_list_with_sort(self, resource,
items, sorts, resources=None, query_params=''):
query_str = query_params
for key, direction in sorts:
query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
direction)
if not resources:
resources = '%ss' % resource
req = self.new_list_request(resources,
params=query_str)
api = self._api_for_resource(resources)
res = self.deserialize(self.fmt, req.get_response(api))
resource = resource.replace('-', '_')
resources = resources.replace('-', '_')
expected_res = [item[resource]['id'] for item in items]
self.assertEqual(sorted([n['id'] for n in res[resources]]),
sorted(expected_res))
def _test_list_with_pagination(self, resource, items, sort,
limit, expected_page_num,
resources=None,
query_params='',
verify_key='id'):
if not resources:
resources = '%ss' % resource
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&sort_key=%s&"
"sort_dir=%s") % (limit, sort[0], sort[1])
req = self.new_list_request(resources, params=query_str)
items_res = []
page_num = 0
api = self._api_for_resource(resources)
resource = resource.replace('-', '_')
resources = resources.replace('-', '_')
while req:
page_num = page_num + 1
res = self.deserialize(self.fmt, req.get_response(api))
self.assertThat(len(res[resources]),
matchers.LessThan(limit + 1))
items_res = items_res + res[resources]
req = None
if '%s_links' % resources in res:
for link in res['%s_links' % resources]:
if link['rel'] == 'next':
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(page_num, expected_page_num)
self.assertEqual(sorted([n[verify_key] for n in items_res]),
sorted([item[resource][verify_key]
for item in items]))
def _test_list_with_pagination_reverse(self, resource, items, sort,
limit, expected_page_num,
resources=None,
query_params=''):
if not resources:
resources = '%ss' % resource
resource = resource.replace('-', '_')
api = self._api_for_resource(resources)
marker = items[-1][resource]['id']
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&page_reverse=True&"
"sort_key=%s&sort_dir=%s&"
"marker=%s") % (limit, sort[0], sort[1],
marker)
req = self.new_list_request(resources, params=query_str)
item_res = [items[-1][resource]]
page_num = 0
resources = resources.replace('-', '_')
while req:
page_num = page_num + 1
res = self.deserialize(self.fmt, req.get_response(api))
self.assertThat(len(res[resources]),
matchers.LessThan(limit + 1))
res[resources].reverse()
item_res = item_res + res[resources]
req = None
if '%s_links' % resources in res:
for link in res['%s_links' % resources]:
if link['rel'] == 'previous':
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(page_num, expected_page_num)
expected_res = [item[resource]['id'] for item in items]
expected_res.reverse()
self.assertEqual(sorted([n['id'] for n in item_res]),
sorted(expected_res))
class TestBasicGet(NeutronDbPluginV2TestCase):
def test_single_get_admin(self):
plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
with self.network() as network:
net_id = network['network']['id']
ctx = context.get_admin_context()
n = plugin._get_network(ctx, net_id)
self.assertEqual(net_id, n.id)
def test_single_get_tenant(self):
plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
with self.network() as network:
net_id = network['network']['id']
ctx = context.get_admin_context()
n = plugin._get_network(ctx, net_id)
self.assertEqual(net_id, n.id)
class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
def test_create_returns_201(self):
res = self._create_network(self.fmt, 'net2', True)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_list_returns_200(self):
req = self.new_list_request('networks')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
def _check_list_with_fields(self, res, field_name):
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
body = self.deserialize(self.fmt, res)
        # further checks: 1 network
self.assertEqual(len(body['networks']), 1)
# 1 field in the network record
self.assertEqual(len(body['networks'][0]), 1)
# field is 'name'
self.assertIn(field_name, body['networks'][0])
def test_list_with_fields(self):
self._create_network(self.fmt, 'some_net', True)
req = self.new_list_request('networks', params="fields=name")
res = req.get_response(self.api)
self._check_list_with_fields(res, 'name')
def test_list_with_fields_noadmin(self):
tenant_id = 'some_tenant'
self._create_network(self.fmt,
'some_net',
True,
tenant_id=tenant_id,
set_context=True)
req = self.new_list_request('networks', params="fields=name")
req.environ['neutron.context'] = context.Context('', tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'name')
def test_list_with_fields_noadmin_and_policy_field(self):
"""If a field used by policy is selected, do not duplicate it.
Verifies that if the field parameter explicitly specifies a field
which is used by the policy engine, then it is not duplicated
in the response.
"""
tenant_id = 'some_tenant'
self._create_network(self.fmt,
'some_net',
True,
tenant_id=tenant_id,
set_context=True)
req = self.new_list_request('networks', params="fields=tenant_id")
req.environ['neutron.context'] = context.Context('', tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'tenant_id')
def test_show_returns_200(self):
with self.network() as net:
req = self.new_show_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
def test_delete_returns_204(self):
res = self._create_network(self.fmt, 'net1', True)
net = self.deserialize(self.fmt, res)
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_update_returns_200(self):
with self.network() as net:
req = self.new_update_request('networks',
{'network': {'name': 'steve'}},
net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
def test_update_invalid_json_400(self):
with self.network() as net:
req = self.new_update_request('networks',
'{{"name": "aaa"}}',
net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_bad_route_404(self):
req = self.new_list_request('doohickeys')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_json(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
with self.port(name='myname') as port:
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual('myname', port['port']['name'])
def test_create_port_as_admin(self):
with self.network(do_delete=False) as network:
self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
set_context=False)
def test_create_port_bad_tenant(self):
with self.network() as network:
self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPNotFound.code,
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
set_context=True)
def test_create_port_public_network(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
with self.network(shared=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='another_tenant',
set_context=True)
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
self._delete('ports', port['port']['id'])
def test_create_port_public_network_with_ip(self):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
keys = [('admin_state_up', True),
('status', self.port_create_status),
('fixed_ips', [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}])]
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='another_tenant',
set_context=True)
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
self._delete('ports', port['port']['id'])
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True)
self._validate_behavior_on_bulk_success(res, 'ports')
for p in self.deserialize(self.fmt, res)['ports']:
self._delete('ports', p['id'])
def test_create_ports_bulk_emulated(self):
real_has_attr = hasattr
        # ensure the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True)
self._validate_behavior_on_bulk_success(res, 'ports')
for p in self.deserialize(self.fmt, res)['ports']:
self._delete('ports', p['id'])
def test_create_ports_bulk_wrong_input(self):
with self.network() as net:
overrides = {1: {'admin_state_up': 'doh'}}
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True,
override=overrides)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
req = self.new_list_request('ports')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
ports = self.deserialize(self.fmt, res)
self.assertEqual(len(ports['ports']), 0)
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
        # ensure the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
orig = NeutronManager.get_plugin().create_port
with mock.patch.object(NeutronManager.get_plugin(),
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'ports', webob.exc.HTTPServerError.code
)
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
orig = NeutronManager._instance.plugin.create_port
with mock.patch.object(NeutronManager._instance.plugin,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'ports', webob.exc.HTTPServerError.code)
def test_list_ports(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(),
self.port(),
self.port()) as ports:
self._test_list_resources('port', ports)
def test_list_ports_filtered_by_fixed_ip(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(), self.port()) as (port1, port2):
fixed_ips = port1['port']['fixed_ips'][0]
query_params = """
fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
""".strip() % (fixed_ips['ip_address'],
'192.168.126.5',
fixed_ips['subnet_id'])
self._test_list_resources('port', [port1],
query_params=query_params)
def test_list_ports_public_network(self):
with self.network(shared=True) as network:
with self.subnet(network) as subnet:
with contextlib.nested(self.port(subnet, tenant_id='tenant_1'),
self.port(subnet, tenant_id='tenant_2')
) as (port1, port2):
# Admin request - must return both ports
self._test_list_resources('port', [port1, port2])
# Tenant_1 request - must return single port
q_context = context.Context('', 'tenant_1')
self._test_list_resources('port', [port1],
neutron_context=q_context)
# Tenant_2 request - must return single port
q_context = context.Context('', 'tenant_2')
self._test_list_resources('port', [port2],
neutron_context=q_context)
def test_list_ports_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(admin_state_up='True',
mac_address='00:00:00:00:00:01'),
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:02'),
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:03')
) as (port1, port2, port3):
self._test_list_with_sort('port', (port3, port2, port1),
[('admin_state_up', 'asc'),
('mac_address', 'desc')])
def test_list_ports_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
try:
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(admin_state_up='True',
mac_address='00:00:00:00:00:01'),
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:02'),
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:03')
) as (port1, port2, port3):
self._test_list_with_sort('port', (port3, port2, port1),
[('admin_state_up', 'asc'),
('mac_address', 'desc')])
finally:
helper_patcher.stop()
def test_list_ports_with_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
self.port(mac_address='00:00:00:00:00:02'),
self.port(mac_address='00:00:00:00:00:03')
) as (port1, port2, port3):
self._test_list_with_pagination('port',
(port1, port2, port3),
('mac_address', 'asc'), 2, 2)
def test_list_ports_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
try:
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
self.port(mac_address='00:00:00:00:00:02'),
self.port(mac_address='00:00:00:00:00:03')
) as (port1, port2, port3):
self._test_list_with_pagination('port',
(port1, port2, port3),
('mac_address', 'asc'), 2, 2)
finally:
helper_patcher.stop()
def test_list_ports_with_pagination_reverse_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
self.port(mac_address='00:00:00:00:00:02'),
self.port(mac_address='00:00:00:00:00:03')
) as (port1, port2, port3):
self._test_list_with_pagination_reverse('port',
(port1, port2, port3),
('mac_address', 'asc'),
2, 2)
def test_list_ports_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
try:
cfg.CONF.set_default('allow_overlapping_ips', True)
with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'),
self.port(mac_address='00:00:00:00:00:02'),
self.port(mac_address='00:00:00:00:00:03')
) as (port1, port2, port3):
self._test_list_with_pagination_reverse('port',
(port1, port2, port3),
('mac_address', 'asc'),
2, 2)
finally:
helper_patcher.stop()
def test_show_port(self):
with self.port() as port:
req = self.new_show_request('ports', port['port']['id'], self.fmt)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port']['id'], sport['port']['id'])
def test_delete_port(self):
with self.port(no_delete=True) as port:
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_delete_port_public_network(self):
with self.network(shared=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='another_tenant',
set_context=True)
port = self.deserialize(self.fmt, port_res)
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_update_port(self):
with self.port() as port:
data = {'port': {'admin_state_up': False}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
def test_update_port_not_admin(self):
res = self._create_network(self.fmt, 'net1', True,
tenant_id='not_admin',
set_context=True)
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
tenant_id='not_admin', set_context=True)
port = self.deserialize(self.fmt, res)
data = {'port': {'admin_state_up': False}}
neutron_context = context.Context('', 'not_admin')
port = self._update('ports', port['port']['id'], data,
neutron_context=neutron_context)
self.assertEqual(port['port']['admin_state_up'], False)
def test_update_device_id_null(self):
with self.port() as port:
data = {'port': {'device_id': None}}
req = self.new_update_request('ports', data, port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_delete_network_if_port_exists(self):
with self.port() as port:
req = self.new_delete_request('networks',
port['port']['network_id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_delete_network_port_exists_owned_by_network(self):
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
self._create_port(self.fmt, network_id,
device_owner=constants.DEVICE_OWNER_DHCP)
req = self.new_delete_request('networks', network_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_update_port_delete_ip(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': []}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
self.assertEqual(res['port']['fixed_ips'],
data['port']['fixed_ips'])
def test_no_more_port_exception(self):
with self.subnet(cidr='10.0.0.0/32') as subnet:
id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, id)
data = self.deserialize(self.fmt, res)
msg = str(n_exc.IpAddressGenerationFailure(net_id=id))
self.assertEqual(data['NeutronError']['message'], msg)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_update_port_update_ip(self):
"""Test update of port IP.
Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
"""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
def test_update_port_update_ip_address_only(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"},
{'ip_address': "10.0.0.2"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self.assertEqual(ips[1]['ip_address'], '10.0.0.10')
self.assertEqual(ips[1]['subnet_id'], subnet['subnet']['id'])
def test_update_port_update_ips(self):
"""Update IP and associate new IP on port.
        Check a port update with the specified subnet_ids. An IP address
        will be allocated for each subnet_id.
"""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': '10.0.0.3'}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
def test_update_port_add_additional_ip(self):
"""Test update of port with additional IP."""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']},
{'subnet_id':
subnet['subnet']['id']}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self.assertEqual(ips[1]['ip_address'], '10.0.0.4')
self.assertEqual(ips[1]['subnet_id'], subnet['subnet']['id'])
def test_requested_duplicate_mac(self):
with self.port() as port:
mac = port['port']['mac_address']
# check that MAC address matches base MAC
base_mac = cfg.CONF.base_mac[0:2]
self.assertTrue(mac.startswith(base_mac))
kwargs = {"mac_address": mac}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_mac_generation(self):
cfg.CONF.set_override('base_mac', "12:34:56:00:00:00")
with self.port() as port:
mac = port['port']['mac_address']
self.assertTrue(mac.startswith("12:34:56"))
def test_mac_generation_4octet(self):
cfg.CONF.set_override('base_mac', "12:34:56:78:00:00")
with self.port() as port:
mac = port['port']['mac_address']
self.assertTrue(mac.startswith("12:34:56:78"))
def test_bad_mac_format(self):
cfg.CONF.set_override('base_mac', "bad_mac")
try:
self.plugin._check_base_mac_format()
except Exception:
return
self.fail("No exception for illegal base_mac format")
def test_mac_exhaustion(self):
        # rather than actually consuming all MACs (would take a LONG time)
# we just raise the exception that would result.
@staticmethod
def fake_gen_mac(context, net_id):
raise n_exc.MacAddressGenerationFailure(net_id=net_id)
with mock.patch.object(neutron.db.db_base_plugin_v2.NeutronDbPluginV2,
'_generate_mac', new=fake_gen_mac):
res = self._create_network(fmt=self.fmt, name='net1',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
net_id = network['network']['id']
res = self._create_port(self.fmt, net_id=net_id)
self.assertEqual(res.status_int,
webob.exc.HTTPServiceUnavailable.code)
def test_requested_duplicate_ip(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
# Check configuring of duplicate IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': ips[0]['ip_address']}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_requested_subnet_delete(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
req = self.new_delete_request('subnet',
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_requested_subnet_id(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Request an IP from a specific subnet
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._delete('ports', port2['port']['id'])
def test_requested_subnet_id_not_on_network(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
# Create new network
res = self._create_network(fmt=self.fmt, name='net2',
admin_state_up=True)
network2 = self.deserialize(self.fmt, res)
subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1",
"1.1.1.0/24", ip_version=4)
net_id = port['port']['network_id']
                # Request an IP from a specific subnet
kwargs = {"fixed_ips": [{'subnet_id':
subnet2['subnet']['id']}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_overlapping_subnets(self):
with self.subnet() as subnet:
tenant_id = subnet['subnet']['tenant_id']
net_id = subnet['subnet']['network_id']
res = self._create_subnet(self.fmt,
tenant_id=tenant_id,
net_id=net_id,
cidr='10.0.0.225/28',
ip_version=4,
gateway_ip=ATTR_NOT_SPECIFIED)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_requested_subnet_id_v4_and_v6(self):
with self.subnet() as subnet:
            # Get an IPv4 and an IPv6 address
tenant_id = subnet['subnet']['tenant_id']
net_id = subnet['subnet']['network_id']
res = self._create_subnet(self.fmt,
tenant_id=tenant_id,
net_id=net_id,
cidr='2607:f0d0:1002:51::/124',
ip_version=6,
gateway_ip=ATTR_NOT_SPECIFIED)
subnet2 = self.deserialize(self.fmt, res)
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port3 = self.deserialize(self.fmt, res)
ips = port3['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self.assertEqual(ips[1]['ip_address'], '2607:f0d0:1002:51::2')
self.assertEqual(ips[1]['subnet_id'], subnet2['subnet']['id'])
res = self._create_port(self.fmt, net_id=net_id)
port4 = self.deserialize(self.fmt, res)
# Check that a v4 and a v6 address are allocated
ips = port4['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self.assertEqual(ips[1]['ip_address'], '2607:f0d0:1002:51::3')
self.assertEqual(ips[1]['subnet_id'], subnet2['subnet']['id'])
self._delete('ports', port3['port']['id'])
self._delete('ports', port4['port']['id'])
def test_range_allocation(self):
with self.subnet(gateway_ip='10.0.0.3',
cidr='10.0.0.0/29') as subnet:
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port = self.deserialize(self.fmt, res)
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 5)
alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5',
'10.0.0.6']
for ip in ips:
self.assertIn(ip['ip_address'], alloc)
self.assertEqual(ip['subnet_id'],
subnet['subnet']['id'])
alloc.remove(ip['ip_address'])
self.assertEqual(len(alloc), 0)
self._delete('ports', port['port']['id'])
with self.subnet(gateway_ip='11.0.0.6',
cidr='11.0.0.0/29') as subnet:
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port = self.deserialize(self.fmt, res)
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 5)
alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4',
'11.0.0.5']
for ip in ips:
self.assertIn(ip['ip_address'], alloc)
self.assertEqual(ip['subnet_id'],
subnet['subnet']['id'])
alloc.remove(ip['ip_address'])
self.assertEqual(len(alloc), 0)
self._delete('ports', port['port']['id'])
def test_requested_invalid_fixed_ips(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
# Test invalid subnet_id
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id':
'00000000-ffff-ffff-ffff-000000000000'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
# Test invalid IP address on specified subnet_id
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '1.1.1.1'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
                # Test invalid addresses - IPs not on subnet or network
# address or broadcast address
bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255']
net_id = port['port']['network_id']
for ip in bad_ips:
kwargs = {"fixed_ips": [{'ip_address': ip}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
# Enable allocation of gateway address
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.1'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.1')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._delete('ports', port2['port']['id'])
def test_invalid_ip(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '1011.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_requested_split(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ports_to_delete = []
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
ports_to_delete.append(port2)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.5')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Create more ports without fixed_ips; addresses are assigned
                # sequentially and 10.0.0.5 is skipped since it is in use
allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6']
for a in allocated:
res = self._create_port(self.fmt, net_id=net_id)
port2 = self.deserialize(self.fmt, res)
ports_to_delete.append(port2)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], a)
self.assertEqual(ips[0]['subnet_id'],
subnet['subnet']['id'])
for p in ports_to_delete:
self._delete('ports', p['port']['id'])
def test_duplicate_ips(self):
with self.subnet() as subnet:
            # Request the same fixed IP twice on a single port
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'},
{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_fixed_ip_invalid_subnet_id(self):
with self.subnet() as subnet:
            # Request a fixed IP with a malformed subnet_id
kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid',
'ip_address': '10.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_fixed_ip_invalid_ip(self):
with self.subnet() as subnet:
            # Request a fixed IP with an invalid IP address
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.55555'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_requested_ips_only(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21',
'10.0.0.3', '10.0.0.17', '10.0.0.19']
ports_to_delete = []
for i in ips_only:
kwargs = {"fixed_ips": [{'ip_address': i}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port = self.deserialize(self.fmt, res)
ports_to_delete.append(port)
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], i)
self.assertEqual(ips[0]['subnet_id'],
subnet['subnet']['id'])
for p in ports_to_delete:
self._delete('ports', p['port']['id'])
def test_invalid_admin_state(self):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id': network['network']['tenant_id'],
'admin_state_up': 7,
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_mac_address(self):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id': network['network']['tenant_id'],
'admin_state_up': 1,
'mac_address': 'mac',
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_max_fixed_ips_exceeded(self):
with self.subnet(gateway_ip='10.0.0.3',
cidr='10.0.0.0/24') as subnet:
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_max_fixed_ips_exceeded(self):
with self.subnet(gateway_ip='10.0.0.3',
cidr='10.0.0.0/24') as subnet:
with self.port(subnet) as port:
data = {'port': {'fixed_ips':
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'},
{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.4'},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
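
    # The next two tests cover plugin.delete_ports_by_device_id(): the first
    # checks that only ports matching the given device_id are removed, the
    # second uses the _fail_second_call() helper to make the second
    # delete_port() call raise, and verifies which ports survive.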
def test_delete_ports_by_device_id(self):
plugin = NeutronManager.get_plugin()
ctx = context.get_admin_context()
with self.subnet() as subnet:
with contextlib.nested(
self.port(subnet=subnet, device_id='owner1', no_delete=True),
self.port(subnet=subnet, device_id='owner1', no_delete=True),
self.port(subnet=subnet, device_id='owner2'),
) as (p1, p2, p3):
network_id = subnet['subnet']['network_id']
plugin.delete_ports_by_device_id(ctx, 'owner1',
network_id)
self._show('ports', p1['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
self._show('ports', p2['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
self._show('ports', p3['port']['id'],
expected_code=webob.exc.HTTPOk.code)
def _test_delete_ports_by_device_id_second_call_failure(self, plugin):
ctx = context.get_admin_context()
with self.subnet() as subnet:
with contextlib.nested(
self.port(subnet=subnet, device_id='owner1', no_delete=True),
self.port(subnet=subnet, device_id='owner1'),
self.port(subnet=subnet, device_id='owner2'),
) as (p1, p2, p3):
orig = plugin.delete_port
with mock.patch.object(plugin, 'delete_port') as del_port:
def side_effect(*args, **kwargs):
return self._fail_second_call(del_port, orig,
*args, **kwargs)
del_port.side_effect = side_effect
network_id = subnet['subnet']['network_id']
self.assertRaises(n_exc.NeutronException,
plugin.delete_ports_by_device_id,
ctx, 'owner1', network_id)
self._show('ports', p1['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
self._show('ports', p2['port']['id'],
expected_code=webob.exc.HTTPOk.code)
self._show('ports', p3['port']['id'],
expected_code=webob.exc.HTTPOk.code)
def test_delete_ports_by_device_id_second_call_failure(self):
plugin = NeutronManager.get_plugin()
self._test_delete_ports_by_device_id_second_call_failure(plugin)
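

# TestNetworksV2 covers network CRUD behaviour at the API/DB layer: create
# (including shared/public networks and bulk creation), update (notably the
# transitions of the 'shared' flag and the related policy checks), and list
# operations with filtering, sorting and pagination.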
class TestNetworksV2(NeutronDbPluginV2TestCase):
# NOTE(cerberus): successful network update and delete are
# effectively tested above
def test_create_network(self):
name = 'net1'
keys = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', self.net_create_status), ('shared', False)]
with self.network(name=name) as net:
for k, v in keys:
self.assertEqual(net['network'][k], v)
def test_create_public_network(self):
name = 'public_net'
keys = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', self.net_create_status), ('shared', True)]
with self.network(name=name, shared=True) as net:
for k, v in keys:
self.assertEqual(net['network'][k], v)
def test_create_public_network_no_admin_tenant(self):
name = 'public_net'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
with self.network(name=name,
shared=True,
tenant_id="another_tenant",
set_context=True):
pass
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPForbidden.code)
def test_update_network(self):
with self.network() as network:
data = {'network': {'name': 'a_brand_new_name'}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['network']['name'],
data['network']['name'])
def test_update_shared_network_noadmin_returns_403(self):
with self.network(shared=True) as network:
data = {'network': {'name': 'a_brand_new_name'}}
req = self.new_update_request('networks',
data,
network['network']['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.api)
# The API layer always returns 404 on updates in place of 403
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_network_set_shared(self):
with self.network(shared=False) as network:
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
def test_update_network_set_shared_owner_returns_404(self):
with self.network(shared=False) as network:
net_owner = network['network']['tenant_id']
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
req.environ['neutron.context'] = context.Context('u', net_owner)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_network_with_subnet_set_shared(self):
with self.network(shared=False) as network:
with self.subnet(network=network) as subnet:
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
                # Must query the DB directly to check whether the subnet's
                # shared attribute was updated along with the network
ctx = context.Context('', '', is_admin=True)
subnet_db = NeutronManager.get_plugin()._get_subnet(
ctx, subnet['subnet']['id'])
self.assertEqual(subnet_db['shared'], True)
def test_update_network_set_not_shared_single_tenant(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id=network['network']['tenant_id'],
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertFalse(res['network']['shared'])
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_other_tenant_returns_409(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='somebody_else',
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
self.assertEqual(req.get_response(self.api).status_int,
webob.exc.HTTPConflict.code)
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_multi_tenants_returns_409(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='somebody_else',
set_context=True)
res2 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id=network['network']['tenant_id'],
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
self.assertEqual(req.get_response(self.api).status_int,
webob.exc.HTTPConflict.code)
port1 = self.deserialize(self.fmt, res1)
port2 = self.deserialize(self.fmt, res2)
self._delete('ports', port1['port']['id'])
self._delete('ports', port2['port']['id'])
def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='somebody_else',
set_context=True)
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPCreated.code,
tenant_id=network['network']['tenant_id'],
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
self.assertEqual(req.get_response(self.api).status_int,
webob.exc.HTTPConflict.code)
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
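
    # Bulk-create tests come in two flavours: "native", which exercises the
    # plugin's own bulk support and is skipped when self._skip_native_bulk is
    # set, and "emulated", which patches __builtin__.hasattr so that the
    # __native_bulk_support attribute appears absent and the API falls back
    # to creating the resources one at a time.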
def test_create_networks_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
res = self._create_network_bulk(self.fmt, 2, 'test', True)
self._validate_behavior_on_bulk_success(res, 'networks')
def test_create_networks_bulk_native_quotas(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
quota = 4
cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
res = self._create_network_bulk(self.fmt, quota + 1, 'test', True)
self._validate_behavior_on_bulk_failure(
res, 'networks',
errcode=webob.exc.HTTPConflict.code)
def test_create_networks_bulk_tenants_and_quotas(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
quota = 2
cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
networks = [{'network': {'name': 'n1',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n2',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n1',
'tenant_id': 't1'}},
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
res = self._create_bulk_from_list(self.fmt, 'network', networks)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_networks_bulk_tenants_and_quotas_fail(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
quota = 2
cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
networks = [{'network': {'name': 'n1',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n2',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n1',
'tenant_id': 't1'}},
{'network': {'name': 'n3',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
res = self._create_bulk_from_list(self.fmt, 'network', networks)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_networks_bulk_emulated(self):
real_has_attr = hasattr
        # Ensure the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
res = self._create_network_bulk(self.fmt, 2, 'test', True)
self._validate_behavior_on_bulk_success(res, 'networks')
def test_create_networks_bulk_wrong_input(self):
res = self._create_network_bulk(self.fmt, 2, 'test', True,
override={1:
{'admin_state_up': 'doh'}})
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
req = self.new_list_request('networks')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
nets = self.deserialize(self.fmt, res)
self.assertEqual(len(nets['networks']), 0)
def test_create_networks_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
orig = NeutronManager.get_plugin().create_network
        # Ensure the API chooses the emulation code path
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with mock.patch.object(NeutronManager.get_plugin(),
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'networks', webob.exc.HTTPServerError.code
)
def test_create_networks_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
orig = NeutronManager.get_plugin().create_network
with mock.patch.object(NeutronManager.get_plugin(),
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'networks', webob.exc.HTTPServerError.code
)
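
    # Listing tests likewise come in native and emulated variants: the native
    # sorting/pagination tests are skipped via _skip_native_sorting /
    # _skip_native_pagination, while the emulated variants patch the v2 API
    # controller's _get_sorting_helper / _get_pagination_helper with the
    # fake helpers.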
def test_list_networks(self):
with contextlib.nested(self.network(),
self.network(),
self.network()) as networks:
self._test_list_resources('network', networks)
def test_list_networks_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
        with contextlib.nested(self.network(admin_state_up=True,
                                            name='net1'),
                               self.network(admin_state_up=False,
                                            name='net2'),
                               self.network(admin_state_up=False,
                                            name='net3')
) as (net1, net2, net3):
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
def test_list_networks_with_sort_extended_attr_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
        with contextlib.nested(self.network(admin_state_up=True,
                                            name='net1'),
                               self.network(admin_state_up=False,
                                            name='net2'),
                               self.network(admin_state_up=False,
                                            name='net3')
):
req = self.new_list_request(
'networks',
params='sort_key=provider:segmentation_id&sort_dir=asc')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_networks_with_sort_remote_key_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
        with contextlib.nested(self.network(admin_state_up=True,
                                            name='net1'),
                               self.network(admin_state_up=False,
                                            name='net2'),
                               self.network(admin_state_up=False,
                                            name='net3')
):
req = self.new_list_request(
'networks', params='sort_key=subnets&sort_dir=asc')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_networks_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
try:
            with contextlib.nested(self.network(admin_state_up=True,
                                                name='net1'),
                                   self.network(admin_state_up=False,
                                                name='net2'),
                                   self.network(admin_state_up=False,
                                                name='net3')
) as (net1, net2, net3):
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
finally:
helper_patcher.stop()
def test_list_networks_with_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with contextlib.nested(self.network(name='net1'),
self.network(name='net2'),
self.network(name='net3')
) as (net1, net2, net3):
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
def test_list_networks_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
try:
with contextlib.nested(self.network(name='net1'),
self.network(name='net2'),
self.network(name='net3')
) as (net1, net2, net3):
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
finally:
helper_patcher.stop()
def test_list_networks_without_pk_in_fields_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
try:
with contextlib.nested(self.network(name='net1',
shared=True),
self.network(name='net2',
shared=False),
self.network(name='net3',
shared=True)
) as (net1, net2, net3):
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2,
query_params="fields=name",
verify_key='name')
finally:
helper_patcher.stop()
def test_list_networks_without_pk_in_fields_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with contextlib.nested(self.network(name='net1'),
self.network(name='net2'),
self.network(name='net3')
) as (net1, net2, net3):
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2,
query_params="fields=shared",
verify_key='shared')
def test_list_networks_with_pagination_reverse_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with contextlib.nested(self.network(name='net1'),
self.network(name='net2'),
self.network(name='net3')
) as (net1, net2, net3):
self._test_list_with_pagination_reverse('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
def test_list_networks_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
try:
with contextlib.nested(self.network(name='net1'),
self.network(name='net2'),
self.network(name='net3')
) as (net1, net2, net3):
self._test_list_with_pagination_reverse('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
finally:
helper_patcher.stop()
def test_list_networks_with_parameters(self):
with contextlib.nested(self.network(name='net1',
admin_state_up=False),
self.network(name='net2')) as (net1, net2):
query_params = 'admin_state_up=False'
self._test_list_resources('network', [net1],
query_params=query_params)
query_params = 'admin_state_up=True'
self._test_list_resources('network', [net2],
query_params=query_params)
def test_list_networks_with_fields(self):
with self.network(name='net1') as net1:
req = self.new_list_request('networks',
params='fields=name')
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(1, len(res['networks']))
self.assertEqual(res['networks'][0]['name'],
net1['network']['name'])
self.assertIsNone(res['networks'][0].get('id'))
def test_list_networks_with_parameters_invalid_values(self):
with contextlib.nested(self.network(name='net1',
admin_state_up=False),
self.network(name='net2')) as (net1, net2):
req = self.new_list_request('networks',
params='admin_state_up=fake')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_shared_networks_with_non_admin_user(self):
with contextlib.nested(self.network(shared=False,
name='net1',
tenant_id='tenant1'),
self.network(shared=True,
name='net2',
tenant_id='another_tenant'),
self.network(shared=False,
name='net3',
tenant_id='another_tenant')
) as (net1, net2, net3):
ctx = context.Context(user_id='non_admin',
tenant_id='tenant1',
is_admin=False)
self._test_list_resources('network', (net1, net2), ctx)
def test_show_network(self):
with self.network(name='net1') as net:
req = self.new_show_request('networks', net['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['network']['name'],
net['network']['name'])
def test_show_network_with_subnet(self):
with self.network(name='net1') as net:
with self.subnet(net) as subnet:
req = self.new_show_request('networks', net['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['network']['subnets'][0],
subnet['subnet']['id'])
def test_invalid_admin_status(self):
value = [[7, False, webob.exc.HTTPClientError.code],
[True, True, webob.exc.HTTPCreated.code],
["True", True, webob.exc.HTTPCreated.code],
["true", True, webob.exc.HTTPCreated.code],
[1, True, webob.exc.HTTPCreated.code],
["False", False, webob.exc.HTTPCreated.code],
[False, False, webob.exc.HTTPCreated.code],
["false", False, webob.exc.HTTPCreated.code],
["7", False, webob.exc.HTTPClientError.code]]
for v in value:
data = {'network': {'name': 'net',
'admin_state_up': v[0],
'tenant_id': self._tenant_id}}
network_req = self.new_create_request('networks', data)
req = network_req.get_response(self.api)
self.assertEqual(req.status_int, v[2])
if v[2] == webob.exc.HTTPCreated.code:
res = self.deserialize(self.fmt, req)
self.assertEqual(res['network']['admin_state_up'], v[1])
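

# TestSubnetsV2 covers subnet CRUD behaviour: CIDR, gateway and allocation
# pool validation on create, IPv4/IPv6 consistency checks, update semantics
# (gateway, DNS nameservers, host routes), and list operations with sorting
# and pagination.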
class TestSubnetsV2(NeutronDbPluginV2TestCase):
def _test_create_subnet(self, network=None, expected=None, **kwargs):
keys = kwargs.copy()
keys.setdefault('cidr', '10.0.0.0/24')
keys.setdefault('ip_version', 4)
keys.setdefault('enable_dhcp', True)
with self.subnet(network=network, **keys) as subnet:
# verify the response has each key with the correct value
for k in keys:
self.assertIn(k, subnet['subnet'])
if isinstance(keys[k], list):
self.assertEqual(sorted(subnet['subnet'][k]),
sorted(keys[k]))
else:
self.assertEqual(subnet['subnet'][k], keys[k])
            # Verify the explicitly expected values, which may differ from
            # the requested ones (e.g. server-generated defaults)
if expected:
for k in expected:
self.assertIn(k, subnet['subnet'])
if isinstance(expected[k], list):
self.assertEqual(sorted(subnet['subnet'][k]),
sorted(expected[k]))
else:
self.assertEqual(subnet['subnet'][k], expected[k])
return subnet
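
    # _test_create_subnet() is the workhorse for most create tests below: it
    # builds a subnet from the given kwargs (defaulting to 10.0.0.0/24, IPv4,
    # DHCP enabled), asserts that every requested key is echoed back in the
    # response, and optionally checks an 'expected' dict for values the
    # server is supposed to generate, e.g.:
    #     self._test_create_subnet(expected={'gateway_ip': '10.0.0.1'},
    #                              cidr='10.0.0.0/24')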
def test_create_subnet(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
subnet = self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr)
self.assertEqual(4, subnet['subnet']['ip_version'])
self.assertIn('name', subnet['subnet'])
def test_create_two_subnets(self):
gateway_ips = ['10.0.0.1', '10.0.1.1']
cidrs = ['10.0.0.0/24', '10.0.1.0/24']
with self.network() as network:
with self.subnet(network=network,
gateway_ip=gateway_ips[0],
cidr=cidrs[0]):
with self.subnet(network=network,
gateway_ip=gateway_ips[1],
cidr=cidrs[1]):
net_req = self.new_show_request('networks',
network['network']['id'])
raw_res = net_req.get_response(self.api)
net_res = self.deserialize(self.fmt, raw_res)
for subnet_id in net_res['network']['subnets']:
sub_req = self.new_show_request('subnets', subnet_id)
raw_res = sub_req.get_response(self.api)
sub_res = self.deserialize(self.fmt, raw_res)
self.assertIn(sub_res['subnet']['cidr'], cidrs)
self.assertIn(sub_res['subnet']['gateway_ip'],
gateway_ips)
def test_create_two_subnets_same_cidr_returns_400(self):
gateway_ip_1 = '10.0.0.1'
cidr_1 = '10.0.0.0/24'
gateway_ip_2 = '10.0.0.10'
cidr_2 = '10.0.0.0/24'
with self.network() as network:
with self.subnet(network=network,
gateway_ip=gateway_ip_1,
cidr=cidr_1):
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
with self.subnet(network=network,
gateway_ip=gateway_ip_2,
cidr=cidr_2):
pass
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_V4_cidr(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0',
'ip_version': '4',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_V6_cidr(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::',
'ip_version': '6',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': 'fe80::1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self):
cidr_1 = '10.0.0.0/23'
cidr_2 = '10.0.0.0/24'
cfg.CONF.set_override('allow_overlapping_ips', True)
with contextlib.nested(self.subnet(cidr=cidr_1),
self.subnet(cidr=cidr_2)):
pass
def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self):
cidr_1 = '10.0.0.0/23'
cidr_2 = '10.0.0.0/24'
cfg.CONF.set_override('allow_overlapping_ips', False)
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
with contextlib.nested(self.subnet(cidr=cidr_1),
self.subnet(cidr=cidr_2)):
pass
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnets_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'],
'test')
self._validate_behavior_on_bulk_success(res, 'subnets')
def test_create_subnets_bulk_emulated(self):
real_has_attr = hasattr
        # Ensure the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
self._validate_behavior_on_bulk_success(res, 'subnets')
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
        # Ensure the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
orig = NeutronManager.get_plugin().create_subnet
with mock.patch.object(NeutronManager.get_plugin(),
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'subnets', webob.exc.HTTPServerError.code
)
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
orig = NeutronManager._instance.plugin.create_subnet
with mock.patch.object(NeutronManager._instance.plugin,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'subnets', webob.exc.HTTPServerError.code
)
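
    # Delete tests: a plain subnet delete succeeds, a subnet whose only ports
    # are network-owned (DHCP) can still be deleted, while a subnet with a
    # tenant-owned port returns 409 SubnetInUse.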
def test_delete_subnet(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_port_exists_owned_by_network(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
self._create_port(self.fmt,
network['network']['id'],
device_owner=constants.DEVICE_OWNER_DHCP)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_port_exists_owned_by_other(self):
with self.subnet() as subnet:
with self.port(subnet=subnet):
id = subnet['subnet']['id']
req = self.new_delete_request('subnets', id)
res = req.get_response(self.api)
data = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
msg = str(n_exc.SubnetInUse(subnet_id=id))
self.assertEqual(data['NeutronError']['message'], msg)
def test_delete_network(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=4)
req = self.new_delete_request('networks', network['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_create_subnet_bad_tenant(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.2.0/24',
webob.exc.HTTPNotFound.code,
ip_version=4,
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
device_owner='fake_owner',
set_context=True)
def test_create_subnet_as_admin(self):
with self.network(do_delete=False) as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.2.0/24',
webob.exc.HTTPCreated.code,
ip_version=4,
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
device_owner='fake_owner',
set_context=False)
def test_create_subnet_bad_cidr(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.5/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_subnet_bad_ip_version(self):
with self.network() as network:
# Check bad IP version
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 'abc',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_ip_version_null(self):
with self.network() as network:
# Check bad IP version
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': None,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_uuid(self):
with self.network() as network:
# Check invalid UUID
data = {'subnet': {'network_id': None,
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_boolean(self):
with self.network() as network:
# Check invalid boolean
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'enable_dhcp': None,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_pools(self):
with self.network() as network:
            # Check invalid allocation pool definitions
allocation_pools = [[{'end': '10.0.0.254'}],
[{'start': '10.0.0.254'}],
[{'start': '1000.0.0.254'}],
[{'start': '10.0.0.2', 'end': '10.0.0.254'},
{'end': '10.0.0.254'}],
None,
[{'start': '10.0.0.2', 'end': '10.0.0.3'},
{'start': '10.0.0.2', 'end': '10.0.0.3'}]]
tenant_id = network['network']['tenant_id']
for pool in allocation_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'allocation_pools': pool}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_nameserver(self):
with self.network() as network:
            # Check invalid and duplicate nameservers
nameserver_pools = [['1100.0.0.2'],
['1.1.1.2', '1.1000.1.3'],
['1.1.1.2', '1.1.1.2']]
tenant_id = network['network']['tenant_id']
for nameservers in nameserver_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'dns_nameservers': nameservers}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_hostroutes(self):
with self.network() as network:
            # Check incomplete and duplicate host routes
hostroute_pools = [[{'destination': '100.0.0.0/24'}],
[{'nexthop': '10.0.2.20'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'},
{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'}]]
tenant_id = network['network']['tenant_id']
for hostroutes in hostroute_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'host_routes': hostroutes}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_defaults(self):
gateway = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
enable_dhcp = True
subnet = self._test_create_subnet()
# verify cidr & gw have been correctly generated
self.assertEqual(subnet['subnet']['cidr'], cidr)
self.assertEqual(subnet['subnet']['gateway_ip'], gateway)
self.assertEqual(subnet['subnet']['enable_dhcp'], enable_dhcp)
self.assertEqual(subnet['subnet']['allocation_pools'],
allocation_pools)
def test_create_subnet_gw_values(self):
# Gateway not in subnet
gateway = '100.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.254'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway)
# Gateway is last IP in range
gateway = '10.0.0.254'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.253'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway)
# Gateway is first in subnet
gateway = '10.0.0.1'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected,
gateway_ip=gateway)
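
    # The three *_force_on_returns_400 tests below enable the
    # force_gateway_on_subnet option and verify that a gateway outside the
    # CIDR, on the network address, or on the broadcast address is rejected.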
def test_create_subnet_gw_outside_cidr_force_on_returns_400(self):
cfg.CONF.set_override('force_gateway_on_subnet', True)
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='100.0.0.1')
def test_create_subnet_gw_of_network_force_on_returns_400(self):
cfg.CONF.set_override('force_gateway_on_subnet', True)
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='10.0.0.0')
def test_create_subnet_gw_bcast_force_on_returns_400(self):
cfg.CONF.set_override('force_gateway_on_subnet', True)
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='10.0.0.255')
def test_create_subnet_with_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_none_gateway(self):
cidr = '10.0.0.0/24'
self._test_create_subnet(gateway_ip=None,
cidr=cidr)
def test_create_subnet_with_none_gateway_fully_allocated(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.254'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
def test_subnet_with_allocation_range(self):
with self.network() as network:
net_id = network['network']['id']
data = {'subnet': {'network_id': net_id,
'cidr': '10.0.0.0/24',
'ip_version': 4,
'gateway_ip': '10.0.0.1',
'tenant_id': network['network']['tenant_id'],
'allocation_pools': [{'start': '10.0.0.100',
'end': '10.0.0.120'}]}}
subnet_req = self.new_create_request('subnets', data)
subnet = self.deserialize(self.fmt,
subnet_req.get_response(self.api))
            # A fixed IP outside the allocation pool may still be requested
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.10'}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
port = self.deserialize(self.fmt, res)
# delete the port
self._delete('ports', port['port']['id'])
            # The gateway IP may also be requested as a fixed IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.1'}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
port = self.deserialize(self.fmt, res)
# delete the port
self._delete('ports', port['port']['id'])
def test_create_subnet_with_none_gateway_allocation_pool(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_v6_allocation_pool(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/80'
allocation_pools = [{'start': 'fe80::2',
'end': 'fe80::ffff:fffa:ffff'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
allocation_pools=allocation_pools)
def test_create_subnet_with_large_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/8'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.1.0.0',
'end': '10.200.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_multiple_allocation_pools(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.0.0.110',
'end': '10.0.0.150'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_dhcp_disabled(self):
enable_dhcp = False
self._test_create_subnet(enable_dhcp=enable_dhcp)
def test_create_subnet_default_gw_conflict_allocation_pool_returns_409(
self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.5'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_gateway_in_allocation_pool_returns_409(self):
gateway_ip = '10.0.0.50'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.100'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_overlapping_allocation_pools_returns_409(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.150'},
{'start': '10.0.0.140',
'end': '10.0.0.180'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_invalid_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.256'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_out_of_range_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.1.6'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_shared_returns_400(self):
cidr = '10.0.0.0/24'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr,
shared=True)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_cidrv4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 6,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_cidrv6(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_gatewayv6(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'gateway_ip': 'fe80::1',
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_gatewayv4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 6,
'gateway_ip': '192.168.0.1',
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_dns_v4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 6,
'dns_nameservers': ['192.168.0.1'],
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self):
host_routes = [{'destination': 'fe80::0/48',
'nexthop': '10.0.2.20'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': 'fe80::1'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
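
    # Update tests: gateway changes (including clearing the gateway and the
    # 409 conflict when the current gateway IP is in use by a port), adding
    # DNS nameservers and host routes, and rejecting values whose address
    # family does not match the subnet's ip_version.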
def test_update_subnet_no_gateway(self):
with self.subnet() as subnet:
data = {'subnet': {'gateway_ip': '11.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'],
data['subnet']['gateway_ip'])
data = {'subnet': {'gateway_ip': None}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertIsNone(res['subnet']['gateway_ip'])
def test_update_subnet(self):
with self.subnet() as subnet:
data = {'subnet': {'gateway_ip': '11.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'],
data['subnet']['gateway_ip'])
def test_update_subnet_adding_additional_host_routes_and_dns(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'dns_nameservers': ['192.168.0.1'],
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, subnet_req.get_response(self.api))
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'},
{'destination': '192.168.0.0/24',
'nexthop': '10.0.2.3'}]
dns_nameservers = ['192.168.0.1', '192.168.0.2']
data = {'subnet': {'host_routes': host_routes,
'dns_nameservers': dns_nameservers}}
req = self.new_update_request('subnets', data,
res['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(host_routes))
self.assertEqual(sorted(res['subnet']['dns_nameservers']),
sorted(dns_nameservers))
def test_update_subnet_shared_returns_400(self):
with self.network(shared=True) as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'shared': True}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gw_outside_cidr_force_on_returns_400(self):
cfg.CONF.set_override('force_gateway_on_subnet', True)
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'gateway_ip': '100.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gw_ip_in_use_returns_409(self):
with self.network() as network:
with self.subnet(
network=network,
allocation_pools=[{'start': '10.0.0.100',
'end': '10.0.0.253'}]) as subnet:
subnet_data = subnet['subnet']
with self.port(
subnet=subnet,
fixed_ips=[{'subnet_id': subnet_data['id'],
'ip_address': subnet_data['gateway_ip']}]):
data = {'subnet': {'gateway_ip': '10.0.0.99'}}
req = self.new_update_request('subnets', data,
subnet_data['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
def test_update_subnet_inconsistent_ipv4_gatewayv6(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'gateway_ip': 'fe80::1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'gateway_ip': '10.1.1.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv4_dns_v6(self):
dns_nameservers = ['fe80::1']
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'dns_nameservers': dns_nameservers}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
host_routes = [{'destination': 'fe80::0/48',
'nexthop': '10.0.2.20'}]
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'host_routes': host_routes}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': 'fe80::1'}]
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'host_routes': host_routes}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gateway_in_allocation_pool_returns_409(self):
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
cidr='10.0.0.0/24') as subnet:
data = {'subnet': {'gateway_ip': '10.0.0.50'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPConflict.code)
def test_show_subnet(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
req = self.new_show_request('subnets',
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['id'],
subnet['subnet']['id'])
self.assertEqual(res['subnet']['network_id'],
network['network']['id'])
def test_list_subnets(self):
with self.network() as network:
with contextlib.nested(self.subnet(network=network,
gateway_ip='10.0.0.1',
cidr='10.0.0.0/24'),
self.subnet(network=network,
gateway_ip='10.0.1.1',
cidr='10.0.1.0/24'),
self.subnet(network=network,
gateway_ip='10.0.2.1',
cidr='10.0.2.0/24')) as subnets:
self._test_list_resources('subnet', subnets)
def test_list_subnets_shared(self):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
# normal user should see only 1 subnet
req = self.new_list_request('subnets')
req.environ['neutron.context'] = context.Context(
'', 'some_tenant')
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(len(res['subnets']), 1)
self.assertEqual(res['subnets'][0]['cidr'],
subnet['subnet']['cidr'])
# admin will see both subnets
admin_req = self.new_list_request('subnets')
admin_res = self.deserialize(
self.fmt, admin_req.get_response(self.api))
self.assertEqual(len(admin_res['subnets']), 2)
cidrs = [sub['cidr'] for sub in admin_res['subnets']]
self.assertIn(subnet['subnet']['cidr'], cidrs)
self.assertIn(priv_subnet['subnet']['cidr'], cidrs)
def test_list_subnets_with_parameter(self):
with self.network() as network:
with contextlib.nested(self.subnet(network=network,
gateway_ip='10.0.0.1',
cidr='10.0.0.0/24'),
self.subnet(network=network,
gateway_ip='10.0.1.1',
cidr='10.0.1.0/24')
) as subnets:
query_params = 'ip_version=4&ip_version=6'
self._test_list_resources('subnet', subnets,
query_params=query_params)
query_params = 'ip_version=6'
self._test_list_resources('subnet', [],
query_params=query_params)
def test_list_subnets_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with contextlib.nested(self.subnet(enable_dhcp=True,
cidr='10.0.0.0/24'),
self.subnet(enable_dhcp=False,
cidr='11.0.0.0/24'),
self.subnet(enable_dhcp=False,
cidr='12.0.0.0/24')
) as (subnet1, subnet2, subnet3):
self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
[('enable_dhcp', 'asc'),
('cidr', 'desc')])
def test_list_subnets_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
try:
with contextlib.nested(self.subnet(enable_dhcp=True,
cidr='10.0.0.0/24'),
self.subnet(enable_dhcp=False,
cidr='11.0.0.0/24'),
self.subnet(enable_dhcp=False,
cidr='12.0.0.0/24')
) as (subnet1, subnet2, subnet3):
self._test_list_with_sort('subnet', (subnet3,
subnet2,
subnet1),
[('enable_dhcp', 'asc'),
('cidr', 'desc')])
finally:
helper_patcher.stop()
def test_list_subnets_with_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented sorting feature")
with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
self.subnet(cidr='11.0.0.0/24'),
self.subnet(cidr='12.0.0.0/24')
) as (subnet1, subnet2, subnet3):
self._test_list_with_pagination('subnet',
(subnet1, subnet2, subnet3),
('cidr', 'asc'), 2, 2)
def test_list_subnets_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
try:
with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
self.subnet(cidr='11.0.0.0/24'),
self.subnet(cidr='12.0.0.0/24')
) as (subnet1, subnet2, subnet3):
self._test_list_with_pagination('subnet',
(subnet1, subnet2, subnet3),
('cidr', 'asc'), 2, 2)
finally:
helper_patcher.stop()
def test_list_subnets_with_pagination_reverse_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
self.subnet(cidr='11.0.0.0/24'),
self.subnet(cidr='12.0.0.0/24')
) as (subnet1, subnet2, subnet3):
self._test_list_with_pagination_reverse('subnet',
(subnet1, subnet2,
subnet3),
('cidr', 'asc'), 2, 2)
def test_list_subnets_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
try:
with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
self.subnet(cidr='11.0.0.0/24'),
self.subnet(cidr='12.0.0.0/24')
) as (subnet1, subnet2, subnet3):
self._test_list_with_pagination_reverse('subnet',
(subnet1, subnet2,
subnet3),
('cidr', 'asc'), 2, 2)
finally:
helper_patcher.stop()
def test_invalid_ip_version(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 7,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_subnet(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'invalid',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_ip_address(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': 'ipaddress'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_uuid(self):
with self.network() as network:
data = {'subnet': {'network_id': 'invalid-uuid',
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_with_one_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['1.2.3.4']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
def test_create_subnet_with_two_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['1.2.3.4', '4.3.2.1']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
def test_create_subnet_with_too_many_dns(self):
with self.network() as network:
dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1',
'dns_nameservers': dns_list}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_with_one_host_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
host_routes=host_routes)
def test_create_subnet_with_two_host_routes(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'},
{'destination': '12.0.0.0/8',
'nexthop': '4.3.2.1'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
host_routes=host_routes)
def test_create_subnet_with_too_many_routes(self):
with self.network() as network:
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'},
{'destination': '12.0.0.0/8',
'nexthop': '4.3.2.1'},
{'destination': '141.212.0.0/16',
'nexthop': '2.2.2.2'}]
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1',
'host_routes': host_routes}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_update_subnet_dns(self):
with self.subnet() as subnet:
data = {'subnet': {'dns_nameservers': ['11.0.0.1']}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['dns_nameservers'],
data['subnet']['dns_nameservers'])
def test_update_subnet_dns_to_None(self):
with self.subnet(dns_nameservers=['11.0.0.1']) as subnet:
data = {'subnet': {'dns_nameservers': None}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual([], res['subnet']['dns_nameservers'])
data = {'subnet': {'dns_nameservers': ['11.0.0.3']}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(data['subnet']['dns_nameservers'],
res['subnet']['dns_nameservers'])
def test_update_subnet_dns_with_too_many_entries(self):
with self.subnet() as subnet:
dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
data = {'subnet': {'dns_nameservers': dns_list}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_update_subnet_route(self):
with self.subnet() as subnet:
data = {'subnet': {'host_routes':
[{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['host_routes'],
data['subnet']['host_routes'])
def test_update_subnet_route_to_None(self):
with self.subnet(host_routes=[{'destination': '12.0.0.0/8',
'nexthop': '1.2.3.4'}]) as subnet:
data = {'subnet': {'host_routes': None}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual([], res['subnet']['host_routes'])
data = {'subnet': {'host_routes': [{'destination': '12.0.0.0/8',
'nexthop': '1.2.3.4'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(data['subnet']['host_routes'],
res['subnet']['host_routes'])
def test_update_subnet_route_with_too_many_entries(self):
with self.subnet() as subnet:
data = {'subnet': {'host_routes': [
{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'},
{'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'},
{'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_delete_subnet_with_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
dns_nameservers = ['1.2.3.4']
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
dns_nameservers=dns_nameservers)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
host_routes=host_routes)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_dns_and_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
dns_nameservers = ['1.2.3.4']
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
dns_nameservers=dns_nameservers,
host_routes=host_routes)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def _helper_test_validate_subnet(self, option, exception):
cfg.CONF.set_override(option, 0)
with self.network() as network:
subnet = {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.8.8'],
'host_routes': [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]}
plugin = NeutronManager.get_plugin()
e = self.assertRaises(exception,
plugin._validate_subnet,
context.get_admin_context(
load_admin_roles=False),
subnet)
self.assertThat(
str(e),
matchers.Not(matchers.Contains('built-in function id')))
def test_validate_subnet_dns_nameservers_exhausted(self):
self._helper_test_validate_subnet(
'max_dns_nameservers',
n_exc.DNSNameServersExhausted)
def test_validate_subnet_host_routes_exhausted(self):
self._helper_test_validate_subnet(
'max_subnet_host_routes',
n_exc.HostRoutesExhausted)
class DbModelTestCase(base.BaseTestCase):
"""DB model tests."""
def test_repr(self):
"""testing the string representation of 'model' classes."""
network = models_v2.Network(name="net_net", status="OK",
admin_state_up=True)
actual_repr_output = repr(network)
exp_start_with = "<neutron.db.models_v2.Network"
exp_middle = "[object at %x]" % id(network)
exp_end_with = (" {tenant_id=None, id=None, "
"name='net_net', status='OK', "
"admin_state_up=True, shared=None}>")
final_exp = exp_start_with + exp_middle + exp_end_with
self.assertEqual(actual_repr_output, final_exp)
class TestNeutronDbPluginV2(base.BaseTestCase):
"""Unit Tests for NeutronDbPluginV2 IPAM Logic."""
def test_generate_ip(self):
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
'_try_generate_ip') as generate:
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
'_rebuild_availability_ranges') as rebuild:
db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's')
generate.assert_called_once_with('c', 's')
self.assertEqual(0, rebuild.call_count)
def test_generate_ip_exhausted_pool(self):
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
'_try_generate_ip') as generate:
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
'_rebuild_availability_ranges') as rebuild:
exception = n_exc.IpAddressGenerationFailure(net_id='n')
generate.side_effect = exception
# I want the side_effect to throw an exception once but I
# didn't see a way to do this. So, let it throw twice and
# catch the second one. Check below to ensure that
# _try_generate_ip was called twice.
try:
db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's')
except n_exc.IpAddressGenerationFailure:
pass
self.assertEqual(2, generate.call_count)
rebuild.assert_called_once_with('c', 's')
def test_rebuild_availability_ranges(self):
pools = [{'id': 'a',
'first_ip': '192.168.1.3',
'last_ip': '192.168.1.10'},
{'id': 'b',
'first_ip': '192.168.1.100',
'last_ip': '192.168.1.120'}]
allocations = [{'ip_address': '192.168.1.3'},
{'ip_address': '192.168.1.78'},
{'ip_address': '192.168.1.7'},
{'ip_address': '192.168.1.110'},
{'ip_address': '192.168.1.11'},
{'ip_address': '192.168.1.4'},
{'ip_address': '192.168.1.111'}]
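        # With these allocations, the free space left in pool 'a' (.3-.10) is
        # .5-.6 and .8-.10, and in pool 'b' (.100-.120) it is .100-.109 and
        # .112-.120, which is what the rebuilt ranges asserted below contain.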
ip_qry = mock.Mock()
ip_qry.with_lockmode.return_value = ip_qry
ip_qry.filter_by.return_value = allocations
pool_qry = mock.Mock()
pool_qry.options.return_value = pool_qry
pool_qry.with_lockmode.return_value = pool_qry
pool_qry.filter_by.return_value = pools
def return_queries_side_effect(*args, **kwargs):
if args[0] == models_v2.IPAllocation:
return ip_qry
if args[0] == models_v2.IPAllocationPool:
return pool_qry
context = mock.Mock()
context.session.query.side_effect = return_queries_side_effect
subnets = [mock.MagicMock()]
db_base_plugin_v2.NeutronDbPluginV2._rebuild_availability_ranges(
context, subnets)
actual = [[args[0].allocation_pool_id,
args[0].first_ip, args[0].last_ip]
for _name, args, _kwargs in context.session.add.mock_calls]
self.assertEqual([['a', '192.168.1.5', '192.168.1.6'],
['a', '192.168.1.8', '192.168.1.10'],
['b', '192.168.1.100', '192.168.1.109'],
['b', '192.168.1.112', '192.168.1.120']], actual)
class NeutronDbPluginV2AsMixinTestCase(base.BaseTestCase):
"""Tests for NeutronDbPluginV2 as Mixin.
While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as
a complete plugin, this test case verifies abilities of NeutronDbPlugin
which are provided to other plugins (e.g. DB operations). This test case
may include tests only for NeutronDbPlugin, so this should not be used in
unit tests for other plugins.
"""
def setUp(self):
super(NeutronDbPluginV2AsMixinTestCase, self).setUp()
self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
self.context = context.get_admin_context()
self.net_data = {'network': {'id': 'fake-id',
'name': 'net1',
'admin_state_up': True,
'tenant_id': 'test-tenant',
'shared': False}}
self.addCleanup(db.clear_db)
def test_create_network_with_default_status(self):
net = self.plugin.create_network(self.context, self.net_data)
default_net_create_status = 'ACTIVE'
expected = [('id', 'fake-id'), ('name', 'net1'),
('admin_state_up', True), ('tenant_id', 'test-tenant'),
('shared', False), ('status', default_net_create_status)]
for k, v in expected:
self.assertEqual(net[k], v)
def test_create_network_with_status_BUILD(self):
self.net_data['network']['status'] = 'BUILD'
net = self.plugin.create_network(self.context, self.net_data)
self.assertEqual(net['status'], 'BUILD')
class TestBasicGetXML(TestBasicGet):
fmt = 'xml'
class TestNetworksV2XML(TestNetworksV2):
fmt = 'xml'
class TestPortsV2XML(TestPortsV2):
fmt = 'xml'
class TestSubnetsV2XML(TestSubnetsV2):
fmt = 'xml'
class TestV2HTTPResponseXML(TestV2HTTPResponse):
fmt = 'xml'
| zhhf/charging | charging/tests/unit/test_db_plugin.py | Python | apache-2.0 | 180,785 |
from django.db import models
class ProductionDatasetsExec(models.Model):
name = models.CharField(max_length=200, db_column='NAME', primary_key=True)
taskid = models.DecimalField(decimal_places=0, max_digits=10, db_column='TASK_ID', null=False, default=0)
status = models.CharField(max_length=12, db_column='STATUS', null=True)
phys_group = models.CharField(max_length=20, db_column='PHYS_GROUP', null=True)
events = models.DecimalField(decimal_places=0, max_digits=7, db_column='EVENTS', null=False, default=0)
class Meta:
app_label = "grisli"
managed = False
db_table = 'T_PRODUCTIONDATASETS_EXEC'
class TaskProdSys1(models.Model):
taskid = models.DecimalField(decimal_places=0, max_digits=10, db_column='REQID', primary_key=True)
total_events = models.DecimalField(decimal_places=0, max_digits=10, db_column='TOTAL_EVENTS')
task_name = models.CharField(max_length=130, db_column='TASKNAME')
status = models.CharField(max_length=12, db_column='STATUS')
class Meta:
app_label = "grisli"
managed = False
db_table = 'T_TASK_REQUEST'
class TRequest(models.Model):
request = models.CharField(max_length=200, db_column='REQUEST', null=True)
| PanDAWMS/panda-bigmon-atlas | atlas/getdatasets/models.py | Python | apache-2.0 | 1,240 |
#!/usr/bin/python
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# End Copyright
# An example action endpoint for rules with language="POST". This
# example is NOT an action executor. Instead, it's just an endpoint
# in the role of any external system that deals directly with JSON
# bodies.
# curl -d '{"likes":"tacos"}' http://localhost:6667/
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
# import json
PORT = 6667
def protest (response, message):
response.send_response(200)
response.send_header('Content-type','application/json')
response.end_headers()
response.wfile.write(message)
class handler(BaseHTTPRequestHandler):
def do_GET(self):
protest(self, "You should POST with json.\n")
return
def do_POST(self):
try:
content_len = int(self.headers.getheader('content-length'))
body = self.rfile.read(content_len)
print 'body ', body
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
response = '{"Got":%s}' % (body)
self.wfile.write(response)
except Exception as broke:
protest(self, str(broke))
try:
server = HTTPServer(('', PORT), handler)
print 'Started example action endpoint on port ' , PORT
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down example action endpoint on ', PORT
server.socket.close()
| Comcast/rulio | examples/actionendpoint.py | Python | apache-2.0 | 2,070 |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX penguin template pipeline definition.
This file defines the TFX pipeline and the various components in the pipeline.
"""
from typing import List, Optional
import tensorflow_model_analysis as tfma
from tfx import v1 as tfx
from tfx.experimental.templates.penguin.models import features
from ml_metadata.proto import metadata_store_pb2
def create_pipeline(
pipeline_name: str,
pipeline_root: str,
data_path: str,
preprocessing_fn: str,
run_fn: str,
train_args: tfx.proto.TrainArgs,
eval_args: tfx.proto.EvalArgs,
eval_accuracy_threshold: float,
serving_model_dir: str,
schema_path: Optional[str] = None,
metadata_connection_config: Optional[
metadata_store_pb2.ConnectionConfig] = None,
beam_pipeline_args: Optional[List[str]] = None,
) -> tfx.dsl.Pipeline:
"""Implements the penguin pipeline with TFX."""
components = []
# Brings data into the pipeline or otherwise joins/converts training data.
# TODO(step 2): Might use another ExampleGen class for your data.
example_gen = tfx.components.CsvExampleGen(input_base=data_path)
components.append(example_gen)
# Computes statistics over data for visualization and example validation.
statistics_gen = tfx.components.StatisticsGen(
examples=example_gen.outputs['examples'])
components.append(statistics_gen)
if schema_path is None:
# Generates schema based on statistics files.
schema_gen = tfx.components.SchemaGen(
statistics=statistics_gen.outputs['statistics'])
components.append(schema_gen)
else:
# Import user provided schema into the pipeline.
schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
components.append(schema_gen)
# Performs anomaly detection based on statistics and data schema.
example_validator = tfx.components.ExampleValidator( # pylint: disable=unused-variable
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
components.append(example_validator)
# Performs transformations and feature engineering in training and serving.
transform = tfx.components.Transform( # pylint: disable=unused-variable
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
preprocessing_fn=preprocessing_fn)
# TODO(step 3): Uncomment here to add Transform to the pipeline.
# components.append(transform)
  # Uses a user-provided Python function that implements a model using TensorFlow.
trainer = tfx.components.Trainer(
run_fn=run_fn,
examples=example_gen.outputs['examples'],
# Use outputs of Transform as training inputs if Transform is used.
# examples=transform.outputs['transformed_examples'],
# transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=train_args,
eval_args=eval_args)
# TODO(step 4): Uncomment here to add Trainer to the pipeline.
# components.append(trainer)
# Get the latest blessed model for model validation.
model_resolver = tfx.dsl.Resolver(
strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
model_blessing=tfx.dsl.Channel(
type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
'latest_blessed_model_resolver')
# TODO(step 5): Uncomment here to add Resolver to the pipeline.
# components.append(model_resolver)
  # Uses TFMA to compute evaluation statistics over features of a model and to
  # perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(
signature_name='serving_default',
label_key=features.LABEL_KEY,
# Use transformed label key if Transform is used.
# label_key=features.transformed_name(features.LABEL_KEY),
preprocessing_function_names=['transform_features'])
],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
class_name='SparseCategoricalAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': eval_accuracy_threshold}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10})))
])
])
evaluator = tfx.components.Evaluator( # pylint: disable=unused-variable
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
# Change threshold will be ignored if there is no baseline (first run).
eval_config=eval_config)
# TODO(step 5): Uncomment here to add Evaluator to the pipeline.
# components.append(evaluator)
# Pushes the model to a file destination if check passed.
pusher = tfx.components.Pusher( # pylint: disable=unused-variable
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=tfx.proto.PushDestination(
filesystem=tfx.proto.PushDestination.Filesystem(
base_directory=serving_model_dir)))
# TODO(step 5): Uncomment here to add Pusher to the pipeline.
# components.append(pusher)
return tfx.dsl.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=components,
# Change this value to control caching of execution results. Default value
# is `False`.
# enable_cache=True,
metadata_connection_config=metadata_connection_config,
beam_pipeline_args=beam_pipeline_args,
)
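if __name__ == '__main__':
  # Minimal local invocation sketch for the factory above; every path, module
  # name, and threshold here is a placeholder assumption rather than a value
  # prescribed by this template.
  metadata_config = tfx.orchestration.metadata.sqlite_metadata_connection_config(
      '/tmp/penguin/metadata.db')
  tfx.orchestration.LocalDagRunner().run(
      create_pipeline(
          pipeline_name='penguin-demo',
          pipeline_root='/tmp/penguin/pipeline_root',
          data_path='/tmp/penguin/data',  # directory containing CSV files
          preprocessing_fn='models.preprocessing.preprocessing_fn',
          run_fn='models.model.run_fn',
          train_args=tfx.proto.TrainArgs(num_steps=100),
          eval_args=tfx.proto.EvalArgs(num_steps=50),
          eval_accuracy_threshold=0.6,
          serving_model_dir='/tmp/penguin/serving_model',
          metadata_connection_config=metadata_config))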
| tensorflow/tfx | tfx/experimental/templates/penguin/pipeline/pipeline.py | Python | apache-2.0 | 6,485 |
from __future__ import annotations
import os
import shutil
import time
import gc
import threading
from typing import Optional
from utils.utilfuncs import safeprint
def DummyAsyncFileWrite(fn, writestr, access='a'):
safeprint('Called HB file write before init {} {} {}'.format(fn, writestr, access))
AsyncFileWrite = DummyAsyncFileWrite # set from log support to avoid circular imports
DevPrint = None
# import topper
WatchGC = False # set True to see garbage collection info
Buffers = {}
HBdir = ''
GCBuf: Optional[HistoryBuffer] = None
bufdumpseq = 0
HBNet = None
def SetupHistoryBuffers(dirnm, maxlogs):
global HBdir, GCBuf
r = [k for k in os.listdir('.') if '.HistoryBuffer' in k]
if ".HistoryBuffer." + str(maxlogs) in r:
shutil.rmtree(".HistoryBuffer." + str(maxlogs))
for i in range(maxlogs - 1, 0, -1):
if ".HistoryBuffer." + str(i) in r:
os.rename('.HistoryBuffer.' + str(i), ".HistoryBuffer." + str(i + 1))
# noinspection PyBroadException
try:
os.rename('.HistoryBuffer', '.HistoryBuffer.1')
except:
pass
os.mkdir('.HistoryBuffer')
HBdir = dirnm + '/.HistoryBuffer/'
if WatchGC:
gc.callbacks.append(NoteGCs)
GCBuf = HistoryBuffer(50, 'GC')
def NoteGCs(phase, info):
if GCBuf is not None:
GCBuf.Entry('GC Call' + phase + repr(info))
def DumpAll(idline, entrytime):
global bufdumpseq
if HBdir == '': # logs not yet set up
safeprint(time.strftime('%m-%d-%y %H:%M:%S') + ' Suppressing History Buffer Dump for {}'.format(idline))
return
fn = HBdir + str(bufdumpseq) + '-' + entrytime
try:
#topper.mvtops(str(bufdumpseq) + '-' + entrytime)
bufdumpseq += 1
t = {}
curfirst = {}
curtime = {}
initial = {}
now = time.time()
more = True
for nm, HB in Buffers.items():
t[nm] = HB.content()
try:
curfirst[nm] = next(t[nm])
curtime[nm] = curfirst[nm][1]
except StopIteration:
if nm in curfirst: del curfirst[nm]
if nm in curtime: del curtime[nm]
initial[nm] = '*'
if curfirst == {} or curtime == {}:
more = False
prevtime = 0
AsyncFileWrite(fn, '{} ({}): '.format(entrytime, now) + idline + '\n', 'w')
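        # Merge the per-buffer snapshots in timestamp order: on each pass pick the
        # buffer whose oldest remaining entry is earliest and write that entry out.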
while more:
nextup = min(curtime, key=curtime.get)
if curtime[nextup] > prevtime:
prevtime = curtime[nextup]
else:
AsyncFileWrite(fn, 'seq error:' + str(prevtime) + ' ' + str(curtime[nextup]) + '\n')
prevtime = 0
if now - curfirst[nextup][1] < 300: # limit history dump to 5 minutes worth
AsyncFileWrite(fn,
'{:1s}{:10s}:({:3d}) {:.5f}: [{}] {}\n'.format(initial[nextup], nextup,
curfirst[nextup][0],
now - curfirst[nextup][1],
curfirst[nextup][3],
curfirst[nextup][2]))
initial[nextup] = ' '
try:
curfirst[nextup] = next(t[nextup])
curtime[nextup] = curfirst[nextup][1]
except StopIteration:
del curfirst[nextup]
del curtime[nextup]
if curfirst == {} or curtime == {}: more = False
except Exception as E:
AsyncFileWrite(fn, 'Error dumping buffer for: ' + entrytime + ': ' + idline + '\n')
AsyncFileWrite(fn, 'Exception was: ' + repr(E) + '\n')
class EntryItem(object):
def __init__(self):
self.timeofentry = 0
self.entry = ""
self.thread = ""
class HistoryBuffer(object):
def __init__(self, size, name):
self.buf = []
for i in range(size):
self.buf.append(EntryItem())
self.current = 0
self.size = size
self.name = name
Buffers[name] = self
def Entry(self, entry):
self.buf[self.current].entry = entry
self.buf[self.current].timeofentry = time.time()
self.buf[self.current].thread = threading.current_thread().name
self.current = (self.current + 1) % self.size
def content(self):
        # Freeze the buffer for dumping and reset it to empty.
        # This is subject to races from other threads making entry reports.
        # The sequence must be: build a new buffer offline, swap it in for the
        # current buffer so that one list or the other is always valid, then
        # reset current back to 0.
        # At worst this loses the few events recorded between grabbing the
        # current buffer and replacing it with the new one.
tempbuf = []
for i in range(self.size):
tempbuf.append(EntryItem())
cur = self.buf
curind = self.current
self.buf = tempbuf
self.current = 0
#DevPrint('Enter HB content for: {} index {}'.format(self.name, curind))
for i in range(self.size):
j = (i + curind) % self.size
if cur[j].timeofentry != 0:
# DevPrint('Item from {}: {}/{}/{}/{}'.format(self.name, i, j, cur[j].timeofentry, cur[j].entry))
yield j, cur[j].timeofentry, cur[j].entry, cur[j].thread
#DevPrint('Content exit: {}/{}'.format(self.name, j))
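if __name__ == '__main__':
    # Minimal illustrative sketch (buffer size and entry text are arbitrary):
    # record a few entries, wrapping the 4-slot ring once, then walk the frozen
    # snapshot that content() yields.
    hb = HistoryBuffer(4, 'Demo')
    for n in range(6):
        hb.Entry('event {}'.format(n))
    for slot, when, text, thread in hb.content():
        print('{:d} {:.3f} {} ({})'.format(slot, when, text, thread))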
| kevinkahn/softconsole | historybuffer.py | Python | apache-2.0 | 4,574 |
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
__author__ = 'kairong'
# Work around the problem of the script not running under crontab (set a default encoding)
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import ConfigParser
import MySQLdb
import requests
import re
from socket import socket, SOCK_DGRAM, AF_INET
from multiprocessing import Process
### Function definitions
def get_localIP():
    '''Get the local IP address'''
s = socket(AF_INET, SOCK_DGRAM)
s.connect(('google.com', 0))
return s.getsockname()[0]
def get_args(main, name):
"""获取配置"""
return cf.get(main, name)
def check_url(id, url, keyword, method='GET'):
    '''Check the URL and keyword, and write the result to the DB'''
r = requests.get(url)
if r.status_code <=400 and re.search(unicode(keyword), r.text):
c_result = 0
else:
c_result = 1
status_2_db(id, c_result)
def status_2_db(id, status):
    '''Write the result to the DB'''
conn = MySQLdb.connect(host=db_hostname, user=db_user, passwd=db_pass, db='url_mon',charset='utf8')
cur = conn.cursor()
sql_get_id_status = "select status_code from status_code where ID = %d and rep_point = '%s' ;" %(id, local_ip)
cur.execute(sql_get_id_status)
last_code = cur.fetchone()
if last_code:
last_code = last_code[0]
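        # status is 0 (check passed) or 1 (check failed), so a success resets the
        # stored code to 0 while a failure bumps the consecutive-failure count by 1.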
cur_code = last_code * status + status
sql_update_id_status = "update status_code set status_code = %d, rep_time = CURRENT_TIMESTAMP where ID = %d and rep_point = '%s';" %(cur_code, id, local_ip)
cur.execute(sql_update_id_status)
else:
cur_code = status
sql_into_id_status = "insert into status_code(ID, status_code, rep_point) value(%d, %d, '%s')" %(id, cur_code, local_ip)
cur.execute(sql_into_id_status)
conn.commit()
conn.close()
def main():
conn = MySQLdb.connect(host=db_hostname, user=db_user, passwd='test', db='url_mon',charset='utf8')
cur = conn.cursor()
cur.execute("select * from montior_url;")
while True:
line = cur.fetchone()
if not line:
break
c_id, c_domain, c_location, c_method, c_keyword = line[0], line[1], line[2], line[3], line[4]
c_url = "http://%s%s" % (c_domain,c_location)
if c_method == line[5]:
c_post_d = line[6]
Process(target=check_url, args=(c_id, c_url, c_keyword)).start()
### Variable initialization
local_ip = get_localIP()
cf = ConfigParser.ConfigParser()
cf.read("./local_config")
db_hostname = get_args("DB", "db_host")
db_user = get_args("DB", "username")
db_pass = get_args("DB", "passwd")
db_default = get_args("DB", "db")
if __name__ == "__main__":
main()
| sageskr/domain_mon | code/url_monitor.py | Python | apache-2.0 | 2,606 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Service object."""
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from knob.db.sqlalchemy import api as db_api
from knob.objects import base as knob_base
class Service(
knob_base.KnobObject,
base.VersionedObjectDictCompat,
base.ComparableVersionedObject,
):
fields = {
'id': fields.IntegerField(),
'host': fields.StringField(),
'binary': fields.StringField(),
'topic': fields.StringField(),
'created_at': fields.DateTimeField(read_only=True),
'updated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True)
}
@staticmethod
def _from_db_object(context, service, db_service):
for field in service.fields:
service[field] = db_service[field]
service._context = context
service.obj_reset_changes()
return service
@classmethod
def _from_db_objects(cls, context, list_obj):
return [cls._from_db_object(context, cls(context), obj)
for obj in list_obj]
@classmethod
def get_by_id(cls, context, service_id):
service_db = db_api.service_get(context, service_id)
service = cls._from_db_object(context, cls(), service_db)
return service
@classmethod
def create(cls, context, values):
return cls._from_db_object(
context,
cls(),
db_api.service_create(context, values))
@classmethod
def update_by_id(cls, context, service_id, values):
return cls._from_db_object(
context,
cls(),
db_api.service_update(context, service_id, values))
@classmethod
def delete(cls, context, service_id, soft_delete=True):
db_api.service_delete(context, service_id, soft_delete)
@classmethod
def get_all(cls, context):
return cls._from_db_objects(context,
db_api.service_get_all(context))
@classmethod
def get_all_by_args(cls, context, host, binary, topic):
return cls._from_db_objects(
context,
db_api.service_get_all_by_args(context,
host,
binary,
topic))
| igor-toga/knob2 | knob/objects/service.py | Python | apache-2.0 | 2,894 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import time
import threading
import tempfile
from mog_commons.command import *
from mog_commons import unittest
class TestCommand(unittest.TestCase):
def test_execute_command(self):
self.assertEqual(execute_command(['exit', '2'], shell=True), 2)
self.assertEqual(execute_command('exit 3', shell=True), 3)
if os.name == 'nt':
self.assertEqual(execute_command(['cmd', '/C', 'exit 4'], shell=False), 4)
self.assertEqual(execute_command(['cmd', '/C', 'echo あい'], shell=False, cmd_encoding='sjis'), 0)
else:
self.assertEqual(execute_command(['/bin/sh', '-c', 'exit 4'], shell=False), 4)
# This code will not pass in non-Japanese Windows OS.
with self.withAssertOutputFile(
os.path.join('tests', 'resources', 'sjis_ja.txt'), expect_file_encoding='sjis',
output_encoding='sjis', variables={'quote': '"' if os.name == 'nt' else ''}, replace_linesep=True
) as out:
execute_command('echo "あいうえお"', shell=True, cmd_encoding='sjis', stdout=out)
def test_capture_command(self):
self.assertEqual(capture_command(['echo', 'abc'], shell=True), (0, ('abc' + os.linesep).encode('utf-8'), b''))
if os.name == 'nt':
self.assertEqual(capture_command(['cmd', '/C', 'echo abc'], shell=False, cmd_encoding='sjis'),
(0, ('abc' + os.linesep).encode('sjis'), b''))
else:
# This code will not pass in non-Japanese Windows OS.
self.assertEqual(capture_command(['echo', 'あい'], shell=True),
(0, ('あい' + os.linesep).encode('utf-8'), b''))
self.assertEqual(capture_command(['/bin/sh', '-c', 'echo あい'], shell=False),
(0, ('あい' + os.linesep).encode('utf-8'), b''))
def test_execute_command_with_pid(self):
pid_file = os.path.join(tempfile.gettempdir(), 'mog-commons-python-test.pid')
class RunSleep(threading.Thread):
def run(self):
execute_command_with_pid('python -c "import time;time.sleep(2)"', pid_file, shell=True)
th = RunSleep()
th.start()
time.sleep(1)
with open(pid_file, 'r') as f:
pid = int(f.read())
self.assertTrue(pid_exists(pid))
time.sleep(2)
self.assertFalse(pid_exists(pid))
self.assertEqual(execute_command_with_pid(['exit', '2'], None, shell=True), 2)
def test_pid_exists(self):
self.assertTrue(pid_exists(0))
| mogproject/mog-commons-python | tests/mog_commons/test_command.py | Python | apache-2.0 | 2,719 |
import base64
import itertools
import json
import logging
import os
import re
import time
from .buckets import get_bucket_client
from .params import get_param_client
from .secrets import get_secret_client
logger = logging.getLogger("zentral.conf.config")
class Proxy:
pass
class EnvProxy(Proxy):
def __init__(self, name):
self._name = name
def get(self):
return os.environ[self._name]
class ResolverMethodProxy(Proxy):
def __init__(self, resolver, proxy_type, key):
if proxy_type == "file":
self._method = resolver.get_file_content
elif proxy_type == "param":
self._method = resolver.get_parameter_value
elif proxy_type == "secret":
self._method = resolver.get_secret_value
elif proxy_type == "bucket_file":
self._method = resolver.get_bucket_file
else:
raise ValueError("Unknown proxy type %s", proxy_type)
self._key = key
def get(self):
return self._method(self._key)
class JSONDecodeFilter(Proxy):
def __init__(self, child_proxy):
self._child_proxy = child_proxy
def get(self):
return json.loads(self._child_proxy.get())
class Base64DecodeFilter(Proxy):
def __init__(self, child_proxy):
self._child_proxy = child_proxy
def get(self):
return base64.b64decode(self._child_proxy.get())
class ElementFilter(Proxy):
def __init__(self, key, child_proxy):
try:
self._key = int(key)
except ValueError:
self._key = key
self._child_proxy = child_proxy
def get(self):
return self._child_proxy.get()[self._key]
class Resolver:
def __init__(self):
self._cache = {}
self._bucket_client = None
self._param_client = None
self._secret_client = None
def _get_or_create_cached_value(self, key, getter, ttl=None):
# happy path
try:
expiry, value = self._cache[key]
except KeyError:
pass
else:
if expiry is None or time.time() < expiry:
logger.debug("Key %s from cache", key)
return value
logger.debug("Cache for key %s has expired", key)
# get value
value = getter()
if ttl:
expiry = time.time() + ttl
else:
expiry = None
self._cache[key] = (expiry, value)
logger.debug("Set cache for key %s", key)
return value
def get_file_content(self, filepath):
cache_key = ("FILE", filepath)
def getter():
with open(filepath, "r") as f:
return f.read()
return self._get_or_create_cached_value(cache_key, getter)
def get_secret_value(self, name):
cache_key = ("SECRET", name)
if not self._secret_client:
self._secret_client = get_secret_client()
def getter():
return self._secret_client.get(name)
return self._get_or_create_cached_value(cache_key, getter, ttl=600)
def get_bucket_file(self, key):
cache_key = ("BUCKET_FILE", key)
if not self._bucket_client:
self._bucket_client = get_bucket_client()
def getter():
return self._bucket_client.download_to_tmpfile(key)
return self._get_or_create_cached_value(cache_key, getter)
def get_parameter_value(self, key):
cache_key = ("PARAM", key)
if not self._param_client:
self._param_client = get_param_client()
def getter():
return self._param_client.get(key)
return self._get_or_create_cached_value(cache_key, getter, ttl=600)
class BaseConfig:
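    # Matches templated string values such as "{{ env:API_TOKEN }}",
    # "{{ file:/path/to/cert }}" or "{{ env:PAYLOAD | jsondecode | element:key }}",
    # which _from_python() replaces with lazy Proxy objects.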
PROXY_VAR_RE = re.compile(
r"^\{\{\s*"
r"(?P<type>bucket_file|env|file|param|secret)\:(?P<key>[^\}\|]+)"
r"(?P<filters>(\s*\|\s*(jsondecode|base64decode|element:[a-zA-Z_\-/0-9]+))*)"
r"\s*\}\}$"
)
custom_classes = {}
def __init__(self, path=None, resolver=None):
self._path = path or ()
if not resolver:
resolver = Resolver()
self._resolver = resolver
def _make_proxy(self, key, match):
proxy_type = match.group("type")
key = match.group("key").strip()
if proxy_type == "env":
proxy = EnvProxy(key)
else:
proxy = ResolverMethodProxy(self._resolver, proxy_type, key)
filters = [f for f in [rf.strip() for rf in match.group("filters").split("|")] if f]
for filter_name in filters:
if filter_name == "jsondecode":
proxy = JSONDecodeFilter(proxy)
elif filter_name == "base64decode":
proxy = Base64DecodeFilter(proxy)
elif filter_name.startswith("element:"):
key = filter_name.split(":", 1)[-1]
proxy = ElementFilter(key, proxy)
else:
raise ValueError("Unknown filter %s", filter_name)
return proxy
def _from_python(self, key, value):
new_path = self._path + (key,)
if isinstance(value, dict):
value = self.custom_classes.get(new_path, ConfigDict)(value, new_path)
elif isinstance(value, list):
value = self.custom_classes.get(new_path, ConfigList)(value, new_path)
elif isinstance(value, str):
match = self.PROXY_VAR_RE.match(value)
if match:
value = self._make_proxy(key, match)
return value
def _to_python(self, value):
if isinstance(value, Proxy):
return value.get()
else:
return value
def __len__(self):
return len(self._collection)
def __delitem__(self, key):
del self._collection[key]
def __setitem__(self, key, value):
self._collection[key] = self._from_python(key, value)
def pop(self, key, default=None):
value = self._collection.pop(key, default)
if isinstance(value, Proxy):
value = value.get()
return value
class ConfigList(BaseConfig):
def __init__(self, config_l, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = []
for key, value in enumerate(config_l):
self._collection.append(self._from_python(str(key), value))
def __getitem__(self, key):
value = self._collection[key]
if isinstance(key, slice):
slice_repr = ":".join(str("" if i is None else i) for i in (key.start, key.stop, key.step))
logger.debug("Get /%s[%s] config key", "/".join(self._path), slice_repr)
return [self._to_python(item) for item in value]
else:
logger.debug("Get /%s[%s] config key", "/".join(self._path), key)
return self._to_python(value)
def __iter__(self):
for element in self._collection:
yield self._to_python(element)
def serialize(self):
s = []
for v in self:
if isinstance(v, BaseConfig):
v = v.serialize()
s.append(v)
return s
class ConfigDict(BaseConfig):
def __init__(self, config_d, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = {}
for key, value in config_d.items():
self._collection[key] = self._from_python(key, value)
def __getitem__(self, key):
logger.debug("Get /%s config key", "/".join(self._path + (key,)))
value = self._collection[key]
return self._to_python(value)
def get(self, key, default=None):
try:
value = self[key]
except KeyError:
value = self._to_python(default)
return value
def __iter__(self):
yield from self._collection
def keys(self):
return self._collection.keys()
def values(self):
for value in self._collection.values():
yield self._to_python(value)
def items(self):
for key, value in self._collection.items():
yield key, self._to_python(value)
def clear(self):
return self._collection.clear()
def setdefault(self, key, default=None):
return self._collection.setdefault(key, self._from_python(key, default))
def pop(self, key, default=None):
value = self._collection.pop(key, default)
return self._to_python(value)
def popitem(self):
key, value = self._collection.popitem()
return key, self._to_python(value)
def copy(self):
return ConfigDict(self._collection.copy(), path=self._path, resolver=self._resolver)
def update(self, *args, **kwargs):
chain = []
for arg in args:
if isinstance(arg, dict):
iterator = arg.items()
else:
iterator = arg
chain = itertools.chain(chain, iterator)
if kwargs:
chain = itertools.chain(chain, kwargs.items())
        for key, value in chain:
self._collection[key] = self._from_python(key, value)
def serialize(self):
s = {}
for k, v in self.items():
if isinstance(v, BaseConfig):
v = v.serialize()
s[k] = v
return s
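if __name__ == "__main__":
    # Minimal usage sketch: the environment variable name and JSON payload are
    # placeholders chosen for illustration only.
    os.environ["ZENTRAL_DEMO_PAYLOAD"] = '{"api_token": "abc"}'
    cfg = ConfigDict({
        "plain": 42,
        "secret": "{{ env:ZENTRAL_DEMO_PAYLOAD | jsondecode | element:api_token }}",
    })
    print(cfg["plain"], cfg["secret"])  # -> 42 abc
    print(cfg.serialize())              # proxies are resolved on access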
| zentralopensource/zentral | zentral/conf/config.py | Python | apache-2.0 | 9,328 |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp.deprecated.dsl import artifact_utils
from typing import Any, List
class ComplexMetricsBase(object):
def get_schema(self):
"""Returns the set YAML schema for the metric class.
Returns:
YAML schema of the metrics type.
"""
return self._schema
def get_metrics(self):
"""Returns the stored metrics.
The metrics are type checked against the set schema.
Returns:
Dictionary of metrics data in the format of the set schema.
"""
artifact_utils.verify_schema_instance(self._schema, self._values)
return self._values
def __init__(self, schema_file: str):
self._schema = artifact_utils.read_schema_file(schema_file)
self._type_name, self._metric_fields = artifact_utils.parse_schema(
self._schema)
self._values = {}
class ConfidenceMetrics(ComplexMetricsBase):
"""Metrics class representing a Confidence Metrics."""
# Initialization flag to support setattr / getattr behavior.
_initialized = False
def __getattr__(self, name: str) -> Any:
"""Custom __getattr__ to allow access to metrics schema fields."""
if name not in self._metric_fields:
raise AttributeError('No field: {} in metrics.'.format(name))
return self._values[name]
def __setattr__(self, name: str, value: Any):
"""Custom __setattr__ to allow access to metrics schema fields."""
if not self._initialized:
object.__setattr__(self, name, value)
return
if name not in self._metric_fields:
raise RuntimeError(
                'Field: {} not defined in metric schema'.format(name))
self._values[name] = value
def __init__(self):
super().__init__('confidence_metrics.yaml')
self._initialized = True
class ConfusionMatrix(ComplexMetricsBase):
"""Metrics class representing a confusion matrix."""
def __init__(self):
super().__init__('confusion_matrix.yaml')
self._matrix = [[]]
self._categories = []
self._initialized = True
def set_categories(self, categories: List[str]):
"""Sets the categories for Confusion Matrix.
Args:
categories: List of strings specifying the categories.
"""
self._categories = []
annotation_specs = []
for category in categories:
annotation_spec = {'displayName': category}
self._categories.append(category)
annotation_specs.append(annotation_spec)
self._values['annotationSpecs'] = annotation_specs
self._matrix = [[0
for i in range(len(self._categories))]
for j in range(len(self._categories))]
self._values['row'] = self._matrix
def log_row(self, row_category: str, row: List[int]):
"""Logs a confusion matrix row.
Args:
row_category: Category to which the row belongs.
row: List of integers specifying the values for the row.
Raises:
ValueError: If row_category is not in the list of categories set in
set_categories or size of the row does not match the size of
categories.
"""
if row_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
if len(row) != len(self._categories):
raise ValueError('Invalid row. Expected size: {} got: {}'.\
format(len(self._categories), len(row)))
self._matrix[self._categories.index(row_category)] = row
def log_cell(self, row_category: str, col_category: str, value: int):
"""Logs a cell in the confusion matrix.
Args:
row_category: String representing the name of the row category.
col_category: String representing the name of the column category.
value: Int value of the cell.
Raises:
ValueError: If row_category or col_category is not in the list of
categories set in set_categories.
"""
if row_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
if col_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
self._matrix[self._categories.index(row_category)][
self._categories.index(col_category)] = value
def load_matrix(self, categories: List[str], matrix: List[List[int]]):
"""Supports bulk loading the whole confusion matrix.
Args:
categories: List of the category names.
matrix: Complete confusion matrix.
Raises:
ValueError: Length of categories does not match number of rows or columns.
"""
self.set_categories(categories)
if len(matrix) != len(categories):
raise ValueError('Invalid matrix: {} passed for categories: {}'.\
format(matrix, categories))
for index in range(len(categories)):
if len(matrix[index]) != len(categories):
raise ValueError('Invalid matrix: {} passed for categories: {}'.\
format(matrix, categories))
self.log_row(categories[index], matrix[index])
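if __name__ == '__main__':
    # Minimal usage sketch; the categories and counts are arbitrary, and it
    # assumes the bundled confusion_matrix.yaml schema can be loaded by
    # artifact_utils from its usual location.
    cm = ConfusionMatrix()
    cm.load_matrix(['cat', 'dog'], [[9, 1], [2, 8]])
    cm.log_cell('cat', 'dog', 3)  # overwrite a single cell after bulk loading
    print(cm.get_metrics())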
| kubeflow/pipelines | sdk/python/kfp/deprecated/dsl/metrics_utils.py | Python | apache-2.0 | 6,121 |
from django.conf import settings
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login, logout
from django.core.context_processors import csrf
from skilltreeapp.forms import LoginForm
from django.shortcuts import render_to_response, HttpResponseRedirect
import datetime
from labgeeks_hermes.models import Notification
from labgeeks_hermes.forms import NotificationForm
# Create your views here.
def home(request):
params = {}
c = {}
c.update(csrf(request))
if request.user.is_authenticated():
# hermes code goes here
locations = request.user.location_set.all()
now = datetime.datetime.now()
notifications = Notification.objects.all()
events = []
alerts = []
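        # Notifications more than a day past due get archived; dated ones due
        # within the next week are shown as events, and undated, unarchived
        # ones become alerts.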
for noti in notifications:
if noti.due_date:
if now.date() - noti.due_date.date() >= datetime.timedelta(days=1):
noti.archived = True
elif not noti.due_date.date() - now.date() > datetime.timedelta(days=7) and not noti.archived:
events.append(noti)
else:
if not noti.archived:
alerts.append(noti)
events.sort(key=lambda x: x.due_date)
form_is_valid = True
if request.method == 'POST':
archive_ids = request.POST.getlist('pk')
if archive_ids:
for archive_id in archive_ids:
notif = Notification.objects.get(pk=archive_id)
notif.archived = True
notif.save()
return HttpResponseRedirect('/')
form = NotificationForm(request.POST)
if form.is_valid():
form_is_valid = True
notification = form.save(commit=False)
notification.user = request.user
if notification.due_date:
if now.date() - notification.due_date.date() >= datetime.timedelta(days=1):
notification.archived = True
notification.save()
return HttpResponseRedirect('/')
else:
form_is_valid = False
else:
form = NotificationForm()
params = {
'request': request,
'events': events,
'alerts': alerts,
'form': form,
'c': c,
}
return render_to_response('pages/home.html', params, context_instance=RequestContext(request))
else:
form = LoginForm()
params = { 'form': form }
return HttpResponseRedirect('/accounts/login')
def basic(request):
params = {}
return render_to_response('pages/basic.html', params, context_instance=RequestContext(request))
def hybrid(request):
params = {}
return render_to_response('pages/hybrid.html', params, context_instance=RequestContext(request))
def tools_login(request):
""" Login a user. Called by @login_required decorator.
"""
c = {}
c.update(csrf(request))
if request.user.is_authenticated():
try:
return HttpResponseRedirect(request.GET['next'])
except:
return HttpResponseRedirect('/')
elif request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
else:
form = LoginForm()
return render_to_response('pages/login.html', locals(), context_instance=RequestContext(request))
def tools_logout(request):
""" Manually log a user out.
"""
logout(request)
return HttpResponseRedirect('/')
| abztrakt/uw-skilltree | skilltreeapp/views/pages.py | Python | apache-2.0 | 4,046 |
#!/usr/bin/env python
"""
MCNPX Model for Cylindrical RPM8
"""
import sys
sys.path.append('../MCNPTools/')
sys.path.append('../')
from MCNPMaterial import Materials
import subprocess
import math
import mctal
import numpy as np
import itertools
import os
class CylinderRPM(object):
# Material Dictionaries
cellForStr = '{:5d} {:d} -{:4.3f} {:d} -{:d} u={:d}\n'
surfForStr = '{:5d} cz {:5.3f}\n'
tranForStr = '*tr{:d} {:4.3f} {:4.3f} 0.000\n'
geoParam={'RPM8Size':12.7,'DetectorThickness':0.01,'DetectorSpacing':0.8,
'CylinderLightGuideRadius':0.5,'CylinderRadius':2.5}
def __init__(self,inp='INP.mcnp'):
""" Wrapped Cylinder MCNPX Model of RPM8
Keywords:
inp -- desired name of the input deck
"""
# Material dictionary for the moderator, light guide, and detector
self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector
self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA
self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HPDE
# Cell and Surface Inital Numbering
self.CellStartNum = 600
self.SurfaceStartNum = 600
self.ZeroSurfaceNum = 500
self.UniverseNum = 200
self.surfGeo = None
self.inp = inp
self.name = 'OUT_'+self.inp.strip('.mcnp')+'.'
self.setMaterial(0.1,'PS')
def __str__(self):
s = '\tMCNPX Model of Wrapped Cylinder\n'
s += '\t Cell Number Starts: {0:d}\n'.format(self.CellStartNum)
s += '\t Surface Number Starts: {0:d}\n'.format(self.SurfaceStartNum)
return s
def getInteractionRate(self):
""" Returns the interaction rate """
m = mctal.MCTAL(self.name+'.m')
t = m.tallies[4]
        # Returning the total
return t.data[-1],t.errors[-1]
def setMaterial(self,massFraction,polymer):
"""
Sets the detector material
"""
M = Materials()
num = self.material['Detector']['mt']
if polymer == 'PS':
self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)
elif polymer == 'PEN':
self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)
else:
raise ValueError('Polymer {} is not in the material database'.format(polymer))
def createSurfaceGeo(self):
"""
Creates a dictionary of surface positions and cylinders
"""
self.surfGeo = dict()
r = self.geoParam['CylinderLightGuideRadius']
self.surfGeo[r] = 'LightGuide'
#self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
r += self.geoParam['DetectorThickness']
self.surfGeo[r] = 'Detector'
r += self.geoParam['DetectorSpacing']
if (r < self.geoParam['CylinderRadius']):
self.surfGeo[r] = 'LightGuide'
return self.surfGeo
def calculateDetectorArea(self):
"""
Calculates the area used in a detector
"""
area = 0.0
r = self.geoParam['CylinderLightGuideRadius']
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
area -= math.pow(r,2)
r += self.geoParam['DetectorThickness']
area += math.pow(r,2)
r += self.geoParam['DetectorSpacing']
return math.pi*area
def createDetectorCylinder(self,uNum=1):
"""
Creates a detector cylinder
        Returns a tuple of (s, c, detectorCells, cellsCreated)
        s - the surface string
        c - the cell string
        detectorCells - a list of the cell numbers corresponding to the detector cells
        cellsCreated - the number of cells created for the cylinder
"""
cellsCreated = 0
sNum = self.SurfaceStartNum
cNum = self.CellStartNum
detectorCells = list()
s = '{:5d} rcc 0 0 0 0 0 217.7 {}\n'.format(self.SurfaceStartNum,self.geoParam['CylinderRadius'])
c = ''
keyList = sorted(self.surfGeo.keys(), key = lambda x: float(x))
for key in keyList:
sPrev = sNum
sNum += 1
cNum += 1
s += self.surfForStr.format(sNum,key)
m = self.material[self.surfGeo[key]]
if cNum == self.CellStartNum+1:
c+= '{:5d} {:d} -{:4.3f} -{:d} u={:d}\n'.format(cNum,m['mt'],m['rho'],sNum,uNum)
else:
c += self.cellForStr.format(cNum,m['mt'],m['rho'],sPrev,sNum,uNum)
# List of cells for the detector
            if self.surfGeo[key] == 'Detector':
detectorCells.append(cNum)
cellsCreated += 1
# Last cell up to universe boundary
m = self.material['Moderator']
c += '{:5d} {:d} -{:4.3f} {:d} u={:d}\n'.format(cNum+1,m['mt'],m['rho'],sNum,uNum)
cellsCreated += 1
return s,c,detectorCells,cellsCreated
def runModel(self):
"""
Runs the Model by submission to Tourqe / Maui
"""
qsub= subprocess.check_output('which qsub',shell=True).strip()
cmd = '#!/bin/bash\n'
cmd += '#PBS -N {0}\n#PBS -V\n#PBS -q gen1\n#PBS -l nodes=1:ppn=1\n'
cmd += 'cd $PBS_O_WORKDIR\nmpirun mcnpx inp={1} name={2}\n'
job = cmd.format('Job_RPMCylinder',self.inp,self.name)
with open('qsub','w') as o:
o.write(job)
subprocess.call(qsub+' qsub',shell=True)
subprocess.call('rm qsub',shell=True)
def createInputDeck(self,cylinderPositions,inp=None,name=None):
""" createInputDeck
Creates an input deck of the given geometry
"""
self.inp = inp
self.name = name
if not inp:
self.inp = 'INP_Cylinder.mcnp'
if not name:
self.name = 'OUT_Cylinder.'
oFile = self.inp
# Problem Constants
cellString = 'c ------------------------- Source ----------------------------------------\n'
cellString += '70 5 -15.1 -70 $ 252Cf source \n'
cellString += '71 406 -11.34 -71 70 $ Lead around source\n'
cellString += '72 456 -0.93 -72 71 $ Poly around source\n'
surfString = 'c ########################### Surface Cards ##############################\n'
surfString += 'c ------------------- Encasing Bounds (Size of RPM8) ---------------------\n'
surfString += '500 rpp 0 12.7 -15.25 15.25 0 217.7 \n'
# Add in other cells here
numCells = 4 # 3 Source, 1 RPM8 Encasing
##################################################################
# Add in Detector Cells and Surfaces #
##################################################################
universeNum = 1
(s,c,detectorCells,cellsCreated) = self.createDetectorCylinder(universeNum)
surfString += s
cellString += 'c ------------------- Detector Cylinder Universe ------------------------\n'
cellString += c
transNum = 1
uCellNum = self.UniverseNum
transString = ''
cellString += 'c ----------------------- Detector Universe ----------------------------\n'
for pos in cylinderPositions:
transString += self.tranForStr.format(transNum,pos[0],pos[1])
cellString += '{:5d} 0 -{:d} trcl={:d} fill={:d}\n'.format(uCellNum,self.SurfaceStartNum,transNum,universeNum)
transNum +=1
uCellNum +=1
# Adding the PMMA Moderator Block
m = self.material['Moderator']
cellString += 'c ------------------------- HDPE Moderator -----------------------------\n'
cellString += '{:5d} {:d} -{:4.3f} -{:d} '.format(500,m['mt'],m['rho'],self.ZeroSurfaceNum)
cellString += ''.join('#{:d} '.format(i) for i in range(self.UniverseNum,uCellNum))
cellString += '\n'
# Getting total number of cells
numCells += cellsCreated + uCellNum-self.UniverseNum +1
##################################################################
# Write the Tallies #
##################################################################
univCells = range(self.UniverseNum,uCellNum)
tallyString = 'c ------------------------- Tallies Yo! -----------------------------------\n'
tallies = {'F54:n':{'cells':detectorCells,'comments':'FC54 6Li Reaction Rates\n',
'options':' T\nSD54 1 {0:d}R\nFM54 -1 3 105'}}
for t in tallies:
# Getting a list of cells
tallyString += tallies[t]['comments']
tallyString += str(t)+' '
j = 0
for u in univCells:
cell = list('('+str(c)+'<'+str(u)+') ' for c in tallies[t]['cells'])
cell = [cell[i:i+6] for i in range(0,len(cell),6)]
if j > 0:
tallyString += ' '+''.join(''.join(i)+'\n' for i in cell)
else:
tallyString += ' '.join(''.join(i)+'\n' for i in cell)
j +=1
tallyString = tallyString.rstrip()
tallyString += tallies[t]['options'].format(len(univCells)*len(tallies[t]['cells']))
tallyString+='\n'
# Finish up the problem data
cellString += 'c ---------------------- Detector Encasing ------------------------------\n'
cellString += '700 488 -7.92 701 -700 $ SS-316 Encasing \n'
cellString += 'c -------------------------- Outside World -------------------------------\n'
cellString += '1000 204 -0.001225 -1000 700 #70 #71 #72 $ Atmosphere \n'
cellString += '1001 0 1000 \n'
surfString += 'c ------------------------ Encasing Material -----------------------------\n'
surfString += '700 rpp -0.3175 13.018 -15.5675 15.5675 -0.3175 218.018 \n'
surfString += '701 rpp 0.0 12.7 -15.25 15.25 0.0 217.7 \n'
surfString += 'c -------------- Source --------------------------------------------------\n'
surfString += '70 s -200 0 108.85 2.510E-04 $ Source \n'
surfString += '71 s -200 0 108.85 5.0025E-01 $ 0.5 cm lead surrounding source \n'
surfString += '72 s -200 0 108.85 3.00025 $ 2.5 cm poly surrounding source \n'
surfString += 'c -------------- Outside World -------------------------------------------\n'
surfString += '1000 so 250 \n'
matString = 'c -------------------------- Material Cards -----------------------------\n'
matString += self.material['Detector']['matString']
matString += self.getMaterialString()
with open(oFile,'w') as o:
o.write('MCNPX Simulation of RPM8 Cylinder\n')
o.write(cellString)
o.write('\n')
o.write(surfString)
o.write('\n')
o.write(self.getRunString().format(numCells))
o.write(self.getSrcString())
o.write(tallyString)
o.write(matString)
o.write(transString)
o.write('\n')
def getRunString(self):
runString ='c ------------------------------ Run Info ---------------------------------\n'
runString +='nps 1E6 \n'
runString +='IMP:N 1 {0:d}R 0 $ Particle Importances within cells \n'
runString +='c -------------- Output --------------------------------------------------\n'
runString +='PRDMP j j 1 $ Write a MCTAL File \n'
runString +='PRINT 40 \n'
runString +='c ------------------------------ Physics ---------------------------------\n'
runString +='MODE N \n'
runString +='PHYS:N 100 4j -1 2 \n'
runString +='CUT:N 2j 0 0 \n'
return runString
def getSrcString(self):
"""
Returns the MCNPX formatted source string
"""
srcString = 'c -------------------------- Source Defination ----------------------------\n'
srcString += 'c 1 nanogram Cf-252 source = 1E-9 grams = 6.623E-11 cc \n'
srcString += 'sdef pos=-200 0 108.85 cel=70 par=SF rad=d1 \n'
srcString += 'si1 0 2.510E-04 \n'
srcString += 'sp1 -21 1 \n'
return srcString
def getMaterialString(self):
"""
Returns the MCNPX material string
"""
matString = 'm10 1001.70c -0.080538 $Lucite (PMMA / Plexiglass) rho = 1.19 g/cc\n'
matString += ' 6012.70c -0.599848 8016.70c -0.319614 \n'
matString += 'm204 7014.70c -0.755636 $air (US S. Atm at sea level) rho = 0.001225 \n'
matString += ' 8016.70c -0.231475 18036.70c -3.9e-005 18038.70c -8e-006\n'
matString += ' 18040.70c -0.012842 \n'
matString += 'm5 98252.66c 1 $ Cf-252, rho =15.1 g/cc wiki \n'
matString += 'm406 82204.70c -0.013781 $Lead, \n'
matString += ' 82206.70c -0.239557 82207.70c -0.220743 82208.70c -0.525919\n'
matString += 'm456 1001.70c -0.143716 $Polyethylene - rho = 0.93 g/cc \n'
matString += ' 6000.70c -0.856284 \n'
matString += 'm488 14028.70c -0.009187 $Steel, Stainless 316 rho = 7.92 \n'
matString += ' 14029.70c -0.000482 14030.70c -0.000331 24050.70c -0.007095\n'
matString += ' 24052.70c -0.142291 24053.70c -0.016443 24054.70c -0.004171\n'
matString += ' 25055.70c -0.02 26054.70c -0.037326 26056.70c -0.601748\n'
matString += ' 26057.70c -0.014024 26058.70c -0.001903 28058.70c -0.080873\n'
matString += ' 28060.70c -0.031984 28061.70c -0.001408 28062.70c -0.004546\n'
matString += ' 28064.70c -0.001189 42092.70c -0.003554 42094.70c -0.002264\n'
matString += ' 42095.70c -0.003937 42096.70c -0.004169 42097.70c -0.002412\n'
matString += ' 42098.70c -0.006157 42100.70c -0.002507 \n'
matString += 'mt3 poly.01t \n'
matString += 'mt456 poly.01t \n'
matString += 'mt10 poly.01t \n'
return matString
def run(loading,polymers):
"""
Runs a matrix of loading and polymers
"""
# NOTE: the successive assignments below override one another, so only the
# final five-cylinder layout is actually passed to RunCylinder.
cylinderPositions = ((4.23,10.16),(4.23,-10.16))
cylinderPositions = ((4.23,7.625),(4.23,0),(4.23,-7.625))
cylinderPositions = ((4.23,9.15),(4.23,3.05),(4.23,-3.05),(4.23,-9.15))
cylinderPositions = ((4.23,10.16),(4.23,5.08),(4.23,0.0),(4.23,-5.08),(4.23,-10.16))
for l in loading:
for p in polymers:
RunCylinder(l,p,cylinderPositions)
def RunCylinder(l,p,cylinderPositions):
"""
Runs an MCNPX model of the cylinder with loading l, polymer p, and
cylinder positions cylinderPositions.
Keywords:
l - loading of the films
p - polymer
cylinderPositions - the cylinder positions
"""
# Creating input and output deck names
posString = ''
for pos in cylinderPositions:
posString += '{:2.1f}-'.format(pos[0])
posString = posString.rstrip('-')
inp='Cyl_{}LiF_{}_{}.mcnp'.format(int(l*100),p,posString)
name='OUTCyl_{}LiF_{}_{}.'.format(int(l*100),p,posString)
print inp
# Creating and running the model
m = CylinderRPM()
m.createSurfaceGeo()
m.setMaterial(l,p)
m.createDetectorCylinder()
m.createInputDeck(cylinderPositions,inp,name)
m.runModel()
def CreatePositions(yPos,numXPertubations):
"""
Creates and returns a list of position layouts, using a fixed array of y
positions and numXPertubations equally spaced x positions.
Keywords:
yPos - the array of y positions (i.e. the spacing of the cylinders). The
number of elements in this array corresponds to the number of
cylinders that are simulated.
numXPertubations - the number of perturbations in x. The x values of the
returned positions are spaced linearly from 2.54 to
10 cm
"""
pos = list()
xVals = np.linspace(2.54,10,numXPertubations)
xPos = [i for i in itertools.product(xVals,repeat=len(yPos))]
for x in xPos:
pos.append(zip(x,yPos))
return pos
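# Example (illustrative, not executed here): CreatePositions((7.625, 0, -7.625), 3)
# builds xVals = [2.54, 6.27, 10.0] and returns the 3**3 = 27 layouts formed by
# pairing every combination of those x values with the fixed y positions, e.g.
# one element is [(2.54, 7.625), (6.27, 0), (10.0, -7.625)].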
def PositionOptimization(loading,polymers,positions):
"""
Runs a matrix of loading, polymers and positions
"""
for l in loading:
for p in polymers:
for pos in positions:
RunCylinder(l,p,pos)
def createInputPlotDecks():
positions = list()
positions.append(((4.23,10.16),(4.23,-10.16)))
positions.append(((4.23,7.625),(4.23,0),(4.23,-7.625)))
#positions.append(((4.23,9.15),(4.23,3.05),(4.23,-3.05),(4.23,-9.15)))
for pos in positions:
m = CylinderRPM()
m.createSurfaceGeo()
m.createDetectorCylinder()
inp='Cylinder_{}.mcnp'.format(len(pos))
name='OUTCylinder_{}.'.format(len(pos))
m.createInputDeck(pos,inp,name)
def computeMassLi(polymer,loading,density=1.1):
"""
Computes the mass of Li for a given polymer and loading
"""
M = Materials()
m = CylinderRPM()
area = m.calculateDetectorArea()
massLi = area*217.0*M.GetLiMassFraction(loading,polymer)*density
return massLi
def extractRunInfo(filename):
"""
Extracts the loading and polymer from the file name
"""
tokens = filename.split('_')
loading = tokens[1].strip('LiF')
polymer = tokens[2].strip('.m')
return (float(loading)/100, polymer)
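# Example (illustrative file name): extractRunInfo('OUTCyl_30LiF_PS_4.2-10.2.m')
# splits on '_' and returns (0.3, 'PS') -- the fractional LiF loading and the
# polymer token.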
###########################################################################
# #
# Summarizes / Analysis #
# #
###########################################################################
def GetInteractionRate(f,tallyNum=54,src=2.3E3):
"""
Returns (rate, error) from the MCTAL file, scaled by the source strength src
"""
m = mctal.MCTAL(f)
t = m.tallies[tallyNum]
return (t.data[-1]*src,t.errors[-1]*t.data[-1]*src)
import glob
def summerize():
files = glob.glob('OUTCylinder*.m')
s = 'Polymer, loading, mass Li, count rate, error, count rate per mass\n'
for f in files:
runParam = extractRunInfo(f)
massLi = computeMassLi(runParam[1],runParam[0])
countRate = GetInteractionRate(f)
s += '{}, {:5.2f} , {:5.3f} , {:5.3f} , {:4.2f} , {:5.3f}\n'.format(runParam[1].ljust(7),runParam[0],massLi,countRate[0],countRate[1],countRate[0]/massLi)
print s
def OptimizationSummary(path):
"""
Summarizes the optimization output
"""
# Getting the files
if not os.path.isdir(path):
raise IOError('Path {} is not found'.format(path))
files = glob.glob(path+'/*.m')
if not files:
print 'No files matched the pattern'
return
# Parsing the files
data = dict()
for f in files:
name = os.path.splitext(os.path.split(f)[1])[0]
data[name] = GetInteractionRate(f)
# Max value
sortedKeys = sorted(data, key=data.get,reverse=True)
#sortedKeys = sorted(data.items(), key=lambda x : float(x[1][0]),reverse=True)
for key in sortedKeys[0:9]:
print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
for key in sortedKeys[-6:-1]:
print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
def cleanup(path):
files = glob.glob(path+'/OUTCyl_*.m')
for f in files:
head,tail = os.path.split(f)
numCylinders = tail.count('-')+1
if numCylinders == 3:
newdir = 'ThreeCylPosOpt'
elif numCylinders == 4:
newdir = 'FourCylPosOpt'
elif numCylinders == 5:
newdir = 'FiveCylPosOpt'
else:
# Skip file names with an unexpected cylinder count; otherwise newdir would
# be stale (or undefined) when os.rename runs below.
continue
os.rename(f,os.path.join(newdir,tail))
###########################################################################
# #
# MAIN #
# #
###########################################################################
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-r','--run',action="store_true",
default=False,help='Runs the cylinders for multiple polymers and percent loadings')
parser.add_argument('-p','--plot',action="store_true",
default=False,help='Creates input decks for plotting')
parser.add_argument('-c','--clean',action="store_true",
default=False,help='Cleans up the files')
parser.add_argument('-a','--analysis',action="store_true",default=False,help="Analyze the results")
parser.add_argument('path', nargs='?', default='CylPosOpt',help='Specify the output directory to summarize')
parser.add_argument('-o','--optimize',action='store',type=int,default=-1,help='Run a number of optimizations on the positions. If 0 is entered a summary is performed on the directory provided with path')
parser.add_argument('loading',metavar='loading',type=float,nargs='*',action="store",default=(0.1,0.2,0.3),help='Percent loading of LiF')
args = parser.parse_args()
if args.run:
run(args.loading,('PS','PEN'))
if args.plot:
createInputPlotDecks()
if args.optimize > 0:
# NOTE: the second assignment overrides the first; the four-cylinder y layout
# is the one actually used (the five-cylinder layout is left commented out).
yPos = (7.625,0,-7.625)
yPos = (9.15,3.05,-3.05,-9.15)
#yPos = (10.16,5.08,0.0,-5.08,-10.16)
pos = CreatePositions(yPos,args.optimize)
loading = (0.3,)
polymers = ('PS',)
PositionOptimization(loading,polymers,pos)
if args.optimize == 0:
OptimizationSummary(args.path)
if args.analysis:
summerize()
if args.clean:
cleanup(os.getcwd())
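# Illustrative command lines for the argparse interface above (directory and
# option values are examples only):
# python CylinderMCNPX.py -r # run PS and PEN models at the default 10/20/30 % LiF loadings
# python CylinderMCNPX.py -p # write geometry-plotting input decks only
# python CylinderMCNPX.py -o 5 # position optimization with 5 x values per cylinder (one job per layout)
# python CylinderMCNPX.py -o 0 FourCylPosOpt # summarize a finished optimization in that directory
# python CylinderMCNPX.py -a # summarize OUTCylinder*.m results in the current directory
# python CylinderMCNPX.py -c # sort OUTCyl_*.m files into the *CylPosOpt directories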
| murffer/DetectorSim | MCNPXRPMModels/WrappedCylinders/CylinderMCNPX.py | Python | apache-2.0 | 23,537 |
#
# Copyright 2015 Simulmedia, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import shutil
import socket
import tarfile
import tempfile
import time
from contextlib import closing
from distutils import spawn
from os.path import expanduser
from subprocess import Popen
import psycopg2
import requests
from psycopg2._psycopg import OperationalError
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
logger = logging.getLogger('pyembedpg')
class PyEmbedPg(object):
DOWNLOAD_BASE_URL = 'http://ftp.postgresql.org/pub/source'
DOWNLOAD_URL = DOWNLOAD_BASE_URL + '/v{version}/postgresql-{version}.tar.bz2'
LOCAL_VERSION = 'local'
CACHE_DIRECTORY = '.pyembedpg'
def __init__(self, version=None):
"""
Initialize a new Postgres object
:param version: version to use. If it is not set, use the latest version in the .pyembedpg directory; if none is
present there, use the latest version available remotely. Use 'local' to use the postgres installed on the machine
:return:
"""
home_dir = expanduser("~")
self._cache_dir = os.path.join(home_dir, PyEmbedPg.CACHE_DIRECTORY)
# if version is not specified, check local last version otherwise get last remote version
self.version = version
if not self.version:
self.version = self.get_latest_local_version()
if not self.version:
self.version = self.get_latest_remote_version()
if version == PyEmbedPg.LOCAL_VERSION:
full_path = spawn.find_executable('postgres')
if not full_path:
raise PyEmbedPgException('Cannot find postgres executable. Make sure it is in your path')
self._version_path = os.path.dirname(full_path)
else:
self._version_path = os.path.join(self._cache_dir, self.version)
def get_latest_local_version(self):
"""
Return the latest version installed in the cache
:return: latest version installed locally in the cache and None if there is nothing downloaded
"""
if not os.path.exists(self._cache_dir):
return None
tags = os.listdir(self._cache_dir)
# we want to sort based on numbers so:
# v3.0.0-QA6
# v3.0.0-QA15
# v3.0.0-QA2
# are sorted according to the numbers, not lexicographically
revs_to_tag = [(re.split(r"[^\d]+", tag), tag) for tag in tags]
return max(revs_to_tag)[1]
def get_latest_remote_version(self):
"""
Return the latest version on the Postgres FTP server
:return: latest version installed locally on the Postgres FTP server
"""
response = requests.get(PyEmbedPg.DOWNLOAD_BASE_URL)
last_version_match = list(re.finditer('>v(?P<version>[^<]+)<', response.content.decode()))[-1]
return last_version_match.group('version')
def check_version_present(self):
"""
Check if the version is present in the cache
:return: True if the version has already been downloaded and build, False otherwise
"""
return os.path.exists(self._version_path)
def download_and_unpack(self):
# if the version we want to download already exists, do not do anything
if self.check_version_present():
logger.debug('Version {version} already present in cache'.format(version=self.version))
return
url = PyEmbedPg.DOWNLOAD_URL.format(version=self.version)
response = requests.get(url, stream=True)
if not response.ok:
raise PyEmbedPgException('Cannot download file {url}. Error: {error}'.format(url=url, error=response.content))
with tempfile.NamedTemporaryFile() as fd:
logger.debug('Downloading {url}'.format(url=url))
for block in response.iter_content(chunk_size=4096):
fd.write(block)
fd.flush()
# Unpack the file into temporary dir
temp_dir = tempfile.mkdtemp()
source_dir = os.path.join(temp_dir, 'postgresql-{version}'.format(version=self.version))
try:
# Can't use with context directly because of python 2.6
with closing(tarfile.open(fd.name)) as tar:
tar.extractall(temp_dir)
os.system(
'sh -c "cd {path} && ./configure --prefix={target_dir} && make install && cd contrib && make install"'.format(
path=source_dir,
target_dir=self._version_path)
)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def start(self, port=5432):
"""
Start a new Postgres server on the specified port
:param port: port to connect to, can be an int or a list of ports
:return:
"""
if not self.check_version_present():
self.download_and_unpack()
bin_dir = os.path.join(self._version_path, 'bin')
ports = [port] if isinstance(port, int) else port
return DatabaseRunner(bin_dir, ports)
class DatabaseRunner(object):
ADMIN_USER = 'root'
TIMEOUT = 10
def __init__(self, bin_dir, ports):
self._ports = ports
self._postgres_cmd = os.path.join(bin_dir, 'postgres')
# init db
init_db = os.path.join(bin_dir, 'initdb')
self._temp_dir = tempfile.mkdtemp()
command = init_db + ' -D ' + self._temp_dir + ' -U ' + DatabaseRunner.ADMIN_USER
logger.debug('Running command: {command}'.format(command=command))
os.system(command)
# overwrite pg_hba.conf to only allow local access with password authentication
with open(os.path.join(self._temp_dir, 'pg_hba.conf'), 'w') as fd:
fd.write(
'# TYPE DATABASE USER ADDRESS METHOD\n'
'# "local" is for Unix domain socket connections only\n'
'local all {admin} trust\n'
'local all all md5\n'
'host all {admin} 127.0.0.1/32 trust\n'
'host all all 127.0.0.1/32 md5\n'
'# IPv6 local connections:\n'
'host all {admin} ::1/128 trust\n'
'host all all ::1/128 md5\n'.format(admin=DatabaseRunner.ADMIN_USER)
)
def can_connect(port):
# True when nothing is listening on the port yet, i.e. the port is free for this run.
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex(('127.0.0.1', port)) != 0
self.running_port = next((port for port in ports if can_connect(port)), None)
if self.running_port is None:
raise PyEmbedPgException('Cannot run postgres on any of these ports [{ports}]'.format(ports=', '.join((str(p) for p in ports))))
self.proc = Popen([self._postgres_cmd, '-D', self._temp_dir, '-p', str(self.running_port)])
logger.debug('Postgres started on port {port}...'.format(port=self.running_port))
# Loop until the server is started
logger.debug('Waiting for Postgres to start...')
start = time.time()
while time.time() - start < DatabaseRunner.TIMEOUT:
try:
with psycopg2.connect(database='postgres', user=DatabaseRunner.ADMIN_USER, host='localhost', port=self.running_port):
break
except OperationalError:
pass
time.sleep(0.1)
else:
raise PyEmbedPgException('Cannot start postgres after {timeout} seconds'.format(timeout=DatabaseRunner.TIMEOUT))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
def create_user(self, username, password):
"""Create a user
:param username:
:type username: basestring
:param password:
:type password: basestring
"""
with psycopg2.connect(database='postgres', user=DatabaseRunner.ADMIN_USER, host='localhost', port=self.running_port) as conn:
with conn.cursor() as cursor:
cursor.execute("CREATE USER {username} WITH ENCRYPTED PASSWORD '{password}'".format(username=username, password=password))
def create_database(self, name, owner=None):
"""Create a new database
:param name: database name
:type name: basestring
:param owner: username of the owner or None if unspecified
:type owner: basestring
"""
with psycopg2.connect(database='postgres', user=DatabaseRunner.ADMIN_USER, host='localhost', port=self.running_port) as conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
sql = 'CREATE DATABASE {name} ' + ('WITH OWNER {owner}' if owner else '')
cursor.execute(sql.format(name=name, owner=owner))
def shutdown(self):
"""
Shutdown postgres and remove the data directory
"""
# stop pg
try:
logger.debug('Killing postgres on port {port}'.format(port=self.running_port))
self.proc.kill()
os.waitpid(self.proc.pid, 0)
finally:
logger.debug('Removing postgres data dir on {dir}'.format(dir=self._temp_dir))
# remove data directory
shutil.rmtree(self._temp_dir, ignore_errors=True)
class PyEmbedPgException(Exception):
pass
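# Illustrative usage sketch (not part of the library). The version string,
# candidate ports, and credentials below are assumptions for the example only.
def _example_usage():  # pragma: no cover
    """Start a throwaway Postgres, create a user and a database, then clean up."""
    pg = PyEmbedPg('9.6.5')  # downloaded, built and cached under ~/.pyembedpg on first use
    with pg.start([5432, 5433, 5434]) as db:  # the first free port in the list is used
        db.create_user('scott', 'tiger')
        db.create_database('example_db', owner='scott')
        conn = psycopg2.connect(database='example_db', user='scott',
                                password='tiger', host='localhost',
                                port=db.running_port)
        conn.close()
    # Leaving the with-block calls shutdown(), which kills the server process
    # and removes the temporary data directory.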
| Simulmedia/pyembedpg | pyembedpg.py | Python | apache-2.0 | 10,244 |
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_policy import policy as oslo_policy
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import policy
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_keypair
QUOTAS = quota.QUOTAS
keypair_data = {
'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
}
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
name=name, **keypair_data)
def db_key_pair_get_all_by_user(self, user_id, limit, marker):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return fake_keypair(name=keypair['name'])
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context):
raise exception.KeyPairExists(key_name='create_duplicate')
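# The module-level helpers above are the fakes that the test cases below wire
# into nova's DB/objects layer via stub_out(); e.g. fake_keypair('FAKE') simply
# merges the shared keypair_data fields (public_key, fingerprint) into the
# canned fixture from test_keypair.fake_keypair under the requested name.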
class KeypairsTestV21(test.TestCase):
base_url = '/v2/%s' % fakes.FAKE_PROJECT_ID
validation_error = exception.ValidationError
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def _setup_app_and_controller(self):
self.app_server = fakes.wsgi_app_v21()
self.controller = keypairs_v21.KeypairController()
def setUp(self):
super(KeypairsTestV21, self).setUp()
fakes.stub_out_networking(self)
fakes.stub_out_secgroup_api(self)
self.stub_out("nova.db.api.key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stub_out("nova.db.api.key_pair_create",
db_key_pair_create)
self.stub_out("nova.db.api.key_pair_destroy",
db_key_pair_destroy)
self._setup_app_and_controller()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
def test_keypair_list(self):
res_dict = self.controller.index(self.req)
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
res_dict = self.controller.create(self.req, body=body)
self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
self.assertGreater(len(res_dict['keypair']['private_key']), 0)
self._assert_keypair_type(res_dict)
def _test_keypair_create_bad_request_case(self,
body,
exception):
self.assertRaises(exception,
self.controller.create, self.req, body=body)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_leading_trailing_spaces(self):
body = {
'keypair': {
'name': ' test '
}
}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
body = {'keypair': {'name': ' test '}}
self.req.set_legacy_v2()
res_dict = self.controller.create(self.req, body=body)
self.assertEqual('test', res_dict['keypair']['name'])
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
self._test_keypair_create_bad_request_case(body,
webob.exc.HTTPBadRequest)
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
self._test_keypair_create_bad_request_case(body,
self.validation_error)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
res_dict = self.controller.create(self.req, body=body)
# FIXME(ja): Should we check that public_key was sent to create?
self.assertGreater(len(res_dict['keypair']['fingerprint']), 0)
self.assertNotIn('private_key', res_dict['keypair'])
self._assert_keypair_type(res_dict)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_import_quota_limit(self, mock_check):
mock_check.side_effect = exception.OverQuota(overs='key_pairs',
usages={'key_pairs': 100})
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_quota_limit(self, mock_check):
mock_check.side_effect = exception.OverQuota(overs='key_pairs',
usages={'key_pairs': 100})
body = {
'keypair': {
'name': 'create_test',
},
}
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_over_quota_during_recheck(self, mock_check):
# Simulate a race where the first check passes and the recheck fails.
# First check occurs in compute/api.
exc = exception.OverQuota(overs='key_pairs', usages={'key_pairs': 100})
mock_check.side_effect = [None, exc]
body = {
'keypair': {
'name': 'create_test',
},
}
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=body)
ctxt = self.req.environ['nova.context']
self.assertEqual(2, mock_check.call_count)
call1 = mock.call(ctxt, {'key_pairs': 1}, ctxt.user_id)
call2 = mock.call(ctxt, {'key_pairs': 0}, ctxt.user_id)
mock_check.assert_has_calls([call1, call2])
# Verify we removed the key pair that was added after the first
# quota check passed.
key_pairs = objects.KeyPairList.get_by_user(ctxt, ctxt.user_id)
names = [key_pair.name for key_pair in key_pairs]
self.assertNotIn('create_test', names)
@mock.patch('nova.objects.Quotas.check_deltas')
def test_keypair_create_no_quota_recheck(self, mock_check):
# Disable recheck_quota.
self.flags(recheck_quota=False, group='quota')
body = {
'keypair': {
'name': 'create_test',
},
}
self.controller.create(self.req, body=body)
ctxt = self.req.environ['nova.context']
# check_deltas should have been called only once.
mock_check.assert_called_once_with(ctxt, {'key_pairs': 1},
ctxt.user_id)
def test_keypair_create_duplicate(self):
self.stub_out("nova.objects.KeyPair.create",
db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
ex = self.assertRaises(webob.exc.HTTPConflict,
self.controller.create, self.req, body=body)
self.assertIn("Key pair 'create_duplicate' already exists.",
ex.explanation)
@mock.patch('nova.objects.KeyPair.get_by_name')
def test_keypair_delete(self, mock_get_by_name):
mock_get_by_name.return_value = objects.KeyPair(
nova_context.get_admin_context(), **fake_keypair('FAKE'))
self.controller.delete(self.req, 'FAKE')
def test_keypair_get_keypair_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 'DOESNOTEXIST')
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stub_out("nova.db.api.key_pair_destroy",
db_key_pair_get_not_found)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, 'FAKE')
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
self.stub_out("nova.db.api.key_pair_get", _db_key_pair_get)
res_dict = self.controller.show(self.req, 'FAKE')
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
self._assert_keypair_type(res_dict)
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stub_out("nova.db.api.key_pair_get", _db_key_pair_get)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 'FAKE')
def _assert_keypair_type(self, res_dict):
self.assertNotIn('type', res_dict['keypair'])
class KeypairPolicyTestV21(test.NoDBTestCase):
KeyPairController = keypairs_v21.KeypairController()
policy_path = 'os_compute_api:os-keypairs'
def setUp(self):
super(KeypairPolicyTestV21, self).setUp()
@staticmethod
def _db_key_pair_get(context, user_id, name=None):
if name is not None:
return dict(test_keypair.fake_keypair,
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
else:
return db_key_pair_get_all_by_user(context, user_id)
self.stub_out("nova.objects.keypair.KeyPair._get_from_db",
_db_key_pair_get)
self.req = fakes.HTTPRequest.blank('')
def test_keypair_list_fail_policy(self):
rules = {self.policy_path + ':index': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.index,
self.req)
@mock.patch('nova.objects.KeyPairList.get_by_user')
def test_keypair_list_pass_policy(self, mock_get):
rules = {self.policy_path + ':index': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
res = self.KeyPairController.index(self.req)
self.assertIn('keypairs', res)
def test_keypair_show_fail_policy(self):
rules = {self.policy_path + ':show': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.show,
self.req, 'FAKE')
def test_keypair_show_pass_policy(self):
rules = {self.policy_path + ':show': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
res = self.KeyPairController.show(self.req, 'FAKE')
self.assertIn('keypair', res)
def test_keypair_create_fail_policy(self):
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.create,
self.req, body=body)
def _assert_keypair_create(self, mock_create, req):
mock_create.assert_called_with(req, 'fake_user', 'create_test', 'ssh')
@mock.patch.object(compute_api.KeypairAPI, 'create_key_pair')
def test_keypair_create_pass_policy(self, mock_create):
keypair_obj = objects.KeyPair(name='', public_key='',
fingerprint='', user_id='')
mock_create.return_value = (keypair_obj, 'dummy')
body = {'keypair': {'name': 'create_test'}}
rules = {self.policy_path + ':create': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
res = self.KeyPairController.create(self.req, body=body)
self.assertIn('keypair', res)
req = self.req.environ['nova.context']
self._assert_keypair_create(mock_create, req)
def test_keypair_delete_fail_policy(self):
rules = {self.policy_path + ':delete': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.assertRaises(exception.Forbidden,
self.KeyPairController.delete,
self.req, 'FAKE')
@mock.patch('nova.objects.KeyPair.destroy_by_name')
def test_keypair_delete_pass_policy(self, mock_destroy):
rules = {self.policy_path + ':delete': ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.KeyPairController.delete(self.req, 'FAKE')
class KeypairsTestV22(KeypairsTestV21):
wsgi_api_version = '2.2'
def test_keypair_list(self):
res_dict = self.controller.index(self.req)
expected = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE',
type='ssh')}]}
self.assertEqual(expected, res_dict)
def _assert_keypair_type(self, res_dict):
self.assertEqual('ssh', res_dict['keypair']['type'])
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
pass
def test_create_server_keypair_name_with_leading_trailing_compat_mode(
self):
pass
class KeypairsTestV210(KeypairsTestV22):
wsgi_api_version = '2.10'
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
pass
def test_create_server_keypair_name_with_leading_trailing_compat_mode(
self):
pass
def test_keypair_list_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pairs') as mock_g:
self.controller.index(req)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_list_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'get_key_pairs'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_keypair_show_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pair') as mock_g:
self.controller.show(req, 'FAKE')
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_show_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'get_key_pair'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req, 'FAKE')
def test_keypair_delete_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version,
use_admin_context=True)
with mock.patch.object(self.controller.api,
'delete_key_pair') as mock_g:
self.controller.delete(req, 'FAKE')
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('foo', userid)
def test_keypair_delete_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs/FAKE?user_id=foo',
version=self.wsgi_api_version)
with mock.patch.object(self.controller.api, 'delete_key_pair'):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete, req, 'FAKE')
def test_keypair_create_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version,
use_admin_context=True)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a'}}
with mock.patch.object(self.controller.api,
'create_key_pair',
return_value=(mock.MagicMock(), 1)) as mock_g:
res = self.controller.create(req, body=body)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('8861f37f-034e-4ca8-8abe-6d13c074574a', userid)
self.assertIn('keypair', res)
def test_keypair_import_other_user(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version,
use_admin_context=True)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a',
'public_key': 'public_key'}}
with mock.patch.object(self.controller.api,
'import_key_pair') as mock_g:
res = self.controller.create(req, body=body)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('8861f37f-034e-4ca8-8abe-6d13c074574a', userid)
self.assertIn('keypair', res)
def test_keypair_create_other_user_not_admin(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs',
version=self.wsgi_api_version)
body = {'keypair': {'name': 'create_test',
'user_id': '8861f37f-034e-4ca8-8abe-6d13c074574a'}}
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create,
req, body=body)
def test_keypair_list_other_user_invalid_in_old_microversion(self):
req = fakes.HTTPRequest.blank(self.base_url +
'/os-keypairs?user_id=foo',
version="2.9",
use_admin_context=True)
with mock.patch.object(self.controller.api, 'get_key_pairs') as mock_g:
self.controller.index(req)
userid = mock_g.call_args_list[0][0][1]
self.assertEqual('fake_user', userid)
class KeypairsTestV235(test.TestCase):
base_url = '/v2/%s' % fakes.FAKE_PROJECT_ID
wsgi_api_version = '2.35'
def _setup_app_and_controller(self):
self.app_server = fakes.wsgi_app_v21()
self.controller = keypairs_v21.KeypairController()
def setUp(self):
super(KeypairsTestV235, self).setUp()
self._setup_app_and_controller()
@mock.patch("nova.db.api.key_pair_get_all_by_user")
def test_keypair_list_limit_and_marker(self, mock_kp_get):
mock_kp_get.side_effect = db_key_pair_get_all_by_user
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=3&marker=fake_marker',
version=self.wsgi_api_version, use_admin_context=True)
res_dict = self.controller.index(req)
mock_kp_get.assert_called_once_with(
req.environ['nova.context'], 'fake_user',
limit=3, marker='fake_marker')
response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE',
type='ssh')}]}
self.assertEqual(res_dict, response)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_keypair_list_limit_and_marker_invalid_marker(self, mock_kp_get):
mock_kp_get.side_effect = exception.MarkerNotFound(marker='unknown_kp')
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=3&marker=unknown_kp',
version=self.wsgi_api_version, use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
def test_keypair_list_limit_and_marker_invalid_limit(self):
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=abc&marker=fake_marker',
version=self.wsgi_api_version, use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
@mock.patch("nova.db.api.key_pair_get_all_by_user")
def test_keypair_list_limit_and_marker_invalid_in_old_microversion(
self, mock_kp_get):
mock_kp_get.side_effect = db_key_pair_get_all_by_user
req = fakes.HTTPRequest.blank(
self.base_url + '/os-keypairs?limit=3&marker=fake_marker',
version="2.30", use_admin_context=True)
self.controller.index(req)
mock_kp_get.assert_called_once_with(
req.environ['nova.context'], 'fake_user',
limit=None, marker=None)
class KeypairsTestV275(test.TestCase):
def setUp(self):
super(KeypairsTestV275, self).setUp()
self.controller = keypairs_v21.KeypairController()
@mock.patch("nova.db.api.key_pair_get_all_by_user")
@mock.patch('nova.objects.KeyPair.get_by_name')
def test_keypair_list_additional_param_old_version(self, mock_get_by_name,
mock_kp_get):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.74', use_admin_context=True)
self.controller.index(req)
self.controller.show(req, 1)
with mock.patch.object(self.controller.api,
'delete_key_pair'):
self.controller.delete(req, 1)
def test_keypair_list_additional_param(self):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
def test_keypair_show_additional_param(self):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.show,
req, 1)
def test_keypair_delete_additional_param(self):
req = fakes.HTTPRequest.blank(
'/os-keypairs?unknown=3',
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1)
| rahulunair/nova | nova/tests/unit/api/openstack/compute/test_keypairs.py | Python | apache-2.0 | 27,057 |
from __future__ import unicode_literals
import datetime
from boto.ec2.elb.attributes import (
LbAttributes,
ConnectionSettingAttribute,
ConnectionDrainingAttribute,
AccessLogAttribute,
CrossZoneLoadBalancingAttribute,
)
from boto.ec2.elb.policies import (
Policies,
OtherPolicy,
)
from moto.core import BaseBackend, BaseModel
from moto.ec2.models import ec2_backends
from .exceptions import (
LoadBalancerNotFoundError,
TooManyTagsError,
BadHealthCheckDefinition,
DuplicateLoadBalancerName,
)
class FakeHealthCheck(BaseModel):
def __init__(self, timeout, healthy_threshold, unhealthy_threshold,
interval, target):
self.timeout = timeout
self.healthy_threshold = healthy_threshold
self.unhealthy_threshold = unhealthy_threshold
self.interval = interval
self.target = target
if not target.startswith(('HTTP', 'TCP', 'HTTPS', 'SSL')):
raise BadHealthCheckDefinition
class FakeListener(BaseModel):
def __init__(self, load_balancer_port, instance_port, protocol, ssl_certificate_id):
self.load_balancer_port = load_balancer_port
self.instance_port = instance_port
self.protocol = protocol.upper()
self.ssl_certificate_id = ssl_certificate_id
self.policy_names = []
def __repr__(self):
return "FakeListener(lbp: %s, inp: %s, pro: %s, cid: %s, policies: %s)" % (self.load_balancer_port, self.instance_port, self.protocol, self.ssl_certificate_id, self.policy_names)
class FakeBackend(BaseModel):
def __init__(self, instance_port):
self.instance_port = instance_port
self.policy_names = []
def __repr__(self):
return "FakeBackend(inp: %s, policies: %s)" % (self.instance_port, self.policy_names)
class FakeLoadBalancer(BaseModel):
def __init__(self, name, zones, ports, scheme='internet-facing', vpc_id=None, subnets=None):
self.name = name
self.health_check = None
self.instance_ids = []
self.zones = zones
self.listeners = []
self.backends = []
self.created_time = datetime.datetime.now()
self.scheme = scheme
self.attributes = FakeLoadBalancer.get_default_attributes()
self.policies = Policies()
self.policies.other_policies = []
self.policies.app_cookie_stickiness_policies = []
self.policies.lb_cookie_stickiness_policies = []
self.subnets = subnets or []
self.vpc_id = vpc_id or 'vpc-56e10e3d'
self.tags = {}
self.dns_name = "%s.us-east-1.elb.amazonaws.com" % (name)
for port in ports:
listener = FakeListener(
protocol=(port.get('protocol') or port['Protocol']),
load_balancer_port=(
port.get('load_balancer_port') or port['LoadBalancerPort']),
instance_port=(
port.get('instance_port') or port['InstancePort']),
ssl_certificate_id=port.get(
'ssl_certificate_id', port.get('SSLCertificateId')),
)
self.listeners.append(listener)
# it is unclear per the AWS documentation as to when or how backend
# information gets set, so let's guess and set it here *shrug*
backend = FakeBackend(
instance_port=(
port.get('instance_port') or port['InstancePort']),
)
self.backends.append(backend)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
elb_backend = elb_backends[region_name]
new_elb = elb_backend.create_load_balancer(
name=properties.get('LoadBalancerName', resource_name),
zones=properties.get('AvailabilityZones', []),
ports=properties['Listeners'],
scheme=properties.get('Scheme', 'internet-facing'),
)
instance_ids = properties.get('Instances', [])
for instance_id in instance_ids:
elb_backend.register_instances(new_elb.name, [instance_id])
policies = properties.get('Policies', [])
port_policies = {}
for policy in policies:
policy_name = policy["PolicyName"]
other_policy = OtherPolicy()
other_policy.policy_name = policy_name
elb_backend.create_lb_other_policy(new_elb.name, other_policy)
for port in policy.get("InstancePorts", []):
policies_for_port = port_policies.get(port, set())
policies_for_port.add(policy_name)
port_policies[port] = policies_for_port
for port, policies in port_policies.items():
elb_backend.set_load_balancer_policies_of_backend_server(
new_elb.name, port, list(policies))
health_check = properties.get('HealthCheck')
if health_check:
elb_backend.configure_health_check(
load_balancer_name=new_elb.name,
timeout=health_check['Timeout'],
healthy_threshold=health_check['HealthyThreshold'],
unhealthy_threshold=health_check['UnhealthyThreshold'],
interval=health_check['Interval'],
target=health_check['Target'],
)
return new_elb
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name)
return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
elb_backend = elb_backends[region_name]
try:
elb_backend.delete_load_balancer(resource_name)
except KeyError:
pass
@property
def physical_resource_id(self):
return self.name
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'CanonicalHostedZoneName':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneName" ]"')
elif attribute_name == 'CanonicalHostedZoneNameID':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "CanonicalHostedZoneNameID" ]"')
elif attribute_name == 'DNSName':
return self.dns_name
elif attribute_name == 'SourceSecurityGroup.GroupName':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.GroupName" ]"')
elif attribute_name == 'SourceSecurityGroup.OwnerAlias':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SourceSecurityGroup.OwnerAlias" ]"')
raise UnformattedGetAttTemplateException()
@classmethod
def get_default_attributes(cls):
attributes = LbAttributes()
cross_zone_load_balancing = CrossZoneLoadBalancingAttribute()
cross_zone_load_balancing.enabled = False
attributes.cross_zone_load_balancing = cross_zone_load_balancing
connection_draining = ConnectionDrainingAttribute()
connection_draining.enabled = False
attributes.connection_draining = connection_draining
access_log = AccessLogAttribute()
access_log.enabled = False
attributes.access_log = access_log
connection_settings = ConnectionSettingAttribute()
connection_settings.idle_timeout = 60
attributes.connecting_settings = connection_settings
return attributes
def add_tag(self, key, value):
if len(self.tags) >= 10 and key not in self.tags:
raise TooManyTagsError()
self.tags[key] = value
def list_tags(self):
return self.tags
def remove_tag(self, key):
if key in self.tags:
del self.tags[key]
def delete(self, region):
''' Not exposed as part of the ELB API - used for CloudFormation. '''
elb_backends[region].delete_load_balancer(self.name)
class ELBBackend(BaseBackend):
def __init__(self, region_name=None):
self.region_name = region_name
self.load_balancers = {}
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_load_balancer(self, name, zones, ports, scheme='internet-facing', subnets=None):
vpc_id = None
ec2_backend = ec2_backends[self.region_name]
if subnets:
subnet = ec2_backend.get_subnet(subnets[0])
vpc_id = subnet.vpc_id
if name in self.load_balancers:
raise DuplicateLoadBalancerName(name)
new_load_balancer = FakeLoadBalancer(
name=name, zones=zones, ports=ports, scheme=scheme, subnets=subnets, vpc_id=vpc_id)
self.load_balancers[name] = new_load_balancer
return new_load_balancer
def create_load_balancer_listeners(self, name, ports):
balancer = self.load_balancers.get(name, None)
if balancer:
for port in ports:
protocol = port['protocol']
instance_port = port['instance_port']
lb_port = port['load_balancer_port']
ssl_certificate_id = port.get('sslcertificate_id')
for listener in balancer.listeners:
if lb_port == listener.load_balancer_port:
break
else:
balancer.listeners.append(FakeListener(
lb_port, instance_port, protocol, ssl_certificate_id))
return balancer
def describe_load_balancers(self, names):
balancers = self.load_balancers.values()
if names:
matched_balancers = [
balancer for balancer in balancers if balancer.name in names]
if len(names) != len(matched_balancers):
missing_elb = list(set(names) - set(matched_balancers))[0]
raise LoadBalancerNotFoundError(missing_elb)
return matched_balancers
else:
return balancers
def delete_load_balancer_listeners(self, name, ports):
balancer = self.load_balancers.get(name, None)
listeners = []
if balancer:
for lb_port in ports:
for listener in balancer.listeners:
if int(lb_port) == int(listener.load_balancer_port):
continue
else:
listeners.append(listener)
balancer.listeners = listeners
return balancer
def delete_load_balancer(self, load_balancer_name):
self.load_balancers.pop(load_balancer_name, None)
def get_load_balancer(self, load_balancer_name):
return self.load_balancers.get(load_balancer_name)
def configure_health_check(self, load_balancer_name, timeout,
healthy_threshold, unhealthy_threshold, interval,
target):
check = FakeHealthCheck(timeout, healthy_threshold, unhealthy_threshold,
interval, target)
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.health_check = check
return check
def set_load_balancer_listener_sslcertificate(self, name, lb_port, ssl_certificate_id):
balancer = self.load_balancers.get(name, None)
if balancer:
for idx, listener in enumerate(balancer.listeners):
if lb_port == listener.load_balancer_port:
balancer.listeners[
idx].ssl_certificate_id = ssl_certificate_id
return balancer
def register_instances(self, load_balancer_name, instance_ids):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.instance_ids.extend(instance_ids)
return load_balancer
def deregister_instances(self, load_balancer_name, instance_ids):
load_balancer = self.get_load_balancer(load_balancer_name)
new_instance_ids = [
instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids]
load_balancer.instance_ids = new_instance_ids
return load_balancer
def set_cross_zone_load_balancing_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.cross_zone_load_balancing = attribute
return load_balancer
def set_access_log_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.access_log = attribute
return load_balancer
def set_connection_draining_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.connection_draining = attribute
return load_balancer
def set_connection_settings_attribute(self, load_balancer_name, attribute):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.attributes.connecting_settings = attribute
return load_balancer
def create_lb_other_policy(self, load_balancer_name, other_policy):
load_balancer = self.get_load_balancer(load_balancer_name)
if other_policy.policy_name not in [p.policy_name for p in load_balancer.policies.other_policies]:
load_balancer.policies.other_policies.append(other_policy)
return load_balancer
def create_app_cookie_stickiness_policy(self, load_balancer_name, policy):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.policies.app_cookie_stickiness_policies.append(policy)
return load_balancer
def create_lb_cookie_stickiness_policy(self, load_balancer_name, policy):
load_balancer = self.get_load_balancer(load_balancer_name)
load_balancer.policies.lb_cookie_stickiness_policies.append(policy)
return load_balancer
def set_load_balancer_policies_of_backend_server(self, load_balancer_name, instance_port, policies):
load_balancer = self.get_load_balancer(load_balancer_name)
backend = [b for b in load_balancer.backends if int(
b.instance_port) == instance_port][0]
backend_idx = load_balancer.backends.index(backend)
backend.policy_names = policies
load_balancer.backends[backend_idx] = backend
return load_balancer
def set_load_balancer_policies_of_listener(self, load_balancer_name, load_balancer_port, policies):
load_balancer = self.get_load_balancer(load_balancer_name)
listener = [l for l in load_balancer.listeners if int(
l.load_balancer_port) == load_balancer_port][0]
listener_idx = load_balancer.listeners.index(listener)
listener.policy_names = policies
load_balancer.listeners[listener_idx] = listener
return load_balancer
elb_backends = {}
for region in ec2_backends.keys():
elb_backends[region] = ELBBackend(region)
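# Illustrative sketch of driving the fake backend directly (the region, names,
# ports and instance id below are example values only).
def _example_elb_usage():  # pragma: no cover
    """Create a fake load balancer, attach a health check and register an instance."""
    backend = elb_backends['us-east-1']
    lb = backend.create_load_balancer(
        name='my-lb',
        zones=['us-east-1a'],
        ports=[{'protocol': 'http', 'load_balancer_port': 80, 'instance_port': 8080}],
    )
    backend.configure_health_check(
        load_balancer_name='my-lb', timeout=5, healthy_threshold=2,
        unhealthy_threshold=3, interval=30, target='HTTP:8080/health')
    backend.register_instances('my-lb', ['i-12345678'])
    return lb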
| heddle317/moto | moto/elb/models.py | Python | apache-2.0 | 15,471 |
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Contains functions for constructing binary or multiclass rate expressions.
There are a number of rates (e.g. error_rate()) that can be defined for either
binary classification or multiclass contexts. The former rates are implemented
in binary_rates.py, and the latter in multiclass_rates.py. In this file, the
given functions choose which rate to create based on the type of the context:
for multiclass contexts, they'll call the corresponding implementation in
multiclass_rates.py, otherwise, they'll call binary_rates.py.
Many of the functions in this file take the optional "positive_class" parameter,
which tells us which classes should be considered "positive" (e.g. for the
positive prediction rate). This parameter *must* be provided for multiclass
contexts, and must *not* be provided for non-multiclass contexts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_constrained_optimization.python.rates import basic_expression
from tensorflow_constrained_optimization.python.rates import binary_rates
from tensorflow_constrained_optimization.python.rates import defaults
from tensorflow_constrained_optimization.python.rates import deferred_tensor
from tensorflow_constrained_optimization.python.rates import expression
from tensorflow_constrained_optimization.python.rates import multiclass_rates
from tensorflow_constrained_optimization.python.rates import subsettable_context
from tensorflow_constrained_optimization.python.rates import term
def _is_multiclass(context):
"""Returns True iff we're given a multiclass context."""
if not isinstance(context, subsettable_context.SubsettableContext):
raise TypeError("context must be a SubsettableContext object")
raw_context = context.raw_context
return raw_context.num_classes is not None
def _ratio_bound(numerator_expression, denominator_expression, lower_bound,
upper_bound):
"""Creates an `Expression` for a bound on a ratio.
The result of this function is an `Expression` representing:
numerator / denominator_bound
where denominator_bound is a newly-created slack variable projected to satisfy
the following (in an update op):
denominator_lower_bound <= denominator_bound <= 1
Additionally, the following constraint will be added if lower_bound is True:
denominator_bound >= denominator_expression
and/or the following if upper_bound is true:
denominator_bound <= denominator_expression
These constraints are placed in the "extra_constraints" field of the resulting
`Expression`.
If you're going to be lower-bounding or maximizing the result of this
  function, then you need to set the lower_bound parameter to `True`. Likewise, if
you're going to be upper-bounding or minimizing the result of this function,
then the upper_bound parameter must be `True`. At least one of these
parameters *must* be `True`, and it's permitted for both of them to be `True`
(but we recommend against this, since it would result in equality constraints,
which might cause problems during optimization and/or post-processing).
Args:
numerator_expression: `Expression`, the numerator of the ratio.
denominator_expression: `Expression`, the denominator of the ratio. The
value of this expression must be between zero and one.
lower_bound: bool, `True` if you want the result of this function to
lower-bound the ratio.
upper_bound: bool, `True` if you want the result of this function to
upper-bound the ratio.
Returns:
An `Expression` representing the ratio.
Raises:
TypeError: if either numerator_expression or denominator_expression is not
an `Expression`.
ValueError: if both lower_bound and upper_bound are `False`.
"""
if not (isinstance(numerator_expression, expression.Expression) and
isinstance(denominator_expression, expression.Expression)):
raise TypeError(
"both numerator_expression and denominator_expression must be "
"Expressions (perhaps you need to call wrap_rate() to create an "
"Expression from a Tensor?)")
# One could set both lower_bound and upper_bound to True, in which case the
# result of this function could be treated as the ratio itself (instead of a
# {lower,upper} bound of it). However, this would come with some drawbacks: it
# would of course make optimization more difficult, but more importantly, it
# would potentially cause post-processing for feasibility (e.g. using
# "shrinking") to fail to find a feasible solution.
if not (lower_bound or upper_bound):
raise ValueError("at least one of lower_bound or upper_bound must be True")
# We use an "update_ops_fn" instead of a "constraint" (which we would usually
# prefer) to perform the projection because we want to grab the denominator
# lower bound out of the structure_memoizer.
def update_ops_fn(denominator_bound_variable, structure_memoizer,
value_memoizer):
"""Projects denominator_bound onto the feasible region."""
del value_memoizer
denominator_bound = tf.maximum(
structure_memoizer[defaults.DENOMINATOR_LOWER_BOUND_KEY],
tf.minimum(1.0, denominator_bound_variable))
return [denominator_bound_variable.assign(denominator_bound)]
# Ideally the slack variable would have the same dtype as the predictions, but
# we might not know their dtype (e.g. in eager mode), so instead we always use
# float32 with auto_cast=True.
denominator_bound = deferred_tensor.DeferredVariable(
1.0,
trainable=True,
name="tfco_denominator_bound",
dtype=tf.float32,
update_ops_fn=update_ops_fn,
auto_cast=True)
denominator_bound_basic_expression = basic_expression.BasicExpression(
[term.TensorTerm(denominator_bound)])
denominator_bound_expression = expression.ExplicitExpression(
penalty_expression=denominator_bound_basic_expression,
constraint_expression=denominator_bound_basic_expression)
extra_constraints = []
if lower_bound:
extra_constraints.append(
denominator_expression <= denominator_bound_expression)
if upper_bound:
extra_constraints.append(
denominator_bound_expression <= denominator_expression)
return expression.ConstrainedExpression(
expression=numerator_expression._positive_scalar_div(denominator_bound), # pylint: disable=protected-access
extra_constraints=extra_constraints)
def _ratio(numerator_expression, denominator_expression):
"""Creates an `Expression` for a ratio.
The result of this function is an `Expression` representing:
numerator / denominator_bound
where denominator_bound satisfies the following:
denominator_lower_bound <= denominator_bound <= 1
The resulting `Expression` will include both the implicit denominator_bound
slack variable, and implicit constraints.
Args:
numerator_expression: `Expression`, the numerator of the ratio.
denominator_expression: `Expression`, the denominator of the ratio.
Returns:
An `Expression` representing the ratio.
Raises:
TypeError: if either numerator_expression or denominator_expression is not
an `Expression`.
"""
return expression.BoundedExpression(
lower_bound=_ratio_bound(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression,
lower_bound=True,
upper_bound=False),
upper_bound=_ratio_bound(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression,
lower_bound=False,
upper_bound=True))
def positive_prediction_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a positive prediction rate.
A positive prediction rate is the number of examples within the given context
on which the model makes a positive prediction, divided by the number of
examples within the context. For multiclass problems, the positive_class
argument, which tells us which class (or classes) should be treated as
positive, must also be provided.
Please see the docstrings of positive_prediction_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the positive prediction rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if positive_class is provided for a non-multiclass context, or
is *not* provided for a multiclass context. In the latter case, an error
will also be raised if positive_class is an integer outside the range
[0,num_classes), or is a collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.positive_prediction_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"positive_prediction_rate unless it's also given a "
"multiclass context")
return binary_rates.positive_prediction_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
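# Hedged usage sketch of the binary vs. multiclass calling convention. The
# tfco.rate_context / tfco.multiclass_rate_context names below are the
# package-level context constructors; treat the exact names as assumptions if
# you are reading this module in isolation:
#
#   import tensorflow_constrained_optimization as tfco
#   # Binary problem: no positive_class argument.
#   context = tfco.rate_context(predictions=logits, labels=labels)
#   ppr = positive_prediction_rate(context)
#   # Multiclass problem: positive_class is required.
#   mc_context = tfco.multiclass_rate_context(
#       num_classes=3, predictions=class_logits, labels=class_labels)
#   ppr_class0 = positive_prediction_rate(mc_context, positive_class=0)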
def negative_prediction_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a negative prediction rate.
A negative prediction rate is the number of examples within the given context
on which the model makes a negative prediction, divided by the number of
examples within the context. For multiclass problems, the positive_class
argument, which tells us which class (or classes) should be treated as
positive, must also be provided.
Please see the docstrings of negative_prediction_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the negative prediction rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if positive_class is provided for a non-multiclass context, or
is *not* provided for a multiclass context. In the latter case, an error
will also be raised if positive_class is an integer outside the range
[0,num_classes), or is a collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.negative_prediction_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"negative_prediction_rate unless it's also given a "
"multiclass context")
return binary_rates.negative_prediction_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def error_rate(context,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for an error rate.
An error rate is the number of examples within the given context on which the
model makes an incorrect prediction, divided by the number of examples within
the context.
Please see the docstrings of error_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the error rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass).
ValueError: if the context doesn't contain labels.
"""
if _is_multiclass(context):
return multiclass_rates.error_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return binary_rates.error_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
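# Hedged end-to-end sketch of how error_rate() is typically consumed. The
# RateMinimizationProblem name is the package-level export; treat it as an
# assumption if you are reading this module on its own:
#
#   import tensorflow_constrained_optimization as tfco
#   context = tfco.rate_context(predictions=model_outputs, labels=labels)
#   objective = error_rate(context)
#   constraints = [true_positive_rate(context) >= 0.9]
#   problem = tfco.RateMinimizationProblem(objective, constraints)
#   # `problem` can then be minimized with one of the package's constrained
#   # optimizers (e.g. a Lagrangian-style optimizer) alongside the model.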
def accuracy_rate(context,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for an accuracy rate.
An accuracy rate is the number of examples within the given context on which
the model makes a correct prediction, divided by the number of examples within
the context.
Please see the docstrings of accuracy_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the accuracy rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass).
ValueError: if the context doesn't contain labels.
"""
if _is_multiclass(context):
return multiclass_rates.accuracy_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return binary_rates.accuracy_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def true_positive_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true positive rate.
A true positive rate is the number of positively-labeled examples within the
given context on which the model makes a positive prediction, divided by the
number of positively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of true_positive_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true positive rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_positive_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"true_positive_rate unless it's also given a multiclass "
"context")
return binary_rates.true_positive_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_negative_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false negative rate.
A false negative rate is the number of positively-labeled examples within the
given context on which the model makes a negative prediction, divided by the
number of positively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_negative_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false negative rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_negative_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"false_negative_rate unless it's also given a multiclass "
"context")
return binary_rates.false_negative_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_positive_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false positive rate.
A false positive rate is the number of negatively-labeled examples within the
given context on which the model makes a positive prediction, divided by the
number of negatively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_positive_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false positive rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_positive_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"false_positive_rate unless it's also given a multiclass "
"context")
return binary_rates.false_positive_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def true_negative_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true negative rate.
A true negative rate is the number of negatively-labeled examples within the
given context on which the model makes a negative prediction, divided by the
number of negatively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of true_negative_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true negative rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_negative_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"true_negative_rate unless it's also given a multiclass "
"context")
return binary_rates.true_negative_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
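# Quick reference for how the four label-conditioned rates above relate
# (standard confusion-matrix identities; for the surrogate "penalty" versions
# they hold only approximately):
#   true_positive_rate + false_negative_rate == 1   (positively-labeled examples)
#   true_negative_rate + false_positive_rate == 1   (negatively-labeled examples)
# so, for example, constraining false_negative_rate(context) <= 0.1 expresses
# the same requirement as true_positive_rate(context) >= 0.9.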
def true_positive_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true positive proportion.
A true positive proportion is the number of positively-labeled examples within
the given context on which the model makes a positive prediction, divided by
the total number of examples within the context. For multiclass problems, the
positive_class argument, which tells us which class (or classes) should be
treated as positive, must also be provided.
Please see the docstrings of true_positive_proportion() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true positive proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_positive_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"true_positive_proportion unless it's also given a multiclass "
"context")
return binary_rates.true_positive_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_negative_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false negative proportion.
A false negative proportion is the number of positively-labeled examples
within the given context on which the model makes a negative prediction,
divided by the total number of examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_negative_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false negative proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_negative_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"false_negative_proportion unless it's also given a multiclass "
"context")
return binary_rates.false_negative_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def false_positive_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false positive proportion.
A false positive proportion is the number of negatively-labeled examples
within the given context on which the model makes a positive prediction,
divided by the total number of examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_positive_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false positive proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_positive_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"false_positive_proportion unless it's also given a multiclass "
"context")
return binary_rates.false_positive_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
def true_negative_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true negative proportion.
A true negative proportion is the number of negatively-labeled examples within
the given context on which the model makes a negative prediction, divided by
the total number of examples within the context. For multiclass problems, the
positive_class argument, which tells us which class (or classes) should be
treated as positive, must also be provided.
Please see the docstrings of true_negative_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true negative proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_negative_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"true_negative_proportion unless it's also given a multiclass "
"context")
return binary_rates.true_negative_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
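# Quick reference for the "proportion" rates above: unlike the corresponding
# *_rate functions, they all divide by the total number of examples in the
# context, so (exactly for the true rates, approximately for the surrogates)
#   true_positive_proportion + false_negative_proportion
#       + false_positive_proportion + true_negative_proportion == 1
# and e.g. true_positive_proportion equals true_positive_rate times the
# fraction of positively-labeled examples in the context.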
def precision_ratio(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates two `Expression`s representing precision as a ratio.
A precision is the number of positively-labeled examples within the given
context on which the model makes a positive prediction, divided by the number
of examples within the context on which the model makes a positive prediction.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Please see the docstrings of precision_ratio() in binary_rates.py and
multiclass_rates.py for further details.
The reason for decomposing a precision as a separate numerator and denominator
is to make it easy to set up constraints of the form (for example):
> precision := numerator / denominator >= 0.9
for which you can multiply through by the denominator to yield the equivalent
constraint:
> numerator >= 0.9 * denominator
This latter form is something that we can straightforwardly handle.
Args:
context: multiclass `SubsettableContext`, the block of data to use when
calculating the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An (`Expression`, `Expression`) pair representing the numerator and
denominator of a precision, respectively.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.precision_ratio(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"precision_ratio unless it's also given a multiclass "
"context")
return binary_rates.precision_ratio(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
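# Hedged sketch of the constraint pattern the ratio form is designed for
# (context construction omitted; tfco.RateMinimizationProblem is the
# package-level helper and is an assumption here):
#
#   num, den = precision_ratio(context)
#   # "precision >= 0.9", multiplied through by the denominator:
#   constraints = [num >= 0.9 * den]
#   problem = tfco.RateMinimizationProblem(error_rate(context), constraints)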
def precision(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression`s for precision.
A precision is the number of positively-labeled examples within the given
context on which the model makes a positive prediction, divided by the number
of examples within the context on which the model makes a positive prediction.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Args:
context: multiclass `SubsettableContext`, the block of data to use when
calculating the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the precision.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
numerator_expression, denominator_expression = precision_ratio(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return _ratio(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression)
def f_score_ratio(context,
beta=1.0,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates two `Expression`s representing F-score as a ratio.
  An F score [Wikipedia](https://en.wikipedia.org/wiki/F1_score) is a harmonic
mean of recall and precision, where the parameter beta weights the importance
of the precision component. If beta=1, the result is the usual harmonic mean
(the F1 score) of these two quantities. If beta=0, the result is the
precision, and as beta goes to infinity, the result converges to the recall.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Please see the docstrings of f_score_ratio() in binary_rates.py and
multiclass_rates.py for further details.
The reason for decomposing an F-score as a separate numerator and denominator
is to make it easy to set up constraints of the form (for example):
> f_score := numerator / denominator >= 0.9
for which you can multiply through by the denominator to yield the equivalent
constraint:
> numerator >= 0.9 * denominator
This latter form is something that we can straightforwardly handle.
Args:
context: multiclass `SubsettableContext`, the block of data to use when
calculating the rate.
beta: non-negative float, the beta parameter to the F-score. If beta=0, then
the result is precision, and if beta=1 (the default), then the result is
the F1-score.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An (`Expression`, `Expression`) pair representing the numerator and
denominator of an F-score, respectively.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.f_score_ratio(
context=context,
positive_class=positive_class,
beta=beta,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"f_score_ratio unless it's also given a multiclass "
"context")
return binary_rates.f_score_ratio(
context=context,
beta=beta,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
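# For reference, the quantity whose numerator and denominator are returned is
# the usual F_beta score,
#   F_beta = (1 + beta**2) * precision * recall / (beta**2 * precision + recall),
# so beta=0 recovers precision and beta -> infinity recovers recall, matching
# the docstring above.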
def f_score(context,
beta=1.0,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for F-score.
  An F score [Wikipedia](https://en.wikipedia.org/wiki/F1_score) is a harmonic
mean of recall and precision, where the parameter beta weights the importance
of the precision component. If beta=1, the result is the usual harmonic mean
(the F1 score) of these two quantities. If beta=0, the result is the
precision, and as beta goes to infinity, the result converges to the recall.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Args:
context: multiclass `SubsettableContext`, the block of data to use when
calculating the rate.
beta: non-negative float, the beta parameter to the F-score. If beta=0, then
the result is precision, and if beta=1 (the default), then the result is
the F1-score.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the F-score.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
numerator_expression, denominator_expression = f_score_ratio(
context=context,
beta=beta,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return _ratio(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression)
| google-research/tensorflow_constrained_optimization | tensorflow_constrained_optimization/python/rates/general_rates.py | Python | apache-2.0 | 52,315 |
# Copyright 2015 Philipp Pahl, Sven Schubert, Daniel Britzger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model definition for the master data set.
The entities of the master data set consist of 'edges' and 'properties' and conform to the
fact based model. <Reference>
Every entity has a unique data_unit_index. It is horizontally partitioned wrt. the
partition key and a granularity.
An edge or property must define a unique data_unit_index, the partition key and granularity.
The schema contains the edges and properties of an entity and the entities which are related by the edge."""
import peachbox.model
class UserReviewEdge(peachbox.model.MasterDataSet,TaskImportModel):
"""A particular realization of an 'edge'. Here: the user review edge """
data_unit_index = 0
partition_key = 'true_as_of_seconds'
partition_granularity = 60*60*24*360
schema = [{'field':'user_id', 'type':'StringType'},
{'field':'review_id', 'type':'StringType'}]
def lhs_node(self, row):
pass
def calc_value(self,field,row):
field = 'review_id'
val = 4*3*row.review_id
self.set_value(field,val)
    # Note: "import" is a reserved word in Python and cannot be used as a
    # method name, so the row-import hook is named import_row() instead.
    def import_row(self, row):
self.lhs_node(row.user_id)
self.rhs_node(row.review_id)
self.partition_key(row.time)
class ProductReviewEdge(peachbox.model.MasterDataSet):
"""A particular realization of an 'edge'. Here: the product review edge """
data_unit_index = 1
partition_key = 'true_as_of_seconds'
partition_granularity = 60*60*24*360
schema = [{'field':'review_id', 'type':'StringType'},
{'field':'product_id', 'type':'StringType'}]
class ReviewProperties(peachbox.model.MasterDataSet):
"""A particular realization of a node, containing several properties. Here: the review properties """
data_unit_index = 2
partition_key = 'true_as_of_seconds'
partition_granularity = 60*60*24*360
    time_fill_method = fill_name('time')
    def helpful(self, row, field=''):
        # "helpfulness" arrives as e.g. "3/5": helpful votes / total votes.
        return int(row['helpfulness'].split('/')[0])
    def fill_review_id(self, row, field):
        user_id = row['user_id']
        product_id = row['product_id']
        true_as_of_seconds = row['time']
        return unicode(hash(user_id+product_id+str(true_as_of_seconds)))
    def fill_nothelpful(self, row, field):
        # not-helpful votes = total votes - helpful votes
        return int(row['helpfulness'].split('/')[1]) - self.helpful(row, 'helpful')
    # 'model' references the fill methods defined above.
    model = [{'field':'review_id', 'type':'StringType', 'fill_method': fill_review_id},
             {'field':'helpful', 'type':'IntegerType', 'fill_method': helpful},
             {'field':'nothelpful', 'type':'IntegerType', 'fill_method': fill_nothelpful},
             {'field':'score', 'type':'IntegerType'},
             {'field':'summary', 'type':'StringType'},
             {'field':'text', 'type':'StringType'}]
    source_fields = [{'field': 'review_id', 'type': 'StringType', 'validation': 'notempty'},
                     {'field': 'text', 'validation': 'notempty'}]
    def __init__(self):
        self.build_model()
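# Illustration of the helpfulness parsing in ReviewProperties (the field values
# below are made up):
#   row = {'helpfulness': u'3/5', ...}
#   helpful(row)                       -> 3   # helpful votes
#   fill_nothelpful(row, 'nothelpful') -> 2   # 5 total votes minus 3 helpful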
class UserProperties(peachbox.model.MasterDataSet):
"""A particular realization of properties. Here: the user properties """
data_unit_index = 3
partition_key = 'true_as_seconds'
partition_granularity = 60*60*24*360
schema = [{'field':'user_id', 'type':'StringType'},
{'field':'profile_name', 'type':'StringType'}]
| PeachstoneIO/peachbox | tutorials/tutorial_movie_reviews/model/master.py | Python | apache-2.0 | 3,918 |
# Copyright (c) 2013,2014 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import functools
import inspect
import re
from six.moves.urllib import parse as urlparse
import keystoneauth1.identity.v2 as v2
import keystoneauth1.identity.v3 as v3
import keystoneauth1.session as kssession
from congress.datasources import constants
def get_openstack_required_config():
return {'auth_url': constants.REQUIRED,
'endpoint': constants.OPTIONAL,
'region': constants.OPTIONAL,
'username': constants.REQUIRED,
'password': constants.REQUIRED,
'tenant_name': constants.REQUIRED,
'project_name': constants.OPTIONAL,
'poll_time': constants.OPTIONAL}
def update_state_on_changed(root_table_name):
"""Decorator to check raw data before retranslating.
If raw data is same with cached self.raw_state,
don't translate data, return empty list directly.
If raw data is changed, translate it and update state.
"""
def outer(f):
@functools.wraps(f)
def inner(self, raw_data, *args, **kw):
if (root_table_name not in self.raw_state or
# TODO(RuiChen): workaround for oslo-incubator bug/1499369,
# enable self.raw_state cache, once the bug is resolved.
raw_data is not self.raw_state[root_table_name]):
result = f(self, raw_data, *args, **kw)
self._update_state(root_table_name, result)
self.raw_state[root_table_name] = raw_data
else:
result = []
return result
return inner
return outer
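# Hedged usage sketch: the decorator is intended to wrap a datasource driver's
# translation method (the driver class and method below are hypothetical):
#
#   class MyDriver(datasource_driver.PollingDataSourceDriver):
#       @update_state_on_changed('servers')
#       def _translate_servers(self, obj):
#           row_data = ...  # translate the raw API response into table rows
#           return row_data
#
# On every poll the wrapper compares `obj` with self.raw_state['servers'] and
# skips retranslation (returning []) when the raw data is unchanged.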
def add_column(colname, desc=None):
"""Adds column in the form of dict."""
return {'name': colname, 'desc': desc}
def inspect_methods(client, api_prefix):
"""Inspect all callable methods from client for congress."""
# some methods are referred multiple times, we should
# save them here to avoid infinite loop
obj_checked = []
method_checked = []
# For depth-first search
obj_stack = []
# save all inspected methods that will be returned
allmethods = []
obj_checked.append(client)
obj_stack.append(client)
while len(obj_stack) > 0:
cur_obj = obj_stack.pop()
# everything starts with '_' are considered as internal only
for f in [f for f in dir(cur_obj) if not f.startswith('_')]:
p = getattr(cur_obj, f, None)
if inspect.ismethod(p):
m_p = {}
# to get a name that can be called by Congress, no need
# to return the full path
m_p['name'] = cur_obj.__module__.replace(api_prefix, '')
if m_p['name'] == '':
m_p['name'] = p.__name__
else:
m_p['name'] = m_p['name'] + '.' + p.__name__
# skip checked methods
if m_p['name'] in method_checked:
continue
m_doc = inspect.getdoc(p)
# not return deprecated methods
if m_doc and "DEPRECATED:" in m_doc:
continue
if m_doc:
                    m_doc = re.sub(r'\n|\s+', ' ', m_doc)
x = re.split(' :param ', m_doc)
m_p['desc'] = x.pop(0)
y = inspect.getargspec(p)
m_p['args'] = []
while len(y.args) > 0:
m_p_name = y.args.pop(0)
if m_p_name == 'self':
continue
if len(x) > 0:
m_p_desc = x.pop(0)
else:
m_p_desc = "None"
m_p['args'].append({'name': m_p_name,
'desc': m_p_desc})
else:
m_p['args'] = []
m_p['desc'] = ''
allmethods.append(m_p)
method_checked.append(m_p['name'])
elif inspect.isfunction(p):
m_p = {}
m_p['name'] = cur_obj.__module__.replace(api_prefix, '')
if m_p['name'] == '':
m_p['name'] = f
else:
m_p['name'] = m_p['name'] + '.' + f
# TODO(zhenzanz): Never see doc for function yet.
# m_doc = inspect.getdoc(p)
m_p['args'] = []
m_p['desc'] = ''
allmethods.append(m_p)
method_checked.append(m_p['name'])
elif isinstance(p, object) and hasattr(p, '__module__'):
# avoid infinite loop by checking that p not in obj_checked.
# don't use 'in' since that uses ==, and some clients err
if ((not any(p is x for x in obj_checked)) and
(not inspect.isbuiltin(p))):
if re.match(api_prefix, p.__module__):
if (not inspect.isclass(p)):
obj_stack.append(p)
return allmethods
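# Hedged usage sketch: given an already-constructed python client instance
# (names below are illustrative only),
#
#   methods = inspect_methods(nova_client, 'novaclient')
#   # methods is a list of dicts such as
#   #   {'name': 'servers.list',
#   #    'args': [{'name': 'detailed', 'desc': '...'}],
#   #    'desc': '...'}
#
# which a Congress driver can expose as callable actions.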
# Note (thread-safety): blocking function
def get_keystone_session(creds):
url_parts = urlparse.urlparse(creds['auth_url'])
path = url_parts.path.lower()
if path.startswith('/v3'):
# Use v3 plugin to authenticate
# Note (thread-safety): blocking call
auth = v3.Password(
auth_url=creds['auth_url'],
username=creds['username'],
password=creds['password'],
project_name=creds.get('project_name') or creds.get('tenant_name'),
user_domain_name=creds.get('user_domain_name', 'Default'),
project_domain_name=creds.get('project_domain_name', 'Default'))
else:
# Use v2 plugin
# Note (thread-safety): blocking call
auth = v2.Password(auth_url=creds['auth_url'],
username=creds['username'],
password=creds['password'],
tenant_name=creds['tenant_name'])
# Note (thread-safety): blocking call?
session = kssession.Session(auth=auth)
return session
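# Hedged usage sketch (endpoint values are illustrative only):
#
#   creds = {'auth_url': 'http://keystone.example.com:5000/v3',
#            'username': 'admin',
#            'password': 'secret',
#            'tenant_name': 'demo'}
#   session = get_keystone_session(creds)   # blocking call
#   # The resulting session can be handed to the python-*client constructors
#   # that accept a keystoneauth1 session.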
| ramineni/my_congress | congress/datasources/datasource_utils.py | Python | apache-2.0 | 6,907 |
#! /usr/bin/python
import json
import os
import re
import datetime
class Advisories(object):
today = datetime.datetime.now().strftime("%Y-%m-%d")
def __init__(self, initial_advisories_path=None, format="txt"):
self.advisories = []
self.added_packages = {}
if initial_advisories_path is not None:
f = open(initial_advisories_path, "r")
if format == 'json':
s = f.read()
if s.startswith("advisories ="):
s = s.replace("advisories = ", "", 1)
s = s.rstrip(";\n")
self.advisories = json.loads(s)
else:
for line in f:
self.parse_line(line)
f.close()
def parse_line(self, line):
line = line.strip()
if line.startswith("#") or line == "":
return
d, p, v, f, desc = line.split(";", 4)
pkgs = p.split(",")
flags = f.split(" ")
desc = desc.replace("\"", "\\\"")
obj = {
"date": d,
"packages": pkgs,
"toolkit_version": v,
"flags": flags,
"description": desc,
}
self.advisories.append(obj)
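    # Example of the semicolon-delimited line format parse_line() expects
    # (the date, package name and text below are made up):
    #   2014-01-15;globus_gram_job_manager;6.0;security bug;Fixes a crash ...
    # which is stored as:
    #   {"date": "2014-01-15", "packages": ["globus_gram_job_manager"],
    #    "toolkit_version": "6.0", "flags": ["security", "bug"],
    #    "description": "Fixes a crash ..."}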
def add_advisories(self, packages):
for p in packages:
if p.arch == 'src' and p.name not in self.added_packages and \
".src.rpm" in p.path:
pfd = os.popen('rpm -q -p "%s" --changelog' % p.path)
pfd.readline() # Date line
changelog = ""
for l in pfd:
if l.startswith("*"):
break
else:
if l.startswith("- "):
l = l.replace("- ", "", 1)
changelog += l
pfd.close()
changelog = changelog.strip().replace("\n", "<br />")
pfd = os.popen('rpm -q -p "%s" -l' % p.path)
files = []
for l in pfd:
if ".tar.gz" in l:
l = l.replace(".tar.gz", "").strip()
                        matches = re.match(r"([a-z-]+)(-[0-9.]+)", l)
if matches is not None:
l = matches.group(1).replace("-", "_") + \
matches.group(2)
files.append(l.replace(".tar.gz", "").strip())
pfd.close()
if len(files) > 0:
obj = {
"date": Advisories.today,
"packages": files,
"toolkit_version": "6.0",
"flags": ["bug"],
"description": changelog
}
self.advisories.append(obj)
self.added_packages[p.name] = obj
def to_json(self):
return json.dumps(self.advisories)
def new_to_text(self):
s = ""
for k in self.added_packages:
a = self.added_packages[k]
date = a['date']
pkgs = " ".join(a['packages'])
toolkit_version = a['toolkit_version']
flags = " ".join(a['flags'])
desc = a['description'].replace("\\\"", "\"")
s += "%s;%s;%s;%s;%s\n" % \
(date, pkgs, toolkit_version, flags, desc)
return s
def to_text(self):
s = ""
for a in self.advisories:
date = a['date']
pkgs = " ".join(a['packages'])
toolkit_version = a['toolkit_version']
flags = " ".join(a['flags'])
desc = a['description'].replace("\\\"", "\"")
s += "%s;%s;%s;%s;%s\n" % \
(date, pkgs, toolkit_version, flags, desc)
return s
| globus/globus-release-tools | share/python/repo/advisories.py | Python | apache-2.0 | 3,857 |
import sqlalchemy
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import mapper, sessionmaker
import subprocess
class PygrationState(object):
'''Python object representing the state table'''
def __init__(self, migration=None, step_id=None, step_name=None):
self.migration = migration
self.step_id = step_id
self.step_name = step_name
self.sequence = None
self.add_state = None
self.simdrop_state = None
self.drop_state = None
def __repr__(self):
return "<PygrationState(%s, %s)>" % (self.migration, self.step_id)
class Table(object):
metadata = sqlalchemy.MetaData()
engine = None
pygration_state = None
@classmethod
def define(cls, schema=None):
cls.pygration_state = sqlalchemy.Table('pygration_state', cls.metadata
, Column('migration', String(length=160), primary_key=True)
, Column('step_id', String(length=160), primary_key=True)
, Column('step_name', String(length=160))
, Column('sequence', Integer)
, Column('add_state', String(length=16))
, Column('simdrop_state', String(length=16))
, Column('drop_state', String(length=16))
, schema=schema
)
class FileLoader(object):
'''Object for running SQL from a file on the file system'''
def __init__(self, binary, args = [], formatting_dict = {}):
self._binary = binary
self._args = [arg.format(filename="{filename}", **formatting_dict) for arg in args]
def __call__(self, filename):
args = [arg.format(filename=filename) for arg in self._args]
print self._binary, args
subprocess.check_call([self._binary] + args)
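# Hedged usage sketch for FileLoader (binary name, arguments and paths are
# illustrative only):
#
#   loader = FileLoader('psql', ['-f', '{filename}', '-d', '{database}'],
#                       {'database': 'mydb'})
#   loader('migrations/001_create_tables.sql')
#
# The constructor pre-fills every placeholder except {filename}, which is
# substituted with the SQL file path each time the loader is called.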
def open(url=None, drivername=None, schema=None, username=None,
password=None, host=None, port=None, database=None, query=None):
"""Open the DB through a SQLAlchemy engine.
Returns an open session.
"""
if url is None and drivername is None:
raise Exception("Either a url or a driver name is required to open a db connection")
if url is None:
url = sqlalchemy.engine.url.URL(drivername = drivername,
username = username,
password = password,
host = host,
port = port,
database = database,
query = query)
Table.engine = sqlalchemy.create_engine(url)
Table.metadata.bind = Table.engine
Session = sessionmaker()
Session.configure(bind=Table.engine)
session = Session()
Table.define(schema)
mapper(PygrationState, Table.pygration_state)
return session
| mdg/pygrate | pygration/db.py | Python | apache-2.0 | 2,877 |
# -*- coding: utf-8 -*-
from collections import defaultdict
import subprocess
import os
import bisect
import re
from .common import log_fmt
class GitProcess():
GIT_BIN = None
def __init__(self, repoDir, args, text=None):
startupinfo = None
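        # On Windows, hide the console window that would otherwise flash for
        # every spawned git process.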
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
self._process = subprocess.Popen(
[GitProcess.GIT_BIN] + args,
cwd=repoDir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=startupinfo,
universal_newlines=text)
@property
def process(self):
return self._process
@property
def returncode(self):
return self._process.returncode
def communicate(self):
return self._process.communicate()
class Ref():
INVALID = -1
TAG = 0
HEAD = 1
REMOTE = 2
def __init__(self, type, name):
self._type = type
self._name = name
def __str__(self):
string = "type: {0}\n".format(self._type)
string += "name: {0}".format(self._name)
return string
def __lt__(self, other):
return self._type < other._type
@property
def type(self):
return self._type
@type.setter
def type(self, type):
self._type = type
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@classmethod
def fromRawString(cls, string):
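        # Expects one line of "git show-ref -d" output: a 40-character sha1, a
        # space, then a full ref name such as refs/heads/..., refs/remotes/...
        # or refs/tags/... (annotated tags carry a "^{}" suffix).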
if not string or len(string) < 46:
return None
sha1 = string[0:40]
name = string[41:]
if not name.startswith("refs/"):
return None
name = name[5:]
_type = Ref.INVALID
_name = None
if name.startswith("heads/"):
_type = Ref.HEAD
_name = name[6:]
elif name.startswith("remotes") \
and not name.endswith("HEAD"):
_type = Ref.REMOTE
_name = name
elif name.startswith("tags/"):
_type = Ref.TAG
if name.endswith("^{}"):
_name = name[5:-3]
else:
_name = name[5:]
else:
return None
return cls(_type, _name)
class Git():
REPO_DIR = os.getcwd()
REPO_TOP_DIR = os.getcwd()
REF_MAP = {}
REV_HEAD = None
# local uncommitted changes
LUC_SHA1 = "0000000000000000000000000000000000000000"
# local changes checked
LCC_SHA1 = "0000000000000000000000000000000000000001"
@staticmethod
def available():
return GitProcess.GIT_BIN is not None
@staticmethod
def run(args, text=None):
return GitProcess(Git.REPO_DIR, args, text)
@staticmethod
def checkOutput(args, text=None):
process = Git.run(args, text)
data = process.communicate()[0]
if process.returncode != 0:
return None
return data
@staticmethod
def repoTopLevelDir(directory):
"""get top level repo directory
if @directory is not a repository, None returned"""
if not os.path.isdir(directory):
return None
if not os.path.exists(directory):
return None
args = ["rev-parse", "--show-toplevel"]
process = GitProcess(directory, args)
realDir = process.communicate()[0]
if process.returncode != 0:
return None
return realDir.decode("utf-8").replace("\n", "")
@staticmethod
def refs():
args = ["show-ref", "-d"]
data = Git.checkOutput(args)
if not data:
return None
lines = data.decode("utf-8").split('\n')
refMap = defaultdict(list)
for line in lines:
ref = Ref.fromRawString(line)
if not ref:
continue
sha1 = line[0:40]
bisect.insort(refMap[sha1], ref)
return refMap
@staticmethod
def revHead():
args = ["rev-parse", "HEAD"]
data = Git.checkOutput(args)
if not data:
return None
return data.decode("utf-8").rstrip('\n')
@staticmethod
def branches():
args = ["branch", "-a"]
data = Git.checkOutput(args)
if not data:
return None
return data.decode("utf-8").split('\n')
@staticmethod
def commitSummary(sha1):
fmt = "%h%x01%s%x01%ad%x01%an%x01%ae"
args = ["show", "-s",
"--pretty=format:{0}".format(fmt),
"--date=short", sha1]
data = Git.checkOutput(args)
if not data:
return None
parts = data.decode("utf-8").split("\x01")
return {"sha1": parts[0],
"subject": parts[1],
"date": parts[2],
"author": parts[3],
"email": parts[4]}
@staticmethod
def abbrevCommit(sha1):
args = ["show", "-s", "--pretty=format:%h", sha1]
data = Git.checkOutput(args)
if not data:
return sha1[:7]
return data.rstrip().decode("utf-8")
@staticmethod
def commitSubject(sha1):
args = ["show", "-s", "--pretty=format:%s", sha1]
data = Git.checkOutput(args)
return data
@staticmethod
def commitRawDiff(sha1, filePath=None, gitArgs=None):
if sha1 == Git.LCC_SHA1:
args = ["diff-index", "--cached", "HEAD"]
elif sha1 == Git.LUC_SHA1:
args = ["diff-files"]
else:
args = ["diff-tree", "-r", "--root", sha1]
args.extend(["-p", "--textconv", "--submodule",
"-C", "--no-commit-id", "-U3"])
if gitArgs:
args.extend(gitArgs)
if filePath:
args.append("--")
args.append(filePath)
data = Git.checkOutput(args)
if not data:
return None
return data
@staticmethod
def externalDiff(branchDir, commit, path=None, tool=None):
args = ["difftool", "--no-prompt"]
if commit.sha1 == Git.LUC_SHA1:
pass
elif commit.sha1 == Git.LCC_SHA1:
args.append("--cached")
else:
args.append("{0}^..{0}".format(commit.sha1))
if tool:
args.append("--tool={}".format(tool))
if path:
args.append("--")
args.append(path)
cwd = branchDir if branchDir else Git.REPO_DIR
process = GitProcess(cwd, args)
@staticmethod
def conflictFiles():
args = ["diff", "--name-only",
"--diff-filter=U",
"-no-color"]
data = Git.checkOutput(args)
if not data:
return None
return data.rstrip(b'\n').decode("utf-8").split('\n')
@staticmethod
def gitDir():
args = ["rev-parse", "--git-dir"]
data = Git.checkOutput(args)
if not data:
return None
return data.rstrip(b'\n').decode("utf-8")
@staticmethod
def gitPath(name):
dir = Git.gitDir()
if not dir:
return None
if dir[-1] != '/' and dir[-1] != '\\':
dir += '/'
return dir + name
@staticmethod
def mergeBranchName():
"""return the current merge branch name"""
# TODO: is there a better way?
path = Git.gitPath("MERGE_MSG")
if not os.path.exists(path):
return None
name = None
with open(path, "r") as f:
line = f.readline()
m = re.match("Merge.* '(.*)'.*", line)
if m:
name = m.group(1)
# likely a sha1
if name and re.match("[a-f0-9]{7,40}", name):
data = Git.checkOutput(["branch", "--remotes",
"--contains", name])
if data:
data = data.rstrip(b'\n')
if data:
# might have more than one branch
name = data.decode("utf-8").split('\n')[0].strip()
return name
@staticmethod
def resolveBy(ours, path):
args = ["checkout",
"--ours" if ours else "--theirs",
path]
process = Git.run(args)
process.communicate()
if process.returncode != 0:
return False
args = ["add", path]
process = Git.run(args)
process.communicate()
return True if process.returncode == 0 else False
@staticmethod
def undoMerge(path):
"""undo a merge on the @path"""
if not path:
return False
args = ["checkout", "-m", path]
process = Git.run(args)
process.communicate()
return process.returncode == 0
@staticmethod
def hasLocalChanges(branch, cached=False):
# A remote branch should never have local changes
if branch.startswith("remotes/"):
return False
dir = Git.branchDir(branch)
# only branch checked out can have local changes
if not dir:
return False
args = ["diff", "--quiet"]
if cached:
args.append("--cached")
process = GitProcess(dir, args)
process.communicate()
return process.returncode == 1
@staticmethod
def branchDir(branch):
"""returned the branch directory if it checked out
otherwise returned an empty string"""
if not branch or branch.startswith("remotes/"):
return ""
# Use the repo dir directly
# since we are unable to get two detached branch
if branch.startswith("(HEAD detached"):
return Git.REPO_DIR
args = ["worktree", "list"]
data = Git.checkOutput(args)
if not data:
return ""
worktree_re = re.compile(
r"(\S+)\s+[a-f0-9]+\s+(\[(\S+)\]|\(detached HEAD\))$")
worktrees = data.rstrip(b'\n').decode("utf8").split('\n')
for wt in worktrees:
m = worktree_re.fullmatch(wt)
if not m:
print("Oops! Wrong format for worktree:", wt)
elif m.group(3) == branch:
return m.group(1)
return ""
@staticmethod
def generateDiff(sha1, filePath):
data = Git.commitRawDiff(sha1)
if not data:
return False
with open(filePath, "wb+") as f:
f.write(data)
return True
@staticmethod
def generatePatch(sha1, filePath):
args = ["format-patch", "-1", "--stdout", sha1]
data = Git.checkOutput(args)
if not data:
return False
with open(filePath, "wb+") as f:
f.write(data)
return True
@staticmethod
def revertCommit(branch, sha1):
branchDir = Git.branchDir(branch)
args = ["revert", "--no-edit", sha1]
process = GitProcess(branchDir, args)
_, error = process.communicate()
if process.returncode != 0 and error is not None:
error = error.decode("utf-8")
return process.returncode, error
@staticmethod
def resetCommitTo(branch, sha1, method):
branchDir = Git.branchDir(branch)
args = ["reset", "--" + method, sha1]
process = GitProcess(branchDir, args)
_, error = process.communicate()
if process.returncode != 0 and error is not None:
error = error.decode("utf-8")
return process.returncode, error
@staticmethod
def repoUrl():
args = ["config", "remote.origin.url"]
data = Git.checkOutput(args)
if data:
return data.rstrip(b'\n').decode("utf-8")
return ""
@staticmethod
def runWithError(args):
process = Git.run(args)
_, error = process.communicate()
if process.returncode != 0 and error is not None:
error = error.decode("utf-8")
return process.returncode, error
@staticmethod
def setConfigValue(key, value, isGlobal=True):
if not key:
return 0, None
args = ["config"]
if isGlobal:
args.append("--global")
args.append(key)
if value:
args.append(value)
else:
args.insert(1, "--unset")
return Git.runWithError(args)
@staticmethod
def removeSection(section, isGlobal=True):
if not section:
return 0, None
args = ["config"]
if isGlobal:
args.append("--global")
args.append("--remove-section")
args.append(section)
return Git.runWithError(args)
@staticmethod
def setDiffTool(name, cmd, isGlobal=True):
if not name:
return 0, None
if not cmd:
Git.removeSection("difftool.%s" % name)
# treat as OK
return 0, None
key = "difftool.%s.cmd" % name
return Git.setConfigValue(key, cmd, isGlobal)
@staticmethod
def setMergeTool(name, cmd, isGlobal=True):
if not name:
return 0, None
if not cmd:
Git.removeSection("mergetool.%s" % name)
return 0, None
key = "mergetool.%s.cmd" % name
ret, error = Git.setConfigValue(key, cmd, isGlobal)
if ret != 0:
return ret, error
key = "mergetool.%s.trustExitCode" % name
return Git.setConfigValue(key, "true", isGlobal)
@staticmethod
def getConfigValue(key, isGlobal=True):
if not key:
return ""
args = ["config", "--get", key]
if isGlobal:
args.insert(1, "--global")
data = Git.checkOutput(args, True)
if data is None:
return ""
return data.rstrip("\n")
@staticmethod
def diffToolCmd(name, isGlobal=True):
if not name:
return ""
return Git.getConfigValue("difftool.%s.cmd" % name)
@staticmethod
def mergeToolCmd(name, isGlobal=True):
if not name:
return ""
return Git.getConfigValue("mergetool.%s.cmd" % name)
| timxx/gitc | qgitc/gitutils.py | Python | apache-2.0 | 14,220 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with Stackdriver Logging via JSON-over-HTTP."""
import functools
from google.cloud import _http
from google.cloud.iterator import HTTPIterator
from google.cloud.logging import __version__
from google.cloud.logging._helpers import entry_from_resource
from google.cloud.logging.sink import Sink
from google.cloud.logging.metric import Metric
_CLIENT_INFO = _http.CLIENT_INFO_TEMPLATE.format(__version__)
class Connection(_http.JSONConnection):
"""A connection to Google Stackdriver Logging via the JSON REST API.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client that owns the current connection.
"""
API_BASE_URL = 'https://logging.googleapis.com'
"""The base of the API call URL."""
API_VERSION = 'v2'
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}'
"""A template for the URL of a particular API call."""
_EXTRA_HEADERS = {
_http.CLIENT_INFO_HEADER: _CLIENT_INFO,
}
class _LoggingAPI(object):
"""Helper mapping logging-related APIs.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client used to make API requests.
"""
def __init__(self, client):
self._client = client
self.api_request = client._connection.api_request
def list_entries(self, projects, filter_=None, order_by=None,
page_size=None, page_token=None):
"""Return a page of log entry resources.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the client.
:type filter_: str
:param filter_:
a filter expression. See
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: str
:param order_by: One of :data:`~google.cloud.logging.ASCENDING`
or :data:`~google.cloud.logging.DESCENDING`.
:type page_size: int
:param page_size: maximum number of entries to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of entries. If not
passed, the API will return the first page of
entries.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry`
accessible to the current API.
"""
extra_params = {'projectIds': projects}
if filter_ is not None:
extra_params['filter'] = filter_
if order_by is not None:
extra_params['orderBy'] = order_by
if page_size is not None:
extra_params['pageSize'] = page_size
path = '/entries:list'
# We attach a mutable loggers dictionary so that as Logger
# objects are created by entry_from_resource, they can be
# re-used by other log entries from the same logger.
loggers = {}
item_to_value = functools.partial(
_item_to_entry, loggers=loggers)
iterator = HTTPIterator(
client=self._client, path=path,
item_to_value=item_to_value, items_key='entries',
page_token=page_token, extra_params=extra_params)
# This method uses POST to make a read-only request.
iterator._HTTP_METHOD = 'POST'
return iterator
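    # Illustrative call (client construction omitted; project id and filter
    # below are assumptions for the sketch):
    #   api = _LoggingAPI(client)
    #   for entry in api.list_entries(['my-project'], filter_='severity>=ERROR'):
    #       print(entry)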
def write_entries(self, entries, logger_name=None, resource=None,
labels=None):
"""API call: log an entry resource via a POST request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write
:type entries: sequence of mapping
:param entries: the log entry resources to log.
:type logger_name: str
:param logger_name: name of default logger to which to log the entries;
individual entries may override.
:type resource: mapping
:param resource: default resource to associate with entries;
individual entries may override.
:type labels: mapping
:param labels: default labels to associate with entries;
individual entries may override.
"""
data = {'entries': list(entries)}
if logger_name is not None:
data['logName'] = logger_name
if resource is not None:
data['resource'] = resource
if labels is not None:
data['labels'] = labels
self.api_request(method='POST', path='/entries:write', data=data)
def logger_delete(self, project, logger_name):
"""API call: delete all entries in a logger via a DELETE request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete
:type project: str
:param project: ID of project containing the log entries to delete
:type logger_name: str
:param logger_name: name of logger containing the log entries to delete
"""
path = '/projects/%s/logs/%s' % (project, logger_name)
self.api_request(method='DELETE', path=path)
class _SinksAPI(object):
"""Helper mapping sink-related APIs.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client used to make API requests.
"""
def __init__(self, client):
self._client = client
self.api_request = client._connection.api_request
def list_sinks(self, project, page_size=None, page_token=None):
"""List sinks for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list
:type project: str
:param project: ID of the project whose sinks are to be listed.
:type page_size: int
:param page_size: maximum number of sinks to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.sink.Sink`
accessible to the current API.
"""
extra_params = {}
if page_size is not None:
extra_params['pageSize'] = page_size
path = '/projects/%s/sinks' % (project,)
return HTTPIterator(
client=self._client, path=path,
item_to_value=_item_to_sink, items_key='sinks',
page_token=page_token, extra_params=extra_params)
def sink_create(self, project, sink_name, filter_, destination):
"""API call: create a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create
:type project: str
:param project: ID of the project in which to create the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
"""
target = '/projects/%s/sinks' % (project,)
data = {
'name': sink_name,
'filter': filter_,
'destination': destination,
}
self.api_request(method='POST', path=target, data=data)
def sink_get(self, project, sink_name):
"""API call: retrieve a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:rtype: dict
:returns: The JSON sink object returned from the API.
"""
target = '/projects/%s/sinks/%s' % (project, sink_name)
return self.api_request(method='GET', path=target)
def sink_update(self, project, sink_name, filter_, destination):
"""API call: update a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:rtype: dict
:returns: The returned (updated) resource.
"""
target = '/projects/%s/sinks/%s' % (project, sink_name)
data = {
'name': sink_name,
'filter': filter_,
'destination': destination,
}
return self.api_request(method='PUT', path=target, data=data)
def sink_delete(self, project, sink_name):
"""API call: delete a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
"""
target = '/projects/%s/sinks/%s' % (project, sink_name)
self.api_request(method='DELETE', path=target)
class _MetricsAPI(object):
"""Helper mapping sink-related APIs.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client used to make API requests.
"""
def __init__(self, client):
self._client = client
self.api_request = client._connection.api_request
def list_metrics(self, project, page_size=None, page_token=None):
"""List metrics for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
:type project: str
:param project: ID of the project whose metrics are to be listed.
:type page_size: int
:param page_size: maximum number of metrics to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.metric.Metric`
accessible to the current API.
"""
extra_params = {}
if page_size is not None:
extra_params['pageSize'] = page_size
path = '/projects/%s/metrics' % (project,)
return HTTPIterator(
client=self._client, path=path,
item_to_value=_item_to_metric, items_key='metrics',
page_token=page_token, extra_params=extra_params)
def metric_create(self, project, metric_name, filter_, description=None):
"""API call: create a metric resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create
:type project: str
:param project: ID of the project in which to create the metric.
:type metric_name: str
:param metric_name: the name of the metric
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the metric.
:type description: str
:param description: description of the metric.
"""
target = '/projects/%s/metrics' % (project,)
data = {
'name': metric_name,
'filter': filter_,
'description': description,
}
self.api_request(method='POST', path=target, data=data)
def metric_get(self, project, metric_name):
"""API call: retrieve a metric resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/get
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
:rtype: dict
:returns: The JSON metric object returned from the API.
"""
target = '/projects/%s/metrics/%s' % (project, metric_name)
return self.api_request(method='GET', path=target)
def metric_update(self, project, metric_name, filter_, description):
"""API call: update a metric resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/update
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the metric.
:type description: str
:param description: description of the metric.
:rtype: dict
:returns: The returned (updated) resource.
"""
target = '/projects/%s/metrics/%s' % (project, metric_name)
data = {
'name': metric_name,
'filter': filter_,
'description': description,
}
return self.api_request(method='PUT', path=target, data=data)
def metric_delete(self, project, metric_name):
"""API call: delete a metric resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/delete
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric.
"""
target = '/projects/%s/metrics/%s' % (project, metric_name)
self.api_request(method='DELETE', path=target)
def _item_to_entry(iterator, resource, loggers):
"""Convert a log entry resource to the native object.
.. note::
This method does not have the correct signature to be used as
the ``item_to_value`` argument to
:class:`~google.cloud.iterator.Iterator`. It is intended to be
patched with a mutable ``loggers`` argument that can be updated
on subsequent calls. For an example, see how the method is
used above in :meth:`_LoggingAPI.list_entries`.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: Log entry JSON resource returned from the API.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The next log entry in the page.
"""
return entry_from_resource(resource, iterator.client, loggers)
def _item_to_sink(iterator, resource):
"""Convert a sink resource to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: Sink JSON resource returned from the API.
:rtype: :class:`~google.cloud.logging.sink.Sink`
:returns: The next sink in the page.
"""
return Sink.from_api_repr(resource, iterator.client)
def _item_to_metric(iterator, resource):
"""Convert a metric resource to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: Metric JSON resource returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page.
"""
return Metric.from_api_repr(resource, iterator.client)
| tartavull/google-cloud-python | logging/google/cloud/logging/_http.py | Python | apache-2.0 | 17,745 |
#!/usr/bin/env python
class AllPermutations(object):
def __init__(self, arr):
self.arr = arr
def all_permutations(self):
results = []
used = []
self._all_permutations(self.arr, used, results)
return results
def _all_permutations(self, to_use, used, results):
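        # Backtracking: "used" holds the permutation prefix built so far; once
        # "to_use" is empty that prefix is a complete permutation.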
if len(to_use) == 0:
results.append(used)
for i, x in enumerate(to_use):
new_used = used + [x]
new_to_use = to_use[:i] + to_use[i+1:]
self._all_permutations(new_to_use, new_used, results)
def main():
arr = [1, 2, 3, 4]
ap = AllPermutations(arr)
results = ap.all_permutations()
for x in results:
print x
print len(results)
if __name__ == "__main__":
main() | davjohnst/fundamentals | fundamentals/backtracking/all_permutations.py | Python | apache-2.0 | 772 |
#!/usr/bin/env python
import re
from codecs import open
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version():
version = ''
with open('grimreaper.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError("Cannot find version's information")
return version
def get_long_description():
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
with open('CHANGES.rst', 'r', 'utf-8') as f:
changes = f.read()
return readme + '\n\n' + changes
setup(
name='GrimReapersPie',
version=get_version(),
description='Python client to the GrimReaper process killer.',
long_description=get_long_description(),
author='Mateusz Pawlik',
author_email='[email protected]',
url='https://github.com/matee911/GrimReapersPie',
py_modules=['grimreaper'],
package_data={'': ['LICENSE', 'CHANGES.rst', 'README.rst']},
license='Apache 2.0',
keywords='management',
classifiers=(
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
),
)
| matee911/GrimReapersPie | setup.py | Python | apache-2.0 | 1,788 |
"""
This component provides HA sensor support for Ring Door Bell/Chimes.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ring/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_ENTITY_NAMESPACE, CONF_MONITORED_CONDITIONS)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from . import ATTRIBUTION, DATA_RING, DEFAULT_ENTITY_NAMESPACE
DEPENDENCIES = ['ring']
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
# Sensor types: Name, category, units, icon, kind
SENSOR_TYPES = {
'battery': [
'Battery', ['doorbell', 'stickup_cams'], '%', 'battery-50', None],
'last_activity': [
'Last Activity', ['doorbell', 'stickup_cams'], None, 'history', None],
'last_ding': [
'Last Ding', ['doorbell'], None, 'history', 'ding'],
'last_motion': [
'Last Motion', ['doorbell', 'stickup_cams'], None,
'history', 'motion'],
'volume': [
'Volume', ['chime', 'doorbell', 'stickup_cams'], None,
'bell-ring', None],
'wifi_signal_category': [
'WiFi Signal Category', ['chime', 'doorbell', 'stickup_cams'], None,
'wifi', None],
'wifi_signal_strength': [
'WiFi Signal Strength', ['chime', 'doorbell', 'stickup_cams'], 'dBm',
'wifi', None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ENTITY_NAMESPACE, default=DEFAULT_ENTITY_NAMESPACE):
cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a Ring device."""
ring = hass.data[DATA_RING]
sensors = []
for device in ring.chimes: # ring.chimes is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'chime' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
for device in ring.doorbells: # ring.doorbells is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'doorbell' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
for device in ring.stickup_cams: # ring.stickup_cams is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'stickup_cams' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
add_entities(sensors, True)
return True
class RingSensor(Entity):
"""A sensor implementation for Ring device."""
def __init__(self, hass, data, sensor_type):
"""Initialize a sensor for Ring device."""
super(RingSensor, self).__init__()
self._sensor_type = sensor_type
self._data = data
self._extra = None
self._icon = 'mdi:{}'.format(SENSOR_TYPES.get(self._sensor_type)[3])
self._kind = SENSOR_TYPES.get(self._sensor_type)[4]
self._name = "{0} {1}".format(
self._data.name, SENSOR_TYPES.get(self._sensor_type)[0])
self._state = None
self._tz = str(hass.config.time_zone)
self._unique_id = '{}-{}'.format(self._data.id, self._sensor_type)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
attrs['device_id'] = self._data.id
attrs['firmware'] = self._data.firmware
attrs['kind'] = self._data.kind
attrs['timezone'] = self._data.timezone
attrs['type'] = self._data.family
attrs['wifi_name'] = self._data.wifi_name
if self._extra and self._sensor_type.startswith('last_'):
attrs['created_at'] = self._extra['created_at']
attrs['answered'] = self._extra['answered']
attrs['recording_status'] = self._extra['recording']['status']
attrs['category'] = self._extra['kind']
return attrs
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self._sensor_type == 'battery' and self._state is not None:
return icon_for_battery_level(battery_level=int(self._state),
charging=False)
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES.get(self._sensor_type)[2]
def update(self):
"""Get the latest data and updates the state."""
_LOGGER.debug("Pulling data from %s sensor", self._name)
self._data.update()
if self._sensor_type == 'volume':
self._state = self._data.volume
if self._sensor_type == 'battery':
self._state = self._data.battery_life
if self._sensor_type.startswith('last_'):
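            # For "last_*" sensors, keep the newest matching event for the state
            # attributes and show its HH:MM time as the state.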
history = self._data.history(limit=5,
timezone=self._tz,
kind=self._kind,
enforce_limit=True)
if history:
self._extra = history[0]
created_at = self._extra['created_at']
self._state = '{0:0>2}:{1:0>2}'.format(
created_at.hour, created_at.minute)
if self._sensor_type == 'wifi_signal_category':
self._state = self._data.wifi_signal_category
if self._sensor_type == 'wifi_signal_strength':
self._state = self._data.wifi_signal_strength
| jamespcole/home-assistant | homeassistant/components/ring/sensor.py | Python | apache-2.0 | 6,215 |
import sys
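# Compares two tab-separated result logs (argv[1] and argv[2]). Column 0 is used
# as the query key and the first token of column 2 is parsed as an integer time.
# Queries whose times differ by more than a factor of two are reported.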
q_to_time = {}
i = 0
for line in open(sys.argv[1]):
try:
line = line.strip()
cols = line.split('\t')
q_to_time[cols[0]] = [(int(cols[2].split(' ')[0]), i)]
i += 1
except ValueError:
continue
i = 0
for line in open(sys.argv[2]):
try:
line = line.strip()
cols = line.split('\t')
q_to_time[cols[0]].append((int(cols[2].split(' ')[0]), i))
i += 1
except KeyError:
continue
except ValueError:
continue
for k,v in q_to_time.items():
if v[0][0] < v[1][0]:
smaller = float(v[0][0])
larger = float(v[1][0])
else:
smaller = float(v[1][0])
larger = float(v[0][0])
try:
if (larger / smaller > 2):
print('SIGNIFICANT DIFFERENCE: ' + k + ': (' + str(v[0][0]) + ', ' +
str(v[0][1]) + ') vs (' + str(v[1][0]) + ', ' + str(v[1][1])
+ ').')
print(' -> FACTOR: ' + str(larger / smaller))
except:
print('problem with : ' + k + ' ' + str(larger) + ' ' + str(smaller))
| Buchhold/QLever | misc/compare_shuffled.py | Python | apache-2.0 | 1,155 |
# -*- test-case-name: txdav.common.datastore.upgrade.sql.test -*-
# #
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #
from twext.enterprise.dal.syntax import Update
from twisted.internet.defer import inlineCallbacks
from txdav.base.propertystore.base import PropertyName
from txdav.common.datastore.sql_tables import _ABO_KIND_GROUP, schema
from txdav.common.datastore.upgrade.sql.upgrades.util import updateAddressBookDataVersion, \
doToEachHomeNotAtVersion, removeProperty, cleanPropertyStore, \
logUpgradeStatus
from txdav.xml import element
"""
AddressBook Data upgrade from database version 1 to 2
"""
UPGRADE_TO_VERSION = 2
@inlineCallbacks
def doUpgrade(sqlStore):
"""
fill in members tables and increment data version
"""
yield populateMemberTables(sqlStore)
yield removeResourceType(sqlStore)
# bump data version
yield updateAddressBookDataVersion(sqlStore, UPGRADE_TO_VERSION)
@inlineCallbacks
def populateMemberTables(sqlStore):
"""
Set the group kind and and members tables
"""
@inlineCallbacks
def doIt(txn, homeResourceID):
"""
KIND is set to person by schema upgrade.
To upgrade MEMBERS and FOREIGN_MEMBERS:
1. Set group KIND (avoids assert)
2. Write groups. Write logic will fill in MEMBERS and FOREIGN_MEMBERS
(Remember that all members resource IDs must already be in the address book).
"""
home = yield txn.addressbookHomeWithResourceID(homeResourceID)
abObjectResources = yield home.addressbook().objectResources()
for abObject in abObjectResources:
component = yield abObject.component()
lcResourceKind = component.resourceKind().lower() if component.resourceKind() else component.resourceKind()
if lcResourceKind == "group":
# update kind
abo = schema.ADDRESSBOOK_OBJECT
yield Update(
{abo.KIND: _ABO_KIND_GROUP},
Where=abo.RESOURCE_ID == abObject._resourceID,
).on(txn)
abObject._kind = _ABO_KIND_GROUP
# update rest
yield abObject.setComponent(component)
logUpgradeStatus("Starting Addressbook Populate Members")
# Do this to each calendar home not already at version 2
yield doToEachHomeNotAtVersion(sqlStore, schema.ADDRESSBOOK_HOME, UPGRADE_TO_VERSION, doIt, "Populate Members")
@inlineCallbacks
def removeResourceType(sqlStore):
logUpgradeStatus("Starting Addressbook Remove Resource Type")
sqlTxn = sqlStore.newTransaction(label="addressbook_upgrade_from_1_to_2.removeResourceType")
yield removeProperty(sqlTxn, PropertyName.fromElement(element.ResourceType))
yield sqlTxn.commit()
yield cleanPropertyStore()
logUpgradeStatus("End Addressbook Remove Resource Type")
| red-hood/calendarserver | txdav/common/datastore/upgrade/sql/upgrades/addressbook_upgrade_from_1_to_2.py | Python | apache-2.0 | 3,445 |
#!/usr/bin/env python3.4
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module has the class for controlling Mini-Circuits RCDAT series
attenuators over Telnet.
See http://www.minicircuits.com/softwaredownload/Prog_Manual-6-Programmable_Attenuator.pdf
"""
from mobly.controllers import attenuator
from mobly.controllers.attenuator_lib import telnet_client
class AttenuatorDevice(object):
"""This provides a specific telnet-controlled implementation of
AttenuatorDevice for Mini-Circuits RC-DAT attenuators.
Attributes:
path_count: The number of signal attenuation path this device has.
"""
def __init__(self, path_count=1):
self.path_count = path_count
# The telnet client used to communicate with the attenuator device.
self._telnet_client = telnet_client.TelnetClient(
tx_cmd_separator="\r\n", rx_cmd_separator="\r\n", prompt="")
@property
def is_open(self):
"""This function returns the state of the telnet connection to the
underlying AttenuatorDevice.
Returns:
True if there is a successfully open connection to the
AttenuatorDevice.
"""
return bool(self._telnet_client.is_open)
def open(self, host, port=23):
"""Opens a telnet connection to the desired AttenuatorDevice and
queries basic information.
        Args:
host: A valid hostname (IP address or DNS-resolvable name) to an
MC-DAT attenuator instrument.
port: An optional port number (defaults to telnet default 23)
"""
self._telnet_client.open(host, port)
config_str = self._telnet_client.cmd("MN?")
if config_str.startswith("MN="):
config_str = config_str[len("MN="):]
self.properties = dict(
zip(['model', 'max_freq', 'max_atten'], config_str.split("-", 2)))
self.max_atten = float(self.properties['max_atten'])
def close(self):
"""Closes a telnet connection to the desired attenuator device.
This should be called as part of any teardown procedure prior to the
attenuator instrument leaving scope.
"""
if self.is_open:
self._telnet_client.close()
def set_atten(self, idx, value):
"""Sets the attenuation value for a particular signal path.
Args:
idx: Zero-based index int which is the identifier for a particular
signal path in an instrument. For instruments that only has one
channel, this is ignored by the device.
value: A float that is the attenuation value to set.
Raises:
Error is raised if the underlying telnet connection to the
instrument is not open.
IndexError is raised if the index of the attenuator is greater than
the maximum index of the underlying instrument.
ValueError is raised if the requested set value is greater than the
maximum attenuation value.
"""
if not self.is_open:
raise attenuator.Error(
"Connection to attenuator at %s is not open!" %
self._telnet_client.host)
if idx + 1 > self.path_count:
raise IndexError("Attenuator index out of range!", self.path_count,
idx)
if value > self.max_atten:
raise ValueError("Attenuator value out of range!", self.max_atten,
value)
# The actual device uses one-based index for channel numbers.
self._telnet_client.cmd("CHAN:%s:SETATT:%s" % (idx + 1, value))
def get_atten(self, idx=0):
"""This function returns the current attenuation from an attenuator at a
given index in the instrument.
Args:
idx: This zero-based index is the identifier for a particular
attenuator in an instrument.
Raises:
Error is raised if the underlying telnet connection to the
instrument is not open.
Returns:
A float that is the current attenuation value.
"""
if not self.is_open:
raise attenuator.Error(
"Connection to attenuator at %s is not open!" %
self._telnet_client.host)
if idx + 1 > self.path_count or idx < 0:
raise IndexError("Attenuator index out of range!", self.path_count,
idx)
atten_val_str = self._telnet_client.cmd("CHAN:%s:ATT?" % (idx + 1))
atten_val = float(atten_val_str)
return atten_val
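# Illustrative usage sketch (host and values are assumptions, not defaults):
#   attenuator = AttenuatorDevice(path_count=4)
#   attenuator.open('192.168.1.20')
#   attenuator.set_atten(0, 10.5)
#   current = attenuator.get_atten(0)
#   attenuator.close()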
| yusufm/mobly | mobly/controllers/attenuator_lib/minicircuits.py | Python | apache-2.0 | 5,184 |
import core
logger = core.log.getLogger("monitoring-utils")
class MonitoringUtils(object):
def __init__(self):
pass
@staticmethod
def check_existing_tag_in_topology(root, node, node_type, node_urns,
domain=None):
tag_exists = False
try:
elements = []
if not isinstance(node_urns, list):
node_urns = [node_urns]
try:
for node_urn in node_urns:
if node == "link":
elements.extend(
MonitoringUtils.
check_existing_link_tag_in_topology(
root, node_type, node_urn))
else:
node_elements = MonitoringUtils.\
check_existing_generic_tag_in_topology(
root, node, node_type, node_urn, domain)
if len(node_elements) > 0:
elements = node_elements
except:
pass
if len(elements) > 0:
tag_exists = True
except:
pass
return tag_exists
@staticmethod
def check_existing_generic_tag_in_topology(root, node, node_type, node_urn,
domain=None):
elements = []
if node_type == "tn":
if domain is not None:
domain = domain if "urn" in domain else \
"urn:publicid:IDN+ocf:" + domain
            if node_type is None:
                elements = root.xpath(
                    "//topology[@name='%s']//%s[@id='%s']" %
                    (domain, node, node_urn))
            else:
                elements = root.xpath(
                    "//topology[@name='%s']//%s[@type='%s'][@id='%s']" %
                    (domain, node, node_type, node_urn))
else:
elements = root.xpath(
"//%s[@type='%s'][@id='%s']" %
(node, node_type, node_urn))
if node_type is None:
elements = root.xpath("//%s[@id='%s']" % (node, node_urn))
return elements
@staticmethod
def check_existing_link_tag_in_topology(root, node_type, node_urn):
elements = []
interfaces_same_link = True
elem = root.xpath(
"//link[@type='%s']//interface_ref[@client_id='%s']" %
(node_type, node_urn))
if node_type is None:
elem = root.xpath(
"//link//interface_ref[@client_id='%s']" % node_urn)
for element in elements:
if element.getparent() == elem[0].getparent():
interfaces_same_link &= True
else:
interfaces_same_link &= False
if interfaces_same_link:
elements.extend(elem)
return elements
@staticmethod
def find_virtual_link_end_to_end(hybrid_links):
# Retrieve the endpoints of the slice ("abstract link" in M/MS)
e2e_link_urns = set()
# 1) Check for SDNRM-SDNRM end-paths
for se_link in hybrid_links:
# 1) Check for SDN-SDN end paths
# 2) Check for SDN-TN end paths
for link_end in [":ofam", ":tnrm"]:
if link_end in se_link["source"]:
e2e_link_urns.add(se_link["source"])
if link_end in se_link["destination"]:
e2e_link_urns.add(se_link["destination"])
return list(e2e_link_urns)
@staticmethod
def find_virtual_links(topology_root):
links_ids = []
for link_id in topology_root.xpath("//topology//link[@id]"):
links_ids.append(link_id.attrib["id"])
return links_ids
@staticmethod
def find_slice_name(topology_root):
slice_name = ""
try:
slice_name = topology_root.xpath("//topology")[0].attrib["name"]
except Exception as e:
logger.warning("Unable to retrieve slice name for topology. \
Details: %s" % e)
return slice_name
| ict-felix/stack | modules/resource/orchestrator/src/monitoring/utils.py | Python | apache-2.0 | 4,165 |
# file eulcommon/djangoextras/formfields.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Custom generic form fields for use with Django forms.
----
'''
import re
from django.core.validators import RegexValidator
from django.forms import CharField, ChoiceField
from django.forms.widgets import Select, TextInput, Widget
from django.utils.safestring import mark_safe
# regular expression to validate and parse W3C dates
W3C_DATE_RE = re.compile(r'^(?P<year>\d{4})(?:-(?P<month>[0-1]\d)(?:-(?P<day>[0-3]\d))?)?$')
validate_w3c_date = RegexValidator(W3C_DATE_RE,
u'Enter a valid W3C date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
'invalid')
class W3CDateWidget(Widget):
'''Multi-part date widget that generates three text input boxes for year,
month, and day. Expects and generates dates in any of these W3C formats,
depending on which fields are filled in: YYYY-MM-DD, YYYY-MM, or YYYY.
'''
# based in part on SelectDateWidget from django.forms.extras.widgets
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def value_from_datadict(self, data, files, name):
'''Generate a single value from multi-part form data. Constructs a W3C
date based on values that are set, leaving out day and month if they are
not present.
:param data: dictionary of data submitted by the form
:param files: - unused
:param name: base name of the form field
:returns: string value
'''
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == 'YYYY':
y = ''
if m == 'MM':
m = ''
if d == 'DD':
d = ''
date = y
if m:
date += '-%s' % m
if d:
date += '-%s' % d
return date
# TODO: split out logic so it is easier to extend and customize display
def render(self, name, value, attrs=None):
'''Render the widget as HTML inputs for display on a form.
:param name: form field base name
:param value: date value
:param attrs: - unused
:returns: HTML text with three inputs for year/month/day
'''
# expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)
year, month, day = 'YYYY', 'MM', 'DD'
if value:
# use the regular expression to pull out year, month, and day values
# if regular expression does not match, inputs will be empty
match = W3C_DATE_RE.match(value)
if match:
date_parts = match.groupdict()
year = date_parts['year']
month = date_parts['month']
day = date_parts['day']
year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };')
month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };')
day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };')
# display widget fields in YYYY-MM-DD order to match W3C date format,
# and putting required field(s) on the left
output = [year_html, month_html, day_html]
return mark_safe(u' / \n'.join(output))
def create_textinput(self, name, field, value, **extra_attrs):
'''Generate and render a :class:`django.forms.widgets.TextInput` for
a single year, month, or day input.
If size is specified in the extra attributes, it will also be used to
set the maximum length of the field.
:param name: base name of the input field
:param field: pattern for this field (used with name to generate input name)
:param value: initial value for the field
:param extra_attrs: any extra widget attributes
:returns: rendered HTML output for the text input
'''
# TODO: move id-generation logic out for re-use
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
# use size to set maximum length
if 'size' in extra_attrs:
extra_attrs['maxlength'] = extra_attrs['size']
local_attrs = self.build_attrs(id=field % id_, **extra_attrs)
txtinput = TextInput()
return txtinput.render(field % name, value, local_attrs)
class W3CDateField(CharField):
'''W3C date field that uses a :class:`~eulcore.django.forms.fields.W3CDateWidget`
for presentation and uses a simple regular expression to do basic validation
on the input (but does not actually test that it is a valid date).
'''
widget = W3CDateWidget
default_error_messages = {
'invalid': u'Enter a date in one of these formats: YYYY, YYYY-MM, or YYYY-MM-DD',
}
default_validators = [validate_w3c_date]
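# Illustrative form usage (form and field names are assumptions):
#   class ItemForm(forms.Form):
#       coverage = W3CDateField(required=False)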
class DynamicSelect(Select):
'''A :class:`~django.forms.widgets.Select` widget whose choices are not
static, but instead generated dynamically when referenced.
:param choices: callable; this will be called to generate choices each
time they are referenced.
'''
def __init__(self, attrs=None, choices=None):
# Skip right over Select and go to its parents. Select just sets
# self.choices, which will break since it's a property here.
super(DynamicSelect, self).__init__(attrs)
if choices is None:
choices = lambda: ()
self._choices = choices
def _get_choices(self):
return self._choices()
def _set_choices(self, choices):
self._choices = choices
choices = property(_get_choices, _set_choices)
class DynamicChoiceField(ChoiceField):
'''A :class:`django.forms.ChoiceField` whose choices are not static, but
instead generated dynamically when referenced.
:param choices: callable; this will be called to generate choices each
time they are referenced
'''
widget = DynamicSelect
def __init__(self, choices=None, widget=None, *args, **kwargs):
# ChoiceField.__init__ tries to set static choices, which won't
# work since our choices are dynamic, so we're going to have to skip
# over it.
# First normalize our choices
if choices is None:
choices = lambda: ()
self._choices = choices
# Then normalize our widget, constructing it with our choices
# function if we need to construct it.
if widget is None:
widget = self.widget
if isinstance(widget, type):
widget = widget(choices=self._choices)
# Now call call super.__init__(), but bypass ChoiceField.
# ChoiceField just sets static choices manually and then calls its
# own super. We don't have static choices, so ChoiceField.__init__()
# would break if we called it. Skip over ChoiceField and go straight
# to *its* super.__init__().
super(ChoiceField, self).__init__(widget=widget, *args, **kwargs)
def _get_choices(self):
return self._choices()
def _set_choices(self, choices):
# if choices is updated, update the widget choice callable also
self._choices = choices
self.widget._choices = self._choices
choices = property(_get_choices, _set_choices)
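# Illustrative DynamicChoiceField usage (the model and callable below are
# assumptions; any zero-argument callable returning choice tuples works):
#   def current_options():
#       return [(c.id, c.label) for c in Choice.objects.all()]
#   field = DynamicChoiceField(choices=current_options)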
| emory-libraries/eulcommon | eulcommon/djangoextras/formfields.py | Python | apache-2.0 | 8,195 |
import httplib
import logging
from error import HapiError
class NullHandler(logging.Handler):
def emit(self, record):
pass
def get_log(name):
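    # Attach a NullHandler so importing applications that have not configured
    # logging do not see "No handlers could be found" warnings.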
logger = logging.getLogger(name)
logger.addHandler(NullHandler())
return logger
| jonathan-s/happy | happy/utils.py | Python | apache-2.0 | 255 |
import serial
from sys import platform as platform
import serial.tools.list_ports
import serial.threaded
from pymouse import PyMouse
from Voice.GoogleTTS import speak
import threading
import math
import copy
import time
import json
data_repository_right = {
"id" : [],
"name" : [],
"shortcuts" : [],
"time_period": [],
"0":[], # "max_acc_@R_x" : [],
"1":[], # "max_acc_^R_x": [],
"2":[], # "max_acc_#R_x": [],
"3":[], # "max_acc_$R_x": [],
"4":[], # "max_acc_%R_x": [],
"5":[], # "max_acc_@R_y" : [],
"6":[], # "max_acc_^R_y": [],
"7":[], # "max_acc_#R_y": [],
"8":[], # "max_acc_$R_y": [],
"9":[], # "max_acc_%R_y": [],
"10":[], # "max_acc_@R_z": [],
"11":[], # "max_acc_^R_z": [],
"12":[], # "max_acc_#R_z": [],
"13":[], # "max_acc_$R_z": [],
"14":[], # "max_acc_%R_z": [],
"15":[], # "min_acc_@R_x": [],
"16":[], # "min_acc_^R_x": [],
"17":[], # "min_acc_#R_x": [],
"18":[], # "min_acc_$R_x": [],
"19":[], # "min_acc_%R_x": [],
"20":[], # "min_acc_@R_y": [],
"21":[], # "min_acc_^R_y": [],
"22":[], # "min_acc_#R_y": [],
"23":[], # "min_acc_$R_y": [],
"24":[], # "min_acc_%R_y": [],
"25":[], # "min_acc_@R_z": [],
"26":[], # "min_acc_^R_z": [],
"27":[], # "min_acc_#R_z": [],
"28":[], # "min_acc_$R_z": [],
"29":[], # "min_acc_%R_z": [],
"30":[], # "start_angle_@R_x":[],
"31":[], # "start_angle_^R_x": [],
"32":[], # "start_angle_#R_x": [],
"33":[], # "start_angle_$R_x": [],
"34":[], # "start_angle_%R_x": [],
"35":[], # "start_angle_@R_y": [],
"36":[], # "start_angle_^R_y": [],
"37":[], # "start_angle_#R_y": [],
"38":[], # "start_angle_$R_y": [],
"39":[], # "start_angle_%R_y": [],
"40":[], # "start_angle_@R_z": [],
"41":[], # "start_angle_^R_z": [],
"42":[], # "start_angle_#R_z": [],
"43":[], # "start_angle_$R_z": [],
"44":[], # "start_angle_%R_z": [],
"45":[], # "end_angle_@R_x": [],
"46":[], # "end_angle_^R_x": [],
"47":[], # "end_angle_#R_x": [],
"48":[], # "end_angle_$R_x": [],
"49":[], # "end_angle_%R_x": [],
"50":[], # "end_angle_@R_y": [],
"51":[], # "end_angle_^R_y": [],
"52":[], # "end_angle_#R_y": [],
"53":[], # "end_angle_$R_y": [],
"54":[], # "end_angle_%R_y": [],
"55":[], # "end_angle_@R_z": [],
"56":[], # "end_angle_^R_z": [],
"57":[], # "end_angle_#R_z": [],
"58":[], # "end_angle_$R_z": [],
"59":[], # "end_angle_%R_z": [],
}
data_repository_left = {
"id": [],
"name": [],
"shortcuts": [],
"time_period": [],
0: [], # "max_acc_@L_x" : [],
1: [], # "max_acc_^L_x": [],
2: [], # "max_acc_#L_x": [],
3: [], # "max_acc_$L_x": [],
4: [], # "max_acc_%L_x": [],
5: [], # "max_acc_@L_y" : [],
6: [], # "max_acc_^L_y": [],
7: [], # "max_acc_#L_y": [],
8: [], # "max_acc_$L_y": [],
9: [], # "max_acc_%L_y": [],
10: [], # "max_acc_@L_z": [],
11: [], # "max_acc_^L_z": [],
12: [], # "max_acc_#L_z": [],
13: [], # "max_acc_$L_z": [],
14: [], # "max_acc_%L_z": [],
15: [], # "min_acc_@L_x": [],
16: [], # "min_acc_^L_x": [],
17: [], # "min_acc_#L_x": [],
18: [], # "min_acc_$L_x": [],
19: [], # "min_acc_%L_x": [],
20: [], # "min_acc_@L_y": [],
21: [], # "min_acc_^L_y": [],
22: [], # "min_acc_#L_y": [],
23: [], # "min_acc_$L_y": [],
24: [], # "min_acc_%L_y": [],
25: [], # "min_acc_@L_z": [],
26: [], # "min_acc_^L_z": [],
27: [], # "min_acc_#L_z": [],
28: [], # "min_acc_$L_z": [],
29: [], # "min_acc_%L_z": [],
30: [], # "start_angle_@L_x":[],
31: [], # "start_angle_^L_x": [],
32: [], # "start_angle_#L_x": [],
33: [], # "start_angle_$L_x": [],
34: [], # "start_angle_%L_x": [],
35: [], # "start_angle_@L_y": [],
36: [], # "start_angle_^L_y": [],
37: [], # "start_angle_#L_y": [],
38: [], # "start_angle_$L_y": [],
39: [], # "start_angle_%L_y": [],
40: [], # "start_angle_@L_z": [],
41: [], # "start_angle_^L_z": [],
42: [], # "start_angle_#L_z": [],
43: [], # "start_angle_$L_z": [],
44: [], # "start_angle_%L_z": [],
45: [], # "end_angle_@L_x": [],
46: [], # "end_angle_^L_x": [],
47: [], # "end_angle_#L_x": [],
48: [], # "end_angle_$L_x": [],
49: [], # "end_angle_%L_x": [],
50: [], # "end_angle_@L_y": [],
51: [], # "end_angle_^L_y": [],
52: [], # "end_angle_#L_y": [],
53: [], # "end_angle_$L_y": [],
54: [], # "end_angle_%L_y": [],
55: [], # "end_angle_@L_z": [],
56: [], # "end_angle_^L_z": [],
57: [], # "end_angle_#L_z": [],
58: [], # "end_angle_$L_z": [],
59: [], # "end_angle_%L_z": [],
}
right_data = {
0: 0, # "acc_@R_x"
1: 0, # "acc_^R_x"
2: 0, # "acc_#R_x"
3: 0, # "acc_$R_x"
4: 0, # "acc_%R_x"
5: 0, # "acc_@R_y"
6: 0, # "acc_^R_y"
7: 0, # "acc_#R_y"
8: 0, # "acc_$R_y"
9: 0, # "acc_%R_y"
10: 0, # "acc_@R_z"
11: 0, # "acc_^R_z"
12: 0, # "acc_#R_z"
13: 0, # "acc_$R_z"
14: 0, # "acc_%R_z"
15: 0, # "angle_@R_x"
16: 0, # "angle_^R_x"
17: 0, # "angle_#R_x"
18: 0, # "angle_$R_x"
19: 0, # "angle_%R_x"
20: 0, # "angle_@R_y"
21: 0, # "angle_^R_y"
22: 0, # "angle_#R_y"
23: 0, # "angle_$R_y"
24: 0, # "angle_%R_y"
25: 0, # "angle_@R_z"
26: 0, # "angle_^R_z"
27: 0, # "angle_#R_z"
28: 0, # "angle_$R_z"
29: 0 # "angle_%R_z"
}
left_data = {
0: 0, # "acc_@L_x"
1: 0, # "acc_^L_x"
2: 0, # "acc_#L_x"
3: 0, # "acc_$L_x"
4: 0, # "acc_%L_x"
5: 0, # "acc_@L_y"
6: 0, # "acc_^L_y"
7: 0, # "acc_#L_y"
8: 0, # "acc_$L_y"
9: 0, # "acc_%L_y"
10: 0, # "acc_@L_z"
11: 0, # "acc_^L_z"
12: 0, # "acc_#L_z"
13: 0, # "acc_$L_z"
14: 0, # "acc_%L_z"
15: 0, # "angle_@L_x"
16: 0, # "angle_^L_x"
17: 0, # "angle_#L_x"
18: 0, # "angle_$L_x"
19: 0, # "angle_%L_x"
20: 0, # "angle_@L_y"
21: 0, # "angle_^L_y"
22: 0, # "angle_#L_y"
23: 0, # "angle_$L_y"
24: 0, # "angle_%L_y"
25: 0, # "angle_@L_z"
26: 0, # "angle_^L_z"
27: 0, # "angle_#L_z"
28: 0, # "angle_$L_z"
29: 0 # "angle_%L_z"
}
pre_right_data = copy.deepcopy(right_data)
pre_left_data = copy.deepcopy(left_data)
average_right_data = copy.deepcopy(right_data)
movement_Sensitivity_x= 2
movement_Sensitivity_y= 2
movement_Sensitivity_z= 2
threshold_movement_Sensitivity = 14000
recognition_Gap_Interval = 200
initial_Gap_Interval = 200
angle_tolerance = 5
acc_tolerance = 0.5
def get_OS_Right():
port = "/dev/tty.Right-DevB"
# LINUX
if platform == "linux" or platform == "linux2":
port = "/dev/tty.Right-DevB"
# MAC OS
elif platform == "darwin":
port = "/dev/tty.Right-DevB"
# WINDOWS
elif platform == "win32":
port = "COM4"
return port
def get_OS_Left():
port = "/dev/tty.LEFT-DevB"
# LINUX
if platform == "linux" or platform == "linux2":
port = "/dev/tty.LEFT-DevB"
# MAC OS
elif platform == "darwin":
port = "/dev/tty.LEFT-DevB"
# WINDOWS
elif platform == "win32":
port = "COM4"
return port
def bluetooth(serRight, serLeft, recognitionFlag=0):
global pre_right_data
global pre_left_data
global average_right_data
global right_data
global left_data
global data_repository_right
iteration_Count = 0
averageFlag = True
#------Recognition variables--------------
recognitionCount = 0
recognitionGapCount = 0
start_time = 0
recognitionMode = False
#Get current id
try:
curr_id = data_repository_right["id"][-1] + 1
except:
curr_id = 0
initialize_data_repository_right()
while True:
# %: Pinky finger, ^: index finger, @: thumb, $: ring
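            # Each finger marker line is followed by six numeric lines that
            # get_data() reads in order: acceleration x/y/z, then angle x/y/z
            # for that finger.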
#-------------RIGHT HAND--------------------------------
try:
line = serRight.readline()
line = line.decode('utf-8')
line = line.strip('\r')
line = line.strip('\n')
if "@" in line: #THUMB
#print(line[0])
right_data[0] = get_data(serRight)
#print(right_data[0])
right_data[5] = get_data(serRight) # Meter per seconds square
#print(right_data[5])
right_data[10] = get_data(serRight)
#print(right_data[10])
right_data[15] = get_data(serRight)
#print(right_data[15])
right_data[20] = get_data(serRight) # Angle in degrees
#print(right_data[20])
right_data[25] = get_data(serRight)
#print(right_data[25])
elif "^" in line: #INDEX FINGER
#print(line[0])
right_data[1] = get_data(serRight)
#print(right_data[1])
right_data[6] = get_data(serRight) # Meter per seconds square
#print(right_data[6])
right_data[11] = get_data(serRight)
#print(right_data[11])
right_data[16] = get_data(serRight)
#print(right_data[16])
right_data[21] = get_data(serRight) # Angle in degrees
#print(right_data[21])
right_data[26] = get_data(serRight)
#print(right_data[26])
elif "#" in line: #MIDDLE FINGER
#print(line[0])
right_data[2] = get_data(serRight)
#print(right_data[2])
right_data[7] = get_data(serRight) # Meter per seconds square
#print(right_data[7])
right_data[12] = get_data(serRight)
#print(right_data[12])
right_data[17] = get_data(serRight)
#print(right_data[17])
right_data[22] = get_data(serRight) # Angle in degrees
#print(right_data[22])
right_data[27] = get_data(serRight)
#print(right_data[27])
elif "$" in line: #RING FINGER
#print(line[0])
right_data[3] = get_data(serRight)
#print(right_data[3])
right_data[8] = get_data(serRight) # Meter per seconds square
#print(right_data[8])
right_data[13] = get_data(serRight)
#print(right_data[13])
right_data[18] = get_data(serRight)
#print(right_data[18])
right_data[23] = get_data(serRight) # Angle in degrees
#print(right_data[23])
right_data[28] = get_data(serRight)
#print(right_data[28])
elif "%" in line: #PINKY FINGER
#print(line[0])
right_data[4] = get_data(serRight)
#print(right_data[4])
right_data[9] = get_data(serRight) # Meter per seconds square
#print(right_data[9])
right_data[14] = get_data(serRight)
#print(right_data[14])
right_data[19] = get_data(serRight)
#print(right_data[19])
right_data[24] = get_data(serRight) # Angle in degrees
#print(right_data[14])
right_data[29] = get_data(serRight)
#print(right_data[29])
except Exception as e:
print("Exception", format(e))
pass
# Refining by taking average of values
if iteration_Count < initial_Gap_Interval and averageFlag == True:
                count = 0
                for curr_Key in right_data:
                    if count > 14: break
                    average_right_data[curr_Key] += right_data[curr_Key]
                    count += 1
elif iteration_Count >= initial_Gap_Interval and averageFlag == True:
count = 0
for curr_Key in right_data:
if count > 14: break
try:
average_right_data[curr_Key] /= initial_Gap_Interval
except:
pass
count += 1
averageFlag = False
elif iteration_Count >= initial_Gap_Interval and averageFlag == False:
count = 0
for curr_Key in right_data:
if count > 14: break
try:
right_data[curr_Key] /= average_right_data[curr_Key]
except:
pass
count += 1
if recognitionFlag != 1:
for eachID in data_repository_right["id"]:
fingerCount = 0 #Finger Recognised count
for max_x, max_y, max_z, min_x, min_y, min_z, start_angle_x, start_angle_y, start_angle_z, right_x, right_y, right_z, right_angle_x, right_angle_y, right_angle_z in zip(list(range(0,5)), list(range(5, 10)), list(range(10, 15)), list(range(15, 20)), list(range(20, 25)), list(range(25, 30)), list(range(30, 35)), list(range(35, 40)), list(range(40, 45)), list(range(0, 5)), list(range(5, 10)),list(range(10, 15)),list(range(15, 20)),list(range(20, 25)),list(range(25, 30))):
if (right_data[right_x] > data_repository_right[str(max_x)][eachID] - acc_tolerance)\
and (right_data[right_x] < data_repository_right[str(max_x)][eachID] + acc_tolerance)\
and (right_data[right_y] > data_repository_right[str(max_y)][eachID] - acc_tolerance)\
and (right_data[right_y] < data_repository_right[str(max_y)][eachID] + acc_tolerance)\
and (right_data[right_z] > data_repository_right[str(max_z)][eachID] - acc_tolerance)\
and (right_data[right_z] < data_repository_right[str(max_z)][eachID] + acc_tolerance)\
and (right_data[right_angle_x] < (data_repository_right[str(start_angle_x)][eachID] + angle_tolerance))\
and (right_data[right_angle_x] > (data_repository_right[str(start_angle_x)][eachID] - angle_tolerance))\
and (right_data[right_angle_y] < (data_repository_right[str(start_angle_y)][eachID] + angle_tolerance))\
and (right_data[right_angle_y] > (data_repository_right[str(start_angle_y)][eachID] - angle_tolerance))\
and (right_data[right_angle_z] < (data_repository_right[str(start_angle_z)][eachID] + angle_tolerance))\
and (right_data[right_angle_z] > (data_repository_right[str(start_angle_z)][eachID] - angle_tolerance)):
fingerCount += 1
if fingerCount == 3:
print("Initial condition true")
else:
print("not matched", "\t", fingerCount)
#print(data_repository_right, end="\n\n")
#print(right_data, end="\n\n")
# ----------------RECOGNITION----------------------------
            # Stored gesture columns: 0-14 max acc, 15-29 min acc,
            # 30-44 start angles, 45-59 end angles. The repository keys are
            # strings ("0".."59"), so j must be converted with str() below.
            i = 0
            pos = 0
            match = False
            while i < len(data_repository_right.get("0")):
                j = 0
                while j + 15 < 60:
                    # The live thumb reading (acc for j < 15, angle for j >= 30)
                    # must lie between the stored bounds in columns j and j+15.
                    live = right_data.get(j) if j < 15 else right_data.get(j - 15)
                    if (live < data_repository_right.get(str(j))[i]) and (live > data_repository_right.get(str(j + 15))[i]):
                        pos = i
                        match = True
                    else:
                        match = False
                    j = j + 5
                    if j == 15:
                        j = 30
                i += 1
if match:
shortcut = data_repository_right.get("shortcuts")[pos]
#Implement Shortcut
if recognitionFlag == 1 and iteration_Count > initial_Gap_Interval:
if recognitionCount > 5:
print(data_repository_right)
print("Ok Recognized")
recognitionFlag = 0
try:
with open('DataRepositoryRight.json', 'w') as outfile:
json.dump(data_repository_right, outfile)
except:
print("Could not write DataRepositoryRight.json")
#return
else: print("Repeat", recognitionCount)
curr_time = time.time()
for x_values, y_values, z_values in zip(list(range(5)), list(range(5, 10)),list(range(10, 15))):
#only x, y, z acceleration values of each finger
if math.fabs(right_data[x_values]) > movement_Sensitivity_x and math.fabs(right_data[y_values]) > movement_Sensitivity_y and math.fabs(right_data[z_values]) > movement_Sensitivity_z:
if recognitionMode == False:
print("Recognition period ON", "True")
start_time = curr_time
store_gesture(False, "right",name="Dummy", shortcuts="dummy", curr_id= curr_id)
recognitionMode = True
elif recognitionMode == True and recognitionGapCount > recognition_Gap_Interval:
recognitionMode = False
time_period = curr_time - start_time
store_gesture(True, "right", time=time_period , curr_id=curr_id)
print("Recognition period OFF", "False")
recognitionCount += 1
recognitionGapCount = 0
break
#----------------------------------------END----------------
pre_right_data = copy.deepcopy(right_data)
pre_left_data = copy.deepcopy(left_data)
iteration_Count += 1
if recognitionMode == True:
recognitionGapCount += 1
def initialize_data_repository_right():
global data_repository_right
data_repository_right["id"].append(0)
data_repository_right["name"].append(" ")
data_repository_right["shortcuts"].append(" ")
data_repository_right["time_period"].append(0)
for i in list(range(60)):
data_repository_right[str(i)].append(0)
def store_gesture(recognitionModeEnd, hand="right", time= 0, name="Dummy", shortcuts="dummy", curr_id = 0):
if hand == "right":
if recognitionModeEnd == False:
data_repository_right["id"][curr_id] = curr_id
data_repository_right["name"][curr_id] = name
data_repository_right["shortcuts"][curr_id] = shortcuts
for i in list(range(15)): # Max Acceleration
# val = get_data_from_Data_Repository(str(i), curr_id)
# if val < right_data[i]:
data_repository_right[str(i)][curr_id] = right_data[i]
for i, j in zip(list(range(15,30)), list(range(15))): #Min Acceleration
# val = get_data_from_Data_Repository(str(i), curr_id)
# if val > right_data[j] or val == 0:
data_repository_right[str(i)][curr_id] = right_data[j]
for i, j in zip(list(range(30, 45)), list(range(15, 30))): #Start Index
# val = get_data_from_Data_Repository(str(i),curr_id)
#if val == 0:
# data_repository_right[str(i)][curr_id] = right_data[j]
#else:
data_repository_right[str(i)][curr_id] = right_data[j] #Average
#------------------------------------------------------------------------------------------------
elif recognitionModeEnd == True:
for i, j in zip(list(range(45, 60)), list(range(15, 30))): #End Index
# val = get_data_from_Data_Repository(str(i), curr_id)
# if val == 0:
# data_repository_right[str(i)][curr_id] = right_data[j]
# else:
data_repository_right[str(i)][curr_id] = right_data[j]
# val = get_data_from_Data_Repository("time_period", curr_id)
# if val == 0:
data_repository_right["time_period"][curr_id] = time # Time period
# else:
# data_repository_right["time_period"][curr_id] = (time + val) / 2 # Time period
elif hand == "left":
pass
return
def get_data_from_Data_Repository(key, curr_id):
global data_repository_right
try:
val = data_repository_right[key][curr_id]
except:
val = 0
return val
def mouse(acc_x, acc_y, acc_z, angle_x, angle_y, angle_z, pre_coor_x, pre_coor_y):
# Condition for mouse
'''
current_coor_x = dim_x
current_coor_y = dim_y
pre_coor_x = 0
pre_coor_y = 0
'''
m = PyMouse()
dim_x, dim_y = m.screen_size()
sensitivity = 10000 * 1.5 #between
pixel_accel_x = (angle_x * 3779.5275591) / sensitivity # pixel per second square
pixel_accel_y = (angle_y * 3779.5275591) / sensitivity
pixel_accel_z = (angle_z * 3779.5275591) / sensitivity
temp_dist_x = 0.5 * pixel_accel_x
temp_dist_y = 0.5 * pixel_accel_y
if temp_dist_x + pre_coor_x <= dim_x and temp_dist_x + pre_coor_x >= 0:
current_coor_x = int(pre_coor_x + temp_dist_x)
if temp_dist_y + pre_coor_y <= dim_y and temp_dist_y + pre_coor_y >= 0:
current_coor_y = int(pre_coor_y + temp_dist_y)
#m.move(current_coor_x, current_coor_y)
print(current_coor_x, "\t", current_coor_y)
pre_coor_x = current_coor_x
pre_coor_y = current_coor_y
return pre_coor_x, pre_coor_y
def get_data(ser):
line = ser.readline()
line = line.decode()
line = line.strip('\r')
line = line.strip('\n')
try:
return int(line)
except: return 0
def gesture_Recognition():
global data_repository_right
global data_repository_left
#Left and Right hand connection---------------------------------------------------------
serRight = serial.Serial(get_OS_Right(), baudrate=115200, timeout=1)
print("Connected Right")
# serLeft = serial.Serial(get_OS_Left(), baudrate=115200, timeout=1)
# print("Connected Left")
#Load Data repository -----------------------------------------------------------------------
try:
with open('DataRepositoryRight.json', 'r') as inputFile:
data_repository_right = json.load(inputFile)
except:
print("DataRepositoryRight.json file not found")
try:
with open('DataRepositoryLeft.json', 'r') as inputFile:
data_repository_left = json.load(inputFile)
except:
print("DataRepositoryLeft.json file not found")
#Connection-----------------------------------------------------------------------------------------
if serRight.isOpen():# or serLeft.isOpen():
bluetooth(serRight,0, recognitionFlag=0)
else:
print("Both are unreachable")
return 0
def main():
pass
if __name__ == '__main__':
gesture_Recognition()
| ashwinpilgaonkar/Flick | Bluetooth/Bluetooth.py | Python | apache-2.0 | 25,172 |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import oslo_messaging
import six
import testtools
from sahara import conductor as cond
from sahara import context
from sahara import exceptions as exc
from sahara.plugins import base as pl_base
from sahara.plugins import provisioning as pr_base
from sahara.service import api as service_api
from sahara.service.api import v10 as api
from sahara.tests.unit import base
from sahara.utils import cluster as c_u
conductor = cond.API
SAMPLE_CLUSTER = {
'plugin_name': 'fake',
'hadoop_version': 'test_version',
'tenant_id': 'tenant_1',
'name': 'test_cluster',
'user_keypair_id': 'my_keypair',
'node_groups': [
{
'auto_security_group': True,
'name': 'ng_1',
'flavor_id': '42',
'node_processes': ['p1', 'p2'],
'count': 1
},
{
'auto_security_group': False,
'name': 'ng_2',
'flavor_id': '42',
'node_processes': ['p3', 'p4'],
'count': 3
},
{
'auto_security_group': False,
'name': 'ng_3',
'flavor_id': '42',
'node_processes': ['p3', 'p4'],
'count': 1
}
],
'cluster_configs': {
'service_1': {
'config_2': 'value_2'
},
'service_2': {
'config_1': 'value_1'
}
},
}
SCALE_DATA = {
'resize_node_groups': [
{
'name': 'ng_1',
'count': 3,
},
{
'name': 'ng_2',
'count': 2,
}
],
'add_node_groups': [
{
'auto_security_group': True,
'name': 'ng_4',
'flavor_id': '42',
'node_processes': ['p1', 'p2'],
'count': 1
},
]
}
class FakePlugin(pr_base.ProvisioningPluginBase):
_info = {}
name = "fake"
def __init__(self, calls_order):
self.calls_order = calls_order
def configure_cluster(self, cluster):
pass
def start_cluster(self, cluster):
pass
def get_description(self):
return "Some description"
def get_title(self):
return "Fake plugin"
def validate(self, cluster):
self.calls_order.append('validate')
def get_open_ports(self, node_group):
self.calls_order.append('get_open_ports')
def validate_scaling(self, cluster, to_be_enlarged, additional):
self.calls_order.append('validate_scaling')
def get_versions(self):
return ['0.1', '0.2']
def get_node_processes(self, version):
return {'HDFS': ['namenode', 'datanode']}
def get_configs(self, version):
return []
def recommend_configs(self, cluster, scaling=False):
self.calls_order.append('recommend_configs')
class FakePluginManager(pl_base.PluginManager):
def __init__(self, calls_order):
super(FakePluginManager, self).__init__()
self.plugins['fake'] = FakePlugin(calls_order)
class FakeOps(object):
def __init__(self, calls_order):
self.calls_order = calls_order
def provision_cluster(self, id):
self.calls_order.append('ops.provision_cluster')
conductor.cluster_update(
context.ctx(), id, {'status': c_u.CLUSTER_STATUS_ACTIVE})
def provision_scaled_cluster(self, id, to_be_enlarged):
self.calls_order.append('ops.provision_scaled_cluster')
# Set scaled to see difference between active and scaled
for (ng, count) in six.iteritems(to_be_enlarged):
conductor.node_group_update(context.ctx(), ng, {'count': count})
conductor.cluster_update(context.ctx(), id, {'status': 'Scaled'})
def terminate_cluster(self, id):
self.calls_order.append('ops.terminate_cluster')
class TestApi(base.SaharaWithDbTestCase):
def setUp(self):
super(TestApi, self).setUp()
self.calls_order = []
self.override_config('plugins', ['fake'])
pl_base.PLUGINS = FakePluginManager(self.calls_order)
service_api.setup_api(FakeOps(self.calls_order))
oslo_messaging.notify.notifier.Notifier.info = mock.Mock()
self.ctx = context.ctx()
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
def test_create_cluster_success(self, check_cluster):
cluster = api.create_cluster(SAMPLE_CLUSTER)
self.assertEqual(1, check_cluster.call_count)
result_cluster = api.get_cluster(cluster.id)
self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster.status)
expected_count = {
'ng_1': 1,
'ng_2': 3,
'ng_3': 1,
}
ng_count = 0
for ng in result_cluster.node_groups:
self.assertEqual(expected_count[ng.name], ng.count)
ng_count += 1
self.assertEqual(3, ng_count)
api.terminate_cluster(result_cluster.id)
self.assertEqual(
['get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster',
'ops.terminate_cluster'], self.calls_order)
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
def test_create_multiple_clusters_success(self, check_cluster):
MULTIPLE_CLUSTERS = SAMPLE_CLUSTER.copy()
MULTIPLE_CLUSTERS['count'] = 2
clusters = api.create_multiple_clusters(MULTIPLE_CLUSTERS)
self.assertEqual(2, check_cluster.call_count)
result_cluster1 = api.get_cluster(clusters['clusters'][0])
result_cluster2 = api.get_cluster(clusters['clusters'][1])
self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster1.status)
self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster2.status)
expected_count = {
'ng_1': 1,
'ng_2': 3,
'ng_3': 1,
}
ng_count = 0
for ng in result_cluster1.node_groups:
self.assertEqual(expected_count[ng.name], ng.count)
ng_count += 1
self.assertEqual(3, ng_count)
api.terminate_cluster(result_cluster1.id)
api.terminate_cluster(result_cluster2.id)
self.assertEqual(
['get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster',
'get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster',
'ops.terminate_cluster',
'ops.terminate_cluster'], self.calls_order)
@mock.patch('sahara.service.quotas.check_cluster')
def test_create_multiple_clusters_failed(self, check_cluster):
MULTIPLE_CLUSTERS = SAMPLE_CLUSTER.copy()
MULTIPLE_CLUSTERS['count'] = 2
check_cluster.side_effect = exc.QuotaException(
'resource', 'requested', 'available')
with testtools.ExpectedException(exc.QuotaException):
api.create_cluster(SAMPLE_CLUSTER)
self.assertEqual('Error', api.get_clusters()[0].status)
@mock.patch('sahara.service.quotas.check_cluster')
def test_create_cluster_failed(self, check_cluster):
check_cluster.side_effect = exc.QuotaException(
'resource', 'requested', 'available')
with testtools.ExpectedException(exc.QuotaException):
api.create_cluster(SAMPLE_CLUSTER)
self.assertEqual('Error', api.get_clusters()[0].status)
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
@mock.patch('sahara.service.quotas.check_scaling', return_value=None)
def test_scale_cluster_success(self, check_scaling, check_cluster):
cluster = api.create_cluster(SAMPLE_CLUSTER)
api.scale_cluster(cluster.id, SCALE_DATA)
result_cluster = api.get_cluster(cluster.id)
self.assertEqual('Scaled', result_cluster.status)
expected_count = {
'ng_1': 3,
'ng_2': 2,
'ng_3': 1,
'ng_4': 1,
}
ng_count = 0
for ng in result_cluster.node_groups:
self.assertEqual(expected_count[ng.name], ng.count)
ng_count += 1
self.assertEqual(4, ng_count)
api.terminate_cluster(result_cluster.id)
self.assertEqual(
['get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster', 'get_open_ports', 'get_open_ports',
'recommend_configs', 'validate_scaling',
'ops.provision_scaled_cluster',
'ops.terminate_cluster'], self.calls_order)
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
@mock.patch('sahara.service.quotas.check_scaling', return_value=None)
def test_scale_cluster_failed(self, check_scaling, check_cluster):
cluster = api.create_cluster(SAMPLE_CLUSTER)
check_scaling.side_effect = exc.QuotaException(
'resource', 'requested', 'available')
with testtools.ExpectedException(exc.QuotaException):
api.scale_cluster(cluster.id, {})
def test_cluster_update(self):
with mock.patch('sahara.service.quotas.check_cluster'):
cluster = api.create_cluster(SAMPLE_CLUSTER)
updated_cluster = api.update_cluster(
cluster.id, {'description': 'Cluster'})
self.assertEqual('Cluster', updated_cluster.description)
def test_get_plugin(self):
# processing to dict
data = api.get_plugin('fake', '0.1').dict
self.assertIsNotNone(data)
self.assertEqual(
len(pr_base.list_of_common_configs()), len(data.get('configs')))
self.assertEqual(['fake', '0.1'], data.get('required_image_tags'))
self.assertEqual(
{'HDFS': ['namenode', 'datanode']}, data.get('node_processes'))
self.assertIsNone(api.get_plugin('fake', '0.3'))
data = api.get_plugin('fake').dict
self.assertIsNotNone(data.get('version_labels'))
self.assertIsNotNone(data.get('plugin_labels'))
del data['plugin_labels']
del data['version_labels']
self.assertEqual({
'description': "Some description",
'name': 'fake',
'title': 'Fake plugin',
'versions': ['0.1', '0.2']}, data)
self.assertIsNone(api.get_plugin('name1', '0.1'))
def test_update_plugin(self):
data = api.get_plugin('fake', '0.1').dict
self.assertIsNotNone(data)
updated = api.update_plugin('fake', values={
'plugin_labels': {'enabled': {'status': False}}}).dict
self.assertFalse(updated['plugin_labels']['enabled']['status'])
updated = api.update_plugin('fake', values={
'plugin_labels': {'enabled': {'status': True}}}).dict
self.assertTrue(updated['plugin_labels']['enabled']['status'])
# restore to original status
updated = api.update_plugin('fake', values={
'plugin_labels': data['plugin_labels']}).dict
self.assertEqual(data['plugin_labels']['enabled']['status'],
updated['plugin_labels']['enabled']['status'])
| openstack/sahara | sahara/tests/unit/service/api/test_v10.py | Python | apache-2.0 | 11,635 |
import re
import datetime
from collections import OrderedDict
TimePattern = re.compile("^[0-9]{4}\-[0-9]{2}\-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{3}Z$")
TimeFormat = "%Y-%m-%dT%H:%M:%S.%fZ"
TimePatternSimple = re.compile("^[0-9]{4}\-[0-9]{2}\-[0-9]{2} [0-9]{2}:[0-9]{2}$")
TimeFormatSimple = "%Y-%m-%d %H:%M"
def convert_time_fields(item):
if not item:
return
for k, v in item.iteritems():
if v is None:
continue
if isinstance(v, dict):
convert_time_fields(v)
elif TimePattern.match(v):
item[k] = datetime.datetime.strptime(v, TimeFormat)
def convert_to_csv(items):
def escape(s):
if type(s) is str and ',' in s:
return '"' + s + '"'
return str(s)
def join(items):
return ','.join(map(lambda i: escape(i), items))
header = join(items[0].keys())
lines = [join(item.values()) for item in items]
return header + "\n" + "\n".join(lines)
def parse_user_accept_languages(header):
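    # Illustrative example (the header value is hypothetical): for
    # "en-US,en;q=0.9,de;q=0.8" this returns ['en', 'de']; quality factors
    # and region subtags are dropped and first-seen order is preserved.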
if header:
return list(OrderedDict.fromkeys(map(lambda h: h.split(';')[0].split('-')[0], header.split(','))))
else:
return []
| flarno11/teslasuc | lib.py | Python | apache-2.0 | 1,173 |
"""replace.py
Tool for replacing variable names in Damon.
Copyright (c) 2009 - 2011, Mark H. Moulton for Pythias Consulting, LLC.
Purpose: This tool was developed to replace CamelCase argument
variables with all lower case equivalents across all Damon modules.
It is being retained because it is readily adaptable to other
global search-and-replace problems.
Damon Version: 1.0.15
Damon Release Date: 5/1/2012
Damon is written in Python 2.7.2 and Numpy 1.6.1, as distributed
by Enthought (EPD 7.2-2 (64-bit)).
License
-------
This program references one or more software modules that are
under copyright to Pythias Consulting, LLC. Therefore, it is subject
to either the Gnu Affero General Public License or the Pythias
Commercial License, a copy of which is contained in the current
working directory.
How To Use
----------
The program is configured to convert CamelCase function/method
variables (but not regular variables) to lower case. To adapt
it to other uses, edit according to the following principles:
* Edit the names.extend(...) statement to get the
correct variables to edit. If you already know
the names to replace, you can comment out this
part of the program and rely on the special
dictionary.
* Edit the creation of the replace_ {} dictionary to
capture the names you are after. Currently, it
applies the s.lower() function, but it can be
anything.
* Set mode = 'inspect'. Obtain and review all names
to be replaced to make sure no new names will clash
with reserved Python or other package names or will
in other ways mangle the program.
The convention is to add a trailing underscore_ where
a Python clash would happen.
* Edit the removeit [] list to specify module
contents to ignore
        *  Edit the special {} dictionary to specify
how to handle names that need special
handling.
* This function replaces only complete words -- those
governed by the regular expression '\b' (consult re
"regular expressions" module in the standard library).
Edit the re.sub(...) statement to replace characters
or other types of patterns.
* Make sure to save a backup of the module to be edited.
It is quite possible that a global search-and-replace
will result in unintended side-effects that require
debugging.
* Under filenames, list the Python modules in the current
working directory that you want to edit.
* Otherwise, you don't need to do any other file handling.
The program will automatically open and edit a Python
module in place.
* Set mode = 'replace' and hit F5 to run the program.
"""
import os
import sys
import cPickle
import inspect
import fileinput
import re
import glob
import damon1
#############
## Specs ##
#############
# Set mode to: <'inspect','replace'>
mode = 'replace'
# Files to edit
testpath = damon1.__path__[0]+'/tests/'
sys.path.append(testpath)
testfiles = glob.glob(testpath+'test_*.py')
testfiles.extend([testpath+'ut_template.py'])
files2inspect = ['core.py','tools.py','utils.py']
files2edit = files2inspect + testfiles + ['__init__.py','template.py']
print 'files2edit=\n',files2edit
# Module contents to ignore when getting variable names
removeit = ['core','utils','tools','npla','__package__','np','__doc__',
'core','cPickle','__builtins__','__file__','sys','__name__',
'npr','npma','os','__module__','__dict__','__weakref__',
'__doc__','self','npt','tab']
# Names that need special attention
special = {'DamonObj':'Damon',
'baseResid':'base_resid',
'RunSpecs':'runspecs',
'finSE':'fin_se',
'baseEst':'base_est',
'RandPercentNaN':'rand_nan',
'RandRange':'rand_range',
'Ents2Destd':'ents2restore',
'finEAR':'fin_ear',
'FixedRangeEnts':'ents2nan',
'FixedRangeLoc':'range2nan',
'AddSourceIDs':'source_ids',
'AddDataDict':'add_datadict',
'finEst':'fin_est',
'restoreInvalid':'restore_invalid',
'extractValid':'extract_valid',
'FacCoords':'fac_coords',
'Fac0Coord':'fac0coord',
'fac1coord':'fac1coord',
'finFit':'fin_fit',
'PredEnts':'pred_ents',
'Jolt':'jolt_',
'baseEAR':'base_ear',
'TabDataRCD':'tab_datadict',
'MissingLbls':'miss4headers',
'RecodeRange1':'recode1',
'RecodeRange2':'recode2',
'RecodeRange3':'recode3',
'baseSE':'base_se',
'baseFit':'base_fit',
'CondCoord':'condcoord_',
'ConstructLabel':'construct_label',
'ConstructEnts':'construct_ents',
'mergeAnsKey':'merge_anskey',
'XtraHeadRng':'extra_headers',
'PercentNaN':'p_nan',
'ScoreMC':'score_mc',
'RespCat':'resp_cat',
'Dtype':'dtype',
'finResid':'fin_resid',
'ConstructAtts':'construct_atts',
'ResidType':'resid_type',
'TargData':'targ_data',
'TargLabels':'targ_labels',
'OrigData':'orig_data',
'ItemDiff':'itemdiff',
'itemDiff':'item_diff',
'ParseParams':'parse_params',
'Params':'params',
'scoreMC':'score_mc',
'ObjEst':'obj_est',
'TargMeanSD':'mean_sd',
'BankF0Ents':'bankf0',
'BankF1Ents':'bankf1',
'ObjEnts':'obj_ents',
'OutputAs':'output_as',
'RespCats':'resp_cats',
'RLRow':'rl_row',
'RLCol':'rl_col',
'CLRow':'cl_row',
'CLCol':'cl_col',
'CoreRow':'core_row',
'CoreCol':'core_col',
'WholeRow':'whole_row',
'WholeCol':'whole_col',
'WholeArray':'whole',
'Fileh':'fileh',
'TextFile':'textfile',
'TextFiles':'textfiles',
'DataDictLink':'datadict_link',
'DataDictWhole':'datadict_whole',
'Pickle':'pickle',
'RCD_Whole':'RCD_whole',
'RCD_Dicts':'RCD_dicts',
'RCD_Dicts_Whole':'RCD_dicts_whole',
'ChunkFunc':'chunkfunc',
'ChunkDict':'chunkdict',
'Model':'model',
'Num':'num',
'extractValid_out':'extract_valid_out',
'pseudoMiss_out':'pseudomiss_out',
'scoreMC_out':'score_mc_out',
'baseEst_out':'base_est_out',
'baseResid_out':'base_resid_out',
'baseEAR_out':'base_ear_out',
'baseSE_out':'base_se_out',
'baseFit_out':'base_fit_out',
'finEst_out':'fin_est_out',
'est2Logit_out':'est2logit_out',
'itemDiff_out':'item_diff_out',
'fillMiss_out':'fillmiss_out',
'finResid_out':'fin_resid_out',
'finFit_out':'fin_fit_out',
'mergeAnsKey_out':'merge_anskey_out',
'restoreInvalid_out':'restore_invalid_out',
'summStat_out':'summstat_out',
'RowEnts':'row_ents',
'ColEnts':'col_ents',
'ObjPerDim':'objperdim',
'Stability':'stability',
'Objectivity':'objectivity',
'BestDim':'bestdim',
'MaxPosDim':'maxposdim',
'Accuracy':'accuracy',
'PsMsResid':'psmsresid',
'Fac0SE':'fac0_se',
'Fac1SE':'fac1_se',
'Fac0Infit':'fac0_infit',
'Fac1Infit':'fac1_infit',
'Fac0Outfit':'fac0_outfit',
'Fac1Outfit':'fac1_outfit',
'Reliability':'reliability',
'CellVar':'cellvar',
'CellFit':'cellfit',
'MsIndex':'msindex',
'PsMsIndex':'psmsindex',
'TrueMsIndex':'true_msindex',
'ParsedMsIndex':'parsed_msindex',
'ParsedTrueMsIndex':'parsed_true_msindex',
'ParsedPsMsIndex':'parsed_psmsindex',
'ObjEstimates':'obj_estimates',
'ObjCoords':'obj_coord',
'EARCoord':'ear_coord',
'EntCoord':'ent_coord',
'StepCoord':'step_coord',
'Facet0':'facet0',
'Facet1':'facet1',
'logitEAR_out':'logit_ear_out',
'logitSE_out':'logit_se_out',
'ObsPerCellFactor':'obspercell_factor',
'SECoord':'se_coord',
'Logit':'Logit',
'EquateParams':'equate_params',
'Ratio':'ratio',
'Interval':'interval',
'Sigmoid':'sigmoid',
'ChangeLog':'changelog',
'ObjParams':'obj_params',
'PyTable.hd5':'pytable.hd5',
'seedBank.pkl':'seedbank.pkl',
'MyDamonObj':'my_DamonObj',
'MyDmnObj':'my_obj',
'StdParams':'std_params',
'EAR':'EAR',
'Facet':'Facet',
'InputArray':'input_array',
'Array':'array',
'Arrays':'arrays',
'Data':'data',
'File':'file',
'U':'U',
'x':'x',
'X':'X',
'R':'R',
'C':'C',
'V':'V',
'E':'E',
'InitEArray':'init_earray',
'InvUTU':'invUTU_',
'invUTU':'invUTU',
'Range':'range_',
'Type':'type_',
'Return':'return_',
'ArrayNames':'array_names',
'CondFacet':'cond_facet',
'DataDict':'datadict',
'SolveMethod':'solve_meth',
'SolveMethSpecs':'solve_meth_specs',
'SourceIDs':'source_ids',
'TargetIDs':'target_ids',
'InclTarg':'targ_in_sum',
'SigmThresh':'sigma_thresh',
'PredAlpha':'pred_alpha',
'OrigObs':'orig_obs',
'BiasedEst':'biased_est',
'Shape':'shape',
'MissLeftColLabels':'fill_left',
'MissTopRowLabels':'fill_top',
'MinRating':'min_rating',
'RegRMSE':'rmse_reg',
'ErrArray':'st_err',
'SumSqRowPtBis':'row_ptbis',
'SumSqColPtBis':'col_ptbis',
'TargDataIndex':'targ_data_ind',
'TupData':'tup_data',
'PredKey':'pred_key',
'MissMethod':'miss_meth',
'AttRow':'att_row',
'CountChars':'count_chars',
'nKeyColHeaders':'nheaders4cols_key',
'ExtrEst':'extr_est',
'EARArray':'ear',
'DataRCD':'datadict',
'PyTables':'pytables',
'Format':'format_',
'MethSpecs':'meth_specs',
'NearestVal':'nearest_val',
'Median':'median_',
'EstShape':'est_shape',
'Tests':'tests_',
'Val':'Val',
'Res':'Res',
'Locals':'_locals',
'Locals1':'_locals1',
'_baseEAR':'_base_ear',
'_finResid':'_fin_resid',
'_extractValid':'_extract_valid',
'_mergeAnsKey':'_merge_anskey',
'_scoreMC':'_score_mc',
'_finEst':'_fin_est',
'_baseFit':'_base_fit',
'_baseResid':'_base_resid',
'_baseSE':'_base_se',
'_finFit':'_fin_fit',
'_baseEst':'_base_est',
'_restoreInvalid':'_restore_invalid',
'_itemdiff':'_item_diff',
}
#############
## Get ##
## Names ##
#############
if mode == 'inspect':
objs = []
names = []
# Import module
for i in range(len(files2inspect)):
stringmod = files2inspect[i].replace('.py','')
mod = __import__(stringmod)
modobjs = mod.__dict__.keys()
# Remove unneeded objects
for obj in removeit:
try:
modobjs.remove(obj)
except ValueError:
pass
# Include top-level function names in list
names.extend(modobjs)
# Get names automatically
for obj in modobjs:
try:
names.extend(inspect.getargspec(mod.__dict__[obj])[0])
except TypeError:
try:
subobjs = mod.__dict__[obj].__dict__.keys()
for subobj in removeit:
try:
subobjs.remove(subobj)
except ValueError:
pass
names.extend(subobjs)
for subobj in subobjs:
names.extend(inspect.getargspec(mod.__dict__[obj].__dict__[subobj])[0])
for name in removeit:
try:
names.remove(name)
except ValueError:
pass
except:
pass
#####################
## Build ##
## replace_ dict ##
#####################
replace_ = {}
for name in names:
replace_[name] = name.lower() # replace name with lowercase version
for specname in special.keys():
replace_[specname] = special[specname]
if mode == 'inspect':
print 'replace_ dictionary:\n',replace_
# Save as pickle
dbfile = open('replaceDB.pkl','wb')
cPickle.dump(replace_,dbfile)
dbfile.close()
###############
## Edit ##
## Modules ##
###############
if mode == 'replace':
print 'replace() is working...\n'
# Use replace dictionary in pickle db
dbfile = open('replaceDB.pkl','rb')
replace_ = cPickle.load(dbfile)
dbfile.close()
for filename in files2edit:
print 'Working on',filename
# Edit line
for line in fileinput.input(filename,inplace=True):
# Replace all specified names in line
for name in replace_.keys():
line = re.sub(r'\b'+name+r'\b',replace_[name],line)
# Replace line with fully edited line
print line,
print 'replace() is done.'
##############
## Run ##
## Module ##
##############
# To run functions that are defined in this module
##if __name__ == "__main__":
## A = MyFunc(...)
## print A
| Sohojoe/damon | damon1/replace_it.py | Python | apache-2.0 | 14,439 |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mango
@contact: [email protected]
@create: 16/7/4
hail hydra!
"""
__author__ = "mango"
__version__ = "0.1"
from kazoo.client import KazooClient
import logging
logging.basicConfig()
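# Thin convenience wrapper around kazoo's KazooClient. A usage sketch (the
# host:port string below is illustrative):
#
#     zk = ZookeeperClient("127.0.0.1:2181")
#     if zk.connect() == 0:
#         zk.create_node("/demo", "hello")
#         data, stat = zk.get_data("/demo")
#         zk.zk_stop()
#
# Most methods return 0 on success and 1 on failure instead of raising.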
class ZookeeperClient(object):
def __init__(self, zk_host):
self.zk_hosts = zk_host
self.zk = KazooClient(hosts=self.zk_hosts)
def connect(self):
try:
self.zk.start()
return 0
except Exception, e:
return 1
def create_node(self, nodename, content):
try:
self.zk.create(nodename, content)
return 0
except Exception, e:
print e
return 1
def create_ephemeral_node(self, nodename, content):
try:
self.zk.create(nodename, content, ephemeral=True)
return 0
except Exception, e:
print e
return 1
def create_node_file(self, nodename, configfile_path):
try:
with open(configfile_path, 'r') as fp:
config = fp.read()
self.zk.create(nodename, config)
return 0
except Exception, e:
print e
return 1
def get_data(self, nodename):
data, stat = self.zk.get(nodename)
return data, stat
    def set_data(self, nodename, content):
        try:
            self.zk.set(nodename, content)
return 0
except Exception, e:
return 1
def set_data_file(self, nodename, configfile_path):
try:
with open(configfile_path, 'r') as fp:
config = fp.read()
self.zk.set(nodename, config)
return 0
except Exception, e:
print e
return 1
def delete_node(self, nodename):
try:
self.zk.delete(nodename)
return 0
except Exception, e:
print e
return 1
def exists_node(self, nodename):
try:
result = self.zk.exists(nodename)
return result
except Exception, e:
return 1
def zk_stop(self):
self.zk.stop() | w4n9H/PythonSkillTree | Storage/BigData/Zookeeper/ZKClient.py | Python | apache-2.0 | 2,203 |
"""A simple multi-agent env with two agents playing rock paper scissors.
This demonstrates running the following policies in competition:
(1) heuristic policy of repeating the same move
(2) heuristic policy of beating the last opponent move
(3) LSTM/feedforward PG policies
(4) LSTM policy with custom entropy loss
"""
import argparse
from gym.spaces import Discrete
import os
import random
from ray import tune
from ray.rllib.agents.pg import PGTrainer, PGTFPolicy, PGTorchPolicy
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.examples.env.rock_paper_scissors import RockPaperScissors
from ray.rllib.examples.policy.rock_paper_scissors_dummies import \
BeatLastHeuristic, AlwaysSameHeuristic
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check_learning_achieved
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
parser = argparse.ArgumentParser()
parser.add_argument("--torch", action="store_true")
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--stop-iters", type=int, default=150)
parser.add_argument("--stop-reward", type=float, default=1000.0)
parser.add_argument("--stop-timesteps", type=int, default=100000)
def run_same_policy(args, stop):
"""Use the same policy for both agents (trivial case)."""
config = {
"env": RockPaperScissors,
"framework": "torch" if args.torch else "tf",
}
results = tune.run("PG", config=config, stop=stop, verbose=1)
if args.as_test:
# Check vs 0.0 as we are playing a zero-sum game.
check_learning_achieved(results, 0.0)
def run_heuristic_vs_learned(args, use_lstm=False, trainer="PG"):
"""Run heuristic policies vs a learned agent.
The learned agent should eventually reach a reward of ~5 with
use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy
can perform better is since it can distinguish between the always_same vs
beat_last heuristics.
"""
def select_policy(agent_id):
if agent_id == "player1":
return "learned"
else:
return random.choice(["always_same", "beat_last"])
config = {
"env": RockPaperScissors,
"gamma": 0.9,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"num_workers": 0,
"num_envs_per_worker": 4,
"rollout_fragment_length": 10,
"train_batch_size": 200,
"multiagent": {
"policies_to_train": ["learned"],
"policies": {
"always_same": (AlwaysSameHeuristic, Discrete(3), Discrete(3),
{}),
"beat_last": (BeatLastHeuristic, Discrete(3), Discrete(3), {}),
"learned": (None, Discrete(3), Discrete(3), {
"model": {
"use_lstm": use_lstm
},
"framework": "torch" if args.torch else "tf",
}),
},
"policy_mapping_fn": select_policy,
},
"framework": "torch" if args.torch else "tf",
}
cls = get_agent_class(trainer) if isinstance(trainer, str) else trainer
trainer_obj = cls(config=config)
env = trainer_obj.workers.local_worker().env
for _ in range(args.stop_iters):
results = trainer_obj.train()
print(results)
# Timesteps reached.
if results["timesteps_total"] > args.stop_timesteps:
break
# Reward (difference) reached -> all good, return.
elif env.player1_score - env.player2_score > args.stop_reward:
return
# Reward (difference) not reached: Error if `as_test`.
if args.as_test:
raise ValueError(
"Desired reward difference ({}) not reached! Only got to {}.".
format(args.stop_reward, env.player1_score - env.player2_score))
def run_with_custom_entropy_loss(args, stop):
"""Example of customizing the loss function of an existing policy.
This performs about the same as the default loss does."""
def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):
logits, _ = model.from_batch(train_batch)
action_dist = dist_class(logits, model)
if args.torch:
# required by PGTorchPolicy's stats fn.
policy.pi_err = torch.tensor([0.0])
return torch.mean(-0.1 * action_dist.entropy() -
(action_dist.logp(train_batch["actions"]) *
train_batch["advantages"]))
else:
return (-0.1 * action_dist.entropy() - tf.reduce_mean(
action_dist.logp(train_batch["actions"]) *
train_batch["advantages"]))
policy_cls = PGTorchPolicy if args.torch else PGTFPolicy
EntropyPolicy = policy_cls.with_updates(
loss_fn=entropy_policy_gradient_loss)
EntropyLossPG = PGTrainer.with_updates(
name="EntropyPG", get_policy_class=lambda _: EntropyPolicy)
run_heuristic_vs_learned(args, use_lstm=True, trainer=EntropyLossPG)
if __name__ == "__main__":
args = parser.parse_args()
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
run_same_policy(args, stop=stop)
print("run_same_policy: ok.")
run_heuristic_vs_learned(args, use_lstm=False)
print("run_heuristic_vs_learned(w/o lstm): ok.")
run_heuristic_vs_learned(args, use_lstm=True)
print("run_heuristic_vs_learned (w/ lstm): ok.")
run_with_custom_entropy_loss(args, stop=stop)
print("run_with_custom_entropy_loss: ok.")
| richardliaw/ray | rllib/examples/rock_paper_scissors_multiagent.py | Python | apache-2.0 | 5,789 |
from os.path import dirname
import numpy as np
from ..os import open_file, exists_isdir, makedirs
from ..log import get_logger
logger = get_logger()
def read_or_write(data_f, fallback=None):
"""Loads the data file if it exists. Otherwise, if fallback is provided,
call fallback and save its return to disk.
Args:
data_f (str): Path to the data file, whose extension will be used for
deciding how to load the data.
fallback (function, optional): Fallback function used if data file
doesn't exist. Its return will be saved to ``data_f`` for future
loadings. It should not take arguments, but if yours requires taking
arguments, just wrap yours with::
fallback=lambda: your_fancy_func(var0, var1)
Returns:
Data loaded if ``data_f`` exists; otherwise, ``fallback``'s return
(``None`` if no fallback).
Writes
- Return by the fallback, if provided.
"""
# Decide data file type
ext = data_f.split('.')[-1].lower()
def load_func(path):
with open_file(path, 'rb') as h:
data = np.load(h)
return data
def save_func(data, path):
if ext == 'npy':
save = np.save
elif ext == 'npz':
save = np.savez
else:
raise NotImplementedError(ext)
with open_file(path, 'wb') as h:
save(h, data)
# Load or call fallback
if exists_isdir(data_f)[0]:
data = load_func(data_f)
msg = "Loaded: "
else:
msg = "File doesn't exist "
if fallback is None:
data = None
msg += "(fallback not provided): "
else:
data = fallback()
out_dir = dirname(data_f)
makedirs(out_dir)
save_func(data, data_f)
msg += "(fallback provided); fallback return now saved to: "
msg += data_f
logger.info(msg)
return data
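# A small usage sketch (the path and the fallback are illustrative, not part
# of the library):
#
#     stats = read_or_write('/tmp/stats.npy',
#                           fallback=lambda: np.random.rand(16, 3))
#
# The first call computes the array via the fallback and saves it to disk;
# later calls just load the cached .npy file.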
| google/nerfactor | third_party/xiuminglib/xiuminglib/io/np.py | Python | apache-2.0 | 1,973 |
from core_tests_base import CoreTestsBase, FakeTessagon, FakeTileSubClass
class TestTile(CoreTestsBase):
# Note: these tests are highly dependent on the behavior of
# FakeTessagon and FakeAdaptor
def test_add_vert(self):
tessagon = FakeTessagon()
tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
v_range=[2.5, 3.0])
tile.add_vert(['top', 'left'], 0.25, 0.75)
assert tile.blend(0.25, 0.75) == [0.625, 2.875]
# One vert added
assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
assert tile.verts['top']['right'] is None
assert tile.verts['bottom']['left'] is None
assert tile.verts['bottom']['right'] is None
def test_add_vert_u_symmetric(self):
tessagon = FakeTessagon()
tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
v_range=[2.5, 3.0],
u_symmetric=True)
tile.add_vert(['top', 'left'], 0.25, 0.75)
# [0.75, 0.75] is reflection of [0.25, 0.75] in U direction
assert tile.blend(0.75, 0.75) == [0.875, 2.875]
# Two verts added
assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
assert tile.verts['top']['right'] == tile.f(0.875, 2.875)
assert tile.verts['bottom']['left'] is None
assert tile.verts['bottom']['right'] is None
def test_add_vert_v_symmetric(self):
tessagon = FakeTessagon()
tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
v_range=[2.5, 3.0],
v_symmetric=True)
tile.add_vert(['top', 'left'], 0.25, 0.75)
# [0.25, 0.25] is reflection of [0.25, 0.75] in V direction
assert tile.blend(0.25, 0.25) == [0.625, 2.625]
# Two verts added
assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
assert tile.verts['top']['right'] is None
assert tile.verts['bottom']['left'] == tile.f(0.625, 2.625)
assert tile.verts['bottom']['right'] is None
def test_add_vert_u_v_symmetric(self):
tessagon = FakeTessagon()
tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0],
v_range=[2.5, 3.0],
u_symmetric=True, v_symmetric=True)
tile.add_vert(['top', 'left'], 0.25, 0.75)
# [0.75, 0.25] is reflection of [0.25, 0.75] in U and V directions
assert tile.blend(0.75, 0.25) == [0.875, 2.625]
# Four verts added
assert tile.verts['top']['left'] == tile.f(0.625, 2.875)
assert tile.verts['top']['right'] == tile.f(0.875, 2.875)
assert tile.verts['bottom']['left'] == tile.f(0.625, 2.625)
assert tile.verts['bottom']['right'] == tile.f(0.875, 2.625)
| cwant/tessagon | tests/core/test_tile.py | Python | apache-2.0 | 2,833 |
# old single process tictactoe game to be rewritten....
import numpy
import os
class TicTacToePlayer:
def __init__(self, playerType, playerName, ttt_game_settings):
if playerType not in ['AI', 'Terminal']:
raise(ValueError())
self.type=playerType
self.name=playerName
self.board_size = ttt_game_settings['board_size']
self.nr_of_positions = ttt_game_settings['board_size'][0]*ttt_game_settings['board_size'][1]
def playMove(self, board):
if self.type == 'AI':
return numpy.random.randint(1,self.nr_of_positions+1)
elif self.type == 'Terminal':
return self.playFromTerminal(board)
def playFromTerminal(self,board):
os.system('cls' if os.name == 'nt' else 'clear')
print(board)
while True:
try:
moveAttempt = input("%s! Play a position (1-%i): " % (self.name, self.nr_of_positions))
if ((1 <= int(moveAttempt) <= self.nr_of_positions)):
row = (int(moveAttempt)-1) // self.board_size[1]
col = (int(moveAttempt)-1) % self.board_size[1]
if board[row,col] == 0:
break # valid move!
else:
print("That position is already taken!")
else:
print("That is not an integer between 1 and %i!" % self.nr_of_positions)
except ValueError:
print("That is not an integer!")
return int(moveAttempt)
def getName(self):
return self.name
class TicTacToeGame:
def __init__(self, player1Name="player1", playerMinus1Name="player2", player1Type="Terminal", playerMinus1Type="Terminal"\
, settings=dict(board_size=(3,3), win_length=3)):
self.game, self.board_size, self.win_length = self.initializa_game(settings)
self.player1 = TicTacToePlayer(\
playerType=player1Type,\
playerName=player1Name,\
ttt_game_settings=settings)
self.playerMinus1 = TicTacToePlayer(\
playerType=playerMinus1Type,\
playerName=playerMinus1Name,\
ttt_game_settings=settings)
def getGame(self):
return self.game
def initializa_game(self,settings):
board_size_in = settings['board_size']
win_length = settings['win_length']
board_size = numpy.asarray(board_size_in,dtype='int')
if len(board_size) != 2: raise(ValueError('Not a good size!'))
if win_length > min(board_size) or not isinstance(win_length, int): raise(ValueError('Not a X in rows config.'))
return [], board_size, win_length
def getBoard(self):
board = numpy.zeros(shape=self.board_size)
currPlayerIs1 = True
for move in self.game:
row = (int(move)-1) // self.board_size[1]
col = (int(move)-1) % self.board_size[1]
board[row,col] = 1 if currPlayerIs1 else -1
currPlayerIs1 = not currPlayerIs1
return board
def playMove(self,move):
if int(move) in self.game \
or int(move) > self.board_size[0]*self.board_size[1]:
return False #invalid move
else:
self.game.append(int(move))
return True
def play(self):
currPlayerIs1 = True
while True:
moveAttempt = self.player1.playMove(self.getBoard()) if currPlayerIs1 else self.playerMinus1.playMove(self.getBoard())
if self.playMove(moveAttempt):
currPlayerIs1 = not currPlayerIs1
gameHasEnded, endMessage = self.checkGameEnded()
if gameHasEnded:
print(endMessage)
print(self.getBoard())
print("Thank you for playing Tic-Tac-Toe!")
break
def checkWinner(self):
board = self.getBoard()
lastmove = self.game[-1]
row = (int(lastmove)-1) // self.board_size[1]
col = (int(lastmove)-1) % self.board_size[1]
lastmove = (row,col)
currPlayerName = self.player1.getName() if len(self.game) % 2 == 1 else self.playerMinus1.getName()
num_N = self.checkStreak(lastmove,'N')
num_S = self.checkStreak(lastmove,'S')
if 1+num_N + num_S == self.win_length:
return True, "%s won!" % currPlayerName
num_E = self.checkStreak(lastmove,'E')
num_W = self.checkStreak(lastmove,'W')
if 1+num_E + num_W == self.win_length:
return True, "%s won!" % currPlayerName
num_NE = self.checkStreak(lastmove,'NE')
num_SW = self.checkStreak(lastmove,'SW')
if 1+num_NE + num_SW == self.win_length:
return True, "%s won!" % currPlayerName
num_SE = self.checkStreak(lastmove,'SE')
num_NW = self.checkStreak(lastmove,'NW')
if 1+num_SE + num_NW == self.win_length:
return True, "%s won!" % currPlayerName
return False, ""
def checkStreak(self,position,direction):
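        # Counts how many consecutive cells in the given compass direction
        # hold the same mark as the cell at `position`; the starting cell is
        # not counted, so checkWinner() adds 1 and sums opposite directions.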
if direction == 'N':
parsed_dir = (-1,0)
elif direction =='S':
parsed_dir = (1,0)
elif direction == 'E':
parsed_dir = (0,1)
elif direction == 'W':
parsed_dir = (0,-1)
elif direction == 'NW':
parsed_dir = (-1,-1)
elif direction == 'SW':
parsed_dir = (1,-1)
elif direction == 'NE':
parsed_dir = (-1,1)
elif direction == 'SE':
parsed_dir = (1,1)
next_pos = numpy.asarray(position)+numpy.asarray(parsed_dir)
board = self.getBoard()
if next_pos[0] <0 or next_pos[1] < 0: return 0
if next_pos[0] >= self.board_size[0] or next_pos[1] >= self.board_size[1]: return 0
player_here = board[position[0],position[1]]
next_player = board[next_pos[0],next_pos[1]]
if player_here != next_player:
return 0
else:
return self.checkStreak(next_pos,direction) + 1
def checkGameEnded(self):
gameHasEnded, endMessage = self.checkWinner()
if not gameHasEnded:
if len(self.game) == self.board_size[0]*self.board_size[1]:
gameHasEnded = True
endMessage = "Its a tie!"
return gameHasEnded, endMessage
if __name__ == "__main__":
ttt_game_settings = dict(board_size=(4,4), win_length=3)
myGame = TicTacToeGame(player1Name="Ludvig", playerMinus1Name="PC", playerMinus1Type="AI",settings=ttt_game_settings)
myGame.play()
| dirrelito/animated-invention | ThreadedTicTacToe/_old/ttt.py | Python | apache-2.0 | 5,563 |
import os
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import shap
import mlflow
# prepare training data
X, y = load_iris(return_X_y=True, as_frame=True)
# train a model
model = RandomForestClassifier()
model.fit(X, y)
# log an explanation
with mlflow.start_run() as run:
mlflow.shap.log_explanation(model.predict_proba, X)
# list artifacts
client = mlflow.tracking.MlflowClient()
artifact_path = "model_explanations_shap"
artifacts = [x.path for x in client.list_artifacts(run.info.run_id, artifact_path)]
print("# artifacts:")
print(artifacts)
# load back the logged explanation
dst_path = client.download_artifacts(run.info.run_id, artifact_path)
base_values = np.load(os.path.join(dst_path, "base_values.npy"))
shap_values = np.load(os.path.join(dst_path, "shap_values.npy"))
# show a force plot
shap.force_plot(base_values[0], shap_values[0, 0, :], X.iloc[0, :], matplotlib=True)
| mlflow/mlflow | examples/shap/multiclass_classification.py | Python | apache-2.0 | 964 |
# !/usr/bin/python
# coding=utf-8
#
# @Author: LiXiaoYu
# @Time: 2013-10-17
# @Info: lang
_lang = {
'_MODULE_NOT_EXIST_':'无法加载模块',
'_ERROR_ACTION_':'非法操作',
'_LANGUAGE_NOT_LOAD_':'无法加载语言包',
'_TEMPLATE_NOT_EXIST_':'模板不存在',
'_MODULE_':'模块',
'_ACTION_':'操作',
'_ACTION_NOT_EXIST_':'控制器不存在或者没有定义',
'_MODEL_NOT_EXIST_':'模型不存在或者没有定义',
'_VALID_ACCESS_':'没有权限',
'_XML_TAG_ERROR_':'XML标签语法错误',
'_DATA_TYPE_INVALID_':'非法数据对象!',
'_OPERATION_WRONG_':'操作出现错误',
'_NOT_LOAD_DB_':'无法加载数据库',
'_NOT_SUPPORT_DB_':'系统暂时不支持数据库',
'_NO_DB_CONFIG_':'没有定义数据库配置',
'_NOT_SUPPERT_':'系统不支持',
'_CACHE_TYPE_INVALID_':'无法加载缓存类型',
'_FILE_NOT_WRITEABLE_':'目录(文件)不可写',
'_METHOD_NOT_EXIST_':'您所请求的方法不存在!',
'_CLASS_NOT_EXIST_':'实例化一个不存在的类!',
'_CLASS_CONFLICT_':'类名冲突',
'_TEMPLATE_ERROR_':'模板引擎错误',
'_CACHE_WRITE_ERROR_':'缓存文件写入失败!',
'_TAGLIB_NOT_EXIST_':'标签库未定义',
'_OPERATION_FAIL_':'操作失败!',
'_OPERATION_SUCCESS_':'操作成功!',
'_SELECT_NOT_EXIST_':'记录不存在!',
'_EXPRESS_ERROR_':'表达式错误',
'_TOKEN_ERROR_':'表单令牌错误',
'_RECORD_HAS_UPDATE_':'记录已经更新',
'_NOT_ALLOW_PHP_':'模板禁用PHP代码',
}
| lxy235/lserver | src/Common/Lang/zh_cn.py | Python | apache-2.0 | 1,569 |
#!/usr/bin/env python3
#
# Copyright (c) 2015, Roberto Riggio
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CREATE-NET nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CREATE-NET ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CREATE-NET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Basic Zephyr manager."""
from empower.core.app import EmpowerApp
from empower.core.app import DEFAULT_PERIOD
from empower.main import RUNTIME
from empower.datatypes.etheraddress import EtherAddress
from empower.core.resourcepool import ResourcePool
from empower.lvapp.lvappconnection import LVAPPConnection
import time, datetime, threading
import empower.apps.zephyr.zephyrLinker as linker
starttime = datetime.datetime.now()
class Zephyr(EmpowerApp):
"""Basic mobility manager.
Command Line Parameters:
tenant_id: tenant id
limit: handover limit in dBm (optional, default -80)
every: loop period in ms (optional, default 5000ms)
Example:
./empower-runtime.py apps.mobilitymanager.mobilitymanager \
--tenant_id=52313ecb-9d00-4b7d-b873-b55d3d9ada26
"""
def __init__(self, **kwargs):
self.__limit = linker.DEFAULT_RSSI_LIMIT
EmpowerApp.__init__(self, **kwargs)
        # Register a wtp up event
self.wtpup(callback=self.wtp_up_callback)
# Register an lvap join event
self.lvapjoin(callback=self.lvap_join_callback)
# Register an lvap leave event
self.lvapleave(callback=self.lvap_leave_callback)
def lvap_leave_callback(self, lvap):
"""Called when an LVAP disassociates from a tennant."""
self.log.info("LVAP %s left %s" % (lvap.addr, lvap.ssid))
def wtp_up_callback(self, wtp):
"""Called when a new WTP connects to the controller."""
for block in wtp.supports:
self.ucqm(block=block, every=self.every)
def lvap_join_callback(self, lvap):
"""Called when an joins the network."""
self.rssi(lvap=lvap.addr, value=self.limit, relation='LT',
callback=self.low_rssi)
def handover(self, lvap):
""" Handover the LVAP to a WTP with
        an RSSI higher than -65dB. """
self.log.info("Running handover...")
self.log.info("LVAP: %s - Limit RSSI : %u dB" % (lvap.addr, self.limit))
self.log.info("Initialize the Resource Pool")
pool = ResourcePool()
for wtp in self.wtps():
#for wtpd, lvaps in wtpdict.items():
#self.log.info("WTP in wtps : %s WTP in dict : %s are equal : %u\n" % (str(wtp.addr), wtpd, (wtp.addr == wtpd)))
templist = linker.wtpdict[str(wtp.addr)]
length = len(templist)
self.log.info("Pooling WTP: %s" % str(wtp.addr))
self.log.info(wtp.supports)
pool = pool | wtp.supports
self.log.info("Select matching Resource Blocks")
matches = pool #& lvap.scheduled_on
self.log.info(matches)
self.log.info("LVAP1 LOOP 107")
counter=0
for lvap in self.lvaps():
self.log.info("!!!!!!!!!!!!!!!%d : %s" % (counter, lvap.addr))
counter=counter+1
for block in matches:
self.log.info("Time : %f \n LVAP : %s \n addr : %s \n last_rssi_avg : %.2f \n last_rssi_std : %.2f \n last_packets : %u \n mov_rrsi : %.2f\n" % (time.time(),
lvap.addr,
block.ucqm[lvap.addr]['addr'],
block.ucqm[lvap.addr]['last_rssi_avg'],
block.ucqm[lvap.addr]['last_rssi_std'],
block.ucqm[lvap.addr]['last_packets'],
block.ucqm[lvap.addr]['mov_rssi']))
if (lvap.addr=="78:44:76:BF:DA:D4"):
self.log.info("LVAP: %s is leaving" % lvap.addr)
#del lvap.downlink[block] #deletes lvap
# Initialize the Resource Pool
pool = ResourcePool()
# Update the Resource Pool with all
# the available Resourse Blocks
for wtp in self.wtps():
if (str(wtp.addr) in linker.wtpdict):
if (len(linker.wtpdict[str(wtp.addr)]) < linker.wtpdict_limit[str(wtp.addr)]):
pool = pool | wtp.supports
# Select matching Resource Blocks
matches = pool & lvap.scheduled_on
# Filter Resource Blocks by RSSI
valid = [block for block in matches
if block.ucqm[lvap.addr]['mov_rssi'] >= self.limit]
#valid = self.blocks(lvap, self.limit)
if not valid:
self.log.info("not valid")
return
for block in valid:
self.log.info("valid LVAP: %s - Current RSSI : %u dB" % (lvap.addr, float(block.ucqm[lvap.addr]['mov_rssi'])))
new_block = max(valid, key=lambda x: x.ucqm[lvap.addr]['mov_rssi'])
self.log.info("LVAP %s setting new block %s" % (lvap.addr, new_block))
lvap.scheduled_on = new_block
    @property
    def limit(self):
        """Return the RSSI limit."""
        return self.__limit
    @limit.setter
    def limit(self, value):
        """Set limit."""
        limit = int(value)
        if limit > 0 or limit < -100:
            raise ValueError("Invalid value for limit")
        self.log.info("Setting limit %u dB" % value)
        self.__limit = limit
    def set_limit(self, value):
        """Set limit."""
        limit = int(value)
        if limit > 0 or limit < -100:
            raise ValueError("Invalid value for limit")
        self.log.info("Setting limit %u dB" % value)
        self.__limit = limit
    def low_rssi(self, trigger):
        """ Perform handover if an LVAP's RSSI
        drops below the threshold. """
        self.log.info("Received trigger from %s rssi %u dB",
                      trigger.event['block'],
                      trigger.event['current'])
        lvap = self.lvap(trigger.lvap)
        if not lvap:
            return
        self.handover(lvap)
    def wtp_clientlimit(self):
        self.log.info("Running Client Limit...")
        wtp_c = 0
        for wtp in self.wtps():
            #Create lvaplist for the specific wtp
            lvaplist = []
            for lvap in self.lvaps():
                if lvap.wtp.addr == wtp.addr:
                    #self.log.info("LVAP before list : %s" % lvap.addr)
                    lvaplist.append(str(lvap.addr))
                    #self.log.info("LVAP after list : %s" % lvaplist[-1])
            #Check if limit is not given and provide the default
            #if str(wtp.addr) not in linker.wtpdict_limit:
            #linker.wtpdict_limit[str(wtp.addr)]=linker.DEFAULT_LVAP_NUMBER_LIMIT
            #Check if wtp is not on the list and add it
            if str(wtp.addr) not in linker.wtpdict:
                linker.wtpdict[str(wtp.addr)] = lvaplist
            #If limit is -1 then wtp has no limit
            if linker.wtpdict_limit[str(wtp.addr)] == -1:
                self.log.info("################ WTP : %s has unlimited LVAPs (limit %f) %s ######################\n" % (wtp, linker.wtpdict_limit[str(wtp.addr)], linker.wtpdict[str(wtp.addr)]))
                continue
            #If wtp client limit is exceeded, then handover the excess lvaps to new wtp
            elif len(lvaplist) > linker.wtpdict_limit[str(wtp.addr)]:
                self.log.info("################ WTP : %s has more LVAPs than the limit %f ######################\n" % (wtp, linker.wtpdict_limit[str(wtp.addr)]))
                self.log.info(lvaplist)
                self.log.info(linker.wtpdict[str(wtp.addr)])
                diff = [a for a in lvaplist + linker.wtpdict[str(wtp.addr)] if (a not in lvaplist) or (a not in linker.wtpdict[str(wtp.addr)])]
                self.log.info(diff)
                numoflvaptohandover = len(lvaplist) - linker.wtpdict_limit[str(wtp.addr)]
                self.log.info(numoflvaptohandover)
                for lvap in self.lvaps():
                    #If lvap is the extra lvap in wtp then find wtp with best rssi and handover to that
                    if lvap.addr in diff or lvap.addr in lvaplist:
                        self.log.info("If lvap in diff")
                        # Initialize the Resource Pool
                        pool = ResourcePool()
                        # Update the Resource Pool with all
                        # the available Resource Blocks
                        for other_wtp in self.wtps():
                            if other_wtp.addr != wtp.addr:
                                if linker.wtpdict_limit[str(other_wtp.addr)] < len(linker.wtpdict[str(other_wtp.addr)]):
                                    self.log.info(linker.wtpdict_limit[str(other_wtp.addr)])
                                    self.log.info(len(linker.wtpdict[str(other_wtp.addr)]))
                                    pool = pool | other_wtp.supports
                        # Select matching Resource Blocks
                        matches = pool & lvap.scheduled_on
                        max_rssi = -float("inf")
                        first_block = 1
                        for block in matches:
                            if first_block == 1:
                                first_block = 0
                                max_rssi = block.ucqm[lvap.addr]['mov_rssi']
                            else:
                                if max_rssi < block.ucqm[lvap.addr]['mov_rssi']:
                                    max_rssi = block.ucqm[lvap.addr]['mov_rssi']
                        # Filter Resource Blocks by RSSI
                        valid = [block for block in matches
                                 if block.ucqm[lvap.addr]['mov_rssi'] >= max_rssi]
                        if not valid:
                            self.log.info("not valid")
                            continue
                        for block in valid:
                            self.log.info("valid LVAP: %s - Current RSSI : %.2f dB" % (lvap.addr, float(block.ucqm[lvap.addr]['mov_rssi'])))
                        #Remove from lvaplist
                        lvaplist.remove(str(lvap.addr))
                        new_block = max(valid, key=lambda x: x.ucqm[lvap.addr]['mov_rssi'])
                        self.log.info("LVAP %s setting new block %s" % (lvap.addr, new_block))
                        lvap.scheduled_on = new_block
                        numoflvaptohandover = numoflvaptohandover - 1
                    else:
                        continue
                    #if all lvaps have been handovered then break
                    if numoflvaptohandover == 0:
                        break
            else:
                self.log.info("################ WTP : %s has LVAPs' limit %f %s ######################\n" % (wtp, linker.wtpdict_limit[str(wtp.addr)], linker.wtpdict[str(wtp.addr)]))
            #Update lvaplist for given wtp
            linker.wtpdict[str(wtp.addr)] = lvaplist
        for wtp, lvaps in linker.wtpdict.items():
            temp = None
            insert_comma = 0
            for lvap in lvaps:
                if insert_comma == 0:
                    temp = lvap
                    insert_comma = 1
                    continue
                temp = temp + ', ' + lvap #str(lvaps).strip('['']')#.strip('[EtherAddress'']')
            self.log.info("WTP : %s has %u LVAPs : %s\n" % (wtp, len(lvaps), temp))
            #self.wtp_lvap_limit(wtp,lvaps)
            #if len(lvaps) > linker.DEFAULT_LVAP_NUMBER_LIMIT:
            #self.log.info("################WTP : %s has more LVAPs than the limit######################\n" % wtp)
        #for wtp in self.wtps()
    def lvap_timelimit(self):
        self.log.info("Running Time Limit...")
        self.log.info("DEFAULT_LVAP_TIME_LIMIT : %d" % linker.DEFAULT_LVAP_TIME_LIMIT)
        deletionlist = []
        for lvap, endtime in linker.lvap_timer.items():
            #self.log.info("LVAP")
            formated_endtime = datetime.datetime.strptime(endtime, '%Y-%m-%d %H:%M:%S')
            currenttime = datetime.datetime.now()
            if (currenttime - formated_endtime).total_seconds() >= 0:
                self.log.info("$$$$$$$$$$$$$ LVAP: %s Time ends" % lvap)
                deletionlist.append(lvap)
            else:
                self.log.info("$$$$$$$$$$$$$ LVAP: %s Time continues" % lvap)
        for dlvap in deletionlist:
            self.log.info("$$$$$$$$$$$$$ Removing Timer LVAP: %s" % dlvap)
            linker.removeLVAPTimer(self, dlvap)
            for lvap in self.lvaps():
                if str(lvap.addr) == dlvap:
                    lvaplabel = RUNTIME.get_label(lvap.addr)
                    self.log.info("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
                    #del lvap.downlink[lvap.block] #deletes lvap
                    #del RUNTIME.lvaps[lvap.addr]
                    for wtp in self.wtps():
                        if lvap.wtp.addr == wtp.addr:
                            #wtp.connection.send_del_lvap(lvap)
                            RUNTIME.remove_lvap(lvap.addr)
                            temp = linker.wtpdict[str(wtp.addr)]
                            temp.remove(str(lvap.addr))
                            #del RUNTIME.lvaps[lvap.addr]
                            break
                    #self.remove_lvap(lvap)
                    lvaplabel = RUNTIME.get_label(lvap.addr)
                    #self.log.info(lvaplabel)
                    self.log.info("Deleting LVAP %s from db" % lvaplabel)
                    self.log.info("Removing %s %s from allowed LVAPs" % (lvaplabel, lvap.addr))
                    RUNTIME.remove_allowed(lvap.addr)
                    self.log.info("Adding %s %s to denied LVAPs" % (lvaplabel, lvap.addr))
                    RUNTIME.add_denied(lvap.addr, lvaplabel)
                    self.log.info("LVAP %s deleted" % lvaplabel)
                    break
        #pool = ResourcePool()
        #for lvap in self.lvaps():
        #    matches = pool
        #    for block in matches:
        #        self.log.info("zephyr : LVAP: %s - Current RSSI : %f dB" % (lvap.addr, float(block.ucqm[lvap.addr]['mov_rssi'])))
    def loop(self):
        """ Periodic job. """
        self.log.info("Periodic job.\n")
        self.log.info("Allowed LVAPs: %s" % (RUNTIME.allowed))
        self.log.info("Denied LVAPs: %s\n" % (RUNTIME.denied))
        if linker.initialize_limit == 1:
            for wtp in self.wtps():
                #Check if limit is not given and provide the default
                if str(wtp.addr) not in linker.wtpdict_limit:
                    linker.wtpdict_limit[str(wtp.addr)] = linker.DEFAULT_LVAP_NUMBER_LIMIT
            linker.initialize_limit = 0
            self.log.info("Setting limit to default")
        self.wtp_clientlimit()
        self.lvap_timelimit()
        self.log.info("Current limit %u linker limit to %u" % (self.limit, linker.RSSI_LIMIT))
        if self.limit != linker.RSSI_LIMIT:
            self.log.info("Current limit %u setting limit to %u" % (self.limit, linker.RSSI_LIMIT))
            self.set_limit(linker.RSSI_LIMIT)
        # Handover every active LVAP to
        # the best WTP
        counterlvap = 0
        for lvap in self.lvaps():
            self.handover(lvap)
            counterlvap = counterlvap + 1
        self.log.info("Active LVAPs: %u" % counterlvap)
def launch(tenant_id, limit=linker.DEFAULT_RSSI_LIMIT, every=DEFAULT_PERIOD):
""" Initialize the module. """
return Zephyr(tenant_id=tenant_id, limit=limit, every=every) | LokiNetworks/empower-runtime | empower/apps/zephyr/zephyr.py | Python | apache-2.0 | 17,023 |
class StoreChangeLogger:
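    """Log state store changes to the per-store changelog topic
    ('<application_id>-<store_name>-changelog') via the context's record collector."""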
def __init__(self, store_name, context) -> None:
self.topic = f'{context.application_id}-{store_name}-changelog'
self.context = context
self.partition = context.task_id.partition
self.record_collector = context.state_record_collector
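    # Note: if the context exposes no record collector, log_change silently skips logging.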
def log_change(self, key: bytes, value: bytes) -> None:
if self.record_collector:
self.record_collector.send(self.topic, key, value, self.context.timestamp, partition=self.partition)
| wintoncode/winton-kafka-streams | winton_kafka_streams/state/logging/store_change_logger.py | Python | apache-2.0 | 503 |