import sys
import imp
__all__ = ['inject', 'import_patched', 'monkey_patch', 'is_monkey_patched']
__exclude = set(('__builtins__', '__file__', '__name__'))
class SysModulesSaver(object):
"""Class that captures some subset of the current state of
sys.modules. Pass in an iterator of module names to the
constructor."""
def __init__(self, module_names=()):
self._saved = {}
imp.acquire_lock()
self.save(*module_names)
def save(self, *module_names):
"""Saves the named modules to the object."""
for modname in module_names:
self._saved[modname] = sys.modules.get(modname, None)
def restore(self):
"""Restores the modules that the saver knows about into
sys.modules.
"""
try:
for modname, mod in self._saved.iteritems():
if mod is not None:
sys.modules[modname] = mod
else:
try:
del sys.modules[modname]
except KeyError:
pass
finally:
imp.release_lock()
def inject(module_name, new_globals, *additional_modules):
"""Base method for "injecting" greened modules into an imported module. It
imports the module specified in *module_name*, arranging things so
that the already-imported modules in *additional_modules* are used when
*module_name* makes its imports.
*new_globals* is either None or a globals dictionary that gets populated
with the contents of the *module_name* module. This is useful when creating
a "green" version of some other module.
*additional_modules* should be a collection of two-element tuples, of the
form (<name>, <module>). If it's not specified, a default selection of
name/module pairs is used, which should cover all use cases but may be
slower because there are inevitably redundant or unnecessary imports.
"""
patched_name = '__patched_module_' + module_name
if patched_name in sys.modules:
# returning already-patched module so as not to destroy existing
# references to patched modules
return sys.modules[patched_name]
if not additional_modules:
# supply some defaults
additional_modules = (
_green_os_modules() +
_green_select_modules() +
_green_socket_modules() +
_green_thread_modules() +
_green_time_modules())
#_green_MySQLdb()) # enable this after a short baking-in period
# after this we are gonna screw with sys.modules, so capture the
# state of all the modules we're going to mess with, and lock
saver = SysModulesSaver([name for name, m in additional_modules])
saver.save(module_name)
# Cover the target modules so that when you import the module it
# sees only the patched versions
for name, mod in additional_modules:
sys.modules[name] = mod
## Remove the old module from sys.modules and reimport it while
## the specified modules are in place
sys.modules.pop(module_name, None)
try:
module = __import__(module_name, {}, {}, module_name.split('.')[:-1])
if new_globals is not None:
## Update the given globals dictionary with everything from this new module
for name in dir(module):
if name not in __exclude:
new_globals[name] = getattr(module, name)
## Keep a reference to the new module to prevent it from dying
sys.modules[patched_name] = module
finally:
saver.restore() ## Put the original modules back
return module
def import_patched(module_name, *additional_modules, **kw_additional_modules):
"""Imports a module in a way that ensures that the module uses "green"
versions of the standard library modules, so that everything works
nonblockingly.
The only required argument is the name of the module to be imported.
"""
return inject(
module_name,
None,
*additional_modules + tuple(kw_additional_modules.items()))
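# Illustrative usage sketch (not part of the original module): import_patched is
# typically used to load a socket-using third-party module against the green
# standard library; 'ftplib' below is just an example target module.
def _example_import_patched():
    green_ftplib = import_patched('ftplib')
    # green_ftplib.FTP now performs its I/O through eventlet.green.socket
    return green_ftplib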
def patch_function(func, *additional_modules):
"""Decorator that returns a version of the function that patches
some modules for the duration of the function call. This is
    deeply gross and should only be used for functions that import
    network libraries within their function bodies and for which there
    is no way of getting around that."""
if not additional_modules:
# supply some defaults
additional_modules = (
_green_os_modules() +
_green_select_modules() +
_green_socket_modules() +
_green_thread_modules() +
_green_time_modules())
def patched(*args, **kw):
saver = SysModulesSaver()
for name, mod in additional_modules:
saver.save(name)
sys.modules[name] = mod
try:
return func(*args, **kw)
finally:
saver.restore()
return patched
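# Illustrative sketch (an assumption about usage, not original code): patch_function
# wraps a callable whose body does its own stdlib imports at call time.
def _example_patch_function():
    def fetch_hostname():
        import socket  # resolves to eventlet.green.socket while patched
        return socket.getfqdn()
    return patch_function(fetch_hostname)()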
def _original_patch_function(func, *module_names):
"""Kind of the contrapositive of patch_function: decorates a
function such that when it's called, sys.modules is populated only
with the unpatched versions of the specified modules. Unlike
patch_function, only the names of the modules need be supplied,
and there are no defaults. This is a gross hack; tell your kids not
to import inside function bodies!"""
def patched(*args, **kw):
saver = SysModulesSaver(module_names)
for name in module_names:
sys.modules[name] = original(name)
try:
return func(*args, **kw)
finally:
saver.restore()
return patched
def original(modname):
""" This returns an unpatched version of a module; this is useful for
Eventlet itself (i.e. tpool)."""
# note that it's not necessary to temporarily install unpatched
# versions of all patchable modules during the import of the
# module; this is because none of them import each other, except
# for threading which imports thread
original_name = '__original_module_' + modname
if original_name in sys.modules:
return sys.modules.get(original_name)
# re-import the "pure" module and store it in the global _originals
# dict; be sure to restore whatever module had that name already
saver = SysModulesSaver((modname,))
sys.modules.pop(modname, None)
# some rudimentary dependency checking -- fortunately the modules
# we're working on don't have many dependencies so we can just do
# some special-casing here
deps = {'threading':'thread', 'Queue':'threading'}
if modname in deps:
dependency = deps[modname]
saver.save(dependency)
sys.modules[dependency] = original(dependency)
try:
real_mod = __import__(modname, {}, {}, modname.split('.')[:-1])
if modname == 'Queue' and not hasattr(real_mod, '_threading'):
# tricky hack: Queue's constructor in <2.7 imports
# threading on every instantiation; therefore we wrap
# it so that it always gets the original threading
real_mod.Queue.__init__ = _original_patch_function(
real_mod.Queue.__init__,
'threading')
# save a reference to the unpatched module so it doesn't get lost
sys.modules[original_name] = real_mod
finally:
saver.restore()
return sys.modules[original_name]
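# Illustrative sketch (not original code): original() is how Eventlet-internal code
# (such as tpool) reaches the unpatched standard library after monkey_patching.
def _example_original():
    real_time = original('time')  # the unpatched stdlib module, even if time is patched
    return real_time.time()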
already_patched = {}
def monkey_patch(**on):
"""Globally patches certain system modules to be greenthread-friendly.
The keyword arguments afford some control over which modules are patched.
If no keyword arguments are supplied, all possible modules are patched.
If keywords are set to True, only the specified modules are patched. E.g.,
``monkey_patch(socket=True, select=True)`` patches only the select and
socket modules. Most arguments patch the single module of the same name
(os, time, select). The exceptions are socket, which also patches the ssl
module if present; and thread, which patches thread, threading, and Queue.
It's safe to call monkey_patch multiple times.
"""
accepted_args = set(('os', 'select', 'socket',
'thread', 'time', 'psycopg', 'MySQLdb'))
default_on = on.pop("all",None)
for k in on.iterkeys():
if k not in accepted_args:
raise TypeError("monkey_patch() got an unexpected "\
"keyword argument %r" % k)
if default_on is None:
default_on = not (True in on.values())
for modname in accepted_args:
if modname == 'MySQLdb':
# MySQLdb is only on when explicitly patched for the moment
on.setdefault(modname, False)
on.setdefault(modname, default_on)
modules_to_patch = []
patched_thread = False
if on['os'] and not already_patched.get('os'):
modules_to_patch += _green_os_modules()
already_patched['os'] = True
if on['select'] and not already_patched.get('select'):
modules_to_patch += _green_select_modules()
already_patched['select'] = True
if on['socket'] and not already_patched.get('socket'):
modules_to_patch += _green_socket_modules()
already_patched['socket'] = True
if on['thread'] and not already_patched.get('thread'):
patched_thread = True
modules_to_patch += _green_thread_modules()
already_patched['thread'] = True
if on['time'] and not already_patched.get('time'):
modules_to_patch += _green_time_modules()
already_patched['time'] = True
if on.get('MySQLdb') and not already_patched.get('MySQLdb'):
modules_to_patch += _green_MySQLdb()
already_patched['MySQLdb'] = True
if on['psycopg'] and not already_patched.get('psycopg'):
try:
from eventlet.support import psycopg2_patcher
psycopg2_patcher.make_psycopg_green()
already_patched['psycopg'] = True
except ImportError:
# note that if we get an importerror from trying to
# monkeypatch psycopg, we will continually retry it
# whenever monkey_patch is called; this should not be a
# performance problem but it allows is_monkey_patched to
# tell us whether or not we succeeded
pass
imp.acquire_lock()
try:
for name, mod in modules_to_patch:
orig_mod = sys.modules.get(name)
if orig_mod is None:
orig_mod = __import__(name)
for attr_name in mod.__patched__:
patched_attr = getattr(mod, attr_name, None)
if patched_attr is not None:
setattr(orig_mod, attr_name, patched_attr)
# hacks ahead; this is necessary to prevent a KeyError on program exit
if patched_thread:
_patch_main_thread(sys.modules['threading'])
finally:
imp.release_lock()
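# Illustrative sketch (not original code): selective patching as described in the
# monkey_patch docstring -- green only the socket (plus ssl) and select modules.
def _example_selective_monkey_patch():
    monkey_patch(socket=True, select=True)
    return is_monkey_patched('socket'), is_monkey_patched('os')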
def _patch_main_thread(mod):
"""This is some gnarly patching specific to the threading module;
threading will always be initialized prior to monkeypatching, and
its _active dict will have the wrong key (it uses the real thread
id but once it's patched it will use the greenlet ids); so what we
do is rekey the _active dict so that the main thread's entry uses
the greenthread key. Other threads' keys are ignored."""
thread = original('thread')
curthread = mod._active.pop(thread.get_ident(), None)
if curthread:
import eventlet.green.thread
mod._active[eventlet.green.thread.get_ident()] = curthread
def is_monkey_patched(module):
"""Returns True if the given module is monkeypatched currently, False if
not. *module* can be either the module itself or its name.
Based entirely off the name of the module, so if you import a
module some other way than with the import keyword (including
import_patched), this might not be correct about that particular
module."""
return module in already_patched or \
getattr(module, '__name__', None) in already_patched
def _green_os_modules():
from eventlet.green import os
return [('os', os)]
def _green_select_modules():
from eventlet.green import select
return [('select', select)]
def _green_socket_modules():
from eventlet.green import socket
try:
from eventlet.green import ssl
return [('socket', socket), ('ssl', ssl)]
except ImportError:
return [('socket', socket)]
def _green_thread_modules():
from eventlet.green import Queue
from eventlet.green import thread
from eventlet.green import threading
return [('Queue', Queue), ('thread', thread), ('threading', threading)]
def _green_time_modules():
from eventlet.green import time
return [('time', time)]
def _green_MySQLdb():
try:
from eventlet.green import MySQLdb
return [('MySQLdb', MySQLdb)]
except ImportError:
return []
def slurp_properties(source, destination, ignore=[], srckeys=None):
"""Copy properties from *source* (assumed to be a module) to
*destination* (assumed to be a dict).
*ignore* lists properties that should not be thusly copied.
*srckeys* is a list of keys to copy, if the source's __all__ is
untrustworthy.
"""
if srckeys is None:
srckeys = source.__all__
destination.update(dict([(name, getattr(source, name))
for name in srckeys
if not (
name.startswith('__') or
name in ignore)
]))
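# Illustrative sketch (not original code): slurp_properties is how the eventlet.green
# modules re-export an original module's names; string and the chosen keys here are
# only example inputs.
def _example_slurp_properties():
    import string
    namespace = {}
    slurp_properties(string, namespace, ignore=['whitespace'],
                     srckeys=['ascii_letters', 'digits', 'whitespace'])
    return namespace  # has ascii_letters and digits, but not whitespace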
if __name__ == "__main__":
import sys
sys.argv.pop(0)
monkey_patch()
execfile(sys.argv[0])
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api import httpbody_pb2 # type: ignore
from google.cloud.retail_v2.types import import_config
from google.cloud.retail_v2.types import purge_config
from google.cloud.retail_v2.types import user_event
from google.cloud.retail_v2.types import user_event_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-retail",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class UserEventServiceTransport(abc.ABC):
"""Abstract transport class for UserEventService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "retail.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.write_user_event: gapic_v1.method.wrap_method(
self.write_user_event, default_timeout=None, client_info=client_info,
),
self.collect_user_event: gapic_v1.method.wrap_method(
self.collect_user_event, default_timeout=None, client_info=client_info,
),
self.purge_user_events: gapic_v1.method.wrap_method(
self.purge_user_events,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.import_user_events: gapic_v1.method.wrap_method(
self.import_user_events,
default_retry=retries.Retry(
initial=0.1,
maximum=300.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.rejoin_user_events: gapic_v1.method.wrap_method(
self.rejoin_user_events, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def write_user_event(
self,
) -> Callable[
[user_event_service.WriteUserEventRequest],
Union[user_event.UserEvent, Awaitable[user_event.UserEvent]],
]:
raise NotImplementedError()
@property
def collect_user_event(
self,
) -> Callable[
[user_event_service.CollectUserEventRequest],
Union[httpbody_pb2.HttpBody, Awaitable[httpbody_pb2.HttpBody]],
]:
raise NotImplementedError()
@property
def purge_user_events(
self,
) -> Callable[
[purge_config.PurgeUserEventsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def import_user_events(
self,
) -> Callable[
[import_config.ImportUserEventsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def rejoin_user_events(
self,
) -> Callable[
[user_event_service.RejoinUserEventsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("UserEventServiceTransport",)
|
|
import csv
import sys
import typing
import collections
from pathlib import Path
import chardet
import warnings
import pycldf.dataset
from csvw.dsv import UnicodeDictReader
from beastling.util import log
def sniff(filename, default_dialect: typing.Optional[csv.Dialect] = csv.excel):
"""Read the beginning of the file and guess its csv dialect.
Parameters
----------
    filename: str or pathlib.Path
        Path to a csv file to be sniffed
    default_dialect: csv.Dialect or None
        Fallback dialect if sniffing fails; None re-raises the csv.Error
Returns
-------
csv.Dialect
"""
with Path(filename).open("rb") as fp:
# On large files, csv.Sniffer seems to need a lot of data to make a
# successful inference...
sample = fp.read(1024)
encoding = chardet.detect(sample)["encoding"]
sample = sample.decode(encoding)
while True:
try:
dialect = csv.Sniffer().sniff(sample, [",", "\t"])
dialect.encoding = encoding
return dialect
except csv.Error: # pragma: no cover
blob = fp.read(1024).decode(encoding)
sample += blob
if not blob:
                    # If blob is empty we've somehow hit the end of the file
# without figuring out the dialect. Something is probably
# quite wrong with the file, but let's default to Excel and
# hope for the best...
if default_dialect is not None:
return default_dialect
raise
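# Illustrative sketch (not original code): sniff() is used by load_data() and
# iterlocations() below, but can be called directly on any delimited file.
def _example_sniff(path):
    dialect = sniff(path)
    return dialect.delimiter, getattr(dialect, "encoding", None)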
def sanitise_name(name):
"""
Take a name for a language or a feature which has come from somewhere like
a CLDF dataset and make sure it does not contain any characters which
will cause trouble for BEAST or postanalysis tools.
"""
return name.replace(" ", "_")
def load_data(filename, file_format=None, lang_column=None, value_column=None, expect_multiple=False):
# Handle CSV dialect issues
if str(filename) == 'stdin':
filename = sys.stdin
# We can't sniff from stdin, so guess comma-delimited and hope for
# the best
dialect = "excel" # Default dialect for csv module
elif file_format and file_format.lower() == "cldf":
return read_cldf_dataset(filename, value_column, expect_multiple=expect_multiple)
elif file_format and file_format.lower() == "cldf-legacy":
# CLDF pre-1.0 standard says delimiter is indicated by file extension
if filename.suffix.lower() == ".csv" or str(filename) == "stdin":
dialect = "excel"
elif filename.suffix.lower() == ".tsv":
dialect = "excel-tab"
else:
raise ValueError("CLDF standard dictates that filenames must end in .csv or .tsv")
elif filename.suffix == ".json" or filename.name in {"forms.csv", "values.csv"}:
# TODO: Should we just let the pycldf module try its hands on the file
# and fall back to other formats if that doesn't work?
return read_cldf_dataset(filename, value_column, expect_multiple=expect_multiple)
else:
# Use CSV dialect sniffer in all other cases
dialect = sniff(filename)
# Read
with UnicodeDictReader(filename, dialect=dialect) as reader:
# Guesstimate file format if user has not been explicit
if file_format is None:
file_format = 'cldf-legacy' if all(
[f in reader.fieldnames for f in ("Language_ID", "Value")]) and any(
[f in reader.fieldnames for f in ("Feature_ID", "Parameter_ID")]
) else 'beastling'
# Load data
if file_format == 'cldf-legacy':
data = load_cldf_data(reader, value_column, filename, expect_multiple=expect_multiple)
elif file_format == 'beastling':
data = load_beastling_data(reader, lang_column, filename, expect_multiple=expect_multiple)
else:
raise ValueError("File format specification '{:}' not understood".format(file_format))
return data, {}
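# Illustrative sketch (an assumption about typical usage, not original code): the
# file path and format below are example values only.
def _example_load_data(path):
    data, language_code_map = load_data(path, file_format="cldf")
    return sorted(data), language_code_map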
_language_column_names = ("iso", "iso_code", "glotto", "glottocode", "language", "language_id", "lang", "lang_id")
def load_beastling_data(reader, lang_column, filename, expect_multiple=False):
if not lang_column:
for candidate in reader.fieldnames:
if candidate.lower() in _language_column_names:
lang_column = candidate
break
if not lang_column or lang_column not in reader.fieldnames:
raise ValueError("Cold not find language column in data file %s" % filename)
data = collections.defaultdict(lambda: collections.defaultdict(lambda: "?"))
for row in reader:
if row[lang_column] in data:
raise ValueError("Duplicated language identifier '%s' found in data file %s" % (row[lang_column], filename))
lang = row.pop(lang_column)
if expect_multiple:
data[lang] = collections.defaultdict(lambda : "?", {key: [value] for key, value in row.items()})
else:
data[lang] = collections.defaultdict(lambda : "?", row)
return data
def load_cldf_data(reader, value_column, filename, expect_multiple=False):
value_column = value_column or "Value"
if "Feature_ID" in reader.fieldnames:
feature_column = "Feature_ID"
elif "Parameter_ID" in reader.fieldnames:
feature_column = "Parameter_ID"
else:
raise ValueError("Could not find Feature_ID or Parameter_ID column, is %s a valid CLDF file?" % filename)
data = collections.defaultdict(lambda: collections.defaultdict(lambda: "?"))
for row in reader:
lang = row["Language_ID"]
if lang not in data:
if expect_multiple:
data[lang] = collections.defaultdict(lambda: [])
else:
data[lang] = collections.defaultdict(lambda: "?")
if expect_multiple:
data[lang][row[feature_column]].append(row[value_column])
else:
data[lang][row[feature_column]] = row[value_column]
return data
def iterlocations(filename):
with UnicodeDictReader(filename, dialect=sniff(filename, default_dialect=None)) as reader:
# Identify fieldnames
fieldnames = [(n.lower(), n) for n in reader.fieldnames]
fieldmap = {}
for field, aliases in [
('language identifier', _language_column_names),
('latitude', ("latitude", "lat")),
('longitude', ("longitude", "lon", "long")),
]:
for lname, fieldname in fieldnames:
if lname in aliases:
fieldmap[field] = fieldname
break
else:
raise ValueError(
"Could not find a {0} column in location data file {1}".format(field, filename))
for row in reader:
(lat, lon) = row[fieldmap['latitude']], row[fieldmap['longitude']]
try:
lat = float(lat) if lat != "?" else lat
lon = float(lon) if lon != "?" else lon
except ValueError:
lat, lon = "?", "?"
yield (row[fieldmap['language identifier']].strip(), (lat, lon))
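# Illustrative sketch (not original code): collect the locations into a dict, with
# "?" marking coordinates that could not be parsed.
def _example_iterlocations(path):
    return {lang: (lat, lon) for lang, (lat, lon) in iterlocations(path)}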
def get_dataset(fname):
"""Load a CLDF dataset.
Load the file as `json` CLDF metadata description file, or as metadata-free
dataset contained in a single csv file.
The distinction is made depending on the file extension: `.json` files are
loaded as metadata descriptions, all other files are matched against the
CLDF module specifications. Directories are checked for the presence of
any CLDF datasets in undefined order of the dataset types.
Parameters
----------
fname : str or Path
Path to a CLDF dataset
Returns
-------
Dataset
"""
fname = Path(fname)
if not fname.exists():
raise FileNotFoundError('{:} does not exist'.format(fname))
if fname.suffix == '.json':
return pycldf.dataset.Dataset.from_metadata(fname)
return pycldf.dataset.Dataset.from_data(fname)
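# Illustrative sketch (not original code, file names are assumed examples): the
# metadata vs. single-file distinction described in the docstring above.
def _example_get_dataset():
    with_metadata = get_dataset("Wordlist-metadata.json")  # Dataset.from_metadata
    metadata_free = get_dataset("forms.csv")               # Dataset.from_data
    return with_metadata, metadata_free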
# TODO: Change the behaviour to always expect multiple.
def read_cldf_dataset(filename, code_column=None, expect_multiple=False):
"""Load a CLDF dataset.
Load the file as `json` CLDF metadata description file, or as metadata-free
dataset contained in a single csv file.
The distinction is made depending on the file extension: `.json` files are
loaded as metadata descriptions, all other files are matched against the
CLDF module specifications. Directories are checked for the presence of
any CLDF datasets in undefined order of the dataset types.
    Parameters
    ----------
    filename : str or Path
        Path to a CLDF dataset
    code_column : str, optional
        Column holding the codes/cognate sets, for metadata-free datasets
    expect_multiple : bool
        Collect lists of values per language/feature instead of single values
    Returns
    -------
    (data, language_code_map)
"""
dataset = get_dataset(filename)
if expect_multiple:
data = collections.defaultdict(lambda: collections.defaultdict(lambda: []))
else:
data = collections.defaultdict(lambda: collections.defaultdict(lambda: "?"))
# Make sure this is a kind of dataset BEASTling can handle
if dataset.module not in ("Wordlist", "StructureDataset"):
raise ValueError("BEASTling does not know how to interpret CLDF {:} data.".format(
dataset.module))
# Build dictionaries of nice IDs for languages and features
col_map = dataset.column_names
lang_ids, language_code_map = build_lang_ids(dataset, col_map)
feature_ids = {}
if col_map.parameters:
for row in dataset["ParameterTable"]:
feature_ids[row[col_map.parameters.id]] = sanitise_name(row[col_map.parameters.name])
# Build actual data dictionary, based on dataset type
if dataset.module == "Wordlist":
# We search for cognatesetReferences in the FormTable or a separate CognateTable.
cognate_column_in_form_table = True
# If we find them in CognateTable, we store them keyed with formReference:
if not code_column: # If code_column is given explicitly, we don't have to search!
code_column = col_map.forms.cognatesetReference
if not code_column:
if (col_map.cognates and
col_map.cognates.cognatesetReference and
col_map.cognates.formReference):
code_column = col_map.cognates.cognatesetReference
form_reference = col_map.cognates.formReference
if expect_multiple:
cognatesets = collections.defaultdict(list)
for row in dataset["CognateTable"]:
cognatesets[row[form_reference]].append(row[code_column])
else:
cognatesets = collections.defaultdict(lambda: "?")
for row in dataset["CognateTable"]:
cognatesets[row[form_reference]] = row[code_column]
else:
raise ValueError(
"Dataset {:} has no cognatesetReference column in its "
"primary table or in a separate cognate table. "
"Is this a metadata-free wordlist and you forgot to "
"specify code_column explicitly?".format(filename))
form_column = dataset["FormTable", "id"].name
cognate_column_in_form_table = False
language_column = col_map.forms.languageReference
parameter_column = col_map.forms.parameterReference
        warnings.filterwarnings(
            "ignore", '.*Unspecified column "Cognate_Set"', UserWarning, r"csvw\.metadata", 0)
        warnings.filterwarnings(
            "ignore", '.*Unspecified column "{:}"'.format(code_column), UserWarning, r"csvw\.metadata", 0)
# We know how to deal with a 'Cognate_Set' column, even in a metadata-free CSV file
for row in dataset["FormTable"].iterdicts():
lang_id = lang_ids.get(row[language_column], row[language_column])
feature_id = feature_ids.get(row[parameter_column], row[parameter_column])
if cognate_column_in_form_table:
if expect_multiple:
data[lang_id][feature_id].append(row[code_column])
else:
data[lang_id][feature_id] = row[code_column]
else:
data[lang_id][feature_id] = cognatesets[row[col_map.forms.id]]
return data, language_code_map
if dataset.module == "StructureDataset":
code_column = col_map.values.codeReference or col_map.values.value
for row in dataset["ValueTable"]:
lang_id = lang_ids.get(
row[col_map.values.languageReference], row[col_map.values.languageReference])
feature_id = feature_ids.get(
row[col_map.values.parameterReference], row[col_map.values.parameterReference])
if expect_multiple:
data[lang_id][feature_id].append(row[code_column] or '')
else:
data[lang_id][feature_id] = row[code_column] or ''
return data, language_code_map
def build_lang_ids(dataset, col_map):
if col_map.languages is None:
# No language table so we can't do anything
return {}, {}
col_map = col_map.languages
lang_ids = {}
language_code_map = {}
# First check for unique names and Glottocodes
names = []
gcs = []
langs = []
for row in dataset["LanguageTable"]:
langs.append(row)
names.append(row[col_map.name])
if row[col_map.glottocode]:
gcs.append(row[col_map.glottocode])
unique_names = len(set(names)) == len(names)
unique_gcs = len(set(gcs)) == len(gcs) == len(names)
log.info('{0} are used as language identifiers'.format(
'Names' if unique_names else ('Glottocodes' if unique_gcs else 'dataset-local IDs')))
for row in langs:
if unique_names:
# Use names if they're unique, for human-friendliness
lang_ids[row[col_map.id]] = sanitise_name(row[col_map.name])
elif unique_gcs:
# Otherwise, use glottocodes as at least they are meaningful
lang_ids[row[col_map.id]] = row[col_map.glottocode]
else:
# As a last resort, use the IDs which are guaranteed to be unique
lang_ids[row[col_map.id]] = row[col_map.id]
if row[col_map.glottocode]:
language_code_map[lang_ids[row[col_map.id]]] = row[col_map.glottocode]
return lang_ids, language_code_map
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Starts(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "streamtube"
_path_str = "streamtube.starts"
_valid_props = {"x", "xsrc", "y", "ysrc", "z", "zsrc"}
# x
# -
@property
def x(self):
"""
Sets the x components of the starting position of the
streamtubes
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
Sets the y components of the starting position of the
streamtubes
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# z
# -
@property
def z(self):
"""
Sets the z components of the starting position of the
streamtubes
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for z .
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
Sets the x components of the starting position of the
streamtubes
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the y components of the starting position of the
streamtubes
ysrc
Sets the source reference on Chart Studio Cloud for y
.
z
Sets the z components of the starting position of the
streamtubes
zsrc
Sets the source reference on Chart Studio Cloud for z
.
"""
def __init__(
self,
arg=None,
x=None,
xsrc=None,
y=None,
ysrc=None,
z=None,
zsrc=None,
**kwargs
):
"""
Construct a new Starts object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.streamtube.Starts`
x
Sets the x components of the starting position of the
streamtubes
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the y components of the starting position of the
streamtubes
ysrc
Sets the source reference on Chart Studio Cloud for y
.
z
Sets the z components of the starting position of the
streamtubes
zsrc
Sets the source reference on Chart Studio Cloud for z
.
Returns
-------
Starts
"""
super(Starts, self).__init__("starts")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.streamtube.Starts
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.Starts`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
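# Illustrative sketch (not part of the generated module): seeding three streamtubes
# at explicit starting coordinates.
def _example_starts():
    return Starts(x=[0, 0, 0], y=[0, 1, 2], z=[1, 1, 1])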
|
|
import pytest
from unittest.mock import patch, Mock
from aiocache import SimpleMemoryCache, RedisCache, MemcachedCache, caches, Cache, AIOCACHE_CACHES
from aiocache.factory import _class_from_string, _create_cache
from aiocache.exceptions import InvalidCacheType
from aiocache.serializers import JsonSerializer, PickleSerializer
from aiocache.plugins import TimingPlugin, HitMissRatioPlugin
def test_class_from_string():
assert _class_from_string("aiocache.RedisCache") == RedisCache
def test_create_simple_cache():
redis = _create_cache(RedisCache, endpoint="127.0.0.10", port=6378)
assert isinstance(redis, RedisCache)
assert redis.endpoint == "127.0.0.10"
assert redis.port == 6378
def test_create_cache_with_everything():
redis = _create_cache(
RedisCache,
serializer={"class": PickleSerializer, "encoding": "encoding"},
plugins=[{"class": "aiocache.plugins.TimingPlugin"}],
)
assert isinstance(redis.serializer, PickleSerializer)
assert redis.serializer.encoding == "encoding"
assert isinstance(redis.plugins[0], TimingPlugin)
class TestCache:
def test_cache_types(self):
assert Cache.MEMORY == SimpleMemoryCache
assert Cache.REDIS == RedisCache
assert Cache.MEMCACHED == MemcachedCache
@pytest.mark.parametrize(
"cache_type", [Cache.MEMORY.NAME, Cache.REDIS.NAME, Cache.MEMCACHED.NAME]
)
def test_new(self, cache_type):
kwargs = {"a": 1, "b": 2}
cache_class = Cache.get_scheme_class(cache_type)
with patch("aiocache.{}.__init__".format(cache_class.__name__)) as init:
cache = Cache(cache_class, **kwargs)
assert isinstance(cache, cache_class)
init.assert_called_once_with(**kwargs)
def test_new_defaults_to_memory(self):
assert isinstance(Cache(), Cache.MEMORY)
def test_new_invalid_cache_raises(self):
with pytest.raises(InvalidCacheType) as e:
Cache(object)
assert str(e.value) == "Invalid cache type, you can only use {}".format(
list(AIOCACHE_CACHES.keys())
)
@pytest.mark.parametrize("scheme", [Cache.MEMORY.NAME, Cache.REDIS.NAME, Cache.MEMCACHED.NAME])
def test_get_scheme_class(self, scheme):
assert Cache.get_scheme_class(scheme) == AIOCACHE_CACHES[scheme]
def test_get_scheme_class_invalid(self):
with pytest.raises(InvalidCacheType):
Cache.get_scheme_class("http")
@pytest.mark.parametrize("scheme", [Cache.MEMORY.NAME, Cache.REDIS.NAME, Cache.MEMCACHED.NAME])
def test_from_url_returns_cache_from_scheme(self, scheme):
assert isinstance(Cache.from_url("{}://".format(scheme)), Cache.get_scheme_class(scheme))
@pytest.mark.parametrize(
"url,expected_args",
[
("redis://", {}),
("redis://localhost", {"endpoint": "localhost"}),
("redis://localhost/", {"endpoint": "localhost"}),
("redis://localhost:6379", {"endpoint": "localhost", "port": 6379}),
(
"redis://localhost/?arg1=arg1&arg2=arg2",
{"endpoint": "localhost", "arg1": "arg1", "arg2": "arg2"},
),
(
"redis://localhost:6379/?arg1=arg1&arg2=arg2",
{"endpoint": "localhost", "port": 6379, "arg1": "arg1", "arg2": "arg2"},
),
("redis:///?arg1=arg1", {"arg1": "arg1"}),
("redis:///?arg2=arg2", {"arg2": "arg2"}),
(
"redis://:password@localhost:6379",
{"endpoint": "localhost", "password": "password", "port": 6379},
),
(
"redis://:password@localhost:6379?password=pass",
{"endpoint": "localhost", "password": "password", "port": 6379},
),
],
)
def test_from_url_calls_cache_with_args(self, url, expected_args):
with patch("aiocache.factory.Cache") as mock:
Cache.from_url(url)
mock.assert_called_once_with(mock.get_scheme_class.return_value, **expected_args)
def test_calls_parse_uri_path_from_cache(self):
with patch("aiocache.factory.Cache") as mock:
mock.get_scheme_class.return_value.parse_uri_path = Mock(return_value={"arg1": "arg1"})
Cache.from_url("redis:///")
mock.get_scheme_class.return_value.parse_uri_path.assert_called_once_with("/")
mock.assert_called_once_with(mock.get_scheme_class.return_value, arg1="arg1")
def test_from_url_invalid_protocol(self):
with pytest.raises(InvalidCacheType):
Cache.from_url("http://")
class TestCacheHandler:
@pytest.fixture(autouse=True)
def remove_caches(self):
caches._caches = {}
def test_add_new_entry(self):
alias = "memory"
config = {
"cache": "aiocache.SimpleMemoryCache",
"serializer": {"class": "aiocache.serializers.StringSerializer"},
}
caches.add(alias, config)
assert caches.get_config()[alias] == config
def test_add_updates_existing_entry(self):
alias = "memory"
config = {
"cache": "aiocache.SimpleMemoryCache",
"serializer": {"class": "aiocache.serializers.StringSerializer"},
}
caches.add(alias, {})
caches.add(alias, config)
assert caches.get_config()[alias] == config
def test_get_wrong_alias(self):
with pytest.raises(KeyError):
caches.get("wrong_cache")
with pytest.raises(KeyError):
caches.create("wrong_cache")
def test_reuse_instance(self):
assert caches.get("default") is caches.get("default")
def test_create_not_reuse(self):
assert caches.create("default") is not caches.create("default")
def test_create_extra_args(self):
caches.set_config(
{
"default": {
"cache": "aiocache.RedisCache",
"endpoint": "127.0.0.9",
"db": 10,
"port": 6378,
}
}
)
cache = caches.create("default", namespace="whatever", endpoint="127.0.0.10", db=10)
assert cache.namespace == "whatever"
assert cache.endpoint == "127.0.0.10"
assert cache.db == 10
def test_create_deprecated(self):
with patch("aiocache.factory.warnings.warn") as mock:
caches.create(cache="aiocache.SimpleMemoryCache")
mock.assert_called_once_with(
"Creating a cache with an explicit config is deprecated, use 'aiocache.Cache'",
DeprecationWarning,
)
def test_retrieve_cache(self):
caches.set_config(
{
"default": {
"cache": "aiocache.RedisCache",
"endpoint": "127.0.0.10",
"port": 6378,
"ttl": 10,
"serializer": {
"class": "aiocache.serializers.PickleSerializer",
"encoding": "encoding",
},
"plugins": [
{"class": "aiocache.plugins.HitMissRatioPlugin"},
{"class": "aiocache.plugins.TimingPlugin"},
],
}
}
)
cache = caches.get("default")
assert isinstance(cache, RedisCache)
assert cache.endpoint == "127.0.0.10"
assert cache.port == 6378
assert cache.ttl == 10
assert isinstance(cache.serializer, PickleSerializer)
assert cache.serializer.encoding == "encoding"
assert len(cache.plugins) == 2
def test_retrieve_cache_new_instance(self):
caches.set_config(
{
"default": {
"cache": "aiocache.RedisCache",
"endpoint": "127.0.0.10",
"port": 6378,
"serializer": {
"class": "aiocache.serializers.PickleSerializer",
"encoding": "encoding",
},
"plugins": [
{"class": "aiocache.plugins.HitMissRatioPlugin"},
{"class": "aiocache.plugins.TimingPlugin"},
],
}
}
)
cache = caches.create("default")
assert isinstance(cache, RedisCache)
assert cache.endpoint == "127.0.0.10"
assert cache.port == 6378
assert isinstance(cache.serializer, PickleSerializer)
assert cache.serializer.encoding == "encoding"
assert len(cache.plugins) == 2
def test_create_cache_str_no_alias(self):
cache = caches.create(cache="aiocache.RedisCache")
assert isinstance(cache, RedisCache)
assert cache.endpoint == "127.0.0.1"
assert cache.port == 6379
def test_create_cache_class_no_alias(self):
cache = caches.create(cache=RedisCache)
assert isinstance(cache, RedisCache)
assert cache.endpoint == "127.0.0.1"
assert cache.port == 6379
def test_create_cache_ensure_alias_or_cache(self):
with pytest.raises(TypeError):
caches.create()
def test_alias_config_is_reusable(self):
caches.set_config(
{
"default": {
"cache": "aiocache.RedisCache",
"endpoint": "127.0.0.10",
"port": 6378,
"serializer": {"class": "aiocache.serializers.PickleSerializer"},
"plugins": [
{"class": "aiocache.plugins.HitMissRatioPlugin"},
{"class": "aiocache.plugins.TimingPlugin"},
],
},
"alt": {"cache": "aiocache.SimpleMemoryCache"},
}
)
default = caches.create(**caches.get_alias_config("default"))
alt = caches.create(**caches.get_alias_config("alt"))
assert isinstance(default, RedisCache)
assert default.endpoint == "127.0.0.10"
assert default.port == 6378
assert isinstance(default.serializer, PickleSerializer)
assert len(default.plugins) == 2
assert isinstance(alt, SimpleMemoryCache)
def test_multiple_caches(self):
caches.set_config(
{
"default": {
"cache": "aiocache.RedisCache",
"endpoint": "127.0.0.10",
"port": 6378,
"serializer": {"class": "aiocache.serializers.PickleSerializer"},
"plugins": [
{"class": "aiocache.plugins.HitMissRatioPlugin"},
{"class": "aiocache.plugins.TimingPlugin"},
],
},
"alt": {"cache": "aiocache.SimpleMemoryCache"},
}
)
default = caches.get("default")
alt = caches.get("alt")
assert isinstance(default, RedisCache)
assert default.endpoint == "127.0.0.10"
assert default.port == 6378
assert isinstance(default.serializer, PickleSerializer)
assert len(default.plugins) == 2
assert isinstance(alt, SimpleMemoryCache)
def test_default_caches(self):
assert caches.get_config() == {
"default": {
"cache": "aiocache.SimpleMemoryCache",
"serializer": {"class": "aiocache.serializers.NullSerializer"},
}
}
def test_get_alias_config(self):
assert caches.get_alias_config("default") == {
"cache": "aiocache.SimpleMemoryCache",
"serializer": {"class": "aiocache.serializers.NullSerializer"},
}
def test_set_empty_config(self):
with pytest.raises(ValueError):
caches.set_config({})
def test_set_config_updates_existing_values(self):
assert not isinstance(caches.get("default").serializer, JsonSerializer)
caches.set_config(
{
"default": {
"cache": "aiocache.SimpleMemoryCache",
"serializer": {"class": "aiocache.serializers.JsonSerializer"},
}
}
)
assert isinstance(caches.get("default").serializer, JsonSerializer)
def test_set_config_removes_existing_caches(self):
caches.set_config(
{
"default": {"cache": "aiocache.SimpleMemoryCache"},
"alt": {"cache": "aiocache.SimpleMemoryCache"},
}
)
caches.get("default")
caches.get("alt")
assert len(caches._caches) == 2
caches.set_config(
{
"default": {"cache": "aiocache.SimpleMemoryCache"},
"alt": {"cache": "aiocache.SimpleMemoryCache"},
}
)
assert caches._caches == {}
def test_set_config_no_default(self):
with pytest.raises(ValueError):
caches.set_config(
{
"no_default": {
"cache": "aiocache.RedisCache",
"endpoint": "127.0.0.10",
"port": 6378,
"serializer": {"class": "aiocache.serializers.PickleSerializer"},
"plugins": [
{"class": "aiocache.plugins.HitMissRatioPlugin"},
{"class": "aiocache.plugins.TimingPlugin"},
],
}
}
)
def test_ensure_plugins_order(self):
caches.set_config(
{
"default": {
"cache": "aiocache.RedisCache",
"plugins": [
{"class": "aiocache.plugins.HitMissRatioPlugin"},
{"class": "aiocache.plugins.TimingPlugin"},
],
}
}
)
cache = caches.get("default")
assert isinstance(cache.plugins[0], HitMissRatioPlugin)
cache = caches.create("default")
assert isinstance(cache.plugins[0], HitMissRatioPlugin)
|
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import django.test
from django.urls import reverse
import mock
from oslo_serialization import jsonutils
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.network_topology import views
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
JSON_URL = reverse('horizon:project:network_topology:json')
INDEX_URL = reverse('horizon:project:network_topology:index')
class NetworkTopologyTests(test.TestCase):
trans = views.TranslationHelper()
@test.create_mocks({api.nova: ('server_list',),
api.neutron: ('network_list_for_tenant',
'network_list',
'router_list',
'port_list')})
def test_json_view(self):
self._test_json_view()
@django.test.utils.override_settings(
OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
@test.create_mocks({api.nova: ('server_list',),
api.neutron: ('network_list_for_tenant',
'port_list')})
def test_json_view_router_disabled(self):
self._test_json_view(router_enable=False)
@django.test.utils.override_settings(CONSOLE_TYPE=None)
@test.create_mocks({api.nova: ('server_list',),
api.neutron: ('network_list_for_tenant',
'network_list',
'router_list',
'port_list')})
def test_json_view_console_disabled(self):
self._test_json_view(with_console=False)
def _test_json_view(self, router_enable=True, with_console=True):
self.mock_server_list.return_value = [self.servers.list(), False]
tenant_networks = [net for net in self.networks.list()
if not net['router:external']]
external_networks = [net for net in self.networks.list()
if net['router:external']]
self.mock_network_list_for_tenant.return_value = tenant_networks
# router1 : gateway port not in the port list
# router2 : no gateway port
# router3 : gateway port included in port list
routers = self.routers.list() + self.routers_with_rules.list()
if router_enable:
self.mock_router_list.return_value = routers
self.mock_network_list.return_value = external_networks
self.mock_port_list.return_value = self.ports.list()
res = self.client.get(JSON_URL)
self.assertEqual('text/json', res['Content-Type'])
data = jsonutils.loads(res.content)
# servers
expect_server_urls = []
for server in self.servers.list():
expect_server = {
'id': server.id,
'name': server.name,
'status': server.status.title(),
'original_status': server.status,
'task': None,
'url': '/project/instances/%s/' % server.id
}
if server.status != 'BUILD' and with_console:
expect_server['console'] = 'auto_console'
expect_server_urls.append(expect_server)
self.assertEqual(expect_server_urls, data['servers'])
# routers
if router_enable:
expect_router_urls = [
{'id': router.id,
'external_gateway_info':
router.external_gateway_info,
'name': router.name,
'status': router.status.title(),
'original_status': router.status,
'url': '/project/routers/%s/' % router.id}
for router in routers]
self.assertEqual(expect_router_urls, data['routers'])
else:
self.assertFalse(data['routers'])
# networks
expect_net_urls = []
if router_enable:
expect_net_urls += [{
'id': net.id,
'url': '/project/networks/%s/detail' % net.id,
'name': net.name,
'router:external': net.router__external,
'status': net.status.title(),
'original_status': net.status,
'subnets': [{
'cidr': snet.cidr,
'id': snet.id,
'url': '/project/networks/subnets/%s/detail' % snet.id}
for snet in net.subnets]}
for net in external_networks]
expect_net_urls.extend([{
'id': net.id,
'url': '/project/networks/%s/detail' % net.id,
'name': net.name,
'router:external': net.router__external,
'status': net.status.title(),
'allow_delete_subnet': True,
'original_status': net.status,
'subnets': [{
'cidr': subnet.cidr,
'id': subnet.id,
'url': '/project/networks/subnets/%s/detail' % subnet.id}
for subnet in net.subnets]}
for net in tenant_networks])
for exp_net in expect_net_urls:
if exp_net['url'] is None:
del exp_net['url']
self.assertEqual(expect_net_urls, data['networks'])
valid_network_ids = [net.id for net in tenant_networks]
if router_enable:
valid_network_ids = [net.id for net in self.networks.list()]
# ports
expect_port_urls = [
{'id': port.id,
'device_id': port.device_id,
'device_owner': port.device_owner,
'fixed_ips': port.fixed_ips,
'network_id': port.network_id,
'status': port.status.title(),
'original_status': port.status,
'url': '/project/networks/ports/%s/detail' % port.id}
for port in self.ports.list()
if port.network_id in valid_network_ids]
if router_enable:
# fake port for router1 gateway (router1 on ext_net)
router1 = routers[0]
ext_net = external_networks[0]
expect_port_urls.append(
{'id': 'gateway%s' % ext_net.id,
'device_id': router1.id,
'network_id': ext_net.id,
'fixed_ips': []})
self.assertEqual(expect_port_urls, data['ports'])
self.mock_server_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_network_list_for_tenant.assert_called_once_with(
test.IsHttpRequest(), self.tenant.id,
include_pre_auto_allocate=False)
if router_enable:
self.mock_router_list.assert_called_once_with(
test.IsHttpRequest(), tenant_id=self.tenant.id)
self.mock_network_list.assert_called_once_with(
test.IsHttpRequest(), **{'router:external': True})
self.mock_port_list.assert_called_once_with(
test.IsHttpRequest())
class NetworkTopologyCreateTests(test.TestCase):
def _test_new_button_disabled_when_quota_exceeded(
self, expected_string,
networks_quota=10, routers_quota=10, instances_quota=10):
quota_data = self.quota_usages.first()
quota_data['network']['available'] = networks_quota
quota_data['router']['available'] = routers_quota
quota_data['instances']['available'] = instances_quota
self.mock_tenant_quota_usages.return_value = quota_data
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/network_topology/index.html')
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
self.mock_tenant_quota_usages.assert_has_calls([
mock.call(test.IsHttpRequest(), targets=('instances', )),
mock.call(test.IsHttpRequest(), targets=('network', )),
mock.call(test.IsHttpRequest(), targets=('router', )),
] * 3)
@test.create_mocks({quotas: ('tenant_quota_usages',)})
def test_create_network_button_disabled_when_quota_exceeded(self):
url = reverse('horizon:project:network_topology:createnetwork')
classes = 'btn btn-default ajax-modal'
link_name = "Create Network (Quota exceeded)"
expected_string = "<a href='%s' class='%s disabled' "\
"id='networks__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, classes, link_name)
self._test_new_button_disabled_when_quota_exceeded(expected_string,
networks_quota=0)
@test.create_mocks({quotas: ('tenant_quota_usages',)})
def test_create_router_button_disabled_when_quota_exceeded(self):
url = reverse('horizon:project:network_topology:createrouter')
classes = 'btn btn-default ajax-modal'
link_name = "Create Router (Quota exceeded)"
expected_string = "<a href='%s' class='%s disabled' "\
"id='Routers__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, classes, link_name)
self._test_new_button_disabled_when_quota_exceeded(expected_string,
routers_quota=0)
@test.update_settings(LAUNCH_INSTANCE_LEGACY_ENABLED=True)
@test.create_mocks({quotas: ('tenant_quota_usages',)})
def test_launch_instance_button_disabled_when_quota_exceeded(self):
url = reverse('horizon:project:network_topology:launchinstance')
classes = 'btn btn-default btn-launch ajax-modal'
link_name = "Launch Instance (Quota exceeded)"
expected_string = "<a href='%s' class='%s disabled' "\
"id='instances__action_launch'>" \
"<span class='fa fa-cloud-upload'></span>%s</a>" \
% (url, classes, link_name)
self._test_new_button_disabled_when_quota_exceeded(expected_string,
instances_quota=0)
|
|
#!/usr/bin/env python
# portable serial port access with python
#
# This is a module that gathers a list of serial ports including details on OSX
#
# code originally from https://github.com/makerbot/pyserial/tree/master/serial/tools
# with contributions from cibomahto, dgs3, FarMcKon, tedbrandston
# and modifications by cliechti
#
# this is distributed under a free software license, see license.txt
# List all of the callout devices in OS/X by querying IOKit.
# See the following for a reference of how to do this:
# http://developer.apple.com/library/mac/#documentation/DeviceDrivers/Conceptual/WorkingWSerial/WWSerial_SerialDevs/SerialDevices.html#//apple_ref/doc/uid/TP30000384-CIHGEAFD
# More help from darwin_hid.py
# Also see the 'IORegistryExplorer' for an idea of what we are actually searching
import ctypes
from ctypes import util
import re
iokit = ctypes.cdll.LoadLibrary(ctypes.util.find_library('IOKit'))
cf = ctypes.cdll.LoadLibrary(ctypes.util.find_library('CoreFoundation'))
kIOMasterPortDefault = ctypes.c_void_p.in_dll(iokit, "kIOMasterPortDefault")
kCFAllocatorDefault = ctypes.c_void_p.in_dll(cf, "kCFAllocatorDefault")
kCFStringEncodingMacRoman = 0
iokit.IOServiceMatching.restype = ctypes.c_void_p
iokit.IOServiceGetMatchingServices.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
iokit.IOServiceGetMatchingServices.restype = ctypes.c_void_p
iokit.IORegistryEntryGetParentEntry.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
iokit.IORegistryEntryCreateCFProperty.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint32]
iokit.IORegistryEntryCreateCFProperty.restype = ctypes.c_void_p
iokit.IORegistryEntryGetPath.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
iokit.IORegistryEntryGetPath.restype = ctypes.c_void_p
iokit.IORegistryEntryGetName.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
iokit.IORegistryEntryGetName.restype = ctypes.c_void_p
iokit.IOObjectGetClass.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
iokit.IOObjectGetClass.restype = ctypes.c_void_p
iokit.IOObjectRelease.argtypes = [ctypes.c_void_p]
cf.CFStringCreateWithCString.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int32]
cf.CFStringCreateWithCString.restype = ctypes.c_void_p
cf.CFStringGetCStringPtr.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
cf.CFStringGetCStringPtr.restype = ctypes.c_char_p
cf.CFNumberGetValue.argtypes = [ctypes.c_void_p, ctypes.c_uint32, ctypes.c_void_p]
cf.CFNumberGetValue.restype = ctypes.c_void_p
def get_string_property(device_t, property):
""" Search the given device for the specified string property
@param device_t Device to search
@param property String to search for.
@return Python string containing the value, or None if not found.
"""
key = cf.CFStringCreateWithCString(
kCFAllocatorDefault,
property.encode("mac_roman"),
kCFStringEncodingMacRoman
)
CFContainer = iokit.IORegistryEntryCreateCFProperty(
device_t,
key,
kCFAllocatorDefault,
0
);
output = None
if CFContainer:
output = cf.CFStringGetCStringPtr(CFContainer, 0)
return output
def get_int_property(device_t, property):
""" Search the given device for the specified string property
@param device_t Device to search
@param property String to search for.
@return Python string containing the value, or None if not found.
"""
key = cf.CFStringCreateWithCString(
kCFAllocatorDefault,
property.encode("mac_roman"),
kCFStringEncodingMacRoman
)
CFContainer = iokit.IORegistryEntryCreateCFProperty(
device_t,
key,
kCFAllocatorDefault,
0
    )
number = ctypes.c_uint16()
if CFContainer:
output = cf.CFNumberGetValue(CFContainer, 2, ctypes.byref(number))
return number.value
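# Illustrative usage sketch (not executed here; device handles are obtained
# further down in comports()): the two helpers above are combined like
#
#     name = get_string_property(usb_device, "USB Product Name")
#     vid = get_int_property(usb_device, "idVendor")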
def IORegistryEntryGetName(device):
    # Despite its name, this helper returns the IOKit *class* name of the
    # entry via IOObjectGetClass; GetParentDeviceByType() compares that class
    # name against parent_type.
    pathname = ctypes.create_string_buffer(128)  # io_name_t in IOKit is 128 bytes
    iokit.IOObjectGetClass(
        device,
        ctypes.byref(pathname)
    )
    return pathname.value
def GetParentDeviceByType(device, parent_type):
""" Find the first parent of a device that implements the parent_type
@param IOService Service to inspect
@return Pointer to the parent type, or None if it was not found.
"""
    # Walk up the IOService tree until we find a parent whose class matches parent_type (e.g. "IOUSBDevice").
while IORegistryEntryGetName(device) != parent_type:
parent = ctypes.c_void_p()
response = iokit.IORegistryEntryGetParentEntry(
device,
"IOService".encode("mac_roman"),
ctypes.byref(parent)
)
# If we weren't able to find a parent for the device, we're done.
if response != 0:
return None
device = parent
return device
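# For example, comports() below uses this helper to climb from an
# IOSerialBSDClient service to its owning USB device, if any:
#
#     usb_device = GetParentDeviceByType(service, "IOUSBDevice")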
def GetIOServicesByType(service_type):
    """ Return a list of IOKit service handles matching the given service
    type (e.g. 'IOSerialBSDClient').
    """
serial_port_iterator = ctypes.c_void_p()
response = iokit.IOServiceGetMatchingServices(
kIOMasterPortDefault,
iokit.IOServiceMatching(service_type),
ctypes.byref(serial_port_iterator)
)
services = []
while iokit.IOIteratorIsValid(serial_port_iterator):
service = iokit.IOIteratorNext(serial_port_iterator)
if not service:
break
services.append(service)
iokit.IOObjectRelease(serial_port_iterator)
return services
def comports():
# Scan for all iokit serial ports
services = GetIOServicesByType('IOSerialBSDClient')
ports = []
for service in services:
info = []
# First, add the callout device file.
info.append(get_string_property(service, "IOCalloutDevice"))
        # If the serial port is implemented by a USB device, also record the
        # USB product name and VID:PID/serial number details.
        usb_device = GetParentDeviceByType(service, "IOUSBDevice")
        if usb_device is not None:
info.append(get_string_property(usb_device, "USB Product Name"))
info.append(
"USB VID:PID=%x:%x SNR=%s"%(
get_int_property(usb_device, "idVendor"),
get_int_property(usb_device, "idProduct"),
get_string_property(usb_device, "USB Serial Number"))
)
else:
info.append('n/a')
info.append('n/a')
ports.append(info)
return ports
# test
if __name__ == '__main__':
for port, desc, hwid in sorted(comports()):
print("%s: %s [%s]" % (port, desc, hwid))
|
|
from unittest import mock
from django.contrib.postgres.indexes import (
BloomIndex, BrinIndex, BTreeIndex, GinIndex, GistIndex, HashIndex,
SpGistIndex,
)
from django.db import connection
from django.db.models import CharField
from django.db.models.functions import Length
from django.db.models.query_utils import Q
from django.db.utils import NotSupportedError
from django.test import skipUnlessDBFeature
from django.test.utils import register_lookup
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase
from .models import CharFieldModel, IntegerArrayModel
class IndexTestMixin:
def test_name_auto_generation(self):
index = self.index_class(fields=['field'])
index.set_name_with_model(CharFieldModel)
self.assertRegex(index.name, r'postgres_te_field_[0-9a-f]{6}_%s' % self.index_class.suffix)
def test_deconstruction_no_customization(self):
index = self.index_class(fields=['title'], name='test_title_%s' % self.index_class.suffix)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.%s' % self.index_class.__name__)
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'test_title_%s' % self.index_class.suffix})
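# Illustrative sketch (hypothetical model, not part of this test suite): the
# index classes exercised below are normally declared on a model's Meta, e.g.
#
#     class Article(models.Model):
#         title = models.CharField(max_length=100)
#
#         class Meta:
#             indexes = [BTreeIndex(fields=['title'], name='article_title_btree')]
#
# The deconstruct() round-trip checked above is what allows such declarations
# to be written out to migrations.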
class BloomIndexTests(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = BloomIndex
def test_suffix(self):
self.assertEqual(BloomIndex.suffix, 'bloom')
def test_deconstruction(self):
index = BloomIndex(fields=['title'], name='test_bloom', length=80, columns=[4])
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.BloomIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'fields': ['title'],
'name': 'test_bloom',
'length': 80,
'columns': [4],
})
def test_invalid_fields(self):
msg = 'Bloom indexes support a maximum of 32 fields.'
with self.assertRaisesMessage(ValueError, msg):
BloomIndex(fields=['title'] * 33, name='test_bloom')
def test_invalid_columns(self):
msg = 'BloomIndex.columns must be a list or tuple.'
with self.assertRaisesMessage(ValueError, msg):
BloomIndex(fields=['title'], name='test_bloom', columns='x')
msg = 'BloomIndex.columns cannot have more values than fields.'
with self.assertRaisesMessage(ValueError, msg):
BloomIndex(fields=['title'], name='test_bloom', columns=[4, 3])
def test_invalid_columns_value(self):
msg = 'BloomIndex.columns must contain integers from 1 to 4095.'
for length in (0, 4096):
with self.subTest(length), self.assertRaisesMessage(ValueError, msg):
BloomIndex(fields=['title'], name='test_bloom', columns=[length])
def test_invalid_length(self):
msg = 'BloomIndex.length must be None or an integer from 1 to 4096.'
for length in (0, 4097):
with self.subTest(length), self.assertRaisesMessage(ValueError, msg):
BloomIndex(fields=['title'], name='test_bloom', length=length)
class BrinIndexTests(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = BrinIndex
def test_suffix(self):
self.assertEqual(BrinIndex.suffix, 'brin')
def test_deconstruction(self):
index = BrinIndex(fields=['title'], name='test_title_brin', autosummarize=True, pages_per_range=16)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.BrinIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'fields': ['title'],
'name': 'test_title_brin',
'autosummarize': True,
'pages_per_range': 16,
})
def test_invalid_pages_per_range(self):
with self.assertRaisesMessage(ValueError, 'pages_per_range must be None or a positive integer'):
BrinIndex(fields=['title'], name='test_title_brin', pages_per_range=0)
class BTreeIndexTests(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = BTreeIndex
def test_suffix(self):
self.assertEqual(BTreeIndex.suffix, 'btree')
def test_deconstruction(self):
index = BTreeIndex(fields=['title'], name='test_title_btree', fillfactor=80)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.BTreeIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'test_title_btree', 'fillfactor': 80})
class GinIndexTests(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = GinIndex
def test_suffix(self):
self.assertEqual(GinIndex.suffix, 'gin')
def test_deconstruction(self):
index = GinIndex(
fields=['title'],
name='test_title_gin',
fastupdate=True,
gin_pending_list_limit=128,
)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.GinIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'fields': ['title'],
'name': 'test_title_gin',
'fastupdate': True,
'gin_pending_list_limit': 128,
})
class GistIndexTests(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = GistIndex
def test_suffix(self):
self.assertEqual(GistIndex.suffix, 'gist')
def test_deconstruction(self):
index = GistIndex(fields=['title'], name='test_title_gist', buffering=False, fillfactor=80)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.GistIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {
'fields': ['title'],
'name': 'test_title_gist',
'buffering': False,
'fillfactor': 80,
})
class HashIndexTests(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = HashIndex
def test_suffix(self):
self.assertEqual(HashIndex.suffix, 'hash')
def test_deconstruction(self):
index = HashIndex(fields=['title'], name='test_title_hash', fillfactor=80)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.HashIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'test_title_hash', 'fillfactor': 80})
class SpGistIndexTests(IndexTestMixin, PostgreSQLSimpleTestCase):
index_class = SpGistIndex
def test_suffix(self):
self.assertEqual(SpGistIndex.suffix, 'spgist')
def test_deconstruction(self):
index = SpGistIndex(fields=['title'], name='test_title_spgist', fillfactor=80)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.indexes.SpGistIndex')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'test_title_spgist', 'fillfactor': 80})
class SchemaTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_gin_index(self):
# Ensure the table is there and doesn't have an index.
self.assertNotIn('field', self.get_constraints(IntegerArrayModel._meta.db_table))
# Add the index
index_name = 'integer_array_model_field_gin'
index = GinIndex(fields=['field'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(IntegerArrayModel, index)
constraints = self.get_constraints(IntegerArrayModel._meta.db_table)
# Check gin index was added
self.assertEqual(constraints[index_name]['type'], GinIndex.suffix)
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(IntegerArrayModel, index)
self.assertNotIn(index_name, self.get_constraints(IntegerArrayModel._meta.db_table))
def test_gin_fastupdate(self):
index_name = 'integer_array_gin_fastupdate'
index = GinIndex(fields=['field'], name=index_name, fastupdate=False)
with connection.schema_editor() as editor:
editor.add_index(IntegerArrayModel, index)
constraints = self.get_constraints(IntegerArrayModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], 'gin')
self.assertEqual(constraints[index_name]['options'], ['fastupdate=off'])
with connection.schema_editor() as editor:
editor.remove_index(IntegerArrayModel, index)
self.assertNotIn(index_name, self.get_constraints(IntegerArrayModel._meta.db_table))
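    # For reference, the index above is expected to be created with a statement
    # roughly of the form (illustrative; exact quoting and table name omitted):
    #   CREATE INDEX "integer_array_gin_fastupdate" ON <table>
    #       USING gin ("field") WITH (fastupdate = off)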
def test_partial_gin_index(self):
with register_lookup(CharField, Length):
index_name = 'char_field_gin_partial_idx'
index = GinIndex(fields=['field'], name=index_name, condition=Q(field__length=40))
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], 'gin')
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_partial_gin_index_with_tablespace(self):
with register_lookup(CharField, Length):
index_name = 'char_field_gin_partial_idx'
index = GinIndex(
fields=['field'],
name=index_name,
condition=Q(field__length=40),
db_tablespace='pg_default',
)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
self.assertIn('TABLESPACE "pg_default" ', str(index.create_sql(CharFieldModel, editor)))
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], 'gin')
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_gin_parameters(self):
index_name = 'integer_array_gin_params'
index = GinIndex(fields=['field'], name=index_name, fastupdate=True, gin_pending_list_limit=64)
with connection.schema_editor() as editor:
editor.add_index(IntegerArrayModel, index)
constraints = self.get_constraints(IntegerArrayModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], 'gin')
self.assertEqual(constraints[index_name]['options'], ['gin_pending_list_limit=64', 'fastupdate=on'])
with connection.schema_editor() as editor:
editor.remove_index(IntegerArrayModel, index)
self.assertNotIn(index_name, self.get_constraints(IntegerArrayModel._meta.db_table))
@skipUnlessDBFeature('has_bloom_index')
def test_bloom_index(self):
index_name = 'char_field_model_field_bloom'
index = BloomIndex(fields=['field'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], BloomIndex.suffix)
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
@skipUnlessDBFeature('has_bloom_index')
def test_bloom_parameters(self):
index_name = 'char_field_model_field_bloom_params'
index = BloomIndex(fields=['field'], name=index_name, length=512, columns=[3])
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], BloomIndex.suffix)
self.assertEqual(constraints[index_name]['options'], ['length=512', 'col1=3'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_bloom_index_not_supported(self):
index_name = 'bloom_index_exception'
index = BloomIndex(fields=['field'], name=index_name)
msg = 'Bloom indexes require PostgreSQL 9.6+.'
with self.assertRaisesMessage(NotSupportedError, msg):
with mock.patch('django.db.backends.postgresql.features.DatabaseFeatures.has_bloom_index', False):
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_brin_index(self):
index_name = 'char_field_model_field_brin'
index = BrinIndex(fields=['field'], name=index_name, pages_per_range=4)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], BrinIndex.suffix)
self.assertEqual(constraints[index_name]['options'], ['pages_per_range=4'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
@skipUnlessDBFeature('has_brin_autosummarize')
def test_brin_parameters(self):
index_name = 'char_field_brin_params'
index = BrinIndex(fields=['field'], name=index_name, autosummarize=True)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], BrinIndex.suffix)
self.assertEqual(constraints[index_name]['options'], ['autosummarize=on'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_brin_autosummarize_not_supported(self):
index_name = 'brin_options_exception'
index = BrinIndex(fields=['field'], name=index_name, autosummarize=True)
with self.assertRaisesMessage(NotSupportedError, 'BRIN option autosummarize requires PostgreSQL 10+.'):
with mock.patch('django.db.backends.postgresql.features.DatabaseFeatures.has_brin_autosummarize', False):
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_btree_index(self):
# Ensure the table is there and doesn't have an index.
self.assertNotIn('field', self.get_constraints(CharFieldModel._meta.db_table))
# Add the index.
index_name = 'char_field_model_field_btree'
index = BTreeIndex(fields=['field'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
# The index was added.
self.assertEqual(constraints[index_name]['type'], BTreeIndex.suffix)
# Drop the index.
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_btree_parameters(self):
index_name = 'integer_array_btree_fillfactor'
index = BTreeIndex(fields=['field'], name=index_name, fillfactor=80)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], BTreeIndex.suffix)
self.assertEqual(constraints[index_name]['options'], ['fillfactor=80'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_gist_index(self):
# Ensure the table is there and doesn't have an index.
self.assertNotIn('field', self.get_constraints(CharFieldModel._meta.db_table))
# Add the index.
index_name = 'char_field_model_field_gist'
index = GistIndex(fields=['field'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
# The index was added.
self.assertEqual(constraints[index_name]['type'], GistIndex.suffix)
# Drop the index.
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_gist_parameters(self):
index_name = 'integer_array_gist_buffering'
index = GistIndex(fields=['field'], name=index_name, buffering=True, fillfactor=80)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], GistIndex.suffix)
self.assertEqual(constraints[index_name]['options'], ['buffering=on', 'fillfactor=80'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_hash_index(self):
# Ensure the table is there and doesn't have an index.
self.assertNotIn('field', self.get_constraints(CharFieldModel._meta.db_table))
# Add the index.
index_name = 'char_field_model_field_hash'
index = HashIndex(fields=['field'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
# The index was added.
self.assertEqual(constraints[index_name]['type'], HashIndex.suffix)
# Drop the index.
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_hash_parameters(self):
index_name = 'integer_array_hash_fillfactor'
index = HashIndex(fields=['field'], name=index_name, fillfactor=80)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], HashIndex.suffix)
self.assertEqual(constraints[index_name]['options'], ['fillfactor=80'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_spgist_index(self):
# Ensure the table is there and doesn't have an index.
self.assertNotIn('field', self.get_constraints(CharFieldModel._meta.db_table))
# Add the index.
index_name = 'char_field_model_field_spgist'
index = SpGistIndex(fields=['field'], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
# The index was added.
self.assertEqual(constraints[index_name]['type'], SpGistIndex.suffix)
# Drop the index.
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
def test_spgist_parameters(self):
index_name = 'integer_array_spgist_fillfactor'
index = SpGistIndex(fields=['field'], name=index_name, fillfactor=80)
with connection.schema_editor() as editor:
editor.add_index(CharFieldModel, index)
constraints = self.get_constraints(CharFieldModel._meta.db_table)
self.assertEqual(constraints[index_name]['type'], SpGistIndex.suffix)
self.assertEqual(constraints[index_name]['options'], ['fillfactor=80'])
with connection.schema_editor() as editor:
editor.remove_index(CharFieldModel, index)
self.assertNotIn(index_name, self.get_constraints(CharFieldModel._meta.db_table))
|
|
"""
Support for native homogeneous lists.
"""
import math
import operator
from llvmlite import ir
from numba.core import types, typing, errors, cgutils
from numba.core.imputils import (lower_builtin, lower_cast,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked,
RefType)
from numba.core.extending import overload_method, overload
from numba.core.utils import cached_property
from numba.misc import quicksort
from numba.cpython import slicing
from numba import literal_unroll
def get_list_payload(context, builder, list_type, value):
"""
Given a list value and type, get its payload structure (as a
reference, so that mutations are seen by all).
"""
payload_type = types.ListPayload(list_type)
payload = context.nrt.meminfo_data(builder, value.meminfo)
ptrty = context.get_data_type(payload_type).as_pointer()
payload = builder.bitcast(payload, ptrty)
return context.make_data_helper(builder, payload_type, ref=payload)
def get_itemsize(context, list_type):
"""
Return the item size for the given list type.
"""
llty = context.get_data_type(list_type.dtype)
return context.get_abi_sizeof(llty)
class _ListPayloadMixin(object):
@property
def size(self):
return self._payload.size
@size.setter
def size(self, value):
self._payload.size = value
@property
def dirty(self):
return self._payload.dirty
@property
def data(self):
return self._payload._get_ptr_by_name('data')
def _gep(self, idx):
return cgutils.gep(self._builder, self.data, idx)
def getitem(self, idx):
ptr = self._gep(idx)
data_item = self._builder.load(ptr)
return self._datamodel.from_data(self._builder, data_item)
def fix_index(self, idx):
"""
Fix negative indices by adding the size to them. Positive
indices are left untouched.
"""
is_negative = self._builder.icmp_signed('<', idx,
ir.Constant(idx.type, 0))
wrapped_index = self._builder.add(idx, self.size)
return self._builder.select(is_negative, wrapped_index, idx)
def is_out_of_bounds(self, idx):
"""
Return whether the index is out of bounds.
"""
underflow = self._builder.icmp_signed('<', idx,
ir.Constant(idx.type, 0))
overflow = self._builder.icmp_signed('>=', idx, self.size)
return self._builder.or_(underflow, overflow)
def clamp_index(self, idx):
"""
Clamp the index in [0, size].
"""
builder = self._builder
idxptr = cgutils.alloca_once_value(builder, idx)
zero = ir.Constant(idx.type, 0)
size = self.size
underflow = self._builder.icmp_signed('<', idx, zero)
with builder.if_then(underflow, likely=False):
builder.store(zero, idxptr)
overflow = self._builder.icmp_signed('>=', idx, size)
with builder.if_then(overflow, likely=False):
builder.store(size, idxptr)
return builder.load(idxptr)
def guard_index(self, idx, msg):
"""
Raise an error if the index is out of bounds.
"""
with self._builder.if_then(self.is_out_of_bounds(idx), likely=False):
self._context.call_conv.return_user_exc(self._builder,
IndexError, (msg,))
def fix_slice(self, slice):
"""
        Fix slice start and stop to be valid (inclusive and exclusive,
        respectively) indexing bounds.
"""
return slicing.fix_slice(self._builder, slice, self.size)
def incref_value(self, val):
"Incref an element value"
self._context.nrt.incref(self._builder, self.dtype, val)
def decref_value(self, val):
"Decref an element value"
self._context.nrt.decref(self._builder, self.dtype, val)
class ListPayloadAccessor(_ListPayloadMixin):
"""
A helper object to access the list attributes given the pointer to the
payload type.
"""
def __init__(self, context, builder, list_type, payload_ptr):
self._context = context
self._builder = builder
self._ty = list_type
self._datamodel = context.data_model_manager[list_type.dtype]
payload_type = types.ListPayload(list_type)
ptrty = context.get_data_type(payload_type).as_pointer()
payload_ptr = builder.bitcast(payload_ptr, ptrty)
payload = context.make_data_helper(builder, payload_type,
ref=payload_ptr)
self._payload = payload
class ListInstance(_ListPayloadMixin):
def __init__(self, context, builder, list_type, list_val):
self._context = context
self._builder = builder
self._ty = list_type
self._list = context.make_helper(builder, list_type, list_val)
self._itemsize = get_itemsize(context, list_type)
self._datamodel = context.data_model_manager[list_type.dtype]
@property
def dtype(self):
return self._ty.dtype
@property
def _payload(self):
# This cannot be cached as it can be reallocated
return get_list_payload(self._context, self._builder, self._ty, self._list)
@property
def parent(self):
return self._list.parent
@parent.setter
def parent(self, value):
self._list.parent = value
@property
def value(self):
return self._list._getvalue()
@property
def meminfo(self):
return self._list.meminfo
def set_dirty(self, val):
if self._ty.reflected:
self._payload.dirty = cgutils.true_bit if val else cgutils.false_bit
def clear_value(self, idx):
"""Remove the value at the location
"""
self.decref_value(self.getitem(idx))
        # Zero-fill the slot afterwards: the dtor decrefs every slot up to the
        # list size, so it must not see the already-decref'ed value again.
self.zfill(idx, self._builder.add(idx, idx.type(1)))
def setitem(self, idx, val, incref, decref_old_value=True):
# Decref old data
if decref_old_value:
self.decref_value(self.getitem(idx))
ptr = self._gep(idx)
data_item = self._datamodel.as_data(self._builder, val)
self._builder.store(data_item, ptr)
self.set_dirty(True)
if incref:
# Incref the underlying data
self.incref_value(val)
def inititem(self, idx, val, incref=True):
ptr = self._gep(idx)
data_item = self._datamodel.as_data(self._builder, val)
self._builder.store(data_item, ptr)
if incref:
self.incref_value(val)
def zfill(self, start, stop):
"""Zero-fill the memory at index *start* to *stop*
*stop* MUST not be smaller than *start*.
"""
builder = self._builder
base = self._gep(start)
end = self._gep(stop)
intaddr_t = self._context.get_value_type(types.intp)
size = builder.sub(builder.ptrtoint(end, intaddr_t),
builder.ptrtoint(base, intaddr_t))
cgutils.memset(builder, base, size, ir.IntType(8)(0))
@classmethod
def allocate_ex(cls, context, builder, list_type, nitems):
"""
Allocate a ListInstance with its storage.
Return a (ok, instance) tuple where *ok* is a LLVM boolean and
*instance* is a ListInstance object (the object's contents are
only valid when *ok* is true).
"""
intp_t = context.get_value_type(types.intp)
if isinstance(nitems, int):
nitems = ir.Constant(intp_t, nitems)
payload_type = context.get_data_type(types.ListPayload(list_type))
payload_size = context.get_abi_sizeof(payload_type)
itemsize = get_itemsize(context, list_type)
# Account for the fact that the payload struct contains one entry
payload_size -= itemsize
ok = cgutils.alloca_once_value(builder, cgutils.true_bit)
self = cls(context, builder, list_type, None)
# Total allocation size = <payload header size> + nitems * itemsize
allocsize, ovf = cgutils.muladd_with_overflow(builder, nitems,
ir.Constant(intp_t, itemsize),
ir.Constant(intp_t, payload_size))
with builder.if_then(ovf, likely=False):
builder.store(cgutils.false_bit, ok)
with builder.if_then(builder.load(ok), likely=True):
meminfo = context.nrt.meminfo_new_varsize_dtor(
builder, size=allocsize, dtor=self.get_dtor())
with builder.if_else(cgutils.is_null(builder, meminfo),
likely=False) as (if_error, if_ok):
with if_error:
builder.store(cgutils.false_bit, ok)
with if_ok:
self._list.meminfo = meminfo
self._list.parent = context.get_constant_null(types.pyobject)
self._payload.allocated = nitems
self._payload.size = ir.Constant(intp_t, 0) # for safety
self._payload.dirty = cgutils.false_bit
# Zero the allocated region
self.zfill(self.size.type(0), nitems)
return builder.load(ok), self
def define_dtor(self):
"Define the destructor if not already defined"
context = self._context
builder = self._builder
mod = builder.module
# Declare dtor
fnty = ir.FunctionType(ir.VoidType(), [cgutils.voidptr_t])
fn = cgutils.get_or_insert_function(mod, fnty,
'.dtor.list.{}'.format(self.dtype))
if not fn.is_declaration:
# End early if the dtor is already defined
return fn
fn.linkage = 'linkonce_odr'
# Populate the dtor
builder = ir.IRBuilder(fn.append_basic_block())
base_ptr = fn.args[0] # void*
# get payload
payload = ListPayloadAccessor(context, builder, self._ty, base_ptr)
# Loop over all data to decref
intp = payload.size.type
with cgutils.for_range_slice(
builder, start=intp(0), stop=payload.size, step=intp(1),
intp=intp) as (idx, _):
val = payload.getitem(idx)
context.nrt.decref(builder, self.dtype, val)
builder.ret_void()
return fn
def get_dtor(self):
""""Get the element dtor function pointer as void pointer.
It's safe to be called multiple times.
"""
# Define and set the Dtor
dtor = self.define_dtor()
dtor_fnptr = self._builder.bitcast(dtor, cgutils.voidptr_t)
return dtor_fnptr
@classmethod
def allocate(cls, context, builder, list_type, nitems):
"""
Allocate a ListInstance with its storage. Same as allocate_ex(),
but return an initialized *instance*. If allocation failed,
control is transferred to the caller using the target's current
call convention.
"""
ok, self = cls.allocate_ex(context, builder, list_type, nitems)
with builder.if_then(builder.not_(ok), likely=False):
context.call_conv.return_user_exc(builder, MemoryError,
("cannot allocate list",))
return self
@classmethod
def from_meminfo(cls, context, builder, list_type, meminfo):
"""
Allocate a new list instance pointing to an existing payload
(a meminfo pointer).
Note the parent field has to be filled by the caller.
"""
self = cls(context, builder, list_type, None)
self._list.meminfo = meminfo
self._list.parent = context.get_constant_null(types.pyobject)
context.nrt.incref(builder, list_type, self.value)
# Payload is part of the meminfo, no need to touch it
return self
def resize(self, new_size):
"""
Ensure the list is properly sized for the new size.
"""
def _payload_realloc(new_allocated):
payload_type = context.get_data_type(types.ListPayload(self._ty))
payload_size = context.get_abi_sizeof(payload_type)
# Account for the fact that the payload struct contains one entry
payload_size -= itemsize
allocsize, ovf = cgutils.muladd_with_overflow(
builder, new_allocated,
ir.Constant(intp_t, itemsize),
ir.Constant(intp_t, payload_size))
with builder.if_then(ovf, likely=False):
context.call_conv.return_user_exc(builder, MemoryError,
("cannot resize list",))
ptr = context.nrt.meminfo_varsize_realloc(builder, self._list.meminfo,
size=allocsize)
cgutils.guard_memory_error(context, builder, ptr,
"cannot resize list")
self._payload.allocated = new_allocated
context = self._context
builder = self._builder
intp_t = new_size.type
itemsize = get_itemsize(context, self._ty)
allocated = self._payload.allocated
two = ir.Constant(intp_t, 2)
eight = ir.Constant(intp_t, 8)
# allocated < new_size
is_too_small = builder.icmp_signed('<', allocated, new_size)
# (allocated >> 2) > new_size
is_too_large = builder.icmp_signed('>', builder.ashr(allocated, two), new_size)
with builder.if_then(is_too_large, likely=False):
# Exact downsize to requested size
# NOTE: is_too_large must be aggressive enough to avoid repeated
# upsizes and downsizes when growing a list.
_payload_realloc(new_size)
with builder.if_then(is_too_small, likely=False):
# Upsize with moderate over-allocation (size + size >> 2 + 8)
new_allocated = builder.add(eight,
builder.add(new_size,
builder.ashr(new_size, two)))
_payload_realloc(new_allocated)
self.zfill(self.size, new_allocated)
self._payload.size = new_size
self.set_dirty(True)
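    # Illustrative sketch of the over-allocation policy used above when the
    # list must grow, expressed in plain Python:
    #
    #     def _new_allocated(new_size):
    #         return new_size + (new_size >> 2) + 8   # e.g. 0 -> 8, 16 -> 28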
def move(self, dest_idx, src_idx, count):
"""
Move `count` elements from `src_idx` to `dest_idx`.
"""
dest_ptr = self._gep(dest_idx)
src_ptr = self._gep(src_idx)
cgutils.raw_memmove(self._builder, dest_ptr, src_ptr,
count, itemsize=self._itemsize)
self.set_dirty(True)
class ListIterInstance(_ListPayloadMixin):
def __init__(self, context, builder, iter_type, iter_val):
self._context = context
self._builder = builder
self._ty = iter_type
self._iter = context.make_helper(builder, iter_type, iter_val)
self._datamodel = context.data_model_manager[iter_type.yield_type]
@classmethod
def from_list(cls, context, builder, iter_type, list_val):
list_inst = ListInstance(context, builder, iter_type.container, list_val)
self = cls(context, builder, iter_type, None)
index = context.get_constant(types.intp, 0)
self._iter.index = cgutils.alloca_once_value(builder, index)
self._iter.meminfo = list_inst.meminfo
return self
@property
def _payload(self):
# This cannot be cached as it can be reallocated
return get_list_payload(self._context, self._builder,
self._ty.container, self._iter)
@property
def value(self):
return self._iter._getvalue()
@property
def index(self):
return self._builder.load(self._iter.index)
@index.setter
def index(self, value):
self._builder.store(value, self._iter.index)
#-------------------------------------------------------------------------------
# Constructors
def build_list(context, builder, list_type, items):
"""
Build a list of the given type, containing the given items.
"""
nitems = len(items)
inst = ListInstance.allocate(context, builder, list_type, nitems)
# Populate list
inst.size = context.get_constant(types.intp, nitems)
for i, val in enumerate(items):
inst.setitem(context.get_constant(types.intp, i), val, incref=True)
return impl_ret_new_ref(context, builder, list_type, inst.value)
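# build_list() is typically reached when lowering a list construction such as
# [a, b, c] in jitted code: the item values arrive already lowered and are
# stored with a refcount bump via setitem(..., incref=True).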
@lower_builtin(list, types.IterableType)
def list_constructor(context, builder, sig, args):
def list_impl(iterable):
res = []
res.extend(iterable)
return res
return context.compile_internal(builder, list_impl, sig, args)
@lower_builtin(list)
def list_constructor(context, builder, sig, args):
list_type = sig.return_type
list_len = 0
inst = ListInstance.allocate(context, builder, list_type, list_len)
return impl_ret_new_ref(context, builder, list_type, inst.value)
#-------------------------------------------------------------------------------
# Various operations
@lower_builtin(len, types.List)
def list_len(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
return inst.size
@lower_builtin('getiter', types.List)
def getiter_list(context, builder, sig, args):
inst = ListIterInstance.from_list(context, builder, sig.return_type, args[0])
return impl_ret_borrowed(context, builder, sig.return_type, inst.value)
@lower_builtin('iternext', types.ListIter)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
inst = ListIterInstance(context, builder, sig.args[0], args[0])
index = inst.index
nitems = inst.size
is_valid = builder.icmp_signed('<', index, nitems)
result.set_valid(is_valid)
with builder.if_then(is_valid):
result.yield_(inst.getitem(index))
inst.index = builder.add(index, context.get_constant(types.intp, 1))
@lower_builtin(operator.getitem, types.List, types.Integer)
def getitem_list(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
index = args[1]
index = inst.fix_index(index)
inst.guard_index(index, msg="getitem out of range")
result = inst.getitem(index)
return impl_ret_borrowed(context, builder, sig.return_type, result)
@lower_builtin(operator.setitem, types.List, types.Integer, types.Any)
def setitem_list(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
index = args[1]
value = args[2]
index = inst.fix_index(index)
inst.guard_index(index, msg="setitem out of range")
inst.setitem(index, value, incref=True)
return context.get_dummy_value()
@lower_builtin(operator.getitem, types.List, types.SliceType)
def getslice_list(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
slice = context.make_helper(builder, sig.args[1], args[1])
slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
inst.fix_slice(slice)
# Allocate result and populate it
result_size = slicing.get_slice_length(builder, slice)
result = ListInstance.allocate(context, builder, sig.return_type,
result_size)
result.size = result_size
with cgutils.for_range_slice_generic(builder, slice.start, slice.stop,
slice.step) as (pos_range, neg_range):
with pos_range as (idx, count):
value = inst.getitem(idx)
result.inititem(count, value, incref=True)
with neg_range as (idx, count):
value = inst.getitem(idx)
result.inititem(count, value, incref=True)
return impl_ret_new_ref(context, builder, sig.return_type, result.value)
@lower_builtin(operator.setitem, types.List, types.SliceType, types.Any)
def setitem_list(context, builder, sig, args):
dest = ListInstance(context, builder, sig.args[0], args[0])
src = ListInstance(context, builder, sig.args[2], args[2])
slice = context.make_helper(builder, sig.args[1], args[1])
slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
dest.fix_slice(slice)
src_size = src.size
avail_size = slicing.get_slice_length(builder, slice)
size_delta = builder.sub(src.size, avail_size)
zero = ir.Constant(size_delta.type, 0)
one = ir.Constant(size_delta.type, 1)
with builder.if_else(builder.icmp_signed('==', slice.step, one)) as (then, otherwise):
with then:
# Slice step == 1 => we can resize
# Compute the real stop, e.g. for dest[2:0] = [...]
real_stop = builder.add(slice.start, avail_size)
# Size of the list tail, after the end of slice
tail_size = builder.sub(dest.size, real_stop)
with builder.if_then(builder.icmp_signed('>', size_delta, zero)):
# Grow list then move list tail
dest.resize(builder.add(dest.size, size_delta))
dest.move(builder.add(real_stop, size_delta), real_stop,
tail_size)
with builder.if_then(builder.icmp_signed('<', size_delta, zero)):
# Move list tail then shrink list
dest.move(builder.add(real_stop, size_delta), real_stop,
tail_size)
dest.resize(builder.add(dest.size, size_delta))
dest_offset = slice.start
with cgutils.for_range(builder, src_size) as loop:
value = src.getitem(loop.index)
dest.setitem(builder.add(loop.index, dest_offset), value, incref=True)
with otherwise:
with builder.if_then(builder.icmp_signed('!=', size_delta, zero)):
msg = "cannot resize extended list slice with step != 1"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
with cgutils.for_range_slice_generic(
builder, slice.start, slice.stop, slice.step) as (pos_range, neg_range):
with pos_range as (index, count):
value = src.getitem(count)
dest.setitem(index, value, incref=True)
with neg_range as (index, count):
value = src.getitem(count)
dest.setitem(index, value, incref=True)
return context.get_dummy_value()
@lower_builtin(operator.delitem, types.List, types.Integer)
def delitem_list_index(context, builder, sig, args):
def list_delitem_impl(lst, i):
lst.pop(i)
return context.compile_internal(builder, list_delitem_impl, sig, args)
@lower_builtin(operator.delitem, types.List, types.SliceType)
def delitem_list(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
slice = context.make_helper(builder, sig.args[1], args[1])
slicing.guard_invalid_slice(context, builder, sig.args[1], slice)
inst.fix_slice(slice)
slice_len = slicing.get_slice_length(builder, slice)
one = ir.Constant(slice_len.type, 1)
with builder.if_then(builder.icmp_signed('!=', slice.step, one), likely=False):
msg = "unsupported del list[start:stop:step] with step != 1"
context.call_conv.return_user_exc(builder, NotImplementedError, (msg,))
# Compute the real stop, e.g. for dest[2:0]
start = slice.start
real_stop = builder.add(start, slice_len)
# Decref the removed range
with cgutils.for_range_slice(
builder, start, real_stop, start.type(1)
) as (idx, _):
inst.decref_value(inst.getitem(idx))
# Size of the list tail, after the end of slice
tail_size = builder.sub(inst.size, real_stop)
inst.move(start, real_stop, tail_size)
inst.resize(builder.sub(inst.size, slice_len))
return context.get_dummy_value()
# XXX should there be a specific module for Sequence or collection base classes?
@lower_builtin(operator.contains, types.Sequence, types.Any)
def in_seq(context, builder, sig, args):
def seq_contains_impl(lst, value):
for elem in lst:
if elem == value:
return True
return False
return context.compile_internal(builder, seq_contains_impl, sig, args)
@lower_builtin(bool, types.Sequence)
def sequence_bool(context, builder, sig, args):
def sequence_bool_impl(seq):
return len(seq) != 0
return context.compile_internal(builder, sequence_bool_impl, sig, args)
@overload(operator.truth)
def sequence_truth(seq):
if isinstance(seq, types.Sequence):
def impl(seq):
return len(seq) != 0
return impl
@lower_builtin(operator.add, types.List, types.List)
def list_add(context, builder, sig, args):
a = ListInstance(context, builder, sig.args[0], args[0])
b = ListInstance(context, builder, sig.args[1], args[1])
a_size = a.size
b_size = b.size
nitems = builder.add(a_size, b_size)
dest = ListInstance.allocate(context, builder, sig.return_type, nitems)
dest.size = nitems
with cgutils.for_range(builder, a_size) as loop:
value = a.getitem(loop.index)
value = context.cast(builder, value, a.dtype, dest.dtype)
dest.setitem(loop.index, value, incref=True)
with cgutils.for_range(builder, b_size) as loop:
value = b.getitem(loop.index)
value = context.cast(builder, value, b.dtype, dest.dtype)
dest.setitem(builder.add(loop.index, a_size), value, incref=True)
return impl_ret_new_ref(context, builder, sig.return_type, dest.value)
@lower_builtin(operator.iadd, types.List, types.List)
def list_add_inplace(context, builder, sig, args):
assert sig.args[0].dtype == sig.return_type.dtype
dest = _list_extend_list(context, builder, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, dest.value)
@lower_builtin(operator.mul, types.List, types.Integer)
@lower_builtin(operator.mul, types.Integer, types.List)
def list_mul(context, builder, sig, args):
if isinstance(sig.args[0], types.List):
list_idx, int_idx = 0, 1
else:
list_idx, int_idx = 1, 0
src = ListInstance(context, builder, sig.args[list_idx], args[list_idx])
src_size = src.size
mult = args[int_idx]
zero = ir.Constant(mult.type, 0)
mult = builder.select(cgutils.is_neg_int(builder, mult), zero, mult)
nitems = builder.mul(mult, src_size)
dest = ListInstance.allocate(context, builder, sig.return_type, nitems)
dest.size = nitems
with cgutils.for_range_slice(builder, zero, nitems, src_size, inc=True) as (dest_offset, _):
with cgutils.for_range(builder, src_size) as loop:
value = src.getitem(loop.index)
dest.setitem(builder.add(loop.index, dest_offset), value, incref=True)
return impl_ret_new_ref(context, builder, sig.return_type, dest.value)
@lower_builtin(operator.imul, types.List, types.Integer)
def list_mul_inplace(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
src_size = inst.size
mult = args[1]
zero = ir.Constant(mult.type, 0)
mult = builder.select(cgutils.is_neg_int(builder, mult), zero, mult)
nitems = builder.mul(mult, src_size)
inst.resize(nitems)
with cgutils.for_range_slice(builder, src_size, nitems, src_size, inc=True) as (dest_offset, _):
with cgutils.for_range(builder, src_size) as loop:
value = inst.getitem(loop.index)
inst.setitem(builder.add(loop.index, dest_offset), value, incref=True)
return impl_ret_borrowed(context, builder, sig.return_type, inst.value)
#-------------------------------------------------------------------------------
# Comparisons
@lower_builtin(operator.is_, types.List, types.List)
def list_is(context, builder, sig, args):
a = ListInstance(context, builder, sig.args[0], args[0])
b = ListInstance(context, builder, sig.args[1], args[1])
ma = builder.ptrtoint(a.meminfo, cgutils.intp_t)
mb = builder.ptrtoint(b.meminfo, cgutils.intp_t)
return builder.icmp_signed('==', ma, mb)
@lower_builtin(operator.eq, types.List, types.List)
def list_eq(context, builder, sig, args):
aty, bty = sig.args
a = ListInstance(context, builder, aty, args[0])
b = ListInstance(context, builder, bty, args[1])
a_size = a.size
same_size = builder.icmp_signed('==', a_size, b.size)
res = cgutils.alloca_once_value(builder, same_size)
with builder.if_then(same_size):
with cgutils.for_range(builder, a_size) as loop:
v = a.getitem(loop.index)
w = b.getitem(loop.index)
itemres = context.generic_compare(builder, operator.eq,
(aty.dtype, bty.dtype), (v, w))
with builder.if_then(builder.not_(itemres)):
# Exit early
builder.store(cgutils.false_bit, res)
loop.do_break()
return builder.load(res)
@lower_builtin(operator.ne, types.List, types.List)
def list_ne(context, builder, sig, args):
def list_ne_impl(a, b):
return not (a == b)
return context.compile_internal(builder, list_ne_impl, sig, args)
@lower_builtin(operator.le, types.List, types.List)
def list_le(context, builder, sig, args):
def list_le_impl(a, b):
m = len(a)
n = len(b)
for i in range(min(m, n)):
if a[i] < b[i]:
return True
elif a[i] > b[i]:
return False
return m <= n
return context.compile_internal(builder, list_le_impl, sig, args)
@lower_builtin(operator.lt, types.List, types.List)
def list_lt(context, builder, sig, args):
def list_lt_impl(a, b):
m = len(a)
n = len(b)
for i in range(min(m, n)):
if a[i] < b[i]:
return True
elif a[i] > b[i]:
return False
return m < n
return context.compile_internal(builder, list_lt_impl, sig, args)
@lower_builtin(operator.ge, types.List, types.List)
def list_ge(context, builder, sig, args):
def list_ge_impl(a, b):
return b <= a
return context.compile_internal(builder, list_ge_impl, sig, args)
@lower_builtin(operator.gt, types.List, types.List)
def list_gt(context, builder, sig, args):
def list_gt_impl(a, b):
return b < a
return context.compile_internal(builder, list_gt_impl, sig, args)
#-------------------------------------------------------------------------------
# Methods
@lower_builtin("list.append", types.List, types.Any)
def list_append(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
item = args[1]
n = inst.size
new_size = builder.add(n, ir.Constant(n.type, 1))
inst.resize(new_size)
inst.setitem(n, item, incref=True)
return context.get_dummy_value()
@lower_builtin("list.clear", types.List)
def list_clear(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
inst.resize(context.get_constant(types.intp, 0))
return context.get_dummy_value()
@lower_builtin("list.copy", types.List)
def list_copy(context, builder, sig, args):
def list_copy_impl(lst):
return list(lst)
return context.compile_internal(builder, list_copy_impl, sig, args)
@lower_builtin("list.count", types.List, types.Any)
def list_count(context, builder, sig, args):
def list_count_impl(lst, value):
res = 0
for elem in lst:
if elem == value:
res += 1
return res
return context.compile_internal(builder, list_count_impl, sig, args)
def _list_extend_list(context, builder, sig, args):
src = ListInstance(context, builder, sig.args[1], args[1])
dest = ListInstance(context, builder, sig.args[0], args[0])
src_size = src.size
dest_size = dest.size
nitems = builder.add(src_size, dest_size)
dest.resize(nitems)
dest.size = nitems
with cgutils.for_range(builder, src_size) as loop:
value = src.getitem(loop.index)
value = context.cast(builder, value, src.dtype, dest.dtype)
dest.setitem(builder.add(loop.index, dest_size), value, incref=True)
return dest
@lower_builtin("list.extend", types.List, types.IterableType)
def list_extend(context, builder, sig, args):
if isinstance(sig.args[1], types.List):
# Specialize for list operands, for speed.
_list_extend_list(context, builder, sig, args)
return context.get_dummy_value()
def list_extend(lst, iterable):
# Speed hack to avoid NRT refcount operations inside the loop
meth = lst.append
for v in iterable:
meth(v)
return context.compile_internal(builder, list_extend, sig, args)
@lower_builtin("list.index", types.List, types.Any)
def list_index(context, builder, sig, args):
def list_index_impl(lst, value):
for i in range(len(lst)):
if lst[i] == value:
return i
# XXX references are leaked when raising
raise ValueError("value not in list")
return context.compile_internal(builder, list_index_impl, sig, args)
@lower_builtin("list.index", types.List, types.Any,
types.Integer)
def list_index(context, builder, sig, args):
def list_index_impl(lst, value, start):
n = len(lst)
if start < 0:
start += n
if start < 0:
start = 0
for i in range(start, len(lst)):
if lst[i] == value:
return i
# XXX references are leaked when raising
raise ValueError("value not in list")
return context.compile_internal(builder, list_index_impl, sig, args)
@lower_builtin("list.index", types.List, types.Any,
types.Integer, types.Integer)
def list_index(context, builder, sig, args):
def list_index_impl(lst, value, start, stop):
n = len(lst)
if start < 0:
start += n
if start < 0:
start = 0
if stop < 0:
stop += n
if stop > n:
stop = n
for i in range(start, stop):
if lst[i] == value:
return i
# XXX references are leaked when raising
raise ValueError("value not in list")
return context.compile_internal(builder, list_index_impl, sig, args)
@lower_builtin("list.insert", types.List, types.Integer,
types.Any)
def list_insert(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
index = inst.fix_index(args[1])
index = inst.clamp_index(index)
value = args[2]
n = inst.size
one = ir.Constant(n.type, 1)
new_size = builder.add(n, one)
inst.resize(new_size)
inst.move(builder.add(index, one), index, builder.sub(n, index))
inst.setitem(index, value, incref=True, decref_old_value=False)
return context.get_dummy_value()
@lower_builtin("list.pop", types.List)
def list_pop(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
n = inst.size
cgutils.guard_zero(context, builder, n,
(IndexError, "pop from empty list"))
n = builder.sub(n, ir.Constant(n.type, 1))
res = inst.getitem(n)
inst.incref_value(res) # incref the pop'ed element
inst.clear_value(n) # clear the storage space
inst.resize(n)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin("list.pop", types.List, types.Integer)
def list_pop(context, builder, sig, args):
inst = ListInstance(context, builder, sig.args[0], args[0])
idx = inst.fix_index(args[1])
n = inst.size
cgutils.guard_zero(context, builder, n,
(IndexError, "pop from empty list"))
inst.guard_index(idx, "pop index out of range")
res = inst.getitem(idx)
one = ir.Constant(n.type, 1)
n = builder.sub(n, ir.Constant(n.type, 1))
inst.move(idx, builder.add(idx, one), builder.sub(n, idx))
inst.resize(n)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin("list.remove", types.List, types.Any)
def list_remove(context, builder, sig, args):
def list_remove_impl(lst, value):
for i in range(len(lst)):
if lst[i] == value:
lst.pop(i)
return
# XXX references are leaked when raising
raise ValueError("list.remove(x): x not in list")
return context.compile_internal(builder, list_remove_impl, sig, args)
@lower_builtin("list.reverse", types.List)
def list_reverse(context, builder, sig, args):
def list_reverse_impl(lst):
for a in range(0, len(lst) // 2):
b = -a - 1
lst[a], lst[b] = lst[b], lst[a]
return context.compile_internal(builder, list_reverse_impl, sig, args)
# -----------------------------------------------------------------------------
# Sorting
def gt(a, b):
return a > b
sort_forwards = quicksort.make_jit_quicksort().run_quicksort
sort_backwards = quicksort.make_jit_quicksort(lt=gt).run_quicksort
arg_sort_forwards = quicksort.make_jit_quicksort(is_argsort=True,
is_list=True).run_quicksort
arg_sort_backwards = quicksort.make_jit_quicksort(is_argsort=True, lt=gt,
is_list=True).run_quicksort
def _sort_check_reverse(reverse):
if isinstance(reverse, types.Omitted):
rty = reverse.value
elif isinstance(reverse, types.Optional):
rty = reverse.type
else:
rty = reverse
if not isinstance(rty, (types.Boolean, types.Integer, int, bool)):
msg = "an integer is required for 'reverse' (got type %s)" % reverse
raise errors.TypingError(msg)
return rty
def _sort_check_key(key):
if isinstance(key, types.Optional):
msg = ("Key must concretely be None or a Numba JIT compiled function, "
"an Optional (union of None and a value) was found")
raise errors.TypingError(msg)
if not (cgutils.is_nonelike(key) or isinstance(key, types.Dispatcher)):
msg = "Key must be None or a Numba JIT compiled function"
raise errors.TypingError(msg)
@overload_method(types.List, "sort")
def ol_list_sort(lst, key=None, reverse=False):
_sort_check_key(key)
_sort_check_reverse(reverse)
if cgutils.is_nonelike(key):
KEY = False
sort_f = sort_forwards
sort_b = sort_backwards
elif isinstance(key, types.Dispatcher):
KEY = True
sort_f = arg_sort_forwards
sort_b = arg_sort_backwards
def impl(lst, key=None, reverse=False):
if KEY is True:
_lst = [key(x) for x in lst]
else:
_lst = lst
if reverse is False or reverse == 0:
tmp = sort_f(_lst)
else:
tmp = sort_b(_lst)
if KEY is True:
lst[:] = [lst[i] for i in tmp]
return impl
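# The keyed path above is equivalent to the following pure-Python semantics
# (illustrative only, ignoring sort stability): the indices are argsorted by
# their key values and the list is then permuted in place.
#
#     order = sorted(range(len(lst)), key=lambda i: key(lst[i]), reverse=reverse)
#     lst[:] = [lst[i] for i in order]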
@overload(sorted)
def ol_sorted(iterable, key=None, reverse=False):
if not isinstance(iterable, types.IterableType):
return False
_sort_check_key(key)
_sort_check_reverse(reverse)
def impl(iterable, key=None, reverse=False):
lst = list(iterable)
lst.sort(key=key, reverse=reverse)
return lst
return impl
# -----------------------------------------------------------------------------
# Implicit casting
@lower_cast(types.List, types.List)
def list_to_list(context, builder, fromty, toty, val):
# Casting from non-reflected to reflected
assert fromty.dtype == toty.dtype
return val
# -----------------------------------------------------------------------------
# Implementations for types.LiteralList
# -----------------------------------------------------------------------------
_banned_error = errors.TypingError("Cannot mutate a literal list")
# Things that mutate literal lists are banned
@overload_method(types.LiteralList, 'append')
def literal_list_banned_append(lst, obj):
raise _banned_error
@overload_method(types.LiteralList, 'extend')
def literal_list_banned_extend(lst, iterable):
raise _banned_error
@overload_method(types.LiteralList, 'insert')
def literal_list_banned_insert(lst, index, obj):
raise _banned_error
@overload_method(types.LiteralList, 'remove')
def literal_list_banned_remove(lst, value):
raise _banned_error
@overload_method(types.LiteralList, 'pop')
def literal_list_banned_pop(lst, index=-1):
raise _banned_error
@overload_method(types.LiteralList, 'clear')
def literal_list_banned_clear(lst):
raise _banned_error
@overload_method(types.LiteralList, 'sort')
def literal_list_banned_sort(lst, key=None, reverse=False):
raise _banned_error
@overload_method(types.LiteralList, 'reverse')
def literal_list_banned_reverse(lst):
raise _banned_error
_index_end = types.intp.maxval
@overload_method(types.LiteralList, 'index')
def literal_list_index(lst, x, start=0, end=_index_end):
# TODO: To make this work, need consts as slice for start/end so as to
# be able to statically analyse the bounds, then its a just loop body
# versioning based iteration along with enumerate to find the item
if isinstance(lst, types.LiteralList):
msg = "list.index is unsupported for literal lists"
raise errors.TypingError(msg)
@overload_method(types.LiteralList, 'count')
def literal_list_count(lst, x):
if isinstance(lst, types.LiteralList):
def impl(lst, x):
count = 0
for val in literal_unroll(lst):
if val == x:
count += 1
return count
return impl
@overload_method(types.LiteralList, 'copy')
def literal_list_copy(lst):
if isinstance(lst, types.LiteralList):
def impl(lst):
return lst # tuples are immutable, as is this, so just return it
return impl
@overload(operator.delitem)
def literal_list_delitem(lst, index):
if isinstance(lst, types.LiteralList):
raise _banned_error
@overload(operator.setitem)
def literal_list_setitem(lst, index, value):
if isinstance(lst, types.LiteralList):
raise errors.TypingError("Cannot mutate a literal list")
@overload(operator.getitem)
def literal_list_getitem(lst, *args):
if not isinstance(lst, types.LiteralList):
return
msg = ("Cannot __getitem__ on a literal list, return type cannot be "
"statically determined.")
raise errors.TypingError(msg)
@overload(len)
def literal_list_len(lst):
if not isinstance(lst, types.LiteralList):
return
l = lst.count
return lambda lst: l
@overload(operator.contains)
def literal_list_contains(lst, item):
if isinstance(lst, types.LiteralList):
def impl(lst, item):
for val in literal_unroll(lst):
if val == item:
return True
return False
return impl
@lower_cast(types.LiteralList, types.LiteralList)
def literallist_to_literallist(context, builder, fromty, toty, val):
if len(fromty) != len(toty):
# Disallowed by typing layer
raise NotImplementedError
olditems = cgutils.unpack_tuple(builder, val, len(fromty))
items = [context.cast(builder, v, f, t)
for v, f, t in zip(olditems, fromty, toty)]
return context.make_tuple(builder, toty, items)
|
|
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as n_const
from oslo_log import log as logging
from sqlalchemy import or_
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as const
from neutron.common import utils as n_utils
from neutron.db import agentschedulers_db
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import models as ml2_models
LOG = logging.getLogger(__name__)
class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
"""Mixin class for L3 DVR scheduler.
DVR currently supports the following use cases:
- East/West (E/W) traffic between VMs: this is handled in a
distributed manner across Compute Nodes without a centralized element.
This includes E/W traffic between VMs on the same Compute Node.
- North/South traffic for Floating IPs (FIP N/S): this is supported on the
distributed routers on Compute Nodes without any centralized element.
- North/South traffic for SNAT (SNAT N/S): this is supported via a
centralized element that handles the SNAT traffic.
To support these use cases, DVR routers rely on an L3 agent that runs on a
    central node (also known as Network Node or Service Node), as well as L3
agents that run individually on each Compute Node of an OpenStack cloud.
Each L3 agent creates namespaces to route traffic according to the use
cases outlined above. The mechanism adopted for creating and managing
these namespaces is via (Router, Agent) binding and Scheduling in general.
The main difference between distributed routers and centralized ones is
that in the distributed case, multiple bindings will exist, one for each
of the agents participating in the routed topology for the specific router.
These bindings are created in the following circumstances:
- A subnet is added to a router via router-interface-add, and that subnet
      has running VMs deployed in it. A binding will be created between the
router and any L3 agent whose Compute Node is hosting the VM(s).
- An external gateway is set to a router via router-gateway-set. A binding
will be created between the router and the L3 agent running centrally
on the Network Node.
Therefore, any time a router operation occurs (create, update or delete),
scheduling will determine whether the router needs to be associated to an
L3 agent, just like a regular centralized router, with the difference that,
in the distributed case, the bindings required are established based on
the state of the router and the Compute Nodes.
"""
def dvr_handle_new_service_port(self, context, port, dest_host=None):
"""Handle new dvr service port creation.
When a new dvr service port is created, this function will
        schedule a dvr router to the new compute node if needed and notify
        the l3 agent on that node.
        The 'dest_host' will provide the destination host of the port in
        case of service port migration.
"""
port_host = dest_host or port[portbindings.HOST_ID]
l3_agent_on_host = (self.get_l3_agents(
context, filters={'host': [port_host]}) or [None])[0]
if not l3_agent_on_host:
return
if dest_host:
# Make sure we create the floatingip agent gateway port
# for the destination node if fip is associated with this
# fixed port
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
(
l3plugin.
check_for_fip_and_create_agent_gw_port_on_host_if_not_exists(
context, port, dest_host))
subnet_ids = [ip['subnet_id'] for ip in port['fixed_ips']]
router_ids = self.get_dvr_routers_by_subnet_ids(context, subnet_ids)
if router_ids:
LOG.debug('DVR: Handle new service port, host %(host)s, '
'router ids %(router_ids)s',
{'host': port_host, 'router_ids': router_ids})
self.l3_rpc_notifier.routers_updated_on_host(
context, router_ids, port_host)
def get_dvr_routers_by_subnet_ids(self, context, subnet_ids):
"""Gets the dvr routers on vmport subnets."""
if not subnet_ids:
return set()
router_ids = set()
filter_sub = {'fixed_ips': {'subnet_id': subnet_ids},
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
subnet_ports = self._core_plugin.get_ports(
context, filters=filter_sub)
for subnet_port in subnet_ports:
router_ids.add(subnet_port['device_id'])
return router_ids
def get_subnet_ids_on_router(self, context, router_id):
"""Return subnet IDs for interfaces attached to the given router."""
subnet_ids = set()
filter_rtr = {'device_id': [router_id]}
int_ports = self._core_plugin.get_ports(context, filters=filter_rtr)
for int_port in int_ports:
int_ips = int_port['fixed_ips']
if int_ips:
int_subnet = int_ips[0]['subnet_id']
subnet_ids.add(int_subnet)
else:
LOG.debug('DVR: Could not find a subnet id '
'for router %s', router_id)
return subnet_ids
def get_dvr_routers_to_remove(self, context, deleted_port):
"""Returns info about which routers should be removed
In case dvr serviceable port was deleted we need to check
if any dvr routers should be removed from l3 agent on port's host
"""
if not n_utils.is_dvr_serviced(deleted_port['device_owner']):
return []
admin_context = context.elevated()
port_host = deleted_port[portbindings.HOST_ID]
subnet_ids = [ip['subnet_id'] for ip in deleted_port['fixed_ips']]
router_ids = self.get_dvr_routers_by_subnet_ids(admin_context,
subnet_ids)
if not router_ids:
LOG.debug('No DVR routers for this DVR port %(port)s '
'on host %(host)s', {'port': deleted_port['id'],
'host': port_host})
return []
agent = self._get_agent_by_type_and_host(
context, n_const.AGENT_TYPE_L3, port_host)
removed_router_info = []
for router_id in router_ids:
snat_binding = context.session.query(
l3agent_sch_db.RouterL3AgentBinding).filter_by(
router_id=router_id).filter_by(
l3_agent_id=agent.id).first()
if snat_binding:
# not removing from the agent hosting SNAT for the router
continue
subnet_ids = self.get_subnet_ids_on_router(admin_context,
router_id)
if self._check_dvr_serviceable_ports_on_host(
admin_context, port_host, subnet_ids):
continue
filter_rtr = {'device_id': [router_id],
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
int_ports = self._core_plugin.get_ports(
admin_context, filters=filter_rtr)
for port in int_ports:
dvr_binding = (ml2_db.
get_dvr_port_binding_by_host(context.session,
port['id'],
port_host))
if dvr_binding:
# unbind this port from router
dvr_binding['router_id'] = None
dvr_binding.update(dvr_binding)
info = {'router_id': router_id, 'host': port_host,
'agent_id': str(agent.id)}
removed_router_info.append(info)
LOG.debug('Router %(router_id)s on host %(host)s to be deleted',
info)
return removed_router_info
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
router_ids):
if n_utils.is_extension_supported(self, n_const.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host, agent,
router_ids=router_ids,
active=True)
return self._get_dvr_sync_data(context, host, agent,
router_ids=router_ids, active=True)
def get_hosts_to_notify(self, context, router_id):
"""Returns all hosts to send notification about router update"""
hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify(
context, router_id)
router = self.get_router(context, router_id)
if router.get('distributed', False):
dvr_hosts = self._get_dvr_hosts_for_router(context, router_id)
dvr_hosts = set(dvr_hosts) - set(hosts)
state = agentschedulers_db.get_admin_state_up_filter()
agents = self.get_l3_agents(context, active=state,
filters={'host': dvr_hosts})
hosts += [a.host for a in agents]
return hosts
def _get_dvr_hosts_for_router(self, context, router_id):
"""Get a list of hosts where specified DVR router should be hosted
It will first get IDs of all subnets connected to the router and then
get a set of hosts where all dvr serviceable ports on those subnets
are bound
"""
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
Binding = ml2_models.PortBinding
Port = models_v2.Port
IPAllocation = models_v2.IPAllocation
query = context.session.query(Binding.host).distinct()
query = query.join(Binding.port)
query = query.join(Port.fixed_ips)
query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))
owner_filter = or_(
Port.device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX),
Port.device_owner.in_(
n_utils.get_other_dvr_serviced_device_owners()))
query = query.filter(owner_filter)
hosts = [item[0] for item in query]
LOG.debug('Hosts for router %s: %s', router_id, hosts)
return hosts
def _get_dvr_subnet_ids_on_host_query(self, context, host):
query = context.session.query(
models_v2.IPAllocation.subnet_id).distinct()
query = query.join(models_v2.IPAllocation.port)
query = query.join(models_v2.Port.port_binding)
query = query.filter(ml2_models.PortBinding.host == host)
owner_filter = or_(
models_v2.Port.device_owner.startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX),
models_v2.Port.device_owner.in_(
n_utils.get_other_dvr_serviced_device_owners()))
query = query.filter(owner_filter)
return query
def _get_dvr_router_ids_for_host(self, context, host):
subnet_ids_on_host_query = self._get_dvr_subnet_ids_on_host_query(
context, host)
query = context.session.query(models_v2.Port.device_id).distinct()
query = query.filter(
models_v2.Port.device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE)
query = query.join(models_v2.Port.fixed_ips)
query = query.filter(
models_v2.IPAllocation.subnet_id.in_(subnet_ids_on_host_query))
router_ids = [item[0] for item in query]
LOG.debug('DVR routers on host %s: %s', host, router_ids)
return router_ids
def _get_router_ids_for_agent(self, context, agent_db, router_ids):
result_set = set(super(L3_DVRsch_db_mixin,
self)._get_router_ids_for_agent(
context, agent_db, router_ids))
router_ids = set(router_ids or [])
if router_ids and result_set == router_ids:
# no need for extra dvr checks if requested routers are
# explicitly scheduled to the agent
return list(result_set)
# dvr routers are not explicitly scheduled to agents on hosts with
# dvr serviceable ports, so need special handling
if self._get_agent_mode(agent_db) in [const.L3_AGENT_MODE_DVR,
const.L3_AGENT_MODE_DVR_SNAT]:
if not router_ids:
result_set |= set(self._get_dvr_router_ids_for_host(
context, agent_db['host']))
else:
for router_id in (router_ids - result_set):
subnet_ids = self.get_subnet_ids_on_router(
context, router_id)
if (subnet_ids and
self._check_dvr_serviceable_ports_on_host(
context, agent_db['host'],
list(subnet_ids))):
result_set.add(router_id)
return list(result_set)
def _check_dvr_serviceable_ports_on_host(self, context, host, subnet_ids):
"""Check for existence of dvr serviceable ports on host
:param context: request context
:param host: host to look ports on
:param subnet_ids: IDs of subnets to look ports on
:return: return True if dvr serviceable port exists on host,
otherwise return False
"""
# db query will return ports for all subnets if subnet_ids is empty,
# so need to check first
if not subnet_ids:
return False
Binding = ml2_models.PortBinding
IPAllocation = models_v2.IPAllocation
Port = models_v2.Port
query = context.session.query(Binding)
query = query.join(Binding.port)
query = query.join(Port.fixed_ips)
query = query.filter(
IPAllocation.subnet_id.in_(subnet_ids))
device_filter = or_(
models_v2.Port.device_owner.startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX),
models_v2.Port.device_owner.in_(
n_utils.get_other_dvr_serviced_device_owners()))
query = query.filter(device_filter)
host_filter = or_(
ml2_models.PortBinding.host == host,
ml2_models.PortBinding.profile.contains(host))
query = query.filter(host_filter)
return query.first() is not None
def _dvr_handle_unbound_allowed_addr_pair_add(
plugin, context, port, allowed_address_pair):
updated_port = plugin.update_unbound_allowed_address_pair_port_binding(
context, port, allowed_address_pair)
if updated_port:
LOG.debug("Allowed address pair port binding updated "
"based on service port binding: %s", updated_port)
plugin.dvr_handle_new_service_port(context, updated_port)
plugin.update_arp_entry_for_dvr_service_port(context, port)
def _dvr_handle_unbound_allowed_addr_pair_del(
plugin, context, port, allowed_address_pair):
updated_port = plugin.remove_unbound_allowed_address_pair_port_binding(
context, port, allowed_address_pair)
if updated_port:
LOG.debug("Allowed address pair port binding removed "
"from service port binding: %s", updated_port)
aa_fixed_ips = plugin._get_allowed_address_pair_fixed_ips(context, port)
if aa_fixed_ips:
plugin.delete_arp_entry_for_dvr_service_port(
context, port, fixed_ips_to_delete=aa_fixed_ips)
def _notify_l3_agent_new_port(resource, event, trigger, **kwargs):
LOG.debug('Received %(resource)s %(event)s', {
'resource': resource,
'event': event})
port = kwargs.get('port')
if not port:
return
if n_utils.is_dvr_serviced(port['device_owner']):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
context = kwargs['context']
l3plugin.dvr_handle_new_service_port(context, port)
l3plugin.update_arp_entry_for_dvr_service_port(context, port)
def _notify_port_delete(event, resource, trigger, **kwargs):
context = kwargs['context']
port = kwargs['port']
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if port:
port_host = port.get(portbindings.HOST_ID)
allowed_address_pairs_list = port.get('allowed_address_pairs')
if allowed_address_pairs_list and port_host:
for address_pair in allowed_address_pairs_list:
_dvr_handle_unbound_allowed_addr_pair_del(
l3plugin, context, port, address_pair)
l3plugin.delete_arp_entry_for_dvr_service_port(context, port)
removed_routers = l3plugin.get_dvr_routers_to_remove(context, port)
for info in removed_routers:
l3plugin.l3_rpc_notifier.router_removed_from_agent(
context, info['router_id'], info['host'])
def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
new_port = kwargs.get('port')
original_port = kwargs.get('original_port')
if new_port and original_port:
original_device_owner = original_port.get('device_owner', '')
new_device_owner = new_port.get('device_owner', '')
is_new_device_dvr_serviced = n_utils.is_dvr_serviced(new_device_owner)
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
context = kwargs['context']
is_port_no_longer_serviced = (
n_utils.is_dvr_serviced(original_device_owner) and
not n_utils.is_dvr_serviced(new_device_owner))
is_port_moved = (
original_port[portbindings.HOST_ID] and
original_port[portbindings.HOST_ID] !=
new_port[portbindings.HOST_ID])
if is_port_no_longer_serviced or is_port_moved:
removed_routers = l3plugin.get_dvr_routers_to_remove(
context,
original_port)
if removed_routers:
removed_router_args = {
'context': context,
'port': original_port,
'removed_routers': removed_routers,
}
_notify_port_delete(
event, resource, trigger, **removed_router_args)
if not is_new_device_dvr_serviced:
return
is_new_port_binding_changed = (
new_port[portbindings.HOST_ID] and
(original_port[portbindings.HOST_ID] !=
new_port[portbindings.HOST_ID]))
dest_host = None
new_port_profile = new_port.get(portbindings.PROFILE)
if new_port_profile:
dest_host = new_port_profile.get('migrating_to')
# This check is required to prevent an arp update
# of the allowed_address_pair port.
if new_port_profile.get('original_owner'):
return
# If dest_host is set, then the port profile has changed
# and this port is in migration. The call below will
# pre-create the router on the new host
if ((is_new_port_binding_changed or dest_host) and
is_new_device_dvr_serviced):
l3plugin.dvr_handle_new_service_port(context, new_port,
dest_host=dest_host)
l3plugin.update_arp_entry_for_dvr_service_port(
context, new_port)
return
# Check for allowed_address_pairs and port state
new_port_host = new_port.get(portbindings.HOST_ID)
allowed_address_pairs_list = new_port.get('allowed_address_pairs')
if allowed_address_pairs_list and new_port_host:
new_port_state = new_port.get('admin_state_up')
original_port_state = original_port.get('admin_state_up')
if new_port_state and not original_port_state:
                # Case where we activate the port from the inactive state.
for address_pair in allowed_address_pairs_list:
_dvr_handle_unbound_allowed_addr_pair_add(
l3plugin, context, new_port, address_pair)
return
elif original_port_state and not new_port_state:
                # Case where we deactivate the port from the active state.
for address_pair in allowed_address_pairs_list:
_dvr_handle_unbound_allowed_addr_pair_del(
l3plugin, context, original_port, address_pair)
return
elif new_port_state and original_port_state:
                # Case where the same port has additional address_pairs
                # added.
for address_pair in allowed_address_pairs_list:
_dvr_handle_unbound_allowed_addr_pair_add(
l3plugin, context, new_port, address_pair)
return
is_fixed_ips_changed = (
'fixed_ips' in new_port and
'fixed_ips' in original_port and
new_port['fixed_ips'] != original_port['fixed_ips'])
if kwargs.get('mac_address_updated') or is_fixed_ips_changed:
l3plugin.update_arp_entry_for_dvr_service_port(
context, new_port)
def subscribe():
registry.subscribe(
_notify_l3_agent_port_update, resources.PORT, events.AFTER_UPDATE)
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE)
registry.subscribe(
_notify_port_delete, resources.PORT, events.AFTER_DELETE)
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
import textwrap
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from enum import Enum
from pkg_resources import Requirement
from pants.core.util_rules import archive
from pants.core.util_rules.archive import ExtractedArchive
from pants.engine.fs import CreateDigest, Digest, DigestEntries, DownloadFile, FileDigest, FileEntry
from pants.engine.platform import Platform
from pants.engine.rules import Get, collect_rules, rule
from pants.option.option_types import DictOption, EnumOption, StrListOption, StrOption
from pants.option.subsystem import Subsystem
from pants.util.docutil import doc_url
from pants.util.logging import LogLevel
from pants.util.meta import classproperty
logger = logging.getLogger(__name__)
class UnknownVersion(Exception):
pass
class ExternalToolError(Exception):
pass
class UnsupportedVersion(ExternalToolError):
"""The specified version of the tool is not supported, according to the given version
constraints."""
class UnsupportedVersionUsage(Enum):
"""What action to take in case the requested version of the tool is not supported."""
RaiseError = "error"
LogWarning = "warning"
@dataclass(frozen=True)
class ExternalToolRequest:
download_file_request: DownloadFile
exe: str
@dataclass(frozen=True)
class DownloadedExternalTool:
digest: Digest
exe: str
class ExternalTool(Subsystem, metaclass=ABCMeta):
"""Configuration for an invocable tool that we download from an external source.
Subclass this to configure a specific tool.
Idiomatic use:
class MyExternalTool(ExternalTool):
options_scope = "my-external-tool"
default_version = "1.2.3"
default_known_versions = [
"1.2.3|linux_arm64 |feed6789feed6789feed6789feed6789feed6789feed6789feed6789feed6789|112233",
"1.2.3|linux_x86_64|cafebabacafebabacafebabacafebabacafebabacafebabacafebabacafebaba|878986",
"1.2.3|macos_arm64 |deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef|222222",
"1.2.3|macos_x86_64|1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd|333333",
]
version_constraints = ">=1.2.3, <2.0"
def generate_url(self, plat: Platform) -> str:
...
def generate_exe(self, plat: Platform) -> str:
return "./path-to/binary
@rule
    async def my_rule(my_external_tool: MyExternalTool) -> Foo:
downloaded_tool = await Get(
DownloadedExternalTool,
ExternalToolRequest,
my_external_tool.get_request(Platform.current)
)
...
"""
# The default values for --version and --known-versions, and the supported versions.
# Subclasses must set appropriately.
default_version: str
default_known_versions: list[str]
version_constraints: str | None = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_version_constraints()
@classproperty
def name(cls):
"""The name of the tool, for use in user-facing messages.
Derived from the classname, but subclasses can override, e.g., with a classproperty.
"""
return cls.__name__.lower()
version = StrOption(
"--version",
default=lambda cls: cls.default_version,
advanced=True,
help=lambda cls: f"Use this version of {cls.name}."
+ (
f"\n\nSupported {cls.name} versions: {cls.version_constraints}"
if cls.version_constraints
else ""
),
)
# Note that you can compute the length and sha256 conveniently with:
# `curl -L $URL | tee >(wc -c) >(shasum -a 256) >/dev/null`
known_versions = StrListOption(
"--known-versions",
default=lambda cls: cls.default_known_versions,
advanced=True,
help=textwrap.dedent(
f"""
Known versions to verify downloads against.
Each element is a pipe-separated string of `version|platform|sha256|length`, where:
- `version` is the version string
- `platform` is one of [{','.join(Platform.__members__.keys())}],
- `sha256` is the 64-character hex representation of the expected sha256
digest of the download file, as emitted by `shasum -a 256`
- `length` is the expected length of the download file in bytes, as emitted by
`wc -c`
E.g., `3.1.2|macos_x86_64|6d0f18cd84b918c7b3edd0203e75569e0c7caecb1367bbbe409b44e28514f5be|42813`.
Values are space-stripped, so pipes can be indented for readability if necessary.
"""
),
)
use_unsupported_version = EnumOption(
"--use-unsupported-version",
advanced=True,
help=lambda cls: textwrap.dedent(
f"""
What action to take in case the requested version of {cls.name} is not supported.
Supported {cls.name} versions: {cls.version_constraints if cls.version_constraints else "unspecified"}
"""
),
default=UnsupportedVersionUsage.RaiseError,
)
@abstractmethod
def generate_url(self, plat: Platform) -> str:
"""Returns the URL for the given version of the tool, runnable on the given os+arch.
Implementations should raise ExternalToolError if they cannot resolve the arguments
to a URL. The raised exception need not have a message - a sensible one will be generated.
"""
def generate_exe(self, plat: Platform) -> str:
"""Returns the path to the tool executable.
If the downloaded artifact is the executable itself, you can leave this unimplemented.
If the downloaded artifact is an archive, this should be overridden to provide a
relative path in the downloaded archive, e.g. `./bin/protoc`.
"""
return f"./{self.generate_url(plat).rsplit('/', 1)[-1]}"
def get_request(self, plat: Platform) -> ExternalToolRequest:
"""Generate a request for this tool."""
for known_version in self.known_versions:
ver, plat_val, sha256, length = self.split_known_version_str(known_version)
if plat.value == plat_val and ver == self.version:
return self.get_request_for(plat_val, sha256, length)
raise UnknownVersion(
f"No known version of {self.name} {self.version} for {plat.value} found in "
f"{self.known_versions}"
)
@classmethod
def split_known_version_str(cls, known_version: str) -> tuple[str, str, str, int]:
try:
ver, plat_val, sha256, length = (x.strip() for x in known_version.split("|"))
except ValueError:
raise ExternalToolError(
f"Bad value for [{cls.options_scope}].known_versions: {known_version}"
)
return ver, plat_val, sha256, int(length)
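    # Illustration only (values taken from the --known-versions help above): an
    # entry of the form `version|platform|sha256|length` parses via
    # split_known_version_str as
    #
    #     split_known_version_str(
    #         "3.1.2 | macos_x86_64 | 6d0f18cd84b918c7b3edd0203e75569e0c7caecb1367bbbe409b44e28514f5be | 42813"
    #     )
    #     # -> ("3.1.2", "macos_x86_64", "6d0f18cd...", 42813)
    #
    # Whitespace around each field is stripped, and a malformed entry raises
    # ExternalToolError naming the offending [scope].known_versions value.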
def get_request_for(self, plat_val: str, sha256: str, length: int) -> ExternalToolRequest:
"""Generate a request for this tool from the given info."""
plat = Platform(plat_val)
digest = FileDigest(fingerprint=sha256, serialized_bytes_length=length)
try:
url = self.generate_url(plat)
exe = self.generate_exe(plat)
except ExternalToolError as e:
raise ExternalToolError(
f"Couldn't find {self.name} version {self.version} on {plat.value}"
) from e
return ExternalToolRequest(DownloadFile(url=url, expected_digest=digest), exe)
def check_version_constraints(self) -> None:
if not self.version_constraints:
return None
# Note that this is not a Python requirement. We're just hackily piggybacking off
        # pkg_resources.Requirement's ability to check version constraints.
constraints = Requirement.parse(f"{self.name}{self.version_constraints}")
if constraints.specifier.contains(self.version): # type: ignore[attr-defined]
# all ok
return None
msg = [
f"The option [{self.options_scope}].version is set to {self.version}, which is not "
f"compatible with what this release of Pants expects: {constraints}.",
"Please update the version to a supported value, or consider using a different Pants",
"release if you cannot change the version.",
]
if self.options.use_unsupported_version is UnsupportedVersionUsage.LogWarning:
msg.extend(
[
"Alternatively, you can ignore this warning (at your own peril) by adding this",
"to the GLOBAL section of pants.toml:",
f'ignore_warnings = ["The option [{self.options_scope}].version is set to"].',
]
)
logger.warning(" ".join(msg))
elif self.options.use_unsupported_version is UnsupportedVersionUsage.RaiseError:
msg.append(
f"Alternatively, update [{self.options_scope}].use_unsupported_version to be "
f"'warning'."
)
raise UnsupportedVersion(" ".join(msg))
class TemplatedExternalTool(ExternalTool):
"""Extends the ExternalTool to allow url templating for custom/self-hosted source.
    In addition to the ExternalTool functionality, subclasses need to set, e.g.:
default_url_template = "https://tool.url/{version}/{platform}-mytool.zip"
default_url_platform_mapping = {
"macos_x86_64": "osx_intel",
"macos_arm64": "osx_arm",
"linux_x86_64": "linux",
}
The platform mapping dict is optional.
"""
default_url_template: str
default_url_platform_mapping: dict[str, str] | None = None
url_template = StrOption(
"--url-template",
default=lambda cls: cls.default_url_template,
advanced=True,
help=(
"URL to download the tool, either as a single binary file or a compressed file "
"(e.g. zip file). You can change this to point to your own hosted file, e.g. to "
"work with proxies or for access via the filesystem through a `file:$abspath` URL (e.g. "
"`file:/this/is/absolute`, possibly by [templating the buildroot in a "
f"config file]({doc_url('options#config-file-entries')})).\n\n"
"Use `{version}` to have the value from --version substituted, and `{platform}` to "
"have a value from --url-platform-mapping substituted in, depending on the "
"current platform. For example, "
"https://github.com/.../protoc-{version}-{platform}.zip."
),
)
url_platform_mapping = DictOption[str](
"--url-platform-mapping",
default=lambda cls: cls.default_url_platform_mapping,
advanced=True,
help=(
"A dictionary mapping platforms to strings to be used when generating the URL "
"to download the tool.\n\nIn --url-template, anytime the `{platform}` string is "
"used, Pants will determine the current platform, and substitute `{platform}` with "
"the respective value from your dictionary.\n\nFor example, if you define "
            '`{"macos_x86_64": "apple-darwin", "linux_x86_64": "unknown-linux"}`, and run Pants on '
            "Linux with an Intel architecture, then `{platform}` will be substituted in the "
            "--url-template option with `unknown-linux`."
),
)
def generate_url(self, plat: Platform):
platform = self.url_platform_mapping.get(plat.value, "")
return self.url_template.format(version=self.version, platform=platform)
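    # Illustration only (hypothetical values): with
    #     url_template = "https://tool.url/{version}/{platform}-mytool.zip"
    #     url_platform_mapping = {"linux_x86_64": "linux"}
    #     version = "1.2.3"
    # generate_url(Platform.linux_x86_64) returns
    #     "https://tool.url/1.2.3/linux-mytool.zip"
    # and a platform missing from the mapping substitutes an empty string.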
@rule(level=LogLevel.DEBUG)
async def download_external_tool(request: ExternalToolRequest) -> DownloadedExternalTool:
# Download and extract.
maybe_archive_digest = await Get(Digest, DownloadFile, request.download_file_request)
extracted_archive = await Get(ExtractedArchive, Digest, maybe_archive_digest)
# Confirm executable.
exe_path = request.exe.lstrip("./")
digest = extracted_archive.digest
is_not_executable = False
digest_entries = []
for entry in await Get(DigestEntries, Digest, digest):
if isinstance(entry, FileEntry) and entry.path == exe_path and not entry.is_executable:
# We should recreate the digest with the executable bit set.
is_not_executable = True
entry = dataclasses.replace(entry, is_executable=True)
digest_entries.append(entry)
if is_not_executable:
digest = await Get(Digest, CreateDigest(digest_entries))
return DownloadedExternalTool(digest, request.exe)
def rules():
return (*collect_rules(), *archive.rules())
|
|
"""Support for SNMP enabled switch."""
import logging
import pysnmp.hlapi.asyncio as hlapi
from pysnmp.hlapi.asyncio import (
CommunityData,
ContextData,
ObjectIdentity,
ObjectType,
SnmpEngine,
UdpTransportTarget,
UsmUserData,
getCmd,
setCmd,
)
from pysnmp.proto.rfc1902 import (
Counter32,
Counter64,
Gauge32,
Integer,
Integer32,
IpAddress,
Null,
ObjectIdentifier,
OctetString,
Opaque,
TimeTicks,
Unsigned32,
)
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_PORT,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_AUTH_KEY,
CONF_AUTH_PROTOCOL,
CONF_BASEOID,
CONF_COMMUNITY,
CONF_PRIV_KEY,
CONF_PRIV_PROTOCOL,
CONF_VARTYPE,
CONF_VERSION,
DEFAULT_AUTH_PROTOCOL,
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_PRIV_PROTOCOL,
DEFAULT_VARTYPE,
DEFAULT_VERSION,
MAP_AUTH_PROTOCOLS,
MAP_PRIV_PROTOCOLS,
SNMP_VERSIONS,
)
_LOGGER = logging.getLogger(__name__)
CONF_COMMAND_OID = "command_oid"
CONF_COMMAND_PAYLOAD_OFF = "command_payload_off"
CONF_COMMAND_PAYLOAD_ON = "command_payload_on"
DEFAULT_COMMUNITY = "private"
DEFAULT_PAYLOAD_OFF = 0
DEFAULT_PAYLOAD_ON = 1
MAP_SNMP_VARTYPES = {
"Counter32": Counter32,
"Counter64": Counter64,
"Gauge32": Gauge32,
"Integer32": Integer32,
"Integer": Integer,
"IpAddress": IpAddress,
"Null": Null,
    # Some work is still needed to support tuple ObjectIdentifier; this just supports str.
"ObjectIdentifier": ObjectIdentifier,
"OctetString": OctetString,
"Opaque": Opaque,
"TimeTicks": TimeTicks,
"Unsigned32": Unsigned32,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_BASEOID): cv.string,
vol.Optional(CONF_COMMAND_OID): cv.string,
vol.Optional(CONF_COMMAND_PAYLOAD_ON): cv.string,
vol.Optional(CONF_COMMAND_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In(SNMP_VERSIONS),
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_AUTH_KEY): cv.string,
vol.Optional(CONF_AUTH_PROTOCOL, default=DEFAULT_AUTH_PROTOCOL): vol.In(
MAP_AUTH_PROTOCOLS
),
vol.Optional(CONF_PRIV_KEY): cv.string,
vol.Optional(CONF_PRIV_PROTOCOL, default=DEFAULT_PRIV_PROTOCOL): vol.In(
MAP_PRIV_PROTOCOLS
),
vol.Optional(CONF_VARTYPE, default=DEFAULT_VARTYPE): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the SNMP switch."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
community = config.get(CONF_COMMUNITY)
baseoid = config.get(CONF_BASEOID)
command_oid = config.get(CONF_COMMAND_OID)
command_payload_on = config.get(CONF_COMMAND_PAYLOAD_ON)
command_payload_off = config.get(CONF_COMMAND_PAYLOAD_OFF)
version = config.get(CONF_VERSION)
username = config.get(CONF_USERNAME)
authkey = config.get(CONF_AUTH_KEY)
authproto = config.get(CONF_AUTH_PROTOCOL)
privkey = config.get(CONF_PRIV_KEY)
privproto = config.get(CONF_PRIV_PROTOCOL)
payload_on = config.get(CONF_PAYLOAD_ON)
payload_off = config.get(CONF_PAYLOAD_OFF)
vartype = config.get(CONF_VARTYPE)
async_add_entities(
[
SnmpSwitch(
name,
host,
port,
community,
baseoid,
command_oid,
version,
username,
authkey,
authproto,
privkey,
privproto,
payload_on,
payload_off,
command_payload_on,
command_payload_off,
vartype,
)
],
True,
)
class SnmpSwitch(SwitchEntity):
"""Representation of a SNMP switch."""
def __init__(
self,
name,
host,
port,
community,
baseoid,
commandoid,
version,
username,
authkey,
authproto,
privkey,
privproto,
payload_on,
payload_off,
command_payload_on,
command_payload_off,
vartype,
):
"""Initialize the switch."""
self._name = name
self._baseoid = baseoid
self._vartype = vartype
# Set the command OID to the base OID if command OID is unset
self._commandoid = commandoid or baseoid
self._command_payload_on = command_payload_on or payload_on
self._command_payload_off = command_payload_off or payload_off
self._state = None
self._payload_on = payload_on
self._payload_off = payload_off
if version == "3":
if not authkey:
authproto = "none"
if not privkey:
privproto = "none"
self._request_args = [
SnmpEngine(),
UsmUserData(
username,
authKey=authkey or None,
privKey=privkey or None,
authProtocol=getattr(hlapi, MAP_AUTH_PROTOCOLS[authproto]),
privProtocol=getattr(hlapi, MAP_PRIV_PROTOCOLS[privproto]),
),
UdpTransportTarget((host, port)),
ContextData(),
]
else:
self._request_args = [
SnmpEngine(),
CommunityData(community, mpModel=SNMP_VERSIONS[version]),
UdpTransportTarget((host, port)),
ContextData(),
]
async def async_turn_on(self, **kwargs):
"""Turn on the switch."""
# If vartype set, use it - http://snmplabs.com/pysnmp/docs/api-reference.html#pysnmp.smi.rfc1902.ObjectType
await self._execute_command(self._command_payload_on)
async def async_turn_off(self, **kwargs):
"""Turn off the switch."""
await self._execute_command(self._command_payload_off)
async def _execute_command(self, command):
# User did not set vartype and command is not a digit
if self._vartype == "none" and not self._command_payload_on.isdigit():
await self._set(command)
# User set vartype Null, command must be an empty string
elif self._vartype == "Null":
await self._set("")
# user did not set vartype but command is digit: defaulting to Integer
# or user did set vartype
else:
await self._set(MAP_SNMP_VARTYPES.get(self._vartype, Integer)(command))
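    # Illustration only (hypothetical values): with vartype "Integer32" and a
    # command payload of "1", _execute_command sends Integer32("1") via setCmd;
    # with vartype "none" and a non-numeric on-payload such as "enable", the
    # raw string payload is sent unchanged; with vartype "Null", an empty
    # string is sent regardless of the configured payload.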
async def async_update(self):
"""Update the state."""
errindication, errstatus, errindex, restable = await getCmd(
*self._request_args, ObjectType(ObjectIdentity(self._baseoid))
)
if errindication:
_LOGGER.error("SNMP error: %s", errindication)
elif errstatus:
_LOGGER.error(
"SNMP error: %s at %s",
errstatus.prettyPrint(),
errindex and restable[-1][int(errindex) - 1] or "?",
)
else:
for resrow in restable:
if resrow[-1] == self._payload_on:
self._state = True
elif resrow[-1] == Integer(self._payload_on):
self._state = True
elif resrow[-1] == self._payload_off:
self._state = False
elif resrow[-1] == Integer(self._payload_off):
self._state = False
else:
self._state = None
@property
def name(self):
"""Return the switch's name."""
return self._name
@property
def is_on(self):
"""Return true if switch is on; False if off. None if unknown."""
return self._state
async def _set(self, value):
await setCmd(
*self._request_args, ObjectType(ObjectIdentity(self._commandoid), value)
)
|
|
# encoding: utf-8
from __future__ import unicode_literals
from docker.errors import APIError
from pytest import raises
from ..container import (
scalar,
)
from .utils import (
assert_in_logs,
TEST_ORG,
TEST_TAG,
make_container,
validate_dict,
volume,
)
def checked_join(container):
container.join()
assert container.running() is None
return container.inspect()
def checked_purge(container):
container.purge()
assert container.running() is None
assert container.instances() == []
def test_container_run(busybox):
busybox.run(['false'])
instance = scalar(busybox.instances())
validate_dict(
instance,
{
'Command': 'false',
'Names': ['/busybox-running'],
'Image': '{}/{}:{}'.format(TEST_ORG, 'busybox', TEST_TAG)
}
)
details = checked_join(busybox)
validate_dict(
details,
{
'State': {
'ExitCode': 1,
'Running': False,
}
}
)
def test_container_join(busybox):
busybox.run(['sleep', '1'])
instance = busybox.running()
assert instance is not None
assert instance['Command'] == 'sleep 1'
details = busybox.inspect()
validate_dict(
details,
{
'State': {
'ExitCode': 0,
'Running': True,
}
}
)
details = checked_join(busybox)
validate_dict(
details,
{
'State': {
'ExitCode': 0,
'Running': False,
}
}
)
def test_container_logs(busybox):
busybox.run(['echo', 'foo'])
checked_join(busybox)
assert_in_logs(busybox, b'foo\n')
def test_container_environment(busybox):
busybox.environment = {'FOO': 'foo'}
busybox.run(
['env']
)
checked_join(busybox)
env_dict = {
pair[0]: pair[1] for pair in
(
message.split(b'=') for message in
scalar(busybox.logs(all=True))['Logs'].splitlines()
)
}
validate_dict(env_dict, {b'FOO': b'foo'})
def test_container_purge(busybox):
busybox.run(['true'])
details = checked_join(busybox)
assert details
checked_purge(busybox)
with raises(APIError) as e:
busybox.inspect()
assert e.value.response.status_code == 404
def test_container_volumes_rw(busybox):
volume_loc = volume('foo.txt')
busybox.volumes_readwrite = {volume_loc: '/bar.txt'}
busybox.run(['cat', 'bar.txt'])
details = checked_join(busybox)
validate_dict(
details,
{
'Mounts': [{
'Source': volume_loc,
'Destination': '/bar.txt',
'Mode': 'rw',
'RW': True,
'Type': 'bind',
'Propagation': 'rprivate',
}],
}
)
assert_in_logs(busybox, b'This is a volume!\n')
def test_container_volumes_ro(busybox):
volume_loc = volume('foo.txt')
busybox.volumes_readonly = {volume_loc: '/bar.txt'}
busybox.run(['cat', 'bar.txt'])
details = checked_join(busybox)
validate_dict(
details,
{
'Mounts': [{
'Source': volume_loc,
'Destination': '/bar.txt',
'Mode': 'ro',
'RW': False,
'Type': 'bind',
'Propagation': 'rprivate',
}],
}
)
assert_in_logs(busybox, b'This is a volume!\n')
def test_container_extra_hosts(busybox):
busybox.extra_hosts = {'www.test.com': '8.8.8.8'}
busybox.run(['cat', '/etc/hosts'])
assert(checked_join(busybox))
logs_list = busybox.logs(all=True)
assert len(logs_list) == 1
actual_logs = logs_list[0]['Logs'].split(b'\n')
assert b'8.8.8.8\twww.test.com' in actual_logs
def test_container_ports(busybox):
busybox.ports = {
1111: 1112,
2222: None,
(3333, 'udp'): 3334,
4444: ('127.0.0.1', 4445),
}
busybox.run(['sleep', '2147483647'])
instance = scalar(busybox.instances(all=True))
expected_ports = [
{
'IP': '0.0.0.0',
'PrivatePort': 1111,
'PublicPort': 1112,
'Type': 'tcp',
},
{
'IP': '0.0.0.0',
'PrivatePort': 2222,
'Type': 'tcp',
'PublicPort': 'UNKNOWN', # Gets filled in below.
},
{
'IP': '0.0.0.0',
'PrivatePort': 3333,
'PublicPort': 3334,
'Type': 'udp',
},
{
'IP': '127.0.0.1',
'PrivatePort': 4444,
'PublicPort': 4445,
'Type': 'tcp'
},
]
received_ports = sorted(
[
data for data in instance['Ports']
],
key=lambda d: (d['PrivatePort'], d.get('PublicPort', float('inf'))),
)
for i, data in enumerate(received_ports):
if data['PrivatePort'] == 2222:
expected_ports[i]['PublicPort'] = data['PublicPort']
host_port_2222 = data['PublicPort']
assert data == expected_ports[i]
details = busybox.inspect()
expected_host_config_ports = {
'1111/tcp': [{'HostIp': '', 'HostPort': '1112'}],
'2222/tcp': [{'HostIp': '', 'HostPort': ''}],
'3333/udp': [{'HostIp': '', 'HostPort': '3334'}],
'4444/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4445'}],
}
assert details['HostConfig']['PortBindings'] == expected_host_config_ports
expected_network_ports = {
'1111/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '1112'}],
'2222/tcp': [{'HostIp': '0.0.0.0', 'HostPort': str(host_port_2222)}],
'3333/udp': [{'HostIp': '0.0.0.0', 'HostPort': '3334'}],
'4444/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4445'}],
}
assert details['NetworkSettings']['Ports'] == expected_network_ports
def test_container_build_remove(busybox, capsys):
# Ensure that we actually do a build.
busybox.remove_images()
busybox.build()
stdout, stderr = capsys.readouterr()
assert stderr == ''
stdout = stdout.splitlines()
assert len(stdout) == 9
assert stdout[0] == 'Step 1/2 : FROM busybox'
assert stdout[1].startswith(' --->')
assert stdout[2] == 'Step 2/2 : RUN echo testing'
assert stdout[3].startswith(' ---> Running in')
assert stdout[4] == 'testing'
assert stdout[5].startswith('Removing intermediate container')
assert stdout[6].startswith(' --->')
assert stdout[7].startswith('Successfully built')
assert stdout[8].startswith('Successfully tagged')
image = scalar(busybox.images())
assert image['RepoTags'] == [
'{}/{}:{}'.format(TEST_ORG, 'busybox', TEST_TAG)
]
busybox.remove_images()
assert busybox.images() == []
def test_build_failed_pull(capsys):
orphan = make_container('orphan')
orphan.build()
stdout, stderr = capsys.readouterr()
assert stderr == ''
stdout = stdout.splitlines()
assert len(stdout) == 2
assert(
stdout[0] ==
"Step 1/1 : FROM dockorm_fake_org/dockorm_fake_image:dockorm_fake_tag"
)
assert (
"pull access denied for dockorm_fake_org/dockorm_fake_image" in stdout[1]
)
|
|
"""Sequence-to-sequence model with an attention mechanism."""
import random
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
from tensorflow.models.rnn.translate import data_utils
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
  This class also allows the use of GRU cells in addition to LSTM cells, and
  sampled softmax to handle a large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/pdf/1412.2007v2.pdf
"""
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, use_lstm=False,
num_samples=512, forward_only=False):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
with tf.device("/cpu:0"):
w = tf.get_variable("proj_w", [size, self.target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [self.target_vocab_size])
output_projection = (w, b)
def sampled_loss(inputs, labels):
with tf.device("/cpu:0"):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
self.target_vocab_size)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = rnn_cell.GRUCell(size)
if use_lstm:
single_cell = rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell, source_vocab_size,
target_vocab_size, output_projection=output_projection,
feed_previous=do_decode)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, self.target_vocab_size,
lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [tf.nn.xw_plus_b(output, output_projection[0],
output_projection[1])
for output in self.outputs[b]]
else:
self.outputs, self.losses = seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, self.target_vocab_size,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.all_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
      ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for l in xrange(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in xrange(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in xrange(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...) later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
for _ in xrange(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
# Encoder inputs are padded and then reversed.
encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([data_utils.GO_ID] + decoder_input +
[data_utils.PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in xrange(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD symbol.
# The corresponding target is decoder_input shifted by 1 forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
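  # Worked illustration (hypothetical values) of the re-indexing done above:
  # for a bucket of (encoder_size=2, decoder_size=3), batch_size=2, and both
  # sampled pairs equal to ([7], [5]), each encoder input is padded and
  # reversed to [PAD, 7] and each decoder input becomes [GO, 5, PAD], so the
  # batch-major result is
  #   batch_encoder_inputs = [array([PAD, PAD]), array([7, 7])]
  #   batch_decoder_inputs = [array([GO, GO]), array([5, 5]), array([PAD, PAD])]
  #   batch_weights        = [array([1., 1.]), array([0., 0.]), array([0., 0.])]
  # where the weight is zeroed whenever the corresponding target is PAD or the
  # position is the last decoder step.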
|
|
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for linter_utils.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import os
import tempfile
from core.tests import test_utils
import python_utils
from . import linter_utils
class MemoizeTest(test_utils.GenericTestBase):
"""Tests for the memoize function."""
def test_memoize_with_args(self):
call_counter = collections.Counter()
@linter_utils.memoize
def count_calls(arg):
"""Counts calls made with given arg."""
call_counter[arg] += 1
unique_objs = (python_utils.OBJECT(), python_utils.OBJECT())
self.assertEqual(call_counter[unique_objs[0]], 0)
self.assertEqual(call_counter[unique_objs[1]], 0)
count_calls(unique_objs[0])
self.assertEqual(call_counter[unique_objs[0]], 1)
self.assertEqual(call_counter[unique_objs[1]], 0)
count_calls(unique_objs[0])
count_calls(unique_objs[1])
self.assertEqual(call_counter[unique_objs[0]], 1)
self.assertEqual(call_counter[unique_objs[1]], 1)
def test_memoize_with_kwargs(self):
call_counter = collections.Counter()
@linter_utils.memoize
def count_calls(**kwargs):
"""Counts calls made with given kwargs."""
hashable_kwargs = tuple(sorted(kwargs.items()))
call_counter[hashable_kwargs] += 1
empty_kwargs = ()
nonempty_kwargs = (('kwarg', 0),)
self.assertEqual(call_counter[empty_kwargs], 0)
self.assertEqual(call_counter[nonempty_kwargs], 0)
count_calls()
self.assertEqual(call_counter[empty_kwargs], 1)
self.assertEqual(call_counter[nonempty_kwargs], 0)
count_calls()
count_calls(kwarg=0)
self.assertEqual(call_counter[empty_kwargs], 1)
self.assertEqual(call_counter[nonempty_kwargs], 1)
def test_memoize_with_kwargs_using_default_values(self):
call_counter = collections.Counter()
@linter_utils.memoize
def count_calls(kwarg=0):
"""Counts calls made with given kwargs."""
call_counter[kwarg] += 1
self.assertEqual(call_counter[0], 0)
count_calls()
self.assertEqual(call_counter[0], 1)
count_calls(kwarg=0)
self.assertEqual(call_counter[0], 1)
def test_memoize_with_methods(self):
class CallCounter(python_utils.OBJECT):
"""Counts calls made to an instance."""
def __init__(self):
self.count = 0
@linter_utils.memoize
def __call__(self):
self.count += 1
call_counter_a, call_counter_b = CallCounter(), CallCounter()
self.assertEqual(call_counter_a.count, 0)
self.assertEqual(call_counter_b.count, 0)
call_counter_a()
self.assertEqual(call_counter_a.count, 1)
self.assertEqual(call_counter_b.count, 0)
call_counter_a()
call_counter_b()
self.assertEqual(call_counter_a.count, 1)
self.assertEqual(call_counter_b.count, 1)
def test_memoize_with_classmethods(self):
class GoodCallCounter(python_utils.OBJECT):
"""Counts calls made to the class."""
count = 0
@classmethod
@linter_utils.memoize
def method_decorated_by_memoize_before_classmethod(cls):
"""memoize is called first so this def will work properly."""
cls.count += 1
call_counter_a, call_counter_b = GoodCallCounter(), GoodCallCounter()
self.assertEqual(GoodCallCounter.count, 0)
call_counter_a.method_decorated_by_memoize_before_classmethod()
self.assertEqual(GoodCallCounter.count, 1)
call_counter_a.method_decorated_by_memoize_before_classmethod()
call_counter_b.method_decorated_by_memoize_before_classmethod()
self.assertEqual(GoodCallCounter.count, 1)
with self.assertRaisesRegexp(TypeError, 'not a Python function'):
class BadCallCounter(python_utils.OBJECT): # pylint: disable=unused-variable
"""Counts calls made to the class."""
count = 0
@linter_utils.memoize
@classmethod
def method_decorated_by_classmethod_before_memoize(cls):
"""classmethods are not real functions so trying to memoize
them will raise a TypeError.
"""
def test_memoize_with_argument_values_in_different_orders(self):
call_counter = collections.Counter()
@linter_utils.memoize
def count_calls(a, b, c=0, d=1):
"""Counts calls made with the given arguments."""
key = (a, b, c, d)
call_counter[key] += 1
self.assertEqual(call_counter[(5, 6, 0, 1)], 0)
self.assertEqual(call_counter[(6, 5, 0, 1)], 0)
count_calls(5, 6)
count_calls(6, 5)
self.assertEqual(call_counter[(5, 6, 0, 1)], 1)
self.assertEqual(call_counter[(6, 5, 0, 1)], 1)
count_calls(5, 6, c=0, d=1)
count_calls(6, 5, c=0, d=1)
self.assertEqual(call_counter[(5, 6, 0, 1)], 1)
self.assertEqual(call_counter[(6, 5, 0, 1)], 1)
self.assertEqual(call_counter[(5, 6, 2, 3)], 0)
count_calls(5, 6, c=2, d=3)
self.assertEqual(call_counter[(5, 6, 2, 3)], 1)
count_calls(5, 6, d=3, c=2)
self.assertEqual(call_counter[(5, 6, 3, 2)], 0)
self.assertEqual(call_counter[(5, 6, 2, 3)], 1)
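# The memoize tests above rely on a decorator that keys its cache on the fully
# bound call, so that count_calls(5, 6) and count_calls(5, 6, c=0, d=1) share a
# single cache entry. The sketch below is illustrative only; the actual
# linter_utils.memoize may be implemented differently.
import functools
import inspect
def _memoize_sketch(func):
    """Illustrative memoize: caches results keyed on the normalized call."""
    cache = {}
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Bind arguments to the signature so that calls which only differ in
        # how default values are spelled map to the same cache key.
        callargs = inspect.getcallargs(func, *args, **kwargs)
        key = tuple(sorted(
            (name, tuple(sorted(value.items()))
             if isinstance(value, dict) else value)
            for name, value in callargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper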
class RedirectStdoutTest(test_utils.GenericTestBase):
"""Tests for the redirect_stdout function."""
def test_redirect_stdout(self):
temp_file = tempfile.NamedTemporaryFile()
with python_utils.open_file(temp_file.name, 'r+') as temp_file_contents:
with linter_utils.redirect_stdout(temp_file_contents):
python_utils.PRINT('This is a test')
temp_file_contents.seek(0)
data = temp_file_contents.read()
temp_file.close()
self.assertEqual(data, 'This is a test\n')
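# A minimal sketch of the redirect_stdout context manager exercised above
# (illustrative; the actual linter_utils.redirect_stdout may differ): it swaps
# sys.stdout for the given stream inside the with-block and restores it after.
import contextlib
import sys
@contextlib.contextmanager
def _redirect_stdout_sketch(new_target):
    """Temporarily redirects sys.stdout to new_target."""
    old_target = sys.stdout
    sys.stdout = new_target
    try:
        yield new_target
    finally:
        sys.stdout = old_target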
class ListDuplicateItemsTest(test_utils.GenericTestBase):
"""Tests for the get_duplicates_from_list_of_strings function."""
    def test_get_duplicates_from_list_of_strings_with_duplicate_strings(self):
strings_list = ['A', 'B', 'B', 'C', 'C', 'C']
duplicates = linter_utils.get_duplicates_from_list_of_strings(
strings_list)
self.assertEqual(sorted(duplicates), ['B', 'C'])
    def test_get_duplicates_from_list_of_strings_without_duplicate_strings(self):
strings_list = ['A', 'B', 'C']
duplicates = linter_utils.get_duplicates_from_list_of_strings(
strings_list)
self.assertEqual(duplicates, [])
class TempDirTest(test_utils.GenericTestBase):
"""Tests for the temp_dir function."""
def test_directory_only_exists_within_context(self):
with linter_utils.temp_dir() as temp_dir_path:
self.assertTrue(os.path.exists(temp_dir_path))
self.assertTrue(os.path.isdir(temp_dir_path))
self.assertFalse(os.path.exists(temp_dir_path))
def test_directory_is_placed_in_specified_dir(self):
with linter_utils.temp_dir(parent=os.getcwd()) as temp_dir_path:
parent = os.path.abspath(
os.path.join(temp_dir_path, os.path.pardir))
self.assertEqual(parent, os.getcwd())
def test_directory_has_prefix_prepended(self):
with linter_utils.temp_dir(prefix='abc') as temp_dir_path:
self.assertTrue(os.path.basename(temp_dir_path).startswith('abc'))
def test_directory_has_suffix_appended(self):
with linter_utils.temp_dir(suffix='cba') as temp_dir_path:
self.assertTrue(os.path.basename(temp_dir_path).endswith('cba'))
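# A minimal sketch of the temp_dir context manager exercised above
# (illustrative; the actual linter_utils.temp_dir may differ): it creates a
# directory with tempfile.mkdtemp and removes it when the with-block exits.
import contextlib
import shutil
@contextlib.contextmanager
def _temp_dir_sketch(suffix='', prefix='tmp', parent=None):
    """Creates a temporary directory that only exists within the context."""
    new_dir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=parent)
    try:
        yield new_dir
    finally:
        shutil.rmtree(new_dir)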
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Oozie API classes.
This is mostly just codifying the data structures of the Oozie REST API.
http://incubator.apache.org/oozie/docs/3.2.0-incubating/docs/WebServicesAPI.html
"""
import re
from cStringIO import StringIO
from time import mktime
from desktop.lib import i18n
from desktop.lib.exceptions_renderable import PopupException
from desktop.log.access import access_warn
import hadoop.confparse
from liboozie.utils import parse_timestamp, format_time
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
class Action(object):
def __init__(self, json_dict):
for attr in self._ATTRS:
setattr(self, attr, json_dict.get(attr))
self._fixup()
def _fixup(self): pass
def is_finished(self):
return self.status in ('OK', 'SUCCEEDED', 'DONE')
@classmethod
  def create(cls, action_class, action_dict):
if ControlFlowAction.is_control_flow(action_dict.get('type')):
return ControlFlowAction(action_dict)
else:
return action_class(action_dict)
def __str__(self):
return '%s - %s' % (self.type, self.name)
class ControlFlowAction(Action):
_ATTRS = [
'errorMessage',
'status',
'stats',
'data',
'transition',
'externalStatus',
'cred',
'conf',
'type',
'endTime',
'externalId',
'id',
'startTime',
'externalChildIDs',
'name',
'errorCode',
'trackerUri',
'retries',
'toString',
'consoleUrl'
]
@classmethod
  def is_control_flow(cls, action_type):
    return action_type is not None and (':' in action_type or action_type == 'switch')
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
- protect externalId
"""
super(ControlFlowAction, self)._fixup()
if self.startTime:
self.startTime = parse_timestamp(self.startTime)
if self.endTime:
self.endTime = parse_timestamp(self.endTime)
if self.retries:
self.retries = int(self.retries)
if self.externalId and not re.match('job_.*', self.externalId):
self.externalId = None
self.conf_dict = {}
class WorkflowAction(Action):
_ATTRS = [
'conf',
'consoleUrl',
'data',
'endTime',
'errorCode',
'errorMessage',
'externalId',
'externalStatus',
'id',
'name',
'retries',
'startTime',
'status',
'trackerUri',
'transition',
'type',
'externalChildIDs',
]
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
"""
super(WorkflowAction, self)._fixup()
if self.startTime:
self.startTime = parse_timestamp(self.startTime)
if self.endTime:
self.endTime = parse_timestamp(self.endTime)
if self.retries:
self.retries = int(self.retries)
if self.conf:
xml = StringIO(i18n.smart_str(self.conf))
self.conf_dict = hadoop.confparse.ConfParse(xml)
else:
self.conf_dict = {}
def get_absolute_url(self):
related_job_ids = []
if hasattr(self, 'oozie_coordinator') and self.oozie_coordinator:
related_job_ids.append('coordinator_job_id=%s' % self.oozie_coordinator.id)
if hasattr(self, 'oozie_bundle') and self.oozie_bundle:
related_job_ids.append('bundle_job_id=%s' % self.oozie_bundle.id)
if related_job_ids:
extra_params = '?' + '&'.join(related_job_ids)
else:
extra_params = ''
return reverse('oozie:list_oozie_workflow_action', kwargs={'action': self.id}) + extra_params
def get_absolute_log_url(self):
url = None
if self.externalId and re.match('job_.*', self.externalId):
url = self.externalId and reverse('jobbrowser.views.job_single_logs', kwargs={'job': self.externalId}) or ''
return url
def get_external_id_url(self):
url = None
if self.externalId and self.externalId.endswith('W'):
url = reverse('oozie:list_oozie_workflow', kwargs={'job_id': self.externalId}) or ''
elif self.externalId and re.match('job_.*', self.externalId):
url = reverse('jobbrowser.views.single_job', kwargs={'job': self.externalId}) or ''
return url
class CoordinatorAction(Action):
_ATTRS = [
'status',
'runConf',
'errorMessage',
'missingDependencies',
'coordJobId',
'errorCode',
'actionNumber',
'consoleUrl',
'nominalTime',
'externalStatus',
'createdConf',
'createdTime',
'externalId',
'lastModifiedTime',
'type',
'id',
'trackerUri'
]
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
"""
super(CoordinatorAction, self)._fixup()
if self.createdTime:
self.createdTime = parse_timestamp(self.createdTime)
if self.nominalTime:
self.nominalTime = parse_timestamp(self.nominalTime)
if self.lastModifiedTime:
self.lastModifiedTime = parse_timestamp(self.lastModifiedTime)
if self.runConf:
xml = StringIO(i18n.smart_str(self.runConf))
self.conf_dict = hadoop.confparse.ConfParse(xml)
else:
self.conf_dict = {}
    self.title = ' %s-%s' % (self.actionNumber, format_time(self.nominalTime))
class BundleAction(Action):
_ATTRS = [
'startTime',
'actions',
'frequency',
'concurrency',
'pauseTime',
'group',
'toString',
'consoleUrl',
'mat_throttling',
'status',
'conf',
'user',
'timeOut',
'coordJobPath',
'timeUnit',
'coordJobId',
'coordJobName',
'nextMaterializedTime',
'coordExternalId',
'acl',
'lastAction',
'executionPolicy',
'timeZone',
'endTime'
]
def _fixup(self):
"""
Fixup:
- time fields as struct_time
- config dict
"""
super(BundleAction, self)._fixup()
self.type = 'coord-action'
self.name = self.coordJobName
if self.conf:
xml = StringIO(i18n.smart_str(self.conf))
self.conf_dict = hadoop.confparse.ConfParse(xml)
else:
self.conf_dict = {}
  def get_progress(self):
    """Percentage of the bundle's time range that has elapsed, up to the last action."""
    next_time = mktime(parse_timestamp(self.lastAction))
    start = mktime(parse_timestamp(self.startTime))
    end = mktime(parse_timestamp(self.endTime))
    if end != start:
      progress = min(int((1 - (end - next_time) / (end - start)) * 100), 100)
    else:
      progress = 100
    return progress
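  # Worked example for get_progress() above (illustrative epoch times): with
  # startTime=0.0, endTime=1000.0 and lastAction parsed to 250.0, progress is
  # min(int((1 - (1000.0 - 250.0) / (1000.0 - 0.0)) * 100), 100) = 25.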
class Job(object):
  """
  Accessing log and definition will trigger Oozie API calls.
  """
  MAX_LOG_SIZE = 3500 * 20 # 20 pages
def __init__(self, api, json_dict):
for attr in self._ATTRS:
setattr(self, attr, json_dict.get(attr))
self._fixup()
self._api = api
self._log = None
self._definition = None
def _fixup(self):
"""
Fixup fields:
- expand actions
- time fields are struct_time
- run is integer
- configuration dict
- log
- definition
"""
if self.startTime:
self.startTime = parse_timestamp(self.startTime)
if self.endTime:
self.endTime = parse_timestamp(self.endTime)
self.actions = [Action.create(self.ACTION, act_dict) for act_dict in self.actions]
if self.conf is not None:
xml = StringIO(i18n.smart_str(self.conf))
self.conf_dict = hadoop.confparse.ConfParse(xml)
else:
self.conf_dict = {}
def _get_log(self):
"""Get the log lazily, trigger Oozie API call at the first access."""
if self._log is None:
self._log = self._api.get_job_log(self.id)
return self._log[-Job.MAX_LOG_SIZE:]
log = property(_get_log)
def _get_definition(self):
"""Get the definition lazily, trigger Oozie API call at the first access."""
if self._definition is None:
self._definition = self._api.get_job_definition(self.id)
return self._definition
definition = property(_get_definition)
def start(self):
self._api.job_control(self.id, 'start')
def suspend(self):
self._api.job_control(self.id, 'suspend')
def resume(self):
self._api.job_control(self.id, 'resume')
def kill(self):
self._api.job_control(self.id, 'kill')
def available_actions(self):
"""
available_actions() -> Zero or more of [ 'start', 'suspend', 'resume', 'kill' ]
"""
if self.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
return []
res = []
if self.status == 'PREP':
res.append('start')
if self.status == 'RUNNING':
res.append('suspend')
if self.status == 'SUSPENDED':
res.append('resume')
res.append('kill')
return res
def check_request_permission(self, request):
"""Raise PopupException if request user doesn't have permission to modify workflow"""
if not request.user.is_superuser and request.user.username != self.user:
access_warn(request, _('Insufficient permission.'))
raise PopupException(_("Permission denied. User %(username)s cannot modify user %(user)s's job.") %
dict(username=request.user.username, user=self.user))
def get_control_flow_actions(self):
return [action for action in self.actions if ControlFlowAction.is_control_flow(action.type)]
def get_working_actions(self):
return [action for action in self.actions if not ControlFlowAction.is_control_flow(action.type)]
def is_running(self):
return self.status in Workflow.RUNNING_STATUSES | Coordinator.RUNNING_STATUSES | Bundle.RUNNING_STATUSES
def __str__(self):
return '%s - %s' % (self.id, self.status)
@property
def has_sla(self):
return '<sla:info>' in self.definition
class Workflow(Job):
_ATTRS = [
'actions',
'appName',
'appPath',
'conf',
'consoleUrl',
'createdTime',
'endTime',
'externalId',
'group',
'id',
'lastModTime',
'run',
'startTime',
'status',
'user',
'acl',
'parentId'
]
ACTION = WorkflowAction
RUNNING_STATUSES = set(['PREP', 'RUNNING', 'SUSPENDED'])
FINISHED_STATUSES = set(['SUCCEEDED', 'KILLED', 'FAILED'])
def _fixup(self):
super(Workflow, self)._fixup()
if self.createdTime:
self.createdTime = parse_timestamp(self.createdTime)
if self.lastModTime:
self.lastModTime = parse_timestamp(self.lastModTime)
if self.run:
self.run = int(self.run)
@property
def type(self):
return 'Workflow'
def get_parent_job_id(self):
if self.parentId and '@' in self.parentId:
return self.parentId.split('@')[0]
return self.parentId
def get_absolute_url(self, format='html'):
extra_params = []
if format == 'json':
extra_params.append('format=json')
if hasattr(self, 'oozie_coordinator') and self.oozie_coordinator:
extra_params.append('coordinator_job_id=%s' % self.oozie_coordinator.id)
if hasattr(self, 'oozie_bundle') and self.oozie_bundle:
extra_params.append('bundle_job_id=%s' % self.oozie_bundle.id)
if extra_params:
extra_params = '?' + '&'.join(extra_params)
else:
extra_params = ''
return reverse('oozie:list_oozie_workflow', kwargs={'job_id': self.id}) + extra_params
def get_progress(self, full_node_list=None):
if self.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
return 100 # Case of decision nodes
else:
if full_node_list is not None: # Should remove the un-reached branches if decision node
total_actions = len(full_node_list) - 1 # -1 because of Kill node
else:
total_actions = len(self.actions)
return int(sum([action.is_finished() for action in self.actions]) / float(max(total_actions, 1)) * 100)
class Coordinator(Job):
_ATTRS = [
'acl',
'actions',
'conf',
'concurrency',
'consoleUrl',
'coordExternalId',
'coordJobId',
'coordJobName',
'coordJobPath',
'endTime',
'executionPolicy',
'frequency',
'group',
'lastAction',
'mat_throttling',
'nextMaterializedTime',
'pauseTime',
'startTime',
'status',
'timeOut',
'timeUnit',
'timeZone',
'user',
'bundleId'
]
ACTION = CoordinatorAction
RUNNING_STATUSES = set(['PREP', 'RUNNING', 'RUNNINGWITHERROR', 'PREPSUSPENDED', 'SUSPENDED', 'SUSPENDEDWITHERROR', 'PREPPAUSED', 'PAUSED', 'PAUSEDWITHERROR'])
FINISHED_STATUSES = set(['SUCCEEDED', 'DONEWITHERROR', 'KILLED', 'FAILED'])
def _fixup(self):
super(Coordinator, self)._fixup()
if self.nextMaterializedTime is not None:
self.nextMaterializedTime = parse_timestamp(self.nextMaterializedTime)
else:
self.nextMaterializedTime = self.startTime
# For when listing/mixing all the jobs together
self.id = self.coordJobId
self.appName = self.coordJobName
@property
def type(self):
return 'Coordinator'
def get_absolute_url(self, oozie_bundle=None, format='html'):
extra_params = []
if format == 'json':
extra_params.append('format=json')
if oozie_bundle:
extra_params.append('bundle_job_id=%s' % oozie_bundle.id)
if hasattr(self, 'bundleId') and self.bundleId:
extra_params.append('bundle_job_id=%s' % self.bundleId)
if extra_params:
extra_params = '?' + '&'.join(extra_params)
else:
extra_params = ''
return reverse('oozie:list_oozie_coordinator', kwargs={'job_id': self.id}) + extra_params
  def get_progress(self):
    """Percentage of the coordinator's time range materialized so far, adjusted for reruns."""
    next_time = mktime(self.nextMaterializedTime)
    start = mktime(self.startTime)
    end = mktime(self.endTime)
    if end != start:
      progress = min(int((1 - (end - next_time) / (end - start)) * 100), 100)
    else:
      progress = 100
    # Manage the case of a rerun
    action_count = float(len(self.actions))
    if action_count != 0 and progress == 100:
      progress = int(sum([action.is_finished() for action in self.actions]) / action_count * 100)
    return progress
@classmethod
def aggreate(cls, actions):
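    """Collapses a list of action-number strings into 'first-last' ranges.

    Illustrative example (hypothetical input):
    aggreate(['1', '2', '3', '7', '8', '10']) returns ['1-3', '7-8', '10-10'].
    """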
if not actions:
return []
result = []
first = prev = actions[0]
for a in actions[1:]:
if int(a) != int(prev) + 1:
result.append('-'.join((first, prev)))
first = a
prev = a
result.append('-'.join((first, prev)))
return result
@property
def human_frequency(self):
from oozie.models import Coordinator
return Coordinator.CRON_MAPPING.get(self.frequency, self.frequency)
class Bundle(Job):
_ATTRS = [
'status',
'toString',
'group',
'conf',
'bundleJobName',
'startTime',
'bundleCoordJobs',
'kickoffTime',
'acl',
'bundleJobPath',
'createdTime',
'timeOut',
'consoleUrl',
'bundleExternalId',
'timeUnit',
'pauseTime',
'bundleJobId',
'endTime',
'user',
]
ACTION = BundleAction
RUNNING_STATUSES = set(['PREP', 'RUNNING', 'RUNNINGWITHERROR', 'SUSPENDED', 'PREPSUSPENDED', 'SUSPENDEDWITHERROR', 'PAUSED', 'PAUSEDWITHERROR', 'PREPPAUSED'])
FINISHED_STATUSES = set(['SUCCEEDED', 'DONEWITHERROR', 'KILLED', 'FAILED'])
def _fixup(self):
self.actions = self.bundleCoordJobs
super(Bundle, self)._fixup()
# For when listing/mixing all the jobs together
self.id = self.bundleJobId
self.appName = self.bundleJobName
@property
def type(self):
return 'Bundle'
def get_absolute_url(self, format='html'):
extra_params = ''
if format == 'json':
extra_params = '?format=json'
return reverse('oozie:list_oozie_bundle', kwargs={'job_id': self.id}) + extra_params
def get_progress(self):
progresses = [action.get_progress() for action in self.actions]
count = len(progresses)
if count != 0:
return sum(progresses) / float(count)
else:
return 0
class JobList(object):
"""
Represents a list of Oozie jobs (Workflows or Coordinators or Bundles).
"""
_ATTRS = [
'offset',
'len',
'total',
'jobs',
]
def __init__(self, klass, jobs_key, api, json_dict, filters=None):
"""
json_dict is the oozie json.
filters is (optionally) the list of filters used to select this list
"""
self._api = api
self.offset = int(json_dict['offset'])
self.total = int(json_dict['total'])
self.jobs = [klass(self._api, wf_dict) for wf_dict in json_dict[jobs_key]]
self.filters = filters
class WorkflowList(JobList):
def __init__(self, api, json_dict, filters=None):
super(WorkflowList, self).__init__(Workflow, 'workflows', api, json_dict, filters)
class CoordinatorList(JobList):
def __init__(self, api, json_dict, filters=None):
super(CoordinatorList, self).__init__(Coordinator, 'coordinatorjobs', api, json_dict, filters)
class BundleList(JobList):
def __init__(self, api, json_dict, filters=None):
super(BundleList, self).__init__(Bundle, 'bundlejobs', api, json_dict, filters)
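# Usage sketch (illustrative only; the job id, field values and the absent API
# object below are made up): a Workflow is built from the JSON dict returned by
# the Oozie REST API, and its 'actions' entries are expanded into
# ControlFlowAction / WorkflowAction instances by Action.create().
if __name__ == '__main__':
  _sample_json = {
    'id': '0000001-130213024947145-oozie-oozi-W',
    'appName': 'example-wf',
    'status': 'RUNNING',
    'run': '0',
    'conf': None,
    'actions': [
      {'type': ':START:', 'name': ':start:', 'status': 'OK'},
      {'type': 'map-reduce', 'name': 'mr-step', 'status': 'RUNNING'},
    ],
  }
  _wf = Workflow(api=None, json_dict=_sample_json)
  print('%s is_running=%s' % (_wf, _wf.is_running()))
  print([action.__class__.__name__ for action in _wf.actions])
  # Expected output:
  #   0000001-130213024947145-oozie-oozi-W - RUNNING is_running=True
  #   ['ControlFlowAction', 'WorkflowAction']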
|
|
#!/usr/bin/python2.7
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage: build.py <0 or more of accessible, core, generators, langfiles>
# build.py with no parameters builds all files.
# core builds blockly_compressed, blockly_uncompressed, and blocks_compressed.
# accessible builds blockly_accessible_compressed,
# blockly_accessible_uncompressed, and blocks_compressed.
# generators builds every <language>_compressed.js.
# langfiles builds every msg/js/<LANG>.js file.
# This script generates four versions of Blockly's core files. The first pair
# are:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
#
# The second pair are:
# blockly_accessible_compressed.js
# blockly_accessible_uncompressed.js
# These files are analogous to blockly_compressed and blockly_uncompressed,
# but also include the visually-impaired module for Blockly.
#
# This script also generates:
# blocks_compressed.js: The compressed Blockly language blocks.
# javascript_compressed.js: The compressed JavaScript generator.
# python_compressed.js: The compressed Python generator.
# dart_compressed.js: The compressed Dart generator.
# lua_compressed.js: The compressed Lua generator.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
import sys
if sys.version_info[0] != 2:
raise Exception("Blockly build only compatible with Python 2.x.\n"
"You are using: " + sys.version)
for arg in sys.argv[1:len(sys.argv)]:
if (arg != 'core' and
arg != 'accessible' and
arg != 'generators' and
arg != 'langfiles'):
raise Exception("Invalid argument: \"" + arg + "\". Usage: build.py <0 or more of accessible," +
" core, generators, langfiles>")
import errno, glob, httplib, json, os, re, subprocess, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date.
del sys.path[-1]
return module
HEADER = ("// Do not edit this file; automatically generated by build.py.\n"
"'use strict';\n")
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths, target_filename):
threading.Thread.__init__(self)
self.search_paths = search_paths
self.target_filename = target_filename
def run(self):
f = open(self.target_filename, 'w')
f.write(HEADER)
f.write("""
var isNodeJS = !!(typeof module !== 'undefined' && module.exports &&
typeof window === 'undefined');
if (isNodeJS) {
var window = {};
require('closure-library');
}
window.BLOCKLY_DIR = (function() {
if (!isNodeJS) {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_(.*)uncompressed\.js$');
for (var i = 0, script; script = scripts[i]; i++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
}
return '';
})();
window.BLOCKLY_BOOT = function() {
var dir = '';
if (isNodeJS) {
require('closure-library');
dir = 'blockly';
} else {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'developers.google.com/blockly/guides/modify/web/closure');
}
dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
}
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith(os.pardir + os.sep): # '../'
provides.extend(dep.provides)
provides.sort()
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write("goog.require('%s');\n" % provide)
f.write("""
delete this.BLOCKLY_DIR;
delete this.BLOCKLY_BOOT;
};
if (isNodeJS) {
window.BLOCKLY_BOOT();
module.exports = Blockly;
} else {
// Delete any existing Closure (e.g. Soy's nogoog_shim).
document.write('<script>var goog = undefined;</script>');
// Load fresh Closure Library.
document.write('<script src="' + window.BLOCKLY_DIR +
'/../closure-library/closure/goog/base.js"></script>');
document.write('<script>window.BLOCKLY_BOOT();</script>');
}
""")
f.close()
print("SUCCESS: " + self.target_filename)
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths, bundles):
threading.Thread.__init__(self)
self.search_paths = search_paths
self.bundles = bundles
def run(self):
if ('core' in self.bundles):
self.gen_core()
if ('accessible' in self.bundles):
self.gen_accessible()
if ('core' in self.bundles or 'accessible' in self.bundles):
self.gen_blocks()
if ('generators' in self.bundles):
self.gen_generator("javascript")
self.gen_generator("python")
self.gen_generator("php")
self.gen_generator("dart")
self.gen_generator("lua")
def gen_core(self):
target_filename = "blockly_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("use_closure_library", "true"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(self.search_paths,
[os.path.join("core", "blockly.js")])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, "")
def gen_accessible(self):
target_filename = "blockly_accessible_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("use_closure_library", "true"),
("language_out", "ES5"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(self.search_paths,
[os.path.join("accessible", "app.component.js")])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, "")
def gen_blocks(self):
target_filename = "blocks_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Blocks to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Blocks');"))
filenames = glob.glob(os.path.join("blocks", "*.js"))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
# Remove Blockly.Blocks to be compatible with Blockly.
remove = "var Blockly={Blocks:{}};"
self.do_compile(params, target_filename, filenames, remove)
def gen_generator(self, language):
target_filename = language + "_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Generator to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Generator');"))
filenames = glob.glob(
os.path.join("generators", language, "*.js"))
filenames.insert(0, os.path.join("generators", language + ".js"))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
filenames.insert(0, "[goog.provide]")
# Remove Blockly.Generator to be compatible with Blockly.
remove = "var Blockly={Generator:{}};"
self.do_compile(params, target_filename, filenames, remove)
def do_compile(self, params, target_filename, filenames, remove):
# Send the request to Google.
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPConnection("closure-compiler.appspot.com")
conn.request("POST", "/compile", urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith("Input_"):
return "???"
n = int(name[6:]) - 1
return filenames[n]
if json_data.has_key("serverErrors"):
errors = json_data["serverErrors"]
for error in errors:
print("SERVER ERROR: %s" % target_filename)
print(error["error"])
elif json_data.has_key("errors"):
errors = json_data["errors"]
for error in errors:
print("FATAL ERROR")
print(error["error"])
if error["file"]:
print("%s at line %d:" % (
file_lookup(error["file"]), error["lineno"]))
print(error["line"])
print((" " * error["charno"]) + "^")
sys.exit(1)
else:
if json_data.has_key("warnings"):
warnings = json_data["warnings"]
for warning in warnings:
print("WARNING")
print(warning["warning"])
if warning["file"]:
print("%s at line %d:" % (
file_lookup(warning["file"]), warning["lineno"]))
print(warning["line"])
print((" " * warning["charno"]) + "^")
print()
if not json_data.has_key("compiledCode"):
print("FATAL ERROR: Compiler did not return compiledCode.")
sys.exit(1)
code = HEADER + "\n" + json_data["compiledCode"]
code = code.replace(remove, "")
# Trim down Google's Apache licences.
# The Closure Compiler used to preserve these until August 2015.
# Delete this in a few months if the licences don't return.
LICENSE = re.compile("""/\\*
[\w ]+
(Copyright \\d+ Google Inc.)
https://developers.google.com/blockly/
Licensed under the Apache License, Version 2.0 \(the "License"\);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
\\*/""")
code = re.sub(LICENSE, r"\n// \1 Apache License 2.0", code)
stats = json_data["statistics"]
original_b = stats["originalSize"]
compressed_b = stats["compressedSize"]
if original_b > 0 and compressed_b > 0:
f = open(target_filename, "w")
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print("SUCCESS: " + target_filename)
print("Size changed from %d KB to %d KB (%d%%)." % (
original_kb, compressed_kb, ratio))
else:
print("UNKNOWN ERROR")
class Gen_langfiles(threading.Thread):
"""Generate JavaScript file for each natural language supported.
Runs in a separate thread.
"""
def __init__(self, force_gen):
threading.Thread.__init__(self)
self.force_gen = force_gen
def _rebuild(self, srcs, dests):
# Determine whether any of the files in srcs is newer than any in dests.
try:
return (max(os.path.getmtime(src) for src in srcs) >
min(os.path.getmtime(dest) for dest in dests))
except OSError as e:
# Was a file not found?
if e.errno == errno.ENOENT:
# If it was a source file, we can't proceed.
if e.filename in srcs:
print("Source file missing: " + e.filename)
sys.exit(1)
else:
# If a destination file was missing, rebuild.
return True
else:
print("Error checking file creation times: " + e)
def run(self):
# The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
if (self.force_gen or
self._rebuild([os.path.join("msg", "messages.js")],
[os.path.join("msg", "json", f) for f in
["en.json", "qqq.json", "synonyms.json"]])):
try:
subprocess.check_call([
"python",
os.path.join("i18n", "js_to_json.py"),
"--input_file", "msg/messages.js",
"--output_dir", "msg/json/",
"--quiet"])
except (subprocess.CalledProcessError, OSError) as e:
# Documentation for subprocess.check_call says that CalledProcessError
# will be raised on failure, but I found that OSError is also possible.
print("Error running i18n/js_to_json.py: ", e)
sys.exit(1)
# Checking whether it is necessary to rebuild the js files would be a lot of
# work since we would have to compare each <lang>.json file with each
# <lang>.js file. Rebuilding is easy and cheap, so just go ahead and do it.
try:
# Use create_messages.py to create .js files from .json files.
cmd = [
"python",
os.path.join("i18n", "create_messages.py"),
"--source_lang_file", os.path.join("msg", "json", "en.json"),
"--source_synonym_file", os.path.join("msg", "json", "synonyms.json"),
"--source_constants_file", os.path.join("msg", "json", "constants.json"),
"--key_file", os.path.join("msg", "json", "keys.json"),
"--output_dir", os.path.join("msg", "js"),
"--quiet"]
json_files = glob.glob(os.path.join("msg", "json", "*.json"))
json_files = [file for file in json_files if not
(file.endswith(("keys.json", "synonyms.json", "qqq.json", "constants.json")))]
cmd.extend(json_files)
subprocess.check_call(cmd)
except (subprocess.CalledProcessError, OSError) as e:
print("Error running i18n/create_messages.py: ", e)
sys.exit(1)
# Output list of .js files created.
for f in json_files:
# This assumes the path to the current directory does not contain "json".
f = f.replace("json", "js")
if os.path.isfile(f):
print("SUCCESS: " + f)
else:
print("FAILED to create " + f)
if __name__ == "__main__":
try:
calcdeps = import_path(os.path.join(
os.path.pardir, "closure-library", "closure", "bin", "calcdeps.py"))
except ImportError:
if os.path.isdir(os.path.join(os.path.pardir, "closure-library-read-only")):
# Dir got renamed when Closure moved from Google Code to GitHub in 2014.
print("Error: Closure directory needs to be renamed from"
"'closure-library-read-only' to 'closure-library'.\n"
"Please rename this directory.")
elif os.path.isdir(os.path.join(os.path.pardir, "google-closure-library")):
# When Closure is installed by npm, it is named "google-closure-library".
#calcdeps = import_path(os.path.join(
# os.path.pardir, "google-closure-library", "closure", "bin", "calcdeps.py"))
print("Error: Closure directory needs to be renamed from"
"'google-closure-library' to 'closure-library'.\n"
"Please rename this directory.")
else:
print("""Error: Closure not found. Read this:
developers.google.com/blockly/guides/modify/web/closure""")
sys.exit(1)
core_search_paths = calcdeps.ExpandDirectories(
["core", os.path.join(os.path.pardir, "closure-library")])
full_search_paths = calcdeps.ExpandDirectories(
["accessible", "core", os.path.join(os.path.pardir, "closure-library")])
if (len(sys.argv) == 1):
args = ['core', 'accessible', 'generators', 'defaultlangfiles']
else:
args = sys.argv
# Uncompressed and compressed are run in parallel threads.
# Uncompressed is limited by processor speed.
if ('core' in args):
Gen_uncompressed(core_search_paths, 'blockly_uncompressed.js').start()
if ('accessible' in args):
Gen_uncompressed(full_search_paths, 'blockly_accessible_uncompressed.js').start()
# Compressed is limited by network and server speed.
Gen_compressed(full_search_paths, args).start()
# This is run locally in a separate thread
# defaultlangfiles checks for changes in the msg files, while manually asking
# to build langfiles will force the messages to be rebuilt.
if ('langfiles' in args or 'defaultlangfiles' in args):
Gen_langfiles('langfiles' in args).start()
|
|
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run interop (cross-language) tests in parallel."""
from __future__ import print_function
import argparse
import atexit
import itertools
import json
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid
import six
import traceback
import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
# Docker doesn't clean up after itself, so we do it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
_DEFAULT_SERVER_PORT=8080
_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
'client_compressed_streaming']
_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
'server_compressed_streaming']
_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
_SKIP_ADVANCED = ['status_code_and_message',
'custom_metadata',
'unimplemented_method',
'unimplemented_service']
_TEST_TIMEOUT = 3*60
# disable this test on core-based languages,
# see https://github.com/grpc/grpc/issues/9779
_SKIP_DATA_FRAME_PADDING = ['data_frame_padding']
class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.http2_cwd = None
self.safename = 'cxx'
def client_cmd(self, args):
return ['bins/opt/interop_client'] + args
def client_cmd_http2interop(self, args):
return ['bins/opt/http2_client'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['bins/opt/interop_server'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'c++'
class CSharpLanguage:
def __init__(self):
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
self.safename = str(self)
def client_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'csharp'
class CSharpCoreCLRLanguage:
def __init__(self):
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
self.safename = str(self)
def client_cmd(self, args):
return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'csharpcoreclr'
class JavaLanguage:
def __init__(self):
self.client_cwd = '../grpc-java'
self.server_cwd = '../grpc-java'
self.http2_cwd = '../grpc-java'
self.safename = str(self)
def client_cmd(self, args):
return ['./run-test-client.sh'] + args
def client_cmd_http2interop(self, args):
return ['./interop-testing/build/install/grpc-interop-testing/bin/http2-client'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['./run-test-server.sh'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'java'
class GoLanguage:
def __init__(self):
# TODO: this relies on running inside docker
self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
self.safename = str(self)
def client_cmd(self, args):
return ['go', 'run', 'client.go'] + args
def client_cmd_http2interop(self, args):
return ['go', 'run', 'negative_http2_client.go'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['go', 'run', 'server.go'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'go'
class Http2Server:
"""Represents the HTTP/2 Interop Test server
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.server_cwd = None
self.safename = str(self)
def server_cmd(self, args):
return ['python test/http2_test/http2_test_server.py']
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _TEST_CASES
def __str__(self):
return 'http2'
class Http2Client:
"""Represents the HTTP/2 Interop Test
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _TEST_CASES
def unimplemented_test_cases_server(self):
return _TEST_CASES
def __str__(self):
return 'http2'
class NodeLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['tools/run_tests/interop/with_nvm.sh',
'node', 'src/node/interop/interop_client.js'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['tools/run_tests/interop/with_nvm.sh',
'node', 'src/node/interop/interop_server.js'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'node'
class PHPLanguage:
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['src/php/bin/interop_client.sh'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'php'
class PHP7Language:
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['src/php/bin/interop_client.sh'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'php7'
class RubyLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['tools/run_tests/interop/with_rvm.sh',
'ruby', 'src/ruby/pb/test/client.rb'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['tools/run_tests/interop/with_rvm.sh',
'ruby', 'src/ruby/pb/test/server.rb'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'ruby'
class PythonLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.http2_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return [
'py27/bin/python',
'src/python/grpcio_tests/setup.py',
'run_interop',
'--client',
'--args="{}"'.format(' '.join(args))
]
def client_cmd_http2interop(self, args):
return [ 'py27/bin/python',
'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
'py27/bin/python',
'src/python/grpcio_tests/setup.py',
'run_interop',
'--server',
'--args="{}"'.format(' '.join(args))
]
def global_env(self):
return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'python'
_LANGUAGES = {
'c++' : CXXLanguage(),
'csharp' : CSharpLanguage(),
'csharpcoreclr' : CSharpCoreCLRLanguage(),
'go' : GoLanguage(),
'java' : JavaLanguage(),
'node' : NodeLanguage(),
'php' : PHPLanguage(),
'php7' : PHP7Language(),
'ruby' : RubyLanguage(),
'python' : PythonLanguage(),
}
# languages supported as cloud_to_cloud servers
_SERVERS = ['c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python']
_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
'empty_stream', 'client_streaming', 'server_streaming',
'cancel_after_begin', 'cancel_after_first_response',
'timeout_on_sleeping_server', 'custom_metadata',
'status_code_and_message', 'unimplemented_method',
'client_compressed_unary', 'server_compressed_unary',
'client_compressed_streaming', 'server_compressed_streaming',
'unimplemented_service']
_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
'oauth2_auth_token', 'per_rpc_creds']
_HTTP2_TEST_CASES = ['tls', 'framing']
_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
"""Wraps given cmdline array to create 'docker run' cmdline from it."""
docker_cmdline = ['docker', 'run', '-i', '--rm=true']
# turn environ into -e docker args
if environ:
for k,v in environ.items():
docker_cmdline += ['-e', '%s=%s' % (k,v)]
# set working directory
workdir = DOCKER_WORKDIR_ROOT
if cwd:
workdir = os.path.join(workdir, cwd)
docker_cmdline += ['-w', workdir]
docker_cmdline += docker_args + [image] + cmdline
return docker_cmdline
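# Illustrative example (image tag and values are made up):
#   docker_run_cmdline(['bash', '-c', 'echo hi'], image='grpc_interop_go:1234',
#                      cwd='interop/client', environ={'FOO': 'bar'},
#                      docker_args=['--net=host'])
# returns
#   ['docker', 'run', '-i', '--rm=true', '-e', 'FOO=bar',
#    '-w', '/var/local/git/grpc/interop/client', '--net=host',
#    'grpc_interop_go:1234', 'bash', '-c', 'echo hi']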
def manual_cmdline(docker_cmdline):
"""Returns docker cmdline adjusted for manual invocation."""
print_cmdline = []
for item in docker_cmdline:
if item.startswith('--name='):
continue
# add quotes when necessary
if any(character.isspace() for character in item):
item = "\"%s\"" % item
print_cmdline.append(item)
return ' '.join(print_cmdline)
def write_cmdlog_maybe(cmdlog, filename):
"""Returns docker cmdline adjusted for manual invocation."""
if cmdlog:
with open(filename, 'w') as logfile:
logfile.write('#!/bin/bash\n')
logfile.writelines("%s\n" % line for line in cmdlog)
print('Command log written to file %s' % filename)
def bash_cmdline(cmdline):
"""Creates bash -c cmdline from args list."""
  # Wrap the command in a shell invocation:
  # * makes error messages clearer if executables are missing
return ['bash', '-c', ' '.join(cmdline)]
def auth_options(language, test_case):
"""Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
language = str(language)
cmdargs = []
env = {}
# TODO(jtattermusch): this file path only works inside docker
key_filepath = '/root/service_account/stubbyCloudTestingTest-ee3fce360ac5.json'
oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
key_file_arg = '--service_account_key_file=%s' % key_filepath
default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
if language in ['csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python', 'ruby']:
env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
else:
cmdargs += [key_file_arg]
if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
cmdargs += [oauth_scope_arg]
if test_case == 'oauth2_auth_token' and language == 'c++':
# C++ oauth2 test uses GCE creds and thus needs to know the default account
cmdargs += [default_account_arg]
if test_case == 'compute_engine_creds':
cmdargs += [oauth_scope_arg, default_account_arg]
return (cmdargs, env)
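# Illustrative example: auth_options('python', 'oauth2_auth_token') returns
#   (['--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'],
#    {'GOOGLE_APPLICATION_CREDENTIALS':
#     '/root/service_account/stubbyCloudTestingTest-ee3fce360ac5.json'})
# because Python reads the key file from the environment rather than taking
# --service_account_key_file on the command line.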
def _job_kill_handler(job):
if job._spec.container_name:
dockerjob.docker_kill(job._spec.container_name)
# When the job times out and we decide to kill it,
    # we need to wait a bit before restarting the job
    # to prevent "container name already in use" error.
    # TODO(jtattermusch): figure out a cleaner way to do this.
time.sleep(2)
def cloud_to_prod_jobspec(language, test_case, server_host_name,
server_host_detail, docker_image=None, auth=False,
manual_cmd_log=None):
"""Creates jobspec for cloud-to-prod interop test"""
container_name = None
cmdargs = [
'--server_host=%s' % server_host_detail[0],
'--server_host_override=%s' % server_host_detail[1],
'--server_port=443',
'--use_tls=true',
'--test_case=%s' % test_case]
environ = dict(language.cloud_to_prod_env(), **language.global_env())
if auth:
auth_cmdargs, auth_env = auth_options(language, test_case)
cmdargs += auth_cmdargs
environ.update(auth_env)
cmdline = bash_cmdline(language.client_cmd(cmdargs))
cwd = language.client_cwd
if docker_image:
container_name = dockerjob.random_name('interop_client_%s' %
language.safename)
cmdline = docker_run_cmdline(cmdline,
image=docker_image,
cwd=cwd,
environ=environ,
docker_args=['--net=host',
'--name=%s' % container_name])
if manual_cmd_log is not None:
manual_cmd_log.append(manual_cmdline(cmdline))
cwd = None
environ = None
suite_name='cloud_to_prod_auth' if auth else 'cloud_to_prod'
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='%s:%s:%s:%s' % (suite_name, server_host_name, language,
test_case),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=5 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler)
if docker_image:
test_job.container_name = container_name
return test_job
def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
server_port, docker_image=None, insecure=False,
manual_cmd_log=None):
"""Creates jobspec for cloud-to-cloud interop test"""
interop_only_options = [
'--server_host_override=foo.test.google.fr',
'--use_tls=%s' % ('false' if insecure else 'true'),
'--use_test_ca=true',
]
client_test_case = test_case
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
if client_test_case in language.unimplemented_test_cases():
print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
sys.exit(1)
common_options = [
'--test_case=%s' % client_test_case,
'--server_host=%s' % server_host,
'--server_port=%s' % server_port,
]
if test_case in _HTTP2_SERVER_TEST_CASES:
if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
client_options = interop_only_options + common_options
cmdline = bash_cmdline(language.client_cmd(client_options))
cwd = language.client_cwd
else:
cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
cwd = language.http2_cwd
else:
cmdline = bash_cmdline(language.client_cmd(common_options+interop_only_options))
cwd = language.client_cwd
environ = language.global_env()
if docker_image:
container_name = dockerjob.random_name('interop_client_%s' % language.safename)
cmdline = docker_run_cmdline(cmdline,
image=docker_image,
environ=environ,
cwd=cwd,
docker_args=['--net=host',
'--name=%s' % container_name])
if manual_cmd_log is not None:
manual_cmd_log.append(manual_cmdline(cmdline))
cwd = None
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
test_case),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=5 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler)
if docker_image:
test_job.container_name = container_name
return test_job
def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
"""Create jobspec for running a server"""
container_name = dockerjob.random_name('interop_server_%s' % language.safename)
cmdline = bash_cmdline(
language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT,
'--use_tls=%s' % ('false' if insecure else 'true')]))
environ = language.global_env()
docker_args = ['--name=%s' % container_name]
if language.safename == 'http2':
# we are running the http2 interop server. Open next N ports beginning
# with the server port. These ports are used for http2 interop test
# (one test case per port).
docker_args += list(
itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
for i in range(
len(_HTTP2_SERVER_TEST_CASES))))
# Enable docker's healthcheck mechanism.
# This runs a Python script inside the container every second. The script
# pings the http2 server to verify it is ready. The 'health-retries' flag
# specifies the number of consecutive failures before docker will report
    # the container's status as 'unhealthy'. Prior to the first 'health-retries'
# failures or the first success, the status will be 'starting'. 'docker ps'
# or 'docker inspect' can be used to see the health of the container on the
# command line.
docker_args += [
'--health-cmd=python test/http2_test/http2_server_health_check.py '
'--server_host=%s --server_port=%d'
% ('localhost', _DEFAULT_SERVER_PORT),
'--health-interval=1s',
'--health-retries=5',
'--health-timeout=10s',
]
else:
docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
docker_cmdline = docker_run_cmdline(cmdline,
image=docker_image,
cwd=language.server_cwd,
environ=environ,
docker_args=docker_args)
if manual_cmd_log is not None:
manual_cmd_log.append(manual_cmdline(docker_cmdline))
server_job = jobset.JobSpec(
cmdline=docker_cmdline,
environ=environ,
shortname='interop_server_%s' % language,
timeout_seconds=30*60)
server_job.container_name = container_name
return server_job
def build_interop_image_jobspec(language, tag=None):
"""Creates jobspec for building interop docker image for a language"""
if not tag:
tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
env = {'INTEROP_IMAGE': tag,
'BASE_NAME': 'grpc_interop_%s' % language.safename}
if not args.travis:
env['TTY_FLAG'] = '-t'
# This env variable is used to get around the github rate limit
# error when running the PHP `composer install` command
host_file = '%s/.composer/auth.json' % os.environ['HOME']
if language.safename == 'php' and os.path.exists(host_file):
env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
'-v %s:/root/.composer/auth.json:ro' % host_file
build_job = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
environ=env,
shortname='build_docker_%s' % (language),
timeout_seconds=30*60)
build_job.tag = tag
return build_job
def aggregate_http2_results(stdout):
match = re.search(r'\{"cases[^\]]*\]\}', stdout)
if not match:
return None
results = json.loads(match.group(0))
skipped = 0
passed = 0
failed = 0
failed_cases = []
for case in results['cases']:
if case.get('skipped', False):
skipped += 1
else:
if case.get('passed', False):
passed += 1
else:
failed += 1
failed_cases.append(case.get('name', "NONAME"))
return {
'passed': passed,
'failed': failed,
'skipped': skipped,
'failed_cases': ', '.join(failed_cases),
'percent': 1.0 * passed / (passed + failed)
}
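# A sketch of the JSON blob aggregate_http2_results() looks for in the client's
# stdout (the field names come from the parsing above; the case names and
# values are hypothetical):
#
#   {"cases": [{"name": "rst_after_header", "passed": true},
#              {"name": "goaway", "skipped": true}]}
#
# which would aggregate to passed=1, failed=0, skipped=1.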
# A dictionary of prod servers to test.
# Format: server_name: (server_host, server_host_override, errors_allowed)
# TODO(adelez): implement logic for errors_allowed where if the indicated tests
# fail, they don't impact the overall test result.
prod_servers = {
'default': ('216.239.32.254',
'grpc-test.sandbox.googleapis.com', False),
'gateway_v2': ('216.239.32.254',
'grpc-test2.sandbox.googleapis.com', True),
'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com',
False),
'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com',
True),
'gateway_v4': ('216.239.32.254',
'grpc-test4.sandbox.googleapis.com', True),
'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com',
True),
}
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Clients to run.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('--cloud_to_prod',
default=False,
action='store_const',
const=True,
help='Run cloud_to_prod tests.')
argp.add_argument('--cloud_to_prod_auth',
default=False,
action='store_const',
const=True,
help='Run cloud_to_prod_auth tests.')
argp.add_argument('--prod_servers',
choices=prod_servers.keys(),
default=['default'],
nargs='+',
help=('The servers to run cloud_to_prod and '
'cloud_to_prod_auth tests against.'))
argp.add_argument('-s', '--server',
choices=['all'] + sorted(_SERVERS),
nargs='+',
help='Run cloud_to_cloud servers in a separate docker ' +
'image. Servers can only be started automatically if ' +
'--use_docker option is enabled.',
default=[])
argp.add_argument('--override_server',
action='append',
type=lambda kv: kv.split('='),
help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
default=[])
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
                  help='Run all the interop tests under docker. That provides ' +
                  'additional isolation and avoids the need to install ' +
                  'language-specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--manual_run',
default=False,
action='store_const',
const=True,
help='Prepare things for running interop tests manually. ' +
'Preserve docker images after building them and skip '
'actually running the tests. Only print commands to run by ' +
'hand.')
argp.add_argument('--http2_interop',
default=False,
action='store_const',
const=True,
help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
argp.add_argument('--http2_server_interop',
default=False,
action='store_const',
const=True,
                  help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)')
argp.add_argument('--insecure',
default=False,
action='store_const',
const=True,
                  help='Run clients and servers over an insecure channel (no TLS). Ignored for cloud_to_prod tests.')
args = argp.parse_args()
servers = set(s for s in itertools.chain.from_iterable(_SERVERS
if x == 'all' else [x]
for x in args.server))
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run interop tests under docker.')
print('')
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
if args.manual_run and not args.use_docker:
print('--manual_run is only supported with --use_docker option enabled.')
sys.exit(1)
if not args.use_docker and servers:
print('Running interop servers is only supported with --use_docker option enabled.')
sys.exit(1)
languages = set(_LANGUAGES[l]
for l in itertools.chain.from_iterable(
six.iterkeys(_LANGUAGES) if x == 'all' else [x]
for x in args.language))
languages_http2_clients_for_http2_server_interop = set()
if args.http2_server_interop:
languages_http2_clients_for_http2_server_interop = set(
_LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
if 'all' in args.language or l in args.language)
http2Interop = Http2Client() if args.http2_interop else None
http2InteropServer = Http2Server() if args.http2_server_interop else None
docker_images={}
if args.use_docker:
# languages for which to build docker images
languages_to_build = set(
_LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers]))
languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
if args.http2_interop:
languages_to_build.add(http2Interop)
if args.http2_server_interop:
languages_to_build.add(http2InteropServer)
build_jobs = []
for l in languages_to_build:
job = build_interop_image_jobspec(l)
docker_images[str(l)] = job.tag
build_jobs.append(job)
if build_jobs:
jobset.message('START', 'Building interop docker images.', do_newline=True)
num_failures, _ = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs)
if num_failures == 0:
jobset.message('SUCCESS', 'All docker images built successfully.',
do_newline=True)
else:
jobset.message('FAILED', 'Failed to build interop docker images.',
do_newline=True)
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
server_manual_cmd_log = [] if args.manual_run else None
client_manual_cmd_log = [] if args.manual_run else None
# Start interop servers.
server_jobs = {}
server_addresses = {}
try:
for s in servers:
lang = str(s)
spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang),
args.insecure, manual_cmd_log=server_manual_cmd_log)
if not args.manual_run:
job = dockerjob.DockerJob(spec)
server_jobs[lang] = job
server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
else:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ('localhost', '${SERVER_PORT}')
http2_server_job = None
if args.http2_server_interop:
# launch a HTTP2 server emulator that creates edge cases
lang = str(http2InteropServer)
spec = server_jobspec(http2InteropServer, docker_images.get(lang),
manual_cmd_log=server_manual_cmd_log)
if not args.manual_run:
http2_server_job = dockerjob.DockerJob(spec)
server_jobs[lang] = http2_server_job
else:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ('localhost', '${SERVER_PORT}')
jobs = []
if args.cloud_to_prod:
if args.insecure:
print('TLS is always enabled for cloud_to_prod scenarios.')
for server_host_name in args.prod_servers:
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
test_job = cloud_to_prod_jobspec(
language, test_case, server_host_name,
prod_servers[server_host_name],
docker_image=docker_images.get(str(language)),
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
test_job = cloud_to_prod_jobspec(
http2Interop, test_case, server_host_name,
prod_servers[server_host_name],
docker_image=docker_images.get(str(http2Interop)),
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if args.cloud_to_prod_auth:
if args.insecure:
print('TLS is always enabled for cloud_to_prod scenarios.')
for server_host_name in args.prod_servers:
for language in languages:
for test_case in _AUTH_TEST_CASES:
if not test_case in language.unimplemented_test_cases():
test_job = cloud_to_prod_jobspec(
language, test_case, server_host_name,
prod_servers[server_host_name],
docker_image=docker_images.get(str(language)), auth=True,
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
for server in args.override_server:
server_name = server[0]
(server_host, server_port) = server[1].split(':')
server_addresses[server_name] = (server_host, server_port)
for server_name, server_address in server_addresses.items():
(server_host, server_port) = server_address
server_language = _LANGUAGES.get(server_name, None)
skip_server = [] # test cases unimplemented by server
if server_language:
skip_server = server_language.unimplemented_test_cases_server()
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if not test_case in skip_server:
test_job = cloud_to_cloud_jobspec(language,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(language)),
insecure=args.insecure,
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
if server_name == "go":
# TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
continue
test_job = cloud_to_cloud_jobspec(http2Interop,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(http2Interop)),
insecure=args.insecure,
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if args.http2_server_interop:
if not args.manual_run:
http2_server_job.wait_for_healthy(timeout_seconds=600)
for language in languages_http2_clients_for_http2_server_interop:
for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
server_port = _DEFAULT_SERVER_PORT+offset
if not args.manual_run:
server_port = http2_server_job.mapped_port(server_port)
test_job = cloud_to_cloud_jobspec(language,
test_case,
str(http2InteropServer),
'localhost',
server_port,
docker_image=docker_images.get(str(language)),
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
for language in languages:
      # _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is the subset of
      # _HTTP2_SERVER_TEST_CASES in which clients use their gRPC interop clients
      # rather than specialized http2 clients, reusing existing test
      # implementations. For example, in the "data_frame_padding" test, each
      # language's gRPC interop client is used and made to think that it is
      # running the "large_unary" test case. This avoids implementing a new
      # test case in each language.
for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
if test_case not in language.unimplemented_test_cases():
offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
server_port = _DEFAULT_SERVER_PORT+offset
if not args.manual_run:
server_port = http2_server_job.mapped_port(server_port)
if not args.insecure:
            print(('Creating grpc client to http2 server test case with an insecure connection, even though'
                   ' args.insecure is False. The http2 test server only supports insecure connections.'))
test_job = cloud_to_cloud_jobspec(language,
test_case,
str(http2InteropServer),
'localhost',
server_port,
docker_image=docker_images.get(str(language)),
insecure=True,
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
if not jobs:
print('No jobs to run.')
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
if args.manual_run:
    print('All tests will be skipped because the --manual_run option is active.')
num_failures, resultset = jobset.run(jobs, newline_on_success=True,
maxjobs=args.jobs,
skip_jobs=args.manual_run)
if num_failures:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
else:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
report_utils.render_junit_xml_report(resultset, 'report.xml')
for name, job in resultset.items():
if "http2" in name:
job[0].http2results = aggregate_http2_results(job[0].message)
http2_server_test_cases = (
_HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])
report_utils.render_interop_html_report(
set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
_HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
args.http2_interop)
except Exception as e:
print('exception occurred:')
traceback.print_exc(file=sys.stdout)
finally:
# Check if servers are still running.
for server, job in server_jobs.items():
if not job.is_running():
print('Server "%s" has exited prematurely.' % server)
dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
for image in six.itervalues(docker_images):
if not args.manual_run:
print('Removing docker image %s' % image)
dockerjob.remove_image(image)
else:
print('Preserving docker image: %s' % image)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Constellation',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'constellationID')),
('name', models.CharField(max_length=100, db_column=b'constellationName')),
('x', models.FloatField()),
('y', models.FloatField()),
('z', models.FloatField()),
],
options={
'db_table': 'mapConstellations',
'managed': False,
},
),
migrations.CreateModel(
name='Faction',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'factionID')),
('name', models.CharField(max_length=300, db_column=b'factionName', blank=True)),
('description', models.CharField(max_length=3000, blank=True)),
('iconid', models.IntegerField(null=True, db_column=b'iconID', blank=True)),
],
options={
'db_table': 'chrFactions',
'managed': False,
},
),
migrations.CreateModel(
name='Location',
fields=[
('itemid', models.IntegerField(serialize=False, primary_key=True, db_column=b'itemID')),
('name', models.CharField(max_length=100, null=True, db_column=b'itemName', blank=True)),
('x', models.FloatField(null=True, db_column=b'x', blank=True)),
('y', models.FloatField(null=True, db_column=b'y', blank=True)),
('z', models.FloatField(null=True, db_column=b'z', blank=True)),
('security', models.FloatField(null=True, db_column=b'security', blank=True)),
],
options={
'db_table': 'mapDenormalize',
'managed': False,
},
),
migrations.CreateModel(
name='MarketGroup',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'marketGroupID')),
('name', models.CharField(max_length=100, null=True, db_column=b'marketGroupName', blank=True)),
('description', models.CharField(max_length=200, null=True, blank=True)),
('hasTypes', models.IntegerField()),
],
options={
'db_table': 'invMarketGroups',
'managed': False,
},
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'regionID')),
('name', models.CharField(max_length=100, db_column=b'regionName')),
('x', models.FloatField()),
('y', models.FloatField()),
('z', models.FloatField()),
],
options={
'db_table': 'mapRegions',
'managed': False,
},
),
migrations.CreateModel(
name='StarbaseResourcePurpose',
fields=[
('purpose', models.IntegerField(serialize=False, primary_key=True)),
('purposeText', models.CharField(max_length=100, null=True, blank=True)),
],
options={
'db_table': 'invControlTowerResourcePurposes',
'managed': False,
},
),
migrations.CreateModel(
name='SystemData',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'solarSystemID')),
('name', models.CharField(max_length=100, db_column=b'solarSystemName')),
('x', models.FloatField()),
('y', models.FloatField()),
('z', models.FloatField()),
('security', models.FloatField()),
],
options={
'db_table': 'mapSolarSystems',
'managed': False,
},
),
migrations.CreateModel(
name='SystemJump',
fields=[
('fromregion', models.IntegerField(db_column=b'fromRegionID')),
('fromconstellation', models.IntegerField(db_column=b'fromConstellationID')),
('fromsystem', models.IntegerField(serialize=False, primary_key=True, db_column=b'fromSolarSystemID')),
('tosystem', models.IntegerField(primary_key=True, db_column=b'toSolarSystemID')),
('toconstellation', models.IntegerField(db_column=b'toConstellationID')),
('toregion', models.IntegerField(db_column=b'toRegionID')),
],
options={
'db_table': 'mapSolarSystemJumps',
'managed': False,
},
),
migrations.CreateModel(
name='Type',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column=b'typeID')),
('name', models.CharField(max_length=100, db_column=b'typeName')),
('description', models.TextField(null=True, blank=True)),
('volume', models.FloatField(null=True, blank=True)),
('published', models.BooleanField()),
],
options={
'db_table': 'invTypes',
'managed': False,
},
),
migrations.CreateModel(
name='Alliance',
fields=[
('id', models.BigIntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=100)),
('shortname', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='ConfigEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('value', models.CharField(max_length=255, null=True, blank=True)),
('user', models.ForeignKey(related_name='settings', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='Corporation',
fields=[
('id', models.BigIntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=100)),
('ticker', models.CharField(max_length=100)),
('member_count', models.IntegerField()),
('alliance', models.ForeignKey(related_name='member_corps', blank=True, to='core.Alliance', null=True)),
],
),
migrations.CreateModel(
name='NewsFeed',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, null=True, blank=True)),
('description', models.CharField(max_length=255, null=True, blank=True)),
('url', models.CharField(max_length=255)),
('user', models.ForeignKey(related_name='feeds', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='LocationWormholeClass',
fields=[
('location', models.ForeignKey(related_name='whclass', primary_key=True, db_column=b'locationID', serialize=False, to='core.Location')),
('sysclass', models.IntegerField(null=True, db_column=b'wormholeClassID', blank=True)),
],
options={
'db_table': 'mapLocationWormholeClasses',
'managed': False,
},
),
migrations.CreateModel(
name='StarbaseResource',
fields=[
('towerType', models.ForeignKey(related_name='posesfueled', primary_key=True, db_column=b'controlTowerTypeID', serialize=False, to='core.Type')),
('quantity', models.IntegerField(null=True, db_column=b'quantity', blank=True)),
('minSecurityLevel', models.FloatField(null=True, db_column=b'minSecurityLevel', blank=True)),
],
options={
'db_table': 'invControlTowerResources',
'managed': False,
},
),
migrations.AddField(
model_name='alliance',
name='executor',
field=models.ForeignKey(related_name='+', blank=True, to='core.Corporation', null=True),
),
migrations.AlterUniqueTogether(
name='configentry',
unique_together=set([('name', 'user')]),
),
]
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class XentTest(test.TestCase):
def _npXent(self, features, labels, dim=-1):
    if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(features - np.reshape(
np.amax(
features, axis=dim), one_only_on_dim))
probs = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
bp = (probs - labels)
l = -np.sum(labels * np.log(probs + 1.0e-20), axis=dim)
return l, bp
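  # For reference, with p = softmax(features) along `dim`, _npXent above
  # computes, per example:
  #   loss     = -sum(labels * log(p))
  #   backprop = p - labels
  # The 1e-20 added inside the log guards against log(0).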
def _testXent(self, np_features, np_labels, use_gpu=False):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.test_session(use_gpu=use_gpu) as sess:
loss, backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def _testXentWrapper(self, np_features, np_labels, dim=-1, use_gpu=False):
np_loss, _ = self._npXent(np_features, np_labels, dim=dim)
with self.test_session(use_gpu=use_gpu) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=np_labels, logits=np_features, dim=dim)
tf_loss = sess.run(loss)
print("np_loss:", np_loss)
print("tf_loss:", tf_loss)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
def _testAll(self, features, labels):
self._testXent(features, labels, use_gpu=False)
self._testXent(features, labels, use_gpu=True)
def _testSingleClass(self, use_gpu=False):
for dtype in np.float16, np.float32:
with self.test_session(use_gpu=use_gpu) as sess:
loss, backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(dtype),
np.array([[-1.], [0.], [1.]]).astype(dtype))
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[2.0], [1.0], [0.0]], tf_backprop)
def testSingleClass(self):
self._testSingleClass(True)
self._testSingleClass(False)
def testRankTooLarge(self):
for dtype in np.float16, np.float32:
np_features = np.array(
[[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]).astype(dtype)
np_labels = np.array(
[[[0., 0., 0., 1.]], [[0., .5, .5, 0.]]]).astype(dtype)
self.assertRaisesRegexp(ValueError, "must be rank 2",
gen_nn_ops._softmax_cross_entropy_with_logits,
np_features, np_labels)
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with soft targets (1, 2).
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [[0., 0., 0., 1.], [0., .5, .5, 0.]]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
    # With a soft target (1, 2), the backprop is
    # [0.032, 0.087 - 0.5 = -0.413, 0.237 - 0.5 = -0.263, 0.644]
    # The loss for this batch is 0.5 * -log(0.087) + 0.5 * -log(0.237)
    # = 1.9401, so the per-batch losses are [1.3862, 1.9401].
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75],
[0.0321, -0.4129, -0.2632, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 1.9401]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.test_session():
with self.assertRaises(ValueError):
gen_nn_ops._softmax_cross_entropy_with_logits(
[[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
def testNotMatrix(self):
with self.test_session():
with self.assertRaises(ValueError):
gen_nn_ops._softmax_cross_entropy_with_logits([0., 1., 2., 3.],
[0., 1., 0., 1.])
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float16))
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
def testDouble(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float64))
def testGradient(self):
with self.test_session() as sess:
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=f,
name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
      # Check that no extra computation is performed. When only the first
      # derivative is requested, the second derivative must not be computed,
      # so the graph should contain no `BatchMatMul` op.
op_names = [op.op_def.name for op in sess.graph.get_operations() if op.op_def]
self.assertNotIn('BatchMatMul', op_names)
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
def testGradientLabelWithV2(self):
with self.test_session():
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits_v2(labels=l, logits=f,
name="xent")
err = gradient_checker.compute_gradient_error(l, [3, 4], x, [3])
self.assertLess(err, 5e-8)
def testSecondGradient(self):
with self.test_session() as sess:
l = constant_op.constant([0.0, 0.0, 1.0/3, 0.0,
1.0/3, 0.0, 0.0, 0.0,
0.0, 0.5/3, 0.0, 0.5/3], shape=[12],
dtype=dtypes.float64, name="l")
f = constant_op.constant([0.1, 0.2, 0.3, 0.4,
0.1, 0.4, 0.9, 1.6,
0.1, 0.8, 2.7, 6.4], shape=[12],
dtype=dtypes.float64, name="f")
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=f,
name="xent")
loss = math_ops.reduce_sum(x)
gradients = gradients_impl.gradients(loss, [f])[0]
err = gradient_checker.compute_gradient_error(f, [12], gradients, [12])
      # Check that the second derivative is calculated. Because of how the
      # xentropy gradient is implemented, its presence shows up as a
      # `BatchMatMul` op in the graph.
op_names = [op.op_def.name for op in sess.graph.get_operations() if op.op_def]
self.assertIn('BatchMatMul', op_names)
print("cross entropy hessian err = ", err)
self.assertLess(err, 5e-8)
def testWrapper(self):
features = np.array(
[[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32)
labels = np.array([[[0., 0., 0., 1.], [0., 1., 0., 0.]],
[[0., 0.5, 0.5, 0.], [0.5, 0.5, 0., 0.]],
[[0., 1., 0., 0.], [0., 0., 1., 0.]]]).astype(np.float32)
self._testXentWrapper(features, labels, dim=0, use_gpu=False)
self._testXentWrapper(features, labels, dim=0, use_gpu=True)
self._testXentWrapper(features, labels, dim=1, use_gpu=False)
self._testXentWrapper(features, labels, dim=1, use_gpu=True)
self._testXentWrapper(features, labels, dim=-1, use_gpu=False)
self._testXentWrapper(features, labels, dim=-1, use_gpu=True)
if __name__ == "__main__":
test.main()
|
|
"""
.. _optics-app:
Optics App
==============
App used to help design the lens system used in the microscope. See
:ref:`microscope-optics` for the full details of how the app is used to design
and select all the lenses in the system.
There are three model sections in the app:
Focal length / mag from position
--------------------------------
This section models how the focal length and mag change, given the positions
of the lens, the object seen by the lens, and the image of the object
on the other side of the lens.
You can also add graphs to this section to visualize in 2D and 3D the effect of
changing these parameters.
Image/lens pos and mag from lens/object pos and focal length
-------------------------------------------------------------
This section models where the image and lens positions need to be, given a
desired lens and object position and focal length.
You can also add graphs to this section to visualize in 2D and 3D the effect of
changing these parameters.
4-lens system
-------------
This section lets you chain up to 4 lenses in a system and for each lens it lets
you change its focal length, its position, and the object position and it
estimates the image position and the mag for that and all subsequent lenses
based on the previous lenses in the chain.
The object position can only be given for the first lens, the others are
computed from the previous lenses.
For each lens in the chain you can also add 2D and 3D graphs that lets you
explore how the output parameters may change, given input parameters.
"""
from kivy.event import EventDispatcher
from kivy_garden.graph import MeshLinePlot, Graph, LinePlot, ContourPlot, \
PointPlot
from kivy.properties import NumericProperty, ObjectProperty, DictProperty, \
ReferenceListProperty, StringProperty, ListProperty, BooleanProperty
from kivy.utils import get_color_from_hex as rgb
from kivy.resources import resource_add_path
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.base import ExceptionManager, ExceptionHandler
from kivy.factory import Factory
from kivy.logger import Logger
from kivy.modules import inspector
import math
from base_kivy_app.utils import ColorTheme
import base_kivy_app.app
import itertools
import sys
from typing import List, Dict, Tuple, Optional
import numpy as np
from os.path import dirname, join, isdir
from collections import deque
from skimage import measure
__all__ = (
'OpticsApp', 'PlotWidget', 'FormulaVariableBehavior',
'PropertyDisplayBinding', 'FormulaWidget', 'LensFocalLengthFormula',
'LensFixedObjectFormula', 'CeedFormula', 'FormulaPlot', 'FormulaGraph')
resource_add_path(join(dirname(base_kivy_app.app.__file__), 'media'))
resource_add_path(
join(dirname(base_kivy_app.app.__file__), 'media', 'flat_icons'))
class FormulaGraph(Graph):
"""The graph on which the 2D/3D plots are drawn.
"""
plot_widget: 'PlotWidget' = ObjectProperty(None)
"""The :class:`PlotWidget` that contains this graph.
"""
def __init__(self, **kwargs):
self._with_stencilbuffer = False
super(FormulaGraph, self).__init__(**kwargs)
from kivy.core.window import Window
Window.fbind('mouse_pos', self._update_mouse_pos)
def _update_mouse_pos(self, instance, pos):
plot_widget = self.plot_widget
plot = plot_widget.plot
if plot is None:
return
pos = self.to_widget(*pos, relative=True)
if not self.collide_plot(*pos):
return
x, y = self.to_data(*pos)
x2var = plot.x2_variable
if not x2var:
plot_widget.mouse_x_val = x
plot_widget.mouse_y_val = y
plot_widget.mouse_x2_val = None
else:
plot_widget.mouse_x_val = x
plot_widget.mouse_x2_val = y
if plot._yvals is None:
plot_widget.mouse_y_val = 0
else:
n = plot.num_points
xi = max(min(
int(n * (x - plot.start) / (plot.end - plot.start)),
n - 1), 0)
yi = max(min(
int(n * (y - plot.x2_start) /
(plot.x2_end - plot.x2_start)),
n - 1), 0)
plot_widget.mouse_y_val = float(plot._yvals[xi, yi])
def on_touch_down(self, touch):
pos = self.to_local(*touch.pos, relative=True)
if not self.collide_plot(*pos):
return super(FormulaGraph, self).on_touch_down(touch)
if super(FormulaGraph, self).on_touch_down(touch):
return True
if not touch.is_double_tap:
return False
x, y = self.to_data(*pos)
plot = self.plot_widget.plot
if plot is None:
return False
formula = plot.formula
xvar = plot.x_variable
x2var = plot.x2_variable
if not x2var:
if getattr(formula, '{}_src'.format(xvar), False):
return False
setattr(formula, xvar, x)
else:
if getattr(formula, '{}_src'.format(xvar), False) and \
getattr(formula, '{}_src'.format(x2var), False):
return False
if not getattr(formula, '{}_src'.format(xvar), False):
setattr(formula, xvar, x)
if not getattr(formula, '{}_src'.format(x2var), False):
setattr(formula, x2var, y)
return True
class FormulaPlot(EventDispatcher):
"""Given a :class:`CeedFormula`, it uses that to compute the the 2D/3D
formula output, given the range of input values.
It supports updating the plot from a formula whose input is the output of
another formula in a chain. In which case it starts with the input to root
formula and evaluates all the formula in the chain until the last one. Then
uses that to evaluate the final formula on a range of values that is then
displayed in the graph.
"""
graph: 'FormulaGraph' = ObjectProperty(None, rebind=True)
"""The graph in which to display the formula outputs.
"""
plot = ObjectProperty()
"""The specific 2D/3D plot in the graph.
"""
start = NumericProperty(0)
"""The horizontal axis start value from which to evaluate.
"""
end = NumericProperty(100)
"""The horizontal axis end value until which to evaluate.
"""
x2_start = NumericProperty(0)
"""The vertical axis start value from which to evaluate/display.
"""
x2_end = NumericProperty(100)
"""The vertical axis end value until which to evaluate/display.
"""
y_start = NumericProperty(0)
"""The depth (z) axis start value from which to display, if 3D.
"""
y_end = NumericProperty(100)
"""The depth (z) axis end value until which to display, if 3D.
"""
num_points = NumericProperty(100)
"""The number of horizontal/vertical points at which to evaluate the
formula.
"""
formula: 'CeedFormula' = ObjectProperty(None, rebind=True)
"""The :class:`CeedFormula` that takes the x (and x2 if 3D) input values and
computes the output values.
"""
x_variable = StringProperty('')
"""Name of the horizontal input variable.
"""
x2_variable = StringProperty('')
"""Name of the vertical input variable, if any (3D).
"""
x_variable_formula: 'CeedFormula' = ObjectProperty(None)
"""The formula used to generate the horizontal input variable, if it is
part of a lens chain and the variable is the result of a previous
formula in the chain.
"""
x2_variable_formula: 'CeedFormula' = ObjectProperty(None)
"""The formula used to generate the vertical input variable, if it is
part of a lens chain and the variable is the result of a previous
formula in the chain.
"""
y_variable = StringProperty('')
"""The name of the output (y) variable computed.
"""
num_contours = NumericProperty(5)
"""The number of equal value contours to display for 3D plots.
"""
track_ylim = BooleanProperty(False)
"""For 3D plots, whether to update the plot outout range as the output (y)
range changes or if the leave it unchanged.
"""
_last_values = {}
_yvals = None
_y_range = None
_contour_plots = []
_value_plot = None
colors = itertools.cycle([
rgb('7dac9f'), rgb('dc7062'), rgb('66a8d4'), rgb('e5b060')])
"""Plot colors to use.
"""
def __init__(self, **kwargs):
super(FormulaPlot, self).__init__(**kwargs)
self.fbind('start', self._update_from_params)
self.fbind('end', self._update_from_params)
self.fbind('x2_start', self._update_from_params)
self.fbind('x2_end', self._update_from_params)
self.fbind('num_points', self._update_from_params)
self.fbind('x_variable', self._update_from_params)
self.fbind('x2_variable', self._update_from_params)
self.fbind('y_variable', self._update_from_params)
self.fbind('formula', self._update_from_params)
self.fbind('x_variable_formula', self._update_from_params)
self.fbind('x2_variable_formula', self._update_from_params)
self.fbind('y_start', self._update_y_vals)
self.fbind('y_end', self._update_y_vals)
self.fbind('num_contours', self._update_y_vals)
def _update_from_params(self, *largs):
self.refresh_plot(from_variables=False)
def _update_y_vals(self, *largs):
graph = self.graph
if graph is None:
return
if self.x2_variable:
if not self.plot or self._yvals is None:
return
self.plot.data = np.clip(self._yvals.T, self.y_start, self.y_end)
self.compute_contours()
else:
graph.ymin = self.y_start
graph.ymax = self.y_end
graph.y_ticks_major = abs(graph.ymax - graph.ymin) / 4
def compute_contours(self):
"""Computes the equal value contours for 3D plots.
"""
graph = self.graph
if graph is None or not self.x2_variable:
return
for plot in self._contour_plots:
self.graph.remove_plot(plot)
plots = self._contour_plots = []
data = np.clip(self._yvals, self.y_start, self.y_end)
xscale = (self.end - self.start) / self.num_points
x2scale = (self.x2_end - self.x2_start) / self.num_points
color = next(self.colors)
for val in np.linspace(self.y_start, self.y_end, self.num_contours):
contours = measure.find_contours(data, val)
for contour in contours:
contour[:, 0] *= xscale
contour[:, 0] += self.start
contour[:, 1] *= x2scale
contour[:, 1] += self.x2_start
plot = MeshLinePlot(color=color)
plots.append(plot)
graph.add_plot(plot)
plot.points = contour
def create_plot(self):
"""Creates and displays the plot for this formula/graph.
"""
x2 = self.x2_variable
plot = self.plot
if plot is not None:
if (x2 and isinstance(plot, ContourPlot)
or not x2 and isinstance(plot, LinePlot)):
return
self.graph.remove_plot(plot)
for plot in self._contour_plots:
self.graph.remove_plot(plot)
self._contour_plots = []
self._yvals = None
yvar = x2 or self.y_variable
graph_theme = {
'label_options': {
'color': rgb('444444'),
'bold': True},
# 'background_color': rgb('f8f8f2'),
'tick_color': rgb('808080'),
'border_color': rgb('808080'),
'xlabel': '{} -- {}'.format(
self.x_variable_formula.widget.name,
self.formula.variable_descriptions.get(
self.x_variable, self.x_variable)),
'ylabel': self.formula.variable_descriptions.get(
yvar, yvar),
'x_ticks_minor': 5,
# 'y_ticks_minor': 5,
'y_grid_label': True,
'x_grid_label': True,
'padding': 5,
'x_grid': True,
'y_grid': True,
}
graph = self.graph
for k, v in graph_theme.items():
setattr(graph, k, v)
if x2:
self.plot = plot = ContourPlot(color=next(self.colors))
else:
self.plot = plot = LinePlot(color=next(self.colors), line_width=2)
graph.add_plot(plot)
self._value_plot = PointPlot(color=next(self.colors), point_size=3)
graph.add_plot(self._value_plot)
def refresh_plot(self, from_variables=True):
"""Updates plot when any of the variables or parameters change.
"""
if self.graph is None or not self.x_variable or not self.y_variable:
return
self.create_plot()
formula = self.formula
xvar = self.x_variable
x2var = self.x2_variable
# new_vals = {
# var: getattr(formula, var) for var in formula.x_variables
# if var != xvar and var != x2var}
# if from_variables and new_vals == self._last_values:
# return
# self._last_values = new_vals
start = self.start
end = self.end
n = self.num_points
plot = self.plot
graph = self.graph
self._yvals = None
if x2var:
x2_start = self.x2_start
x2_end = self.x2_end
xvals = np.linspace(start, end, n)
xvals = np.repeat(np.expand_dims(xvals, 1), n, 1)
x2vals = np.linspace(x2_start, x2_end, n)
x2vals = np.repeat(np.expand_dims(x2vals, 1), n, 1).T
input_variables = [
(self.x_variable_formula, xvar),
(self.x2_variable_formula, x2var)]
variables = {input_variables[0]: xvals, input_variables[1]: x2vals}
yvals = formula.infer_variable_value(
self.y_variable, variables, in_subtree={},
input_variables=input_variables)
if not isinstance(
yvals, np.ndarray) or n > 1 and yvals.shape != (n, n):
yvals = np.zeros((n, n)) + float(yvals)
else:
yvals[np.logical_or(np.logical_or(
np.isnan(yvals), np.isinf(yvals)),
np.isneginf(yvals))] = -1000
graph.xmin = start
graph.xmax = end
graph.ymin = x2_start
graph.ymax = x2_end
if self._y_range is None or self.track_ylim:
self.y_start, self.y_end = self._y_range = \
float(np.min(yvals)), float(np.max(yvals))
else:
self._y_range = float(np.min(yvals)), float(np.max(yvals))
graph.x_ticks_major = abs(end - start) / 10
graph.y_ticks_major = abs(x2_end - x2_start) / 10
graph.xlabel = '{} -- {}'.format(
self.x_variable_formula.widget.name,
self.formula.variable_descriptions.get(xvar, xvar))
graph.ylabel = '{} -- {}'.format(
self.x2_variable_formula.widget.name,
self.formula.variable_descriptions.get(x2var, x2var))
plot.xrange = (start, end)
plot.yrange = (x2_start, x2_end)
plot.data = np.clip(yvals.T, self.y_start, self.y_end)
self._yvals = yvals
self.compute_contours()
self._value_plot.points = [(
getattr(self.x_variable_formula, xvar),
getattr(self.x2_variable_formula, x2var))]
else:
xvals = np.linspace(start, end, n)
input_variables = [(self.x_variable_formula, xvar)]
variables = {input_variables[0]: xvals}
yvals = formula.infer_variable_value(
self.y_variable, variables, in_subtree={},
input_variables=input_variables)
if not isinstance(
yvals,
(np.ndarray, np.generic)) or n > 1 and len(yvals) == 1:
yvals = np.zeros((n, )) + float(yvals)
else:
yvals[np.logical_or(np.logical_or(
np.isnan(yvals), np.isinf(yvals)),
np.isneginf(yvals))] = -1000
ymin, ymax = np.min(yvals), np.max(yvals)
if math.isclose(ymin, ymax):
ydiff = abs(ymin) * 0.2
else:
ydiff = (ymax - ymin) * .02
graph.xmin = start
graph.xmax = end
if self._y_range is None or self.track_ylim:
self.y_start, self.y_end = self._y_range = \
float(ymin - ydiff), float(ymax + ydiff)
else:
self._y_range = float(ymin - ydiff), float(ymax + ydiff)
graph.ymin = self.y_start
graph.ymax = self.y_end
graph.x_ticks_major = abs(end - start) / 10
graph.y_ticks_major = abs(graph.ymax - graph.ymin) / 4
graph.xlabel = '{} -- {}'.format(
self.x_variable_formula.widget.name,
self.formula.variable_descriptions.get(xvar, xvar))
graph.ylabel = self.formula.variable_descriptions.get(
self.y_variable, self.y_variable)
plot.points = list(zip(xvals, yvals))
self._value_plot.points = [(
getattr(self.x_variable_formula, xvar),
getattr(formula, self.y_variable))]
def reset_y_axis(self):
"""Resets the y (output)-axis to the previous value.
"""
if not self.graph or not self.plot or self._y_range is None:
return
self.y_start, self.y_end = self._y_range
class CeedFormula(EventDispatcher):
"""A formula that computes a output value(s), given some input value
or input value range.
The input value(s) can be formula themselves so that we can have a formula
DAG, with some formula leaves. This allows the output of a leaf to be
re-computed when any of the inputs or its parents change in the graph.
The formula is computed from properties that are input parameters and it
generates one or more output values that are also stored as properties.
    An input property (e.g. ``lens_pos``) can have a second, corresponding
    property with a ``_src`` suffix (e.g. ``lens_pos_src``). If that is set to
    a (:class:`CeedFormula`, property_name) tuple, the named property of that
    formula is used for the input value instead of the value stored on this
    instance (``lens_pos``). This is what allows chaining.
    Additionally, each output property must have a corresponding method with
    a ``compute_`` prefix that, when called, returns the output given the
    inputs stored in the class properties. It can be given a dict of
    already-computed values, used to look up values for any of the input
    properties instead of the value currently stored in the instance property
    for that input.
"""
x_variables: List[str] = ListProperty([])
"""List of properties of this class (by name) whose values are used as
inputs to the formula's function.
    There may also be a corresponding property with a ``_src`` suffix that
    contains a (formula, property_name) tuple used to look up the property
    value as the output of that formula, instead of using this property value
    directly.
"""
y_variables: List[str] = ListProperty([])
"""List of properties of this class (by name) whose values are outputs of
the formula's function.
Items in :attr:`y_variables` may depend on other items in
:attr:`y_variables` as input to their calculation. See
:attr:`y_dependency_ordered`.
"""
plots = ListProperty([])
"""All the plots that use this formula and need to be updated when the
inputs change.
"""
widget = ObjectProperty(None)
variable_descriptions: Dict[str, str] = DictProperty({})
"""A mapping that gets a nicer description for each variable.
"""
dependency_graph: Dict[str, List[str]] = {}
'''Mapping that maps each y-variable to a list of all the y and x variables
it depends on.
'''
y_dependency_ordered: List[str] = []
    '''As mentioned, y (output) variables may depend on other output variables.
    This orders the y variables from leaves to dependent variables, such that
    any variable only depends on y variables that are listed before it
    in :attr:`y_dependency_ordered`.
    It is computed automatically.
'''
def __init__(self, **kwargs):
super(CeedFormula, self).__init__(**kwargs)
for var in self.x_variables:
self.fbind(var, self.update_result, var)
var_src = '{}_src'.format(var)
if not hasattr(self, var_src):
continue
self.bind_variable_to_src(var, var_src)
yvars = self.y_variables
deps = self.dependency_graph
deps = {
var: [v for v in dep_vars if v in yvars and v in deps]
for var, dep_vars in deps.items() if var in yvars}
y_ordered = self.y_dependency_ordered = [
v for v in yvars if v not in deps]
while deps:
found = ''
for var, dep_vars in deps.items():
if not dep_vars:
y_ordered.append(var)
found = var
break
if not found:
raise Exception(
'Found y variables that depend on each other, so we cannot'
' compute their dependency structure')
deps = {
var: [v for v in dep_vars if v != found]
for var, dep_vars in deps.items() if var != found}
def bind_variable_to_src(self, variable, variable_src):
"""For each x-variable, if there's a corresponding ``_src`` suffixed
variable that is set, this method will track that formula and update the
our property when the source changes.
"""
uid = [None, None, None]
def watch_variable_src(*largs):
if uid[0]:
uid[1].unbind_uid(uid[2], uid[0])
uid[0] = None
src = getattr(self, variable_src)
if not src:
return
obj, prop = src
uid[1] = obj
uid[2] = prop
uid[0] = obj.fbind(prop, set_variable)
setattr(self, variable, getattr(obj, prop))
def set_variable(instance, value):
setattr(self, variable, value)
self.fbind(variable_src, watch_variable_src)
watch_variable_src()
def update_result(self, variable, *largs):
"""Called automatically when a x-variable changes and it recomputes
all the y-variables.
"""
for var in self.y_dependency_ordered:
func = 'compute_{}'.format(var)
setattr(self, var, getattr(self, func)())
for plot in self.plots:
plot.refresh_plot()
def _get_src(self, variable):
src = '{}_src'.format(variable)
return getattr(self, src, None)
def variables_in_subtree(self, variable, in_subtree, input_variables):
        '''Checks whether the variable itself, or any of its dependencies, is
        an input variable.
        If so, it will need to be computed over the range on which the input is
        sampled. If not, it's a constant value that we can just read from
        the formula properties.
'''
key = (self, variable)
if key in in_subtree:
return in_subtree[key]
if key in input_variables:
in_subtree[key] = True
return True
if variable in self.x_variables:
var_src = self._get_src(variable)
if not var_src:
in_subtree[key] = False
return False
obj, prop = var_src
in_subtree[key] = obj.variables_in_subtree(
prop, in_subtree, input_variables)
return in_subtree[key]
assert variable in self.y_variables
in_subtree[key] = any(
self.variables_in_subtree(var, in_subtree, input_variables)
for var in self.dependency_graph.get(variable, []))
return in_subtree[key]
def infer_variable_value(
self, variable, variables, in_subtree, input_variables):
        '''Computes the value of the variable for the range of values of the
        input variables.
        ``variables`` accumulates the values of variables as they are computed
        in the graph, starting from the root.
        ``in_subtree`` stores whether a variable has any of the
        ``input_variables`` in its dependency tree (i.e. depends on them).
'''
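        # Note: ``variables`` is keyed by (formula_instance, variable_name)
        # tuples, so the same variable name on different formulas in the chain
        # stays distinct.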
key = (self, variable)
if key in variables:
return variables[key]
if not self.variables_in_subtree(variable, in_subtree, input_variables):
variables[key] = getattr(self, variable)
return variables[key]
if variable in self.x_variables:
assert key not in input_variables
formula, prop = self._get_src(variable)
variables[key] = formula.infer_variable_value(
prop, variables, in_subtree, input_variables)
return variables[key]
assert variable in self.y_variables
for var in self.dependency_graph.get(variable, []):
self.infer_variable_value(
var, variables, in_subtree, input_variables)
yfunc = getattr(self, 'compute_{}'.format(variable))
variables[key] = yfunc(variables)
return variables[key]
def get_variable_dep_leaves(self, variable):
"""Gets set of all the (formula, variable) tuples that this variable
depends on upto the root, but only those that are roots in the sense
that the variables don't depend on other variable themselves.
This will let us start from them and compute the full graph until
reaching the the variable.
"""
deps_graph = self.dependency_graph
yvars = self.y_variables
xvars = self.x_variables
leaves = set()
dep_x_vars = set()
if variable in yvars:
deps_vars = deque(deps_graph.get(variable, []))
            # walk the dependency graph of the variable, collecting the
            # x-variables it (transitively) depends on
while deps_vars:
dep_var = deps_vars.popleft()
if dep_var in xvars:
dep_x_vars.add(dep_var)
else:
deps_vars.extend(deps_graph.get(dep_var, []))
else:
dep_x_vars.add(variable)
for var in dep_x_vars:
src = self._get_src(var)
if not src:
leaves.add((self, var))
else:
formula, prop = src
leaves.update(formula.get_variable_dep_leaves(prop))
return leaves
class LensFixedObjectFormula(CeedFormula):
"""Computes the properties of a lens, where the object (input distance)
is fixed.
"""
lens_pos_src: Optional[CeedFormula] = ObjectProperty(None)
"""The previous formula in the chain, if any, that computes this input
variable as its output.
"""
focal_length_src: Optional[CeedFormula] = ObjectProperty(None)
"""The previous formula in the chain, if any, that computes this input
variable as its output.
"""
object_pos_src: Optional[CeedFormula] = ObjectProperty(None)
"""The previous formula in the chain, if any, that computes this input
variable as its output.
"""
base_magnification_src: Optional[CeedFormula] = ObjectProperty(None)
"""The previous formula in the chain, if any, that computes this input
variable as its output.
"""
lens_pos = NumericProperty(0)
"""The value of the input variable (automatically set from ``_src`` if set).
"""
focal_length = NumericProperty(0)
"""The value of the input variable (automatically set from ``_src`` if set).
"""
object_pos = NumericProperty(0)
"""The value of the input variable (automatically set from ``_src`` if set).
"""
base_magnification = NumericProperty(1)
"""The value of the input variable (automatically set from ``_src`` if set).
"""
image_pos = NumericProperty(0)
"""The computed output variable value.
"""
img_lens_pos = NumericProperty(0)
"""The computed output variable value.
"""
magnification = NumericProperty(0)
"""The computed output variable value.
"""
def __init__(self, **kwargs):
self.x_variables.extend(
['focal_length', 'object_pos', 'lens_pos', 'base_magnification'])
self.y_variables.extend(['image_pos', 'magnification', 'img_lens_pos'])
self.dependency_graph = {
'image_pos': ['lens_pos', 'object_pos', 'focal_length'],
'magnification': ['lens_pos', 'object_pos', 'image_pos',
'base_magnification'],
'img_lens_pos': ['image_pos', 'lens_pos']
}
super(LensFixedObjectFormula, self).__init__(**kwargs)
def compute_image_pos(self, variables={}):
"""Computes and returns this output variable.
        If the ``variables`` dict contains a value for an input variable, that
        value is used; otherwise the value is read from the corresponding
        property. This lets us compute whole ranges for the input variables,
        since the values in ``variables`` can be arrays.
"""
lens_pos = variables.get((self, 'lens_pos'), self.lens_pos)
object_pos = variables.get((self, 'object_pos'), self.object_pos)
focal_length = variables.get((self, 'focal_length'), self.focal_length)
object_dist = lens_pos - object_pos
try:
res = object_dist * focal_length / (
object_dist - focal_length) + lens_pos
except ZeroDivisionError:
res = -1000
return res
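    # Worked example (hypothetical numbers) for compute_image_pos above: with
    # object_pos=0, lens_pos=100 and focal_length=50, object_dist is 100, so
    # image_pos = 100 * 50 / (100 - 50) + 100 = 200.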
def compute_magnification(self, variables={}):
"""Similar to :meth:`compute_image_pos`.
"""
lens_pos = variables.get((self, 'lens_pos'), self.lens_pos)
object_pos = variables.get((self, 'object_pos'), self.object_pos)
image_pos = variables.get((self, 'image_pos'), self.image_pos)
base_mag = variables.get(
(self, 'base_magnification'), self.base_magnification)
object_dist = lens_pos - object_pos
image_dist = image_pos - lens_pos
try:
res = -image_dist / object_dist * base_mag
except ZeroDivisionError:
res = -1000
return res
def compute_img_lens_pos(self, variables={}):
"""Similar to :meth:`compute_image_pos`.
"""
image_pos = variables.get((self, 'image_pos'), self.image_pos)
lens_pos = variables.get((self, 'lens_pos'), self.lens_pos)
return image_pos - lens_pos
class LensFocalLengthFormula(CeedFormula):
    '''Computes the focal length (and magnification) of a lens from the lens,
    object, and image positions. Only valid when the image is real (not
    virtual).
    '''
lens_pos_src: Optional[CeedFormula] = ObjectProperty(None)
"""The previous formula in the chain, if any, that computes this input
variable as its output.
"""
image_pos_src: Optional[CeedFormula] = ObjectProperty(None)
"""The previous formula in the chain, if any, that computes this input
variable as its output.
"""
object_pos_src: Optional[CeedFormula] = ObjectProperty(None)
"""The previous formula in the chain, if any, that computes this input
variable as its output.
"""
lens_pos = NumericProperty(0)
"""The value of the input variable (automatically set from ``_src`` if set).
"""
image_pos = NumericProperty(0)
"""The value of the input variable (automatically set from ``_src`` if set).
"""
object_pos = NumericProperty(0)
"""The value of the input variable (automatically set from ``_src`` if set).
"""
focal_length = NumericProperty(0)
"""The computed output variable value.
"""
magnification = NumericProperty(0)
"""The computed output variable value.
"""
def __init__(self, **kwargs):
self.x_variables.extend(['image_pos', 'object_pos', 'lens_pos'])
self.y_variables.extend(['focal_length', 'magnification'])
self.dependency_graph = {
'focal_length': ['lens_pos', 'object_pos', 'image_pos'],
'magnification': ['lens_pos', 'object_pos', 'image_pos']
}
super(LensFocalLengthFormula, self).__init__(**kwargs)
def compute_focal_length(self, variables={}):
"""Similar to :meth:`LensFixedObjectFormula.compute_image_pos`.
"""
lens_pos = variables.get((self, 'lens_pos'), self.lens_pos)
object_pos = variables.get((self, 'object_pos'), self.object_pos)
image_pos = variables.get((self, 'image_pos'), self.image_pos)
object_dist = lens_pos - object_pos
image_dist = image_pos - lens_pos
try:
res = 1 / (1 / image_dist + 1 / object_dist)
except ZeroDivisionError:
res = -1000
return res
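    # Worked example (hypothetical numbers) for compute_focal_length above:
    # object_pos=0, lens_pos=100 and image_pos=200 give object_dist=100 and
    # image_dist=100, so focal_length = 1 / (1/100 + 1/100) = 50.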
def compute_magnification(self, variables={}):
"""Similar to :meth:`LensFixedObjectFormula.compute_image_pos`.
"""
lens_pos = variables.get((self, 'lens_pos'), self.lens_pos)
object_pos = variables.get((self, 'object_pos'), self.object_pos)
image_pos = variables.get((self, 'image_pos'), self.image_pos)
object_dist = lens_pos - object_pos
image_dist = image_pos - lens_pos
try:
res = -image_dist / object_dist
except ZeroDivisionError:
res = -1000
return res
class FormulaWidget(BoxLayout):
"""Widget that displays a formula, its inputs, outputs and graphs.
"""
formula: CeedFormula = ObjectProperty(None)
"""The formula visulized by the widget.
"""
props_container_x = ObjectProperty(None)
"""Widget container for all the input values.
"""
props_container_y = ObjectProperty(None)
"""Widget container for all the output values.
"""
plots_container = ObjectProperty(None)
"""Widget container for all the graphs displayed for the formula.
"""
description = StringProperty('')
"""Description shown for the formula.
"""
name = StringProperty('')
"""Name shown for the formula.
"""
hidden_variables = ObjectProperty({'base_magnification'})
"""List of all the input/output variables that are not shown in the GUI.
"""
def populate_widget(self):
"""Adds widgets for all the variables.
"""
props_container_x = self.props_container_x
props_container_y = self.props_container_y
formula = self.formula
hidden_variables = self.hidden_variables
def to_float(x):
return float(x) if x else 0.
to_str = lambda x: '{:0.4f}'.format(x) if x else '0'
descriptions = self.formula.variable_descriptions
def update(widget, src_name, *largs):
widget.read_only = bool(getattr(formula, src_name))
for var in sorted(formula.x_variables):
if var in hidden_variables:
continue
display = Factory.VariableDisplay(
name=descriptions.get(var, var),
prop_from_display_setter=to_float,
display_from_prop_setter=to_str,
obj=formula,
obj_prop=var)
props_container_x.add_widget(display)
src = '{}_src'.format(var)
if hasattr(formula, src):
formula.fbind(src, update, display, src)
update(display, src)
for var in sorted(formula.y_variables):
if var in hidden_variables:
continue
display = Factory.VariableDisplay(
name=descriptions.get(var, var),
prop_from_display_setter=to_float,
display_from_prop_setter=to_str,
obj=formula,
obj_prop=var,
read_only=True)
props_container_y.add_widget(display)
def add_plot(self):
"""Adds a new plot for the formula.
"""
plot = FormulaPlot(graph=None, formula=self.formula)
self.formula.plots.append(plot)
widget = PlotWidget(plot=plot, formula_widget=self)
self.plots_container.add_widget(widget)
plot.refresh_plot(from_variables=False)
widget.populate_x_variables()
def remove_plot(self, plot_widget):
"""Removes an existing plot from the formula.
"""
self.formula.plots.remove(plot_widget.plot)
self.plots_container.remove_widget(plot_widget.__self__)
class PropertyDisplayBinding(EventDispatcher):
"""Tracks a property (input/output variable) and updates the widget
representing the property with the new value.
"""
prop_from_display_setter = ObjectProperty(lambda x: x)
"""Lambda that can be used to convert the property from the displayed value
in the GUI when that changes (that could e.g. be a string) to the correct
type when setting the property value of the formula.
"""
display_from_prop_setter = ObjectProperty(lambda x: x)
"""Like :attr:`prop_from_display_setter`, but converts the property so it
can be displayed in the GUI (e.g. to string from float).
"""
obj = ObjectProperty(None)
"""The object to track.
"""
obj_prop = StringProperty('')
"""The property of :attr:`obj` to track.
"""
read_only = BooleanProperty(False)
"""Whether it's read only and cannot be updated from the GUI.
"""
prop_value = ObjectProperty('')
"""Current value of the property as it's shown in the GUI.
"""
def __init__(self, **kwargs):
super(PropertyDisplayBinding, self).__init__(**kwargs)
uid = [None, None, None]
def watch_prop():
if uid[0]:
uid[1].unbind_uid(uid[2], uid[0])
uid[0] = None
if not self.obj or not self.obj_prop:
return
uid[1] = obj = self.obj
uid[2] = prop = self.obj_prop
uid[0] = obj.fbind(prop, set_display)
self.prop_value = self.display_from_prop_setter(getattr(obj, prop))
def set_display(instance, value):
self.prop_value = self.display_from_prop_setter(value)
self.fbind('obj', watch_prop)
self.fbind('obj_prop', watch_prop)
watch_prop()
def set_obj_property(self, value):
"""Callback to set the from the GUI.
"""
if not self.obj or not self.obj_prop or self.read_only:
return
setattr(self.obj, self.obj_prop, self.prop_from_display_setter(value))
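# A minimal sketch (plain Python, no Kivy required) of the two converter
# callables this binding expects: one maps the GUI text to the property's
# type, the other formats the property for display. They mirror the
# to_float/to_str helpers defined in FormulaWidget.populate_widget.
def _example_prop_from_display(text):
    return float(text) if text else 0.
def _example_display_from_prop(value):
    return '{:0.4f}'.format(value) if value else '0'
# Round-tripping a user edit: '12.5' becomes 12.5 on the formula and is
# rendered back as '12.5000' in the GUI.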
class FormulaVariableBehavior(EventDispatcher):
"""Visualization for a formula variable with an attached name.
"""
name = StringProperty('')
"""The name of the property that is displayed to user.
"""
class PlotWidget(BoxLayout):
"""Widget that visualizes a plot for a formula.
"""
plot: FormulaPlot = ObjectProperty(None, rebind=True)
"""The plot that visualizes the formula.
"""
formula_widget: FormulaWidget = ObjectProperty(None)
"""The formula to visualize.
"""
mouse_x_val = NumericProperty(0)
"""x-pos of the mouse in formula input domain.
"""
mouse_x2_val = NumericProperty(None, allownone=True)
"""x2-pos of the mouse in formula input domain.
"""
mouse_y_val = NumericProperty(0)
"""y value at the current mouse pos in formula output domain.
"""
graph_min_height = NumericProperty(200)
"""Smallest height for the graph.
"""
_names_to_x_variables = DictProperty({})
def select_x_variable(self, x_prop, variable_name):
"""Sets the input variable to display on the horizontal/vertical.
"""
formula_prop = '{}_variable_formula'.format(x_prop)
variable_prop = '{}_variable'.format(x_prop)
if not variable_name:
setattr(self.plot, formula_prop, self.plot.formula)
setattr(self.plot, variable_prop, '')
else:
formula, var = self._names_to_x_variables[variable_name]
setattr(self.plot, formula_prop, formula)
setattr(self.plot, variable_prop, var)
def populate_x_variables(self):
"""Updates the list of input variables the user can select from in the
GUI, when selecting the variable to show on an axis.
"""
formula = self.formula_widget.formula
deps = set()
for var in formula.y_variables:
deps.update(formula.get_variable_dep_leaves(var))
self._names_to_x_variables = {
'{}: {}'.format(f.widget.name, var): (f, var) for (f, var) in deps
if var not in f.widget.hidden_variables
}
Factory.register('PropertyDisplayBinding', cls=PropertyDisplayBinding)
Factory.register('FormulaVariableBehavior', cls=FormulaVariableBehavior)
class OpticsApp(App):
"""The app that shows all the formula.
"""
theme = ObjectProperty(None, rebind=True)
"""The flat material design style theme to use.
"""
formula_container_widget = ObjectProperty(None)
"""Widget that contains all the formula.
"""
focal_len_from_io_f: LensFocalLengthFormula = ObjectProperty(None)
"""Formula that computes focal length and mag from the other
parameters.
"""
image_from_f: LensFixedObjectFormula = ObjectProperty(None)
"""Formula that computes the parameters for a fixed object.
"""
objective_lens: LensFixedObjectFormula = ObjectProperty(None)
"""Formula that computes the parameters for a fixed object.
This is the first lens in the 4-lens chain.
"""
cam_lens_further: LensFixedObjectFormula = ObjectProperty(None)
"""Formula that computes the parameters for a fixed object.
This is the second lens in the 4-lens chain.
"""
cam_lens_closer: LensFixedObjectFormula = ObjectProperty(None)
"""Formula that computes the parameters for a fixed object.
This is the third lens in the 4-lens chain.
"""
cam_lens_closer2: LensFixedObjectFormula = ObjectProperty(None)
"""Formula that computes the parameters for a fixed object.
This is the fourth and final lens in the 4-lens chain.
"""
def __init__(self, **kwargs):
self.theme = ColorTheme()
super(OpticsApp, self).__init__(**kwargs)
self.focal_len_from_io_f = LensFocalLengthFormula()
self.image_from_f = LensFixedObjectFormula()
self.objective_lens = LensFixedObjectFormula()
self.cam_lens_further = LensFixedObjectFormula()
self.cam_lens_closer = LensFixedObjectFormula()
self.cam_lens_closer2 = LensFixedObjectFormula()
self.cam_lens_further.object_pos_src = self.objective_lens, 'image_pos'
self.cam_lens_closer.object_pos_src = self.cam_lens_further, 'image_pos'
self.cam_lens_closer2.object_pos_src = self.cam_lens_closer, 'image_pos'
self.cam_lens_further.base_magnification_src = (
self.objective_lens, 'magnification')
self.cam_lens_closer.base_magnification_src = (
self.cam_lens_further, 'magnification')
self.cam_lens_closer2.base_magnification_src = (
self.cam_lens_closer, 'magnification')
def build(self):
root = Factory.FormulaRoot()
container = self.formula_container_widget = root.children[0].children[0]
self.focal_len_from_io_f.widget = widget = FormulaWidget(
formula=self.focal_len_from_io_f,
description='Compute f from object/image distance',
name='Lf')
widget.populate_widget()
container.add_widget(widget)
self.image_from_f.widget = widget = FormulaWidget(
formula=self.image_from_f,
description='Compute image from fixed f/object distance',
name='Li')
widget.populate_widget()
container.add_widget(widget)
self.objective_lens.widget = widget = FormulaWidget(
formula=self.objective_lens,
description='(1/4) First lens in the sequence.',
name='L1')
widget.populate_widget()
container.add_widget(widget)
self.cam_lens_further.widget = widget = FormulaWidget(
formula=self.cam_lens_further,
description='(2/4) Second lens in the sequence.',
name='L2')
widget.populate_widget()
container.add_widget(widget)
self.cam_lens_closer.widget = widget = FormulaWidget(
formula=self.cam_lens_closer,
description='(3/4) Third lens in the sequence.',
name='L3')
widget.populate_widget()
container.add_widget(widget)
self.cam_lens_closer2.widget = widget = FormulaWidget(
formula=self.cam_lens_closer2,
description='(4/4) Fourth lens in the sequence.',
name='L4')
widget.populate_widget()
container.add_widget(widget)
from kivy.core.window import Window
inspector.create_inspector(Window, root)
return root
if __name__ == '__main__':
class _AppHandler(ExceptionHandler):
def handle_exception(self, inst):
Logger.error(inst, exc_info=sys.exc_info())
return ExceptionManager.PASS
handler = _AppHandler()
ExceptionManager.add_handler(handler)
app = OpticsApp()
try:
app.run()
except Exception as e:
Logger.error(e, exc_info=sys.exc_info())
ExceptionManager.remove_handler(handler)
|
|
from __future__ import division
from functools import partial
import warnings
import dlib
from pathlib import Path
import numpy as np
from menpo.feature import no_op
from menpo.base import name_of_callable
from menpofit import checks
from menpofit.visualize import print_progress
from menpofit.compatibility import STRING_TYPES
from menpofit.fitter import (noisy_shape_from_bounding_box,
MultiScaleNonParametricFitter,
generate_perturbations_from_gt)
from menpofit.builder import (scale_images, rescale_images_to_reference_shape,
compute_reference_shape)
from menpofit.result import Result
from .algorithm import DlibAlgorithm
class DlibERT(MultiScaleNonParametricFitter):
r"""
Class for training a multi-scale Ensemble of Regression Trees model. This
class uses the implementation provided by the official DLib package
(http://dlib.net/) and makes it multi-scale.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
group : `str` or ``None``, optional
The landmark group that corresponds to the ground truth shape of each
image. If ``None`` and the images only have a single landmark group,
then that is the one that will be used. Note that all the training
images need to have the specified landmark group.
bounding_box_group_glob : `glob` or ``None``, optional
Glob that defines the bounding boxes to be used for training. If
``None``, then the bounding boxes of the ground truth shapes are used.
reference_shape : `menpo.shape.PointCloud` or ``None``, optional
The reference shape that will be used for normalising the size of the
training images. The normalization is performed by rescaling all the
training images so that the scale of their ground truth shapes
matches the scale of the reference shape. Note that the reference
shape is rescaled with respect to the `diagonal` before performing
the normalisation. If ``None``, then the mean shape will be used.
diagonal : `int` or ``None``, optional
This parameter is used to rescale the reference shape so that the
diagonal of its bounding box matches the provided value. In other
words, this parameter controls the size of the model at the highest
scale. If ``None``, then the reference shape does not get rescaled.
scales : `float` or `tuple` of `float`, optional
The scale value of each scale. They must be provided in ascending order,
i.e. from lowest to highest scale. If `float`, then a single scale is
assumed.
n_perturbations : `int` or ``None``, optional
The number of perturbations to be generated from each of the bounding
boxes using `perturb_from_gt_bounding_box`. Note that the total
number of perturbations is `n_perturbations * n_dlib_perturbations`.
perturb_from_gt_bounding_box : `function`, optional
The function that will be used to generate the perturbations.
n_dlib_perturbations : `int` or ``None`` or `list` of those, optional
The number of perturbations to be generated internally by DLib. DLib
calls this "oversampling amount". If `list`, it must specify a value per
scale. Note that the total number of perturbations is
`n_perturbations * n_dlib_perturbations`.
n_iterations : `int` or `list` of `int`, optional
The number of iterations (cascades) of each level. If `list`, it must
specify a value per scale. If `int`, then it defines the total number of
iterations (cascades) over all scales.
feature_padding : `float` or `list` of `float`, optional
When we randomly sample the pixels for the feature pool we do so in a
box fit around the provided training landmarks. By default, this box
is the tightest box that contains the landmarks. However, you can
expand or shrink the size of the pixel sampling region by setting a
different value of padding. To explain this precisely, for a padding
of 0 we say that the pixels are sampled from a box of size 1x1. The
padding value is added to each side of the box. So a padding of 0.5
would cause the algorithm to sample pixels from a box that was 2x2,
effectively multiplying the area pixels are sampled from by 4.
Similarly, setting the padding to -0.2 would cause it to sample from
a box 0.6x0.6 in size. If `list`, it must specify a value per scale.
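That is, the pixel sampling box has side length `1 + 2 * padding`.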
n_pixel_pairs : `int` or `list` of `int`, optional
`P` parameter from [1]. At each level of the cascade we randomly sample
pixels from the image. These pixels are used to generate features for
the random trees. So in general larger settings of this parameter
give better accuracy but make the algorithm run slower. If `list`, it
must specify a value per scale.
distance_prior_weighting : `float` or `list` of `float`, optional
To decide how to split nodes in the regression trees the algorithm
looks at pairs of pixels in the image. These pixel pairs are sampled
randomly but with a preference for selecting pixels that are near
each other. This parameter controls this "nearness" preference. In
particular, smaller values will make the algorithm prefer to select
pixels close together and larger values will make it care less about
picking nearby pixel pairs. Note that this is the inverse of how it is
defined in [1]. For this object, you should think of
`distance_prior_weighting` as "the fraction of the bounding box will
we traverse to find a neighboring pixel". Nominally, this is
normalized between 0 and 1. So reasonable settings are values in the
range (0, 1). If `list`, it must specify a value per scale.
regularisation_weight : `float` or `list` of `float`, optional
Boosting regularization parameter - `nu` from [1]. Larger values may
cause overfitting but improve performance on training data. If `list`,
it must specify a value per scale.
n_split_tests : `int` or `list` of `int`, optional
When generating the random trees we randomly sample `n_split_tests`
possible split features at each node and pick the one that gives the
best split. Larger values of this parameter will usually give more
accurate outputs but take longer to train. It is equivalent to `S`
from [1]. If `list`, it must specify a value per scale.
n_trees : `int` or `list` of `int`, optional
Number of trees created for each cascade. The total number of trees
in the learned model is equal to `n_trees * n_iterations`. Equivalent to
`K` from [1]. If `list`, it must specify a value per scale.
n_tree_levels : `int` or `list` of `int`, optional
The number of levels in the tree (depth of tree). In particular,
there are pow(2, n_tree_levels) leaves in each tree. Equivalent to
`F` from [1]. If `list`, it must specify a value per scale.
verbose : `bool`, optional
If ``True``, then the progress of building ERT will be printed.
References
----------
.. [1] V. Kazemi, and J. Sullivan. "One millisecond face alignment with
an ensemble of regression trees." Proceedings of the IEEE Conference
on Computer Vision and Pattern Recognition. 2014.
"""
def __init__(self, images, group=None, bounding_box_group_glob=None,
reference_shape=None, diagonal=None, scales=(0.5, 1.0),
n_perturbations=30, n_dlib_perturbations=1,
perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
n_iterations=10, feature_padding=0, n_pixel_pairs=400,
distance_prior_weighting=0.1, regularisation_weight=0.1,
n_split_tests=20, n_trees=500, n_tree_levels=5, verbose=False):
checks.check_diagonal(diagonal)
scales = checks.check_scales(scales)
n_scales = len(scales)
# Dummy option that is required by _prepare_image of MultiFitter.
holistic_features = checks.check_callable(no_op, n_scales)
# Call superclass
super(DlibERT, self).__init__(
scales=scales, reference_shape=reference_shape,
holistic_features=holistic_features, algorithms=[])
# Set parameters
self.diagonal = diagonal
self.n_perturbations = n_perturbations
self.n_iterations = checks.check_max_iters(n_iterations, n_scales)
self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box
# DLib options
self._setup_dlib_options(feature_padding, n_pixel_pairs,
distance_prior_weighting,
regularisation_weight, n_split_tests, n_trees,
n_dlib_perturbations, n_tree_levels)
# Set-up algorithms
for j in range(self.n_scales):
self.algorithms.append(DlibAlgorithm(
self._dlib_options_templates[j],
n_iterations=self.n_iterations[j]))
# Train DLIB over multiple scales
self._train(images, group=group,
bounding_box_group_glob=bounding_box_group_glob,
verbose=verbose)
def _setup_dlib_options(self, feature_padding, n_pixel_pairs,
distance_prior_weighting, regularisation_weight,
n_split_tests, n_trees, n_dlib_perturbations,
n_tree_levels):
check_int = partial(checks.check_multi_scale_param, self.n_scales,
(int,))
check_float = partial(checks.check_multi_scale_param, self.n_scales,
(float,))
feature_padding = check_int('feature_padding', feature_padding)
n_pixel_pairs = check_int('n_pixel_pairs', n_pixel_pairs)
distance_prior_weighting = check_float('distance_prior_weighting',
distance_prior_weighting)
regularisation_weight = check_float('regularisation_weight',
regularisation_weight)
n_split_tests = check_int('n_split_tests', n_split_tests)
n_trees = check_int('n_trees', n_trees)
n_dlib_perturbations = check_int('n_dlib_perturbations',
n_dlib_perturbations)
n_tree_levels = check_int('n_tree_levels', n_tree_levels)
self._dlib_options_templates = []
for j in range(self.n_scales):
new_opts = dlib.shape_predictor_training_options()
# Size of region within which to sample features for the feature
# pool, e.g. a padding of 0.5 would cause the algorithm to sample
# pixels from a box that was 2x2 pixels
new_opts.feature_pool_region_padding = feature_padding[j]
# P parameter from Kazemi paper
new_opts.feature_pool_size = n_pixel_pairs[j]
# Controls how tight the feature sampling should be. Lower values
# enforce closer features. Opposite of explanation from Kazemi
# paper, lambda
new_opts.lambda_param = distance_prior_weighting[j]
# Boosting regularization parameter - nu from Kazemi paper, larger
# values may cause overfitting but improve performance on training
# data
new_opts.nu = regularisation_weight[j]
# S from Kazemi paper - Number of split features at each node to
# sample. The one that gives the best split is chosen.
new_opts.num_test_splits = n_split_tests[j]
# K from Kazemi paper - number of weak regressors
new_opts.num_trees_per_cascade_level = n_trees[j]
# R from Kazemi paper - number of times other shapes are sampled
# as example initialisations
new_opts.oversampling_amount = n_dlib_perturbations[j]
# F from Kazemi paper - number of levels in the tree (depth of tree)
new_opts.tree_depth = n_tree_levels[j]
self._dlib_options_templates.append(new_opts)
def _train(self, original_images, group=None, bounding_box_group_glob=None,
verbose=False):
# Dlib does not support incremental builds, so we must be passed a list
if not isinstance(original_images, list):
original_images = list(original_images)
# We use temporary landmark groups - so we need the group key to not be
# None
if group is None:
group = original_images[0].landmarks.group_labels[0]
# Temporarily store all the bounding boxes for rescaling
for i in original_images:
i.landmarks['__gt_bb'] = i.landmarks[group].lms.bounding_box()
if self.reference_shape is None:
# If no reference shape was given, use the mean of the first batch
self._reference_shape = compute_reference_shape(
[i.landmarks['__gt_bb'].lms for i in original_images],
self.diagonal, verbose=verbose)
# Rescale images wrt the scale factor between the existing
# reference_shape and their ground truth (group) bboxes
images = rescale_images_to_reference_shape(
original_images, '__gt_bb', self.reference_shape,
verbose=verbose)
# Scaling is done - remove temporary gt bounding boxes
for i, i2 in zip(original_images, images):
del i.landmarks['__gt_bb']
del i2.landmarks['__gt_bb']
# Create a callable that generates perturbations of the bounding boxes
# of the provided images.
generated_bb_func = generate_perturbations_from_gt(
images, self.n_perturbations, self._perturb_from_gt_bounding_box,
gt_group=group, bb_group_glob=bounding_box_group_glob,
verbose=verbose)
# For each scale (low --> high)
for j in range(self.n_scales):
# Print progress if asked
if verbose:
if len(self.scales) > 1:
scale_prefix = ' - Scale {}: '.format(j)
else:
scale_prefix = ' - '
else:
scale_prefix = None
# Rescale images according to scales. Note that scale_images is smart
# enough not to rescale the images if the current scale factor
# equals 1.
scaled_images, scale_transforms = scale_images(
images, self.scales[j], prefix=scale_prefix,
return_transforms=True, verbose=verbose)
# Get bbox estimations of current scale. If we are at the first
# scale, this is done by using generated_bb_func. If we are at the
# rest of the scales, then the current bboxes are attached on the
# scaled_images with key '__ert_current_bbox_{}'.
current_bounding_boxes = []
if j == 0:
# At the first scale, the current bboxes are created by calling
# generated_bb_func.
current_bounding_boxes = [generated_bb_func(im)
for im in scaled_images]
else:
# At the rest of the scales, extract the current bboxes that
# were attached to the images
msg = '{}Extracting bbox estimations from previous ' \
'scale.'.format(scale_prefix)
wrap = partial(print_progress, prefix=msg,
end_with_newline=False, verbose=verbose)
for ii in wrap(scaled_images):
c_bboxes = []
for k in list(range(self.n_perturbations)):
c_key = '__ert_current_bbox_{}'.format(k)
c_bboxes.append(ii.landmarks[c_key].lms)
current_bounding_boxes.append(c_bboxes)
# Extract scaled ground truth shapes for current scale
scaled_gt_shapes = [i.landmarks[group].lms for i in scaled_images]
# Train the Dlib model. This returns the bbox estimations for the
# next scale.
current_bounding_boxes = self.algorithms[j].train(
scaled_images, scaled_gt_shapes, current_bounding_boxes,
prefix=scale_prefix, verbose=verbose)
# Scale the current bbox estimations for the next level. This
# doesn't have to be done for the last scale. The only thing we need
# to do at the last scale is to remove any attached landmarks from
# the training images.
if j < (self.n_scales - 1):
for jj, image_bboxes in enumerate(current_bounding_boxes):
for k, bbox in enumerate(image_bboxes):
c_key = '__ert_current_bbox_{}'.format(k)
images[jj].landmarks[c_key] = \
scale_transforms[jj].apply(bbox)
def fit_from_shape(self, image, initial_shape, gt_shape=None):
r"""
Fits the model to an image. Note that it is not possible to
initialise the fitting process from a shape. Thus, this method raises a
warning and calls `fit_from_bb` with the bounding box of the provided
`initial_shape`.
Parameters
----------
image : `menpo.image.Image` or subclass
The image to be fitted.
initial_shape : `menpo.shape.PointCloud`
The initial shape estimate from which the fitting procedure
will start. Note that the shape won't actually be used, only its
bounding box.
gt_shape : `menpo.shape.PointCloud`, optional
The ground truth shape associated to the image.
Returns
-------
fitting_result : :map:`MultiScaleNonParametricIterativeResult`
The result of the fitting procedure.
"""
warnings.warn('Fitting from an initial shape is not supported by '
'Dlib - therefore we are falling back to the tightest '
'bounding box from the given initial_shape')
tightest_bb = initial_shape.bounding_box()
return self.fit_from_bb(image, tightest_bb, gt_shape=gt_shape)
def fit_from_bb(self, image, bounding_box, gt_shape=None):
r"""
Fits the model to an image given an initial bounding box.
Parameters
----------
image : `menpo.image.Image` or subclass
The image to be fitted.
bounding_box : `menpo.shape.PointDirectedGraph`
The initial bounding box from which the fitting procedure
will start.
gt_shape : `menpo.shape.PointCloud`, optional
The ground truth shape associated to the image.
Returns
-------
fitting_result : :map:`MultiScaleNonParametricIterativeResult`
The result of the fitting procedure.
"""
# Generate the list of images to be fitted, as well as the correctly
# scaled initial and ground truth shapes per level. The function also
# returns the lists of affine and scale transforms per level that are
# required in order to transform the shapes at the original image
# space in the fitting result. The affine transforms refer to the
# transform introduced by the rescaling to the reference shape as well
# as potential affine transform from the features. The scale
# transforms are the Scale objects that correspond to each level's
# scale.
(images, bounding_boxes, gt_shapes, affine_transforms,
scale_transforms) = self._prepare_image(image, bounding_box,
gt_shape=gt_shape)
# Execute multi-scale fitting
algorithm_results = self._fit(images=images,
initial_shape=bounding_boxes[0],
affine_transforms=affine_transforms,
scale_transforms=scale_transforms,
return_costs=False, gt_shapes=gt_shapes)
# Return multi-scale fitting result
return self._fitter_result(image=image,
algorithm_results=algorithm_results,
affine_transforms=affine_transforms,
scale_transforms=scale_transforms,
gt_shape=gt_shape)
def __str__(self):
if self.diagonal is not None:
diagonal = self.diagonal
else:
y, x = self.reference_shape.range()
diagonal = np.sqrt(x ** 2 + y ** 2)
# Compute scale info strings
scales_info = []
lvl_str_tmplt = r""" - Scale {0}
- Cascade depth: {1}
- Depth per tree: {2}
- Trees per cascade level: {3}
- Regularisation parameter: {4:.1f}
- Feature pool of size {5} and padding {6:.1f}
- Lambda: {7:.1f}
- {8} split tests
- Perturbations generated per shape: {9}
- Total perturbations generated: {10}"""
for k, s in enumerate(self.scales):
scales_info.append(lvl_str_tmplt.format(
s,
self._dlib_options_templates[k].cascade_depth,
self._dlib_options_templates[k].tree_depth,
self._dlib_options_templates[k].num_trees_per_cascade_level,
self._dlib_options_templates[k].nu,
self._dlib_options_templates[k].feature_pool_size,
self._dlib_options_templates[k].feature_pool_region_padding,
self._dlib_options_templates[k].lambda_param,
self._dlib_options_templates[k].num_test_splits,
self._dlib_options_templates[k].oversampling_amount,
self._dlib_options_templates[k].oversampling_amount *
self.n_perturbations))
scales_info = '\n'.join(scales_info)
is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
noisy_shape_from_bounding_box)
if is_custom_perturb_func:
is_custom_perturb_func = name_of_callable(
self._perturb_from_gt_bounding_box)
cls_str = r"""{class_title}
- Images scaled to diagonal: {diagonal:.2f}
- Perturbations generated per shape: {n_perturbations}
- Custom perturbation scheme used: {is_custom_perturb_func}
- Scales: {scales}
{scales_info}
""".format(class_title='Ensemble of Regression Trees',
diagonal=diagonal,
n_perturbations=self.n_perturbations,
is_custom_perturb_func=is_custom_perturb_func,
scales=self.scales,
scales_info=scales_info)
return cls_str
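def _example_train_dlib_ert(training_images):
    # Hedged usage sketch: ``training_images`` is assumed to be a list of
    # menpo.image.Image objects carrying ground-truth landmark shapes. The
    # parameter values are illustrative, not recommendations.
    fitter = DlibERT(training_images, group=None, diagonal=200,
                     scales=(0.5, 1.0), n_perturbations=30, verbose=True)
    return fitter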
class DlibWrapper(object):
r"""
Wrapper class for fitting a pre-trained ERT model. Pre-trained models are
provided by the official DLib package (http://dlib.net/).
Parameters
----------
model : `Path` or `str`
Path to the pre-trained model.
"""
def __init__(self, model):
if isinstance(model, STRING_TYPES) or isinstance(model, Path):
m_path = Path(model)
if not m_path.exists():
raise ValueError('Model {} does not exist.'.format(m_path))
model = dlib.shape_predictor(str(m_path))
# Dlib doesn't expose any information about how the model was built,
# so we just create dummy options
self.algorithm = DlibAlgorithm(dlib.shape_predictor_training_options(),
n_iterations=0)
self.algorithm.dlib_model = model
self.scales = [1]
def fit_from_shape(self, image, initial_shape, gt_shape=None):
r"""
Fits the model to an image. Note that it is not possible to
initialise the fitting process from a shape. Thus, this method raises a
warning and calls `fit_from_bb` with the bounding box of the provided
`initial_shape`.
Parameters
----------
image : `menpo.image.Image` or subclass
The image to be fitted.
initial_shape : `menpo.shape.PointCloud`
The initial shape estimate from which the fitting procedure
will start. Note that the shape won't actually be used, only its
bounding box.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape associated to the image.
Returns
-------
fitting_result : :map:`Result`
The result of the fitting procedure.
"""
warnings.warn('Fitting from an initial shape is not supported by '
'Dlib - therefore we are falling back to the tightest '
'bounding box from the given initial_shape')
tightest_bb = initial_shape.bounding_box()
return self.fit_from_bb(image, tightest_bb, gt_shape=gt_shape)
def fit_from_bb(self, image, bounding_box, gt_shape=None):
r"""
Fits the model to an image given an initial bounding box.
Parameters
----------
image : `menpo.image.Image` or subclass
The image to be fitted.
bounding_box : `menpo.shape.PointDirectedGraph`
The initial bounding box.
gt_shape : `menpo.shape.PointCloud`
The ground truth shape associated to the image.
Returns
-------
fitting_result : :map:`Result`
The result of the fitting procedure.
"""
# We get back a NonParametricIterativeResult with one iteration,
# which is pointless. Simply convert it to a Result instance without
# passing in an initial shape.
fit_result = self.algorithm.run(image, bounding_box, gt_shape=gt_shape)
return Result(final_shape=fit_result.final_shape, image=image,
initial_shape=None, gt_shape=gt_shape)
def __str__(self):
return "Pre-trained DLib Ensemble of Regression Trees model"
|
|
# encoding: utf-8
"""
mprnlri.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from struct import unpack
from exabgp.protocol.ip import NoNextHop
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.protocol.family import Family
from exabgp.bgp.message.direction import IN
# from exabgp.bgp.message.update.attribute.attribute import Attribute
from exabgp.bgp.message.update.attribute import Attribute
from exabgp.bgp.message.update.attribute import NextHop
from exabgp.bgp.message.update.nlri import NLRI
from exabgp.bgp.message.notification import Notify
# from exabgp.bgp.message.open.capability import Negotiated
# ====================================================== MP Reachable NLRI (14)
#
@Attribute.register()
class MPRNLRI (Attribute,Family):
FLAG = Attribute.Flag.OPTIONAL
ID = Attribute.CODE.MP_REACH_NLRI
# __slots__ = ['nlris']
def __init__ (self, afi, safi, nlris):
Family.__init__(self,afi,safi)
# all the routes must have the same next-hop
self.nlris = nlris
def __eq__ (self, other):
return \
self.ID == other.ID and \
self.FLAG == other.FLAG and \
self.nlris == other.nlris
def __ne__ (self, other):
return not self.__eq__(other)
def packed_attributes (self, negotiated):
if not self.nlris:
return
# addpath = negotiated.addpath.send(self.afi,self.safi)
# nexthopself = negotiated.nexthopself(self.afi)
maximum = negotiated.FREE_SIZE
mpnlri = {}
for nlri in self.nlris:
if nlri.nexthop is NoNextHop:
# EOR and Flow may not have any next_hop
nexthop = ''
else:
# we do not want a next_hop attribute packed (with the _attribute()) but just the next_hop itself
if nlri.safi.has_rd():
# .packed and not .pack()
nexthop = chr(0)*8 + nlri.nexthop.ton(negotiated)
else:
# .packed and not .pack()
nexthop = nlri.nexthop.ton(negotiated)
# mpunli[afi,safi][nexthop] = nlri
mpnlri.setdefault((nlri.afi.pack(),nlri.safi.pack()),{}).setdefault(nexthop,[]).append(nlri.pack(negotiated))
for (pafi,psafi),data in mpnlri.iteritems():
for nexthop,nlris in data.iteritems():
payload = \
pafi + psafi + \
chr(len(nexthop)) + nexthop + \
chr(0) + ''.join(nlris)
if self._len(payload) <= maximum:
yield self._attribute(payload)
continue
# This will not generate an optimal update size..
# we should feed back the remaining maximum on each iteration
for nlri in nlris:
yield self._attribute(
pafi + psafi +
chr(len(nexthop)) + nexthop +
chr(0) + nlri
)
def pack (self, negotiated):
return ''.join(self.packed_attributes(negotiated))
def __len__ (self):
raise RuntimeError('we can not give you the size of an MPRNLRI - was it with or without addpath ?')
# return len(self.pack(False))
def __repr__ (self):
return "MP_REACH_NLRI for %s %s with %d NLRI(s)" % (self.afi,self.safi,len(self.nlris))
@classmethod
def unpack (cls, data, negotiated):
nlris = []
# -- Reading AFI/SAFI
afi,safi = unpack('!HB',data[:3])
offset = 3
# we do not want to accept unknown families
if negotiated and (afi,safi) not in negotiated.families:
raise Notify(3,0,'presented a non-negotiated family %d/%d' % (afi,safi))
# -- Reading length of next-hop
len_nh = ord(data[offset])
offset += 1
rd = 0
# check next-hop size
if afi == AFI.ipv4:
if safi in (SAFI.unicast,SAFI.multicast):
if len_nh != 4:
raise Notify(3,0,'invalid ipv4 unicast/multicast next-hop length %d expected 4' % len_nh)
elif safi in (SAFI.mpls_vpn,):
if len_nh != 12:
raise Notify(3,0,'invalid ipv4 mpls_vpn next-hop length %d expected 12' % len_nh)
rd = 8
elif safi in (SAFI.flow_ip,):
if len_nh not in (0,4):
raise Notify(3,0,'invalid ipv4 flow_ip next-hop length %d expected 4' % len_nh)
elif safi in (SAFI.flow_vpn,):
if len_nh not in (0,4):
raise Notify(3,0,'invalid ipv4 flow_vpn next-hop length %d expected 4' % len_nh)
elif safi in (SAFI.rtc,):
if len_nh not in (4,16):
raise Notify(3,0,'invalid ipv4 rtc next-hop length %d expected 4' % len_nh)
elif afi == AFI.ipv6:
if safi in (SAFI.unicast,):
if len_nh not in (16,32):
raise Notify(3,0,'invalid ipv6 unicast next-hop length %d expected 16 or 32' % len_nh)
elif safi in (SAFI.mpls_vpn,):
if len_nh not in (24,40):
raise Notify(3,0,'invalid ipv6 mpls_vpn next-hop length %d expected 24 or 40' % len_nh)
rd = 8
elif safi in (SAFI.flow_ip,):
if len_nh not in (0,16,32):
raise Notify(3,0,'invalid ipv6 flow_ip next-hop length %d expected 0, 16 or 32' % len_nh)
elif safi in (SAFI.flow_vpn,):
if len_nh not in (0,16,32):
raise Notify(3,0,'invalid ipv6 flow_vpn next-hop length %d expected 0, 16 or 32' % len_nh)
elif afi == AFI.l2vpn:
if len_nh != 4:
raise Notify(3,0,'invalid l2vpn next-hop length %d expected 4' % len_nh)
size = len_nh - rd
# XXX: FIXME: GET IT FROM CACHE HERE ?
nhs = data[offset+rd:offset+rd+size]
nexthops = [nhs[pos:pos+16] for pos in range(0,len(nhs),16)]
# check that the RD is zero
if rd and sum([int(ord(_)) for _ in data[offset:offset+rd]]) != 0:
raise Notify(3,0,"MP_REACH_NLRI next-hop's route-distinguisher must be zero")
offset += len_nh
# Skip the reserved byte as someone had to bug us!
reserved = ord(data[offset])
offset += 1
if reserved != 0:
raise Notify(3,0,'the reserved byte of MP_REACH_NLRI is not zero')
# Is the peer going to send us some Path Information with the route (AddPath)
addpath = negotiated.addpath.receive(afi,safi)
# Reading the NLRIs
data = data[offset:]
if not data:
raise Notify(3,0,'No NLRI data to decode in an MP_REACH_NLRI which is not an EOR %d/%d' % (afi,safi))
while data:
if nexthops:
for nexthop in nexthops:
nlri,left = NLRI.unpack_nlri(afi,safi,data,IN.ANNOUNCED,addpath)
nlri.nexthop = NextHop.unpack(nexthop)
nlris.append(nlri)
else:
nlri,left = NLRI.unpack_nlri(afi,safi,data,IN.ANNOUNCED,addpath)
nlris.append(nlri)
if left == data:
raise RuntimeError("sub-calls should consume data")
# logger.parser(LazyFormat("parsed announce mp nlri %s payload " % nlri,data[:length]))
data = left
return cls(afi,safi,nlris)
EMPTY_MPRNLRI = MPRNLRI(AFI(AFI.undefined),SAFI(SAFI.undefined),[])
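# For reference, the attribute value that packed_attributes() emits and
# unpack() consumes follows the MP_REACH_NLRI layout of RFC 4760:
#   AFI (2 octets) | SAFI (1) | next-hop length (1) | next-hop (variable) |
#   reserved (1 octet, must be zero) | NLRIs (variable)
# As an illustrative example (not taken from a capture), announcing the IPv4
# unicast prefix 10.0.0.0/8 with next-hop 192.0.2.1 gives the value bytes:
#   00 01   01    04      C0 00 02 01   00    08 0A
#   afi     safi  nh-len  next-hop      rsvd  nlri (mask bits, then prefix)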
|
|
# -*- coding: utf-8 -*-
# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <[email protected]>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import absolute_import;
import unittest;
from pymfony.component.event_dispatcher import Event;
from pymfony.component.event_dispatcher import EventDispatcher;
from pymfony.component.event_dispatcher import EventSubscriberInterface;
"""
"""
class TestEventDispatcher(unittest.TestCase):
# Some pseudo events
preFoo = 'pre.foo';
postFoo = 'post.foo';
preBar = 'pre.bar';
postBar = 'post.bar';
def setUp(self):
self.dispatcher = EventDispatcher();
self.listener = TestEventListener();
def tearDown(self):
self.dispatcher = None;
self.listener = None;
def testInitialState(self):
self.assertEqual(dict(), self.dispatcher.getListeners());
self.assertFalse(self.dispatcher.hasListeners(self.preFoo));
self.assertFalse(self.dispatcher.hasListeners(self.postFoo));
def testAddListener(self):
self.dispatcher.addListener('pre.foo', [self.listener, 'preFoo']);
self.dispatcher.addListener('post.foo', [self.listener, 'postFoo']);
self.assertTrue(self.dispatcher.hasListeners(self.preFoo));
self.assertTrue(self.dispatcher.hasListeners(self.postFoo));
self.assertEqual(1, len(self.dispatcher.getListeners(self.preFoo)));
self.assertEqual(1, len(self.dispatcher.getListeners(self.postFoo)));
self.assertEqual(2, len(self.dispatcher.getListeners()));
def testGetListenersSortsByPriority(self):
listener1 = TestEventListener();
listener2 = TestEventListener();
listener3 = TestEventListener();
listener1.name = '1';
listener2.name = '2';
listener3.name = '3';
self.dispatcher.addListener('pre.foo', [listener1, 'preFoo'], -10);
self.dispatcher.addListener('pre.foo', [listener2, 'preFoo'], 10);
self.dispatcher.addListener('pre.foo', [listener3, 'preFoo']);
expected = [
[listener2, 'preFoo'],
[listener3, 'preFoo'],
[listener1, 'preFoo'],
];
self.assertEqual(expected, self.dispatcher.getListeners('pre.foo'));
def testGetAllListenersSortsByPriority(self):
listener1 = TestEventListener();
listener2 = TestEventListener();
listener3 = TestEventListener();
listener4 = TestEventListener();
listener5 = TestEventListener();
listener6 = TestEventListener();
self.dispatcher.addListener('pre.foo', listener1, -10);
self.dispatcher.addListener('pre.foo', listener2);
self.dispatcher.addListener('pre.foo', listener3, 10);
self.dispatcher.addListener('post.foo', listener4, -10);
self.dispatcher.addListener('post.foo', listener5);
self.dispatcher.addListener('post.foo', listener6, 10);
expected = {
'pre.foo': [listener3, listener2, listener1],
'post.foo': [listener6, listener5, listener4],
};
self.assertEqual(expected, self.dispatcher.getListeners());
def testDispatch(self):
self.dispatcher.addListener('pre.foo', [self.listener, 'preFoo']);
self.dispatcher.addListener('post.foo', [self.listener, 'postFoo']);
self.dispatcher.dispatch(self.preFoo);
self.assertTrue(self.listener.preFooInvoked);
self.assertFalse(self.listener.postFooInvoked);
self.assertTrue(isinstance(self.dispatcher.dispatch('noevent'), Event));
self.assertTrue(isinstance(self.dispatcher.dispatch(self.preFoo), Event));
event = Event();
ret = self.dispatcher.dispatch(self.preFoo, event);
self.assertEqual('pre.foo', event.getName());
self.assertTrue(event is ret);
def testDispatchForClosure(self):
self.invoked = 0;
def listener(e):
self.invoked += 1;
self.dispatcher.addListener('pre.foo', listener);
self.dispatcher.addListener('post.foo', listener);
self.dispatcher.dispatch(self.preFoo);
self.assertEqual(1, self.invoked);
def testStopEventPropagation(self):
otherListener = TestEventListener();
# postFoo() stops the propagation, so only one listener should
# be executed
# Manually set priority to enforce self.listener to be called first
self.dispatcher.addListener('post.foo', [self.listener, 'postFoo'], 10);
self.dispatcher.addListener('post.foo', [otherListener, 'preFoo']);
self.dispatcher.dispatch(self.postFoo);
self.assertTrue(self.listener.postFooInvoked);
self.assertFalse(otherListener.postFooInvoked);
def testDispatchByPriority(self):
invoked = list();
def listener1(e):
invoked.append('1');
def listener2(e):
invoked.append('2');
def listener3(e):
invoked.append('3');
self.dispatcher.addListener('pre.foo', listener1, -10);
self.dispatcher.addListener('pre.foo', listener2);
self.dispatcher.addListener('pre.foo', listener3, 10);
self.dispatcher.dispatch(self.preFoo);
self.assertEqual(['3', '2', '1'], invoked);
def testRemoveListener(self):
self.dispatcher.addListener('pre.bar', self.listener);
self.assertTrue(self.dispatcher.hasListeners(self.preBar));
self.dispatcher.removeListener('pre.bar', self.listener);
self.assertFalse(self.dispatcher.hasListeners(self.preBar));
self.dispatcher.removeListener('notExists', self.listener);
def testAddSubscriber(self):
eventSubscriber = TestEventSubscriber();
self.dispatcher.addSubscriber(eventSubscriber);
self.assertTrue(self.dispatcher.hasListeners(self.preFoo));
self.assertTrue(self.dispatcher.hasListeners(self.postFoo));
def testAddSubscriberWithPriorities(self):
eventSubscriber = TestEventSubscriber();
self.dispatcher.addSubscriber(eventSubscriber);
eventSubscriber = TestEventSubscriberWithPriorities();
self.dispatcher.addSubscriber(eventSubscriber);
listeners = self.dispatcher.getListeners('pre.foo');
self.assertTrue(self.dispatcher.hasListeners(self.preFoo));
self.assertEqual(2, len(listeners));
self.assertTrue(isinstance(listeners[0][0], TestEventSubscriberWithPriorities));
def testAddSubscriberWithMultipleListeners(self):
eventSubscriber = TestEventSubscriberWithMultipleListeners();
self.dispatcher.addSubscriber(eventSubscriber);
listeners = self.dispatcher.getListeners('pre.foo');
self.assertTrue(self.dispatcher.hasListeners(self.preFoo));
self.assertEqual(2, len(listeners));
self.assertEqual('preFoo2', listeners[0][1]);
def testRemoveSubscriber(self):
eventSubscriber = TestEventSubscriber();
self.dispatcher.addSubscriber(eventSubscriber);
self.assertTrue(self.dispatcher.hasListeners(self.preFoo));
self.assertTrue(self.dispatcher.hasListeners(self.postFoo));
self.dispatcher.removeSubscriber(eventSubscriber);
self.assertFalse(self.dispatcher.hasListeners(self.preFoo));
self.assertFalse(self.dispatcher.hasListeners(self.postFoo));
def testRemoveSubscriberWithPriorities(self):
eventSubscriber = TestEventSubscriberWithPriorities();
self.dispatcher.addSubscriber(eventSubscriber);
self.assertTrue(self.dispatcher.hasListeners(self.preFoo));
self.dispatcher.removeSubscriber(eventSubscriber);
self.assertFalse(self.dispatcher.hasListeners(self.preFoo));
def testRemoveSubscriberWithMultipleListeners(self):
eventSubscriber = TestEventSubscriberWithMultipleListeners();
self.dispatcher.addSubscriber(eventSubscriber);
self.assertTrue(self.dispatcher.hasListeners(self.preFoo));
self.assertEqual(2, len(self.dispatcher.getListeners(self.preFoo)));
self.dispatcher.removeSubscriber(eventSubscriber);
self.assertFalse(self.dispatcher.hasListeners(self.preFoo));
def testEventReceivesTheDispatcherInstance(self):
test = self;
dispatcher = list();
def callback(event):
dispatcher.append(event.getDispatcher());
self.dispatcher.addListener('test', callback);
self.dispatcher.dispatch('test');
self.assertTrue(self.dispatcher is dispatcher[0]);
class TestEventListener():
preFooInvoked = False;
postFooInvoked = False;
# Listener methods
def preFoo(self, e):
assert isinstance(e, Event);
self.preFooInvoked = True;
def postFoo(self, e):
assert isinstance(e, Event);
self.postFooInvoked = True;
e.stopPropagation();
class TestEventSubscriber(EventSubscriberInterface):
@classmethod
def getSubscribedEvents(cls):
return {'pre.foo': 'preFoo', 'post.foo': 'postFoo'};
class TestEventSubscriberWithPriorities(EventSubscriberInterface):
@classmethod
def getSubscribedEvents(cls):
return {
'pre.foo': ['preFoo', 10],
'post.foo': ['postFoo'],
};
class TestEventSubscriberWithMultipleListeners(EventSubscriberInterface):
@classmethod
def getSubscribedEvents(cls):
return {'pre.foo': [
['preFoo1'],
['preFoo2', 10],
]};
if __name__ == '__main__':
unittest.main();
|
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class PagedSavedSearch(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cursor': 'str',
'items': 'list[SavedSearch]',
'limit': 'int',
'more_items': 'bool',
'offset': 'int',
'sort': 'Sorting',
'total_items': 'int'
}
attribute_map = {
'cursor': 'cursor',
'items': 'items',
'limit': 'limit',
'more_items': 'moreItems',
'offset': 'offset',
'sort': 'sort',
'total_items': 'totalItems'
}
def __init__(self, cursor=None, items=None, limit=None, more_items=None, offset=None, sort=None, total_items=None, _configuration=None): # noqa: E501
"""PagedSavedSearch - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._cursor = None
self._items = None
self._limit = None
self._more_items = None
self._offset = None
self._sort = None
self._total_items = None
self.discriminator = None
if cursor is not None:
self.cursor = cursor
if items is not None:
self.items = items
if limit is not None:
self.limit = limit
if more_items is not None:
self.more_items = more_items
if offset is not None:
self.offset = offset
if sort is not None:
self.sort = sort
if total_items is not None:
self.total_items = total_items
@property
def cursor(self):
"""Gets the cursor of this PagedSavedSearch. # noqa: E501
The id at which the current (limited) search can be continued to obtain more matching items # noqa: E501
:return: The cursor of this PagedSavedSearch. # noqa: E501
:rtype: str
"""
return self._cursor
@cursor.setter
def cursor(self, cursor):
"""Sets the cursor of this PagedSavedSearch.
The id at which the current (limited) search can be continued to obtain more matching items # noqa: E501
:param cursor: The cursor of this PagedSavedSearch. # noqa: E501
:type: str
"""
self._cursor = cursor
@property
def items(self):
"""Gets the items of this PagedSavedSearch. # noqa: E501
List of requested items # noqa: E501
:return: The items of this PagedSavedSearch. # noqa: E501
:rtype: list[SavedSearch]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this PagedSavedSearch.
List of requested items # noqa: E501
:param items: The items of this PagedSavedSearch. # noqa: E501
:type: list[SavedSearch]
"""
self._items = items
@property
def limit(self):
"""Gets the limit of this PagedSavedSearch. # noqa: E501
:return: The limit of this PagedSavedSearch. # noqa: E501
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this PagedSavedSearch.
:param limit: The limit of this PagedSavedSearch. # noqa: E501
:type: int
"""
self._limit = limit
@property
def more_items(self):
"""Gets the more_items of this PagedSavedSearch. # noqa: E501
Whether more items are available for return by increment offset or cursor # noqa: E501
:return: The more_items of this PagedSavedSearch. # noqa: E501
:rtype: bool
"""
return self._more_items
@more_items.setter
def more_items(self, more_items):
"""Sets the more_items of this PagedSavedSearch.
Whether more items are available for return by increment offset or cursor # noqa: E501
:param more_items: The more_items of this PagedSavedSearch. # noqa: E501
:type: bool
"""
self._more_items = more_items
@property
def offset(self):
"""Gets the offset of this PagedSavedSearch. # noqa: E501
:return: The offset of this PagedSavedSearch. # noqa: E501
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this PagedSavedSearch.
:param offset: The offset of this PagedSavedSearch. # noqa: E501
:type: int
"""
self._offset = offset
@property
def sort(self):
"""Gets the sort of this PagedSavedSearch. # noqa: E501
:return: The sort of this PagedSavedSearch. # noqa: E501
:rtype: Sorting
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this PagedSavedSearch.
:param sort: The sort of this PagedSavedSearch. # noqa: E501
:type: Sorting
"""
self._sort = sort
@property
def total_items(self):
"""Gets the total_items of this PagedSavedSearch. # noqa: E501
An estimate (lower-bound) of the total number of items available for return. May not be a tight estimate for facet queries # noqa: E501
:return: The total_items of this PagedSavedSearch. # noqa: E501
:rtype: int
"""
return self._total_items
@total_items.setter
def total_items(self, total_items):
"""Sets the total_items of this PagedSavedSearch.
An estimate (lower-bound) of the total number of items available for return. May not be a tight estimate for facet queries # noqa: E501
:param total_items: The total_items of this PagedSavedSearch. # noqa: E501
:type: int
"""
self._total_items = total_items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PagedSavedSearch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PagedSavedSearch):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PagedSavedSearch):
return True
return self.to_dict() != other.to_dict()
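def _example_paged_saved_search():
    # Hedged usage sketch: building a page container by hand and serialising
    # it; in practice instances are returned by the generated API client.
    page = PagedSavedSearch(items=[], limit=100, offset=0,
                            more_items=False, total_items=0)
    return page.to_dict()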
|
|
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import random
import uuid
from xml.sax import saxutils
import zlib
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from nova import exception
from nova.i18n import _
from nova.virt.xenapi.client import session as xenapi_session
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
LOG = logging.getLogger(__name__)
def reset():
for c in _CLASSES:
_db_content[c] = {}
host = create_host('fake')
create_vm('fake dom 0',
'Running',
is_a_template=False,
is_control_domain=True,
resident_on=host)
def reset_table(table):
if table not in _CLASSES:
return
_db_content[table] = {}
def _create_pool(name_label):
return _create_object('pool',
{'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
host_ref = _create_object('host',
{'name_label': name_label,
'hostname': hostname,
'address': address})
host_default_sr_ref = _create_local_srs(host_ref)
_create_local_pif(host_ref)
# Create a pool if we don't have one already
if len(_db_content['pool']) == 0:
pool_ref = _create_pool('')
_db_content['pool'][pool_ref]['master'] = host_ref
_db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
_db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
def create_network(name_label, bridge):
return _create_object('network',
{'name_label': name_label,
'bridge': bridge})
def create_vm(name_label, status, **kwargs):
if status == 'Running':
domid = random.randrange(1, 1 << 16)
resident_on = list(_db_content['host'])[0]
else:
domid = -1
resident_on = ''
vm_rec = kwargs.copy()
vm_rec.update({'name_label': name_label,
'domid': domid,
'power_state': status,
'blocked_operations': {},
'resident_on': resident_on})
vm_ref = _create_object('VM', vm_rec)
after_VM_create(vm_ref, vm_rec)
return vm_ref
def destroy_vm(vm_ref):
vm_rec = _db_content['VM'][vm_ref]
vbd_refs = vm_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
vbd_rec = _db_content['VBD'][vbd_ref]
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].remove(vbd_ref)
vdi_ref = vbd_rec['VDI']
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].remove(vbd_ref)
del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
vdi_rec = _db_content['VDI'][vdi_ref]
vbd_refs = vdi_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
vdi_rec = {
'SR': sr_ref,
'read_only': False,
'type': '',
'name_label': name_label,
'name_description': '',
'sharable': False,
'other_config': {},
'location': '',
'xenstore_data': {},
'sm_config': {'vhd-parent': None},
'physical_utilisation': '123',
'managed': True,
}
vdi_rec.update(kwargs)
vdi_ref = _create_object('VDI', vdi_rec)
after_VDI_create(vdi_ref, vdi_rec)
return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None):
if other_config is None:
other_config = {}
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': str(userdevice),
'currently_attached': False,
'other_config': other_config}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
"""Create read-only fields and backref from VM and VDI to VBD when VBD
is created.
"""
vbd_rec['currently_attached'] = False
vbd_rec['device'] = ''
vbd_rec.setdefault('other_config', {})
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].append(vbd_ref)
vm_name_label = _db_content['VM'][vm_ref]['name_label']
vbd_rec['vm_name_label'] = vm_name_label
vdi_ref = vbd_rec['VDI']
if vdi_ref and vdi_ref != "OpaqueRef:NULL":
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].append(vbd_ref)
def after_VIF_create(vif_ref, vif_rec):
"""Create backref from VM to VIF when VIF is created.
"""
vm_ref = vif_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VIFs'].append(vif_ref)
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('domid', -1)
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('is_a_template', False)
vm_rec.setdefault('memory_static_max', str(8 * units.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('VIFs', [])
vm_rec.setdefault('resident_on', '')
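# Illustrative sketch (editorial addition, not part of the original fake):
# how the helpers above are typically combined in tests.  reset() builds a
# host, a pool and the fake dom0; the VBD created here then shows up in the
# backreference lists maintained by the after_*_create hooks.
def _example_build_fake_topology():
    reset()
    vm_ref = create_vm('fake vm', 'Halted')
    sr_ref = list(_db_content['SR'])[0]
    vdi_ref = create_vdi('fake vdi', sr_ref)
    vbd_ref = create_vbd(vm_ref, vdi_ref)
    assert vbd_ref in _db_content['VM'][vm_ref]['VBDs']
    assert vbd_ref in _db_content['VDI'][vdi_ref]['VBDs']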
def create_pbd(host_ref, sr_ref, attached):
config = {'path': '/var/run/sr-mount/%s' % sr_ref}
return _create_object('PBD',
{'device_config': config,
'host': host_ref,
'SR': sr_ref,
'currently_attached': attached})
def create_task(name_label):
return _create_object('task',
{'name_label': name_label,
'status': 'pending'})
def _create_local_srs(host_ref):
"""Create an SR that looks like the one created on the local disk by
default by the XenServer installer. Also, fake the installation of
an ISO SR.
"""
create_sr(name_label='Local storage ISO',
type='iso',
other_config={'i18n-original-value-name_label':
'Local storage ISO',
'i18n-key': 'local-storage-iso'},
physical_size=80000,
physical_utilisation=40000,
virtual_allocation=80000,
host_ref=host_ref)
return create_sr(name_label='Local storage',
type='ext',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
physical_size=40000,
physical_utilisation=20000,
virtual_allocation=10000,
host_ref=host_ref)
def create_sr(**kwargs):
sr_ref = _create_object(
'SR',
{'name_label': kwargs.get('name_label'),
'type': kwargs.get('type'),
'content_type': kwargs.get('type', 'user'),
'shared': kwargs.get('shared', False),
'physical_size': kwargs.get('physical_size', str(1 << 30)),
'physical_utilisation': str(
kwargs.get('physical_utilisation', 0)),
'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
'other_config': kwargs.get('other_config', {}),
'VDIs': kwargs.get('VDIs', [])})
pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': -1,
'device': 'fake0',
'host_uuid': host_ref,
'network': '',
'IP': '10.1.1.1',
'IPv6': '',
'uuid': '',
'management': 'true'})
_db_content['PIF'][pif_ref]['uuid'] = pif_ref
return pif_ref
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
_db_content[table][ref] = obj
return ref
def _create_sr(table, obj):
sr_type = obj[6]
    # The fake only supports the iscsi and nfs SR types
    if sr_type not in ('iscsi', 'nfs'):
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
host_ref = list(_db_content['host'])[0]
sr_ref = _create_object(table, obj[2])
if sr_type == 'iscsi':
vdi_ref = create_vdi('', sr_ref)
pbd_ref = create_pbd(host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
_db_content['PBD'][pbd_ref]['SR'] = sr_ref
return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
pif_rec = get_record('PIF', pif_ref)
vlan_pif_ref = _create_object('PIF',
{'name-label': 'Fake VLAN PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': vlan_num,
'device': pif_rec['device'],
'host_uuid': pif_rec['host_uuid']})
return _create_object('VLAN',
{'tagged-pif': pif_ref,
'untagged-pif': vlan_pif_ref,
'tag': vlan_num})
def get_all(table):
return _db_content[table].keys()
def get_all_records(table):
return _db_content[table]
def _query_matches(record, query):
# Simple support for the XenServer query language:
# 'field "host"="<uuid>" and field "SR"="<sr uuid>"'
# Tested through existing tests (e.g. calls to find_network_with_bridge)
and_clauses = query.split(" and ")
if len(and_clauses) > 1:
matches = True
for clause in and_clauses:
matches = matches and _query_matches(record, clause)
return matches
or_clauses = query.split(" or ")
if len(or_clauses) > 1:
matches = False
for clause in or_clauses:
matches = matches or _query_matches(record, clause)
return matches
if query.startswith('not '):
return not _query_matches(record, query[4:])
# Now it must be a single field - bad queries never match
if not query.startswith('field'):
return False
(field, value) = query[6:].split('=', 1)
    # Some fields (e.g. name_label, memory_overhead) are written with
    # double underscores in queries but stored with single underscores,
    # so normalise the queried field name before comparing
field = field.replace("__", "_").strip(" \"'")
value = value.strip(" \"'")
# Strings should be directly compared
if isinstance(record[field], str):
return record[field] == value
# But for all other value-checks, convert to a string first
# (Notably used for booleans - which can be lower or camel
# case and are interpreted/sanitised by XAPI)
return str(record[field]).lower() == value.lower()
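# Illustrative sketch (editorial addition): how the query grammar above is
# interpreted.  Queried field names may use double underscores and boolean
# values are compared case-insensitively.
def _example_query_matches():
    record = {'host': 'OpaqueRef:host1', 'currently_attached': True}
    assert _query_matches(record, 'field "host"="OpaqueRef:host1"')
    assert _query_matches(record, 'field "currently__attached"="true"')
    assert not _query_matches(record, 'not field "host"="OpaqueRef:host1"')
    assert _query_matches(record, 'field "host"="OpaqueRef:host1" and '
                                  'field "currently__attached"="true"')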
def get_all_records_where(table_name, query):
matching_records = {}
table = _db_content[table_name]
for record in table:
if _query_matches(table[record], query):
matching_records[record] = table[record]
return matching_records
def get_record(table, ref):
if ref in _db_content[table]:
return _db_content[table].get(ref)
else:
raise Failure(['HANDLE_INVALID', table, ref])
def check_for_session_leaks():
if len(_db_content['session']) > 0:
raise exception.NovaException('Sessions have leaked: %s' %
_db_content['session'])
def as_value(s):
"""Helper function for simulating XenAPI plugin responses. It
escapes and wraps the given argument.
"""
return '<value>%s</value>' % saxutils.escape(s)
def as_json(*args, **kwargs):
"""Helper function for simulating XenAPI plugin responses for those
that are returning JSON. If this function is given plain arguments,
then these are rendered as a JSON list. If it's given keyword
arguments then these are rendered as a JSON dict.
"""
arg = args or kwargs
return jsonutils.dumps(arg)
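# Illustrative sketch (editorial addition): positional arguments to as_json
# become a JSON list, keyword arguments a JSON dict, mirroring what the real
# plugins return; as_value escapes and wraps plugin XML values.
def _example_plugin_responses():
    assert as_value('<ok>') == '<value>&lt;ok&gt;</value>'
    assert jsonutils.loads(as_json('a', 'b')) == ['a', 'b']
    assert jsonutils.loads(as_json(returncode='0', message='ok')) == {
        'returncode': '0', 'message': 'ok'}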
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
return {str(i): self.details[i] for i in range(len(self.details))}
class SessionBase(object):
"""Base class for Fake Sessions."""
def __init__(self, uri):
self._session = None
xenapi_session.apply_session_helpers(self)
def pool_get_default_SR(self, _1, pool_ref):
        return list(_db_content['pool'].values())[0]['default-SR']
def VBD_insert(self, _1, vbd_ref, vdi_ref):
vbd_rec = get_record('VBD', vbd_ref)
get_record('VDI', vdi_ref)
vbd_rec['empty'] = False
vbd_rec['VDI'] = vdi_ref
def VBD_plug(self, _1, ref):
rec = get_record('VBD', ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
rec['device'] = rec['userdevice']
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', ref])
rec['currently_attached'] = False
rec['device'] = ''
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
vbd_ref, key])
db_ref['other_config'][key] = value
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
def PBD_create(self, _1, pbd_rec):
pbd_ref = _create_object('PBD', pbd_rec)
_db_content['PBD'][pbd_ref]['currently_attached'] = False
return pbd_ref
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
shared, sm_config):
for ref, rec in six.iteritems(_db_content['SR']):
if rec.get('uuid') == sr_uuid:
# make forgotten = 0 and return ref
_db_content['SR'][ref]['forgotten'] = 0
return ref
# SR not found in db, so we create one
params = {'sr_uuid': sr_uuid,
'label': label,
'desc': desc,
'type': type,
'content_type': content_type,
'shared': shared,
'sm_config': sm_config}
sr_ref = _create_object('SR', params)
_db_content['SR'][sr_ref]['uuid'] = sr_uuid
_db_content['SR'][sr_ref]['forgotten'] = 0
vdi_per_lun = False
if type == 'iscsi':
            # Be explicit: an iscsi SR introduced here carries a single VDI
vdi_per_lun = True
if vdi_per_lun:
# we need to create a vdi because this introduce
# is likely meant for a single vdi
vdi_ref = create_vdi('', sr_ref)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
return sr_ref
def SR_forget(self, _1, sr_ref):
_db_content['SR'][sr_ref]['forgotten'] = 1
def SR_scan(self, _1, sr_ref):
return
def VM_get_xenstore_data(self, _1, vm_ref):
return _db_content['VM'][vm_ref].get('xenstore_data', {})
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
pass
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
vdi_ref, key])
db_ref['other_config'][key] = value
def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
db_ref = _db_content['VDI'][vdi_to_copy_ref]
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
other_config = db_ref['other_config'].copy()
return create_vdi(name_label, sr_ref, sharable=sharable,
read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
sr_ref = db_ref['SR']
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
# Always return 12GB available
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
def _plugin_agent_password(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_inject_file(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_agentupdate(self, method, args):
url = args["url"]
md5 = args["md5sum"]
message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
md5=md5)
return as_json(returncode='0', message=message)
def _plugin_noop(self, method, args):
return ''
def _plugin_pickle_noop(self, method, args):
return pickle.dumps(None)
def _plugin_migration_transfer_vhd(self, method, args):
kwargs = pickle.loads(args['params'])['kwargs']
vdi_ref = self.xenapi_request('VDI.get_by_uuid',
(kwargs['vdi_uuid'], ))
assert vdi_ref
return pickle.dumps(None)
_plugin_glance_upload_vhd = _plugin_pickle_noop
_plugin_kernel_copy_vdi = _plugin_noop
_plugin_kernel_create_kernel_ramdisk = _plugin_noop
_plugin_kernel_remove_kernel_ramdisk = _plugin_noop
_plugin_migration_move_vhds_into_sr = _plugin_noop
def _plugin_xenhost_host_data(self, method, args):
return jsonutils.dumps({
'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40},
'host_uuid': 'fb97583b-baa1-452d-850e-819d95285def',
'host_name-label': 'fake-xenhost',
'host_name-description': 'Default install of XenServer',
'host_hostname': 'fake-xenhost',
'host_ip_address': '10.219.10.24',
'enabled': 'true',
'host_capabilities': ['xen-3.0-x86_64',
'xen-3.0-x86_32p',
'hvm-3.0-x86_32',
'hvm-3.0-x86_32p',
'hvm-3.0-x86_64'],
'host_other-config': {
'agent_start_time': '1412774967.',
'iscsi_iqn': 'iqn.2014-10.org.example:39fa9ee3',
'boot_time': '1412774885.',
},
'host_cpu_info': {
'physical_features': '0098e3fd-bfebfbff-00000001-28100800',
'modelname': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
'vendor': 'GenuineIntel',
'features': '0098e3fd-bfebfbff-00000001-28100800',
'family': 6,
'maskable': 'full',
'cpu_count': 4,
'socket_count': '1',
'flags': 'fpu de tsc msr pae mce cx8 apic sep mtrr mca '
'cmov pat clflush acpi mmx fxsr sse sse2 ss ht '
'nx constant_tsc nonstop_tsc aperfmperf pni vmx '
'est ssse3 sse4_1 sse4_2 popcnt hypervisor ida '
'tpr_shadow vnmi flexpriority ept vpid',
'stepping': 5,
'model': 30,
'features_after_reboot': '0098e3fd-bfebfbff-00000001-28100800',
'speed': '2394.086'
},
})
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
_plugin_xenhost_host_reboot = _plugin_poweraction
_plugin_xenhost_host_startup = _plugin_poweraction
_plugin_xenhost_host_shutdown = _plugin_poweraction
def _plugin_xenhost_set_host_enabled(self, method, args):
enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
return jsonutils.dumps({"status": enabled})
def _plugin_xenhost_host_uptime(self, method, args):
return jsonutils.dumps({"uptime": "fake uptime"})
def _plugin_xenhost_get_pci_device_details(self, method, args):
"""Simulate the ouput of three pci devices.
Both of those devices are available for pci passtrough but
only one will match with the pci whitelist used in the
method test_pci_passthrough_devices_*().
Return a single list.
"""
# Driver is not pciback
dev_bad1 = ["Slot:\t0000:86:10.0", "Class:\t0604", "Vendor:\t10b5",
"Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"]
# Driver is pciback but vendor and device are bad
dev_bad2 = ["Slot:\t0000:88:00.0", "Class:\t0300", "Vendor:\t0bad",
"Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
# Driver is pciback and vendor, device are used for matching
dev_good = ["Slot:\t0000:87:00.0", "Class:\t0300", "Vendor:\t10de",
"Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good)
return pickle.dumps(lspci_output)
def _plugin_xenhost_get_pci_type(self, method, args):
return pickle.dumps("type-PCI")
def _plugin_console_get_console_log(self, method, args):
dom_id = args["dom_id"]
if dom_id == 0:
raise Failure('Guest does not have a console')
        return base64.b64encode(
            zlib.compress(("dom_id: %s" % dom_id).encode('utf-8')))
def _plugin_nova_plugin_version_get_version(self, method, args):
return pickle.dumps("1.2")
def _plugin_xenhost_query_gc(self, method, args):
return pickle.dumps("False")
def host_call_plugin(self, _1, _2, plugin, method, args):
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %
(plugin, method))
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * units.Gi
def VDI_resize_online(self, *args):
return 'derp'
VDI_resize = VDI_resize_online
def _VM_reboot(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
if db_ref['power_state'] != 'Running':
raise Failure(['VM_BAD_POWER_STATE',
'fake-opaque-ref', db_ref['power_state'].lower(), 'halted'])
db_ref['power_state'] = 'Running'
db_ref['domid'] = random.randrange(1, 1 << 16)
def VM_clean_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Halted'
db_ref['domid'] = -1
VM_clean_shutdown = VM_hard_shutdown
def VM_suspend(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Suspended'
def VM_pause(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Paused'
def pool_eject(self, session, host_ref):
pass
def pool_join(self, session, hostname, username, password):
pass
def pool_set_name_label(self, session, pool_ref, name):
pass
def host_migrate_receive(self, session, destref, nwref, options):
return "fake_migrate_data"
def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
vdi_map, vif_map, options):
pass
def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
vif_map, options):
pass
def VM_remove_from_blocked_operations(self, session, vm_ref, key):
# operation is idempotent, XenServer doesn't care if the key exists
_db_content['VM'][vm_ref]['blocked_operations'].pop(key, None)
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
def _login(self, method, params):
self._session = str(uuid.uuid4())
_session_info = {'uuid': str(uuid.uuid4()),
'this_host': list(_db_content['host'])[0]}
_db_content['session'][self._session] = _session_info
def _logout(self):
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
"Logging out a session that is invalid or already logged "
"out: %s" % s)
del _db_content['session'][s]
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
elif name.startswith('Async'):
return lambda *params: self._async(name, params)
elif '.' in name:
impl = getattr(self, name.replace('.', '_'))
if impl is not None:
def callit(*params):
LOG.debug('Calling %(name)s %(impl)s',
{'name': name, 'impl': impl})
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
LOG.debug('Calling getter %s', name)
return lambda *params: self._getter(name, params)
elif self._is_gettersetter(name, False):
LOG.debug('Calling setter %s', name)
return lambda *params: self._setter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
elif name == 'XenAPI':
return FakeXenAPI()
else:
return None
def _is_gettersetter(self, name, getter):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
return self._is_method(name, 'create')
def _is_destroy(self, name):
return self._is_method(name, 'destroy')
def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1] == meth)
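    # Editorial note on the helpers above: __getattr__ combines them so
    # that, unless an explicit method such as VM_get_xenstore_data exists,
    #   'VM.get_name_label' -> _getter()   (class in _CLASSES, 'get_' prefix)
    #   'VM.set_name_label' -> _setter()   ('set_' prefix)
    #   'SR.create'         -> _create()   (second component == 'create')
    #   'VDI.destroy'       -> _destroy()  (second component == 'destroy')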
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
if func == 'get_all_records':
self._check_arg_count(params, 1)
return get_all_records(cls)
if func == 'get_all_records_where':
self._check_arg_count(params, 2)
return get_all_records_where(cls, params[1])
if func == 'get_record':
self._check_arg_count(params, 2)
return get_record(cls, params[1])
if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
_db_content[cls], func[len('get_by_'):], params[1],
return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
if (ref in _db_content[cls]):
if (field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
else:
raise Failure(['HANDLE_INVALID', cls, ref])
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if len(params) == 3:
field = func[len('set_'):]
ref = params[1]
val = params[2]
if (ref in _db_content[cls] and
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
return
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
'is missing that field' % name)
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = (is_sr_create and
_create_sr(cls, params) or
is_vlan_create and
_create_vlan(params[1], params[2], params[3]) or
_create_object(cls, params[1]))
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
if after_hook in globals():
globals()[after_hook](ref, params[1])
obj = get_record(cls, ref)
# Add RO fields
if cls == 'VM':
obj['power_state'] = 'Halted'
return ref
def _destroy(self, name, params):
self._check_session(params)
self._check_arg_count(params, 2)
table = name.split('.')[0]
ref = params[1]
if ref not in _db_content[table]:
raise Failure(['HANDLE_INVALID', table, ref])
# Call destroy function (if exists)
destroy_func = globals().get('destroy_%s' % table.lower())
if destroy_func:
destroy_func(ref)
else:
del _db_content[table][ref]
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
func = name[len('Async.'):]
try:
result = self.xenapi_request(func, params[1:])
if result:
result = as_value(result)
task['result'] = result
task['status'] = 'success'
except Failure as exc:
task['error_info'] = exc.details
task['status'] = 'failed'
task['finished'] = timeutils.utcnow()
return task_ref
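    # Editorial note: names prefixed with 'Async.' are routed here by
    # __getattr__ above, so e.g. session.xenapi.Async.VM.clean_reboot(ref)
    # runs the synchronous VM_clean_reboot and returns a task ref whose
    # record ends up with status 'success' or 'failed'.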
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug('Raising NotImplemented')
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
actual = len(params)
if actual != expected:
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in six.iteritems(recs):
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
raise Failure(['UUID_INVALID', v, result, recs, k])
return result
class FakeXenAPI(object):
def __init__(self):
self.Failure = Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher(object):
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
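    # Editorial note: attribute access accumulates a dotted name, so
    # session.xenapi.VM.get_record(vm_ref) ends up invoking
    # xenapi_request('VM.get_record', (vm_ref,)) on the owning SessionBase.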
|
|
# coding: utf8
from __future__ import unicode_literals
def explain(term):
    """Return a human-readable description for a POS tag, dependency label
    or entity type, or None if the term is not in the glossary.
    """
    if term in GLOSSARY:
        return GLOSSARY[term]
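# Illustrative usage (editorial addition), assuming the GLOSSARY below:
#     explain('NN')     -> 'noun, singular or mass'
#     explain('nsubj')  -> 'nominal subject'
#     explain('no-tag') -> None (term is not in the glossary)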
GLOSSARY = {
# POS tags
# Universal POS Tags
# http://universaldependencies.org/u/pos/
'ADJ': 'adjective',
'ADP': 'adposition',
'ADV': 'adverb',
'AUX': 'auxiliary',
'CONJ': 'conjunction',
'CCONJ': 'coordinating conjunction',
'DET': 'determiner',
'INTJ': 'interjection',
'NOUN': 'noun',
'NUM': 'numeral',
'PART': 'particle',
'PRON': 'pronoun',
'PROPN': 'proper noun',
'PUNCT': 'punctuation',
'SCONJ': 'subordinating conjunction',
'SYM': 'symbol',
'VERB': 'verb',
'X': 'other',
'EOL': 'end of line',
'SPACE': 'space',
# POS tags (English)
# OntoNotes 5 / Penn Treebank
# https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
'.': 'punctuation mark, sentence closer',
',': 'punctuation mark, comma',
'-LRB-': 'left round bracket',
'-RRB-': 'right round bracket',
'``': 'opening quotation mark',
'""': 'closing quotation mark',
"''": 'closing quotation mark',
':': 'punctuation mark, colon or ellipsis',
'$': 'symbol, currency',
'#': 'symbol, number sign',
'AFX': 'affix',
'CC': 'conjunction, coordinating',
'CD': 'cardinal number',
'DT': 'determiner',
'EX': 'existential there',
'FW': 'foreign word',
'HYPH': 'punctuation mark, hyphen',
'IN': 'conjunction, subordinating or preposition',
'JJ': 'adjective',
'JJR': 'adjective, comparative',
'JJS': 'adjective, superlative',
'LS': 'list item marker',
'MD': 'verb, modal auxiliary',
'NIL': 'missing tag',
'NN': 'noun, singular or mass',
'NNP': 'noun, proper singular',
'NNPS': 'noun, proper plural',
'NNS': 'noun, plural',
'PDT': 'predeterminer',
'POS': 'possessive ending',
'PRP': 'pronoun, personal',
'PRP$': 'pronoun, possessive',
'RB': 'adverb',
'RBR': 'adverb, comparative',
'RBS': 'adverb, superlative',
'RP': 'adverb, particle',
'TO': 'infinitival to',
'UH': 'interjection',
'VB': 'verb, base form',
'VBD': 'verb, past tense',
'VBG': 'verb, gerund or present participle',
'VBN': 'verb, past participle',
'VBP': 'verb, non-3rd person singular present',
'VBZ': 'verb, 3rd person singular present',
'WDT': 'wh-determiner',
'WP': 'wh-pronoun, personal',
'WP$': 'wh-pronoun, possessive',
'WRB': 'wh-adverb',
'SP': 'space',
'ADD': 'email',
'NFP': 'superfluous punctuation',
'GW': 'additional word in multi-word expression',
'XX': 'unknown',
'BES': 'auxiliary "be"',
'HVS': 'forms of "have"',
# POS Tags (German)
# TIGER Treebank
# http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/annotation/tiger_introduction.pdf
'$(': 'other sentence-internal punctuation mark',
'$,': 'comma',
'$.': 'sentence-final punctuation mark',
'ADJA': 'adjective, attributive',
'ADJD': 'adjective, adverbial or predicative',
'APPO': 'postposition',
    'APPR': 'preposition; circumposition left',
'APPRART': 'preposition with article',
'APZR': 'circumposition right',
'ART': 'definite or indefinite article',
'CARD': 'cardinal number',
'FM': 'foreign language material',
'ITJ': 'interjection',
'KOKOM': 'comparative conjunction',
'KON': 'coordinate conjunction',
'KOUI': 'subordinate conjunction with "zu" and infinitive',
'KOUS': 'subordinate conjunction with sentence',
'NE': 'proper noun',
'NNE': 'proper noun',
'PAV': 'pronominal adverb',
'PROAV': 'pronominal adverb',
'PDAT': 'attributive demonstrative pronoun',
'PDS': 'substituting demonstrative pronoun',
'PIAT': 'attributive indefinite pronoun without determiner',
'PIDAT': 'attributive indefinite pronoun with determiner',
'PIS': 'substituting indefinite pronoun',
'PPER': 'non-reflexive personal pronoun',
'PPOSAT': 'attributive possessive pronoun',
'PPOSS': 'substituting possessive pronoun',
'PRELAT': 'attributive relative pronoun',
'PRELS': 'substituting relative pronoun',
'PRF': 'reflexive personal pronoun',
'PTKA': 'particle with adjective or adverb',
'PTKANT': 'answer particle',
'PTKNEG': 'negative particle',
'PTKVZ': 'separable verbal particle',
'PTKZU': '"zu" before infinitive',
'PWAT': 'attributive interrogative pronoun',
'PWAV': 'adverbial interrogative or relative pronoun',
'PWS': 'substituting interrogative pronoun',
'TRUNC': 'word remnant',
'VAFIN': 'finite verb, auxiliary',
'VAIMP': 'imperative, auxiliary',
'VAINF': 'infinitive, auxiliary',
'VAPP': 'perfect participle, auxiliary',
'VMFIN': 'finite verb, modal',
'VMINF': 'infinitive, modal',
'VMPP': 'perfect participle, modal',
'VVFIN': 'finite verb, full',
'VVIMP': 'imperative, full',
'VVINF': 'infinitive, full',
'VVIZU': 'infinitive with "zu", full',
'VVPP': 'perfect participle, full',
'XY': 'non-word containing non-letter',
# Noun chunks
'NP': 'noun phrase',
'PP': 'prepositional phrase',
'VP': 'verb phrase',
'ADVP': 'adverb phrase',
'ADJP': 'adjective phrase',
'SBAR': 'subordinating conjunction',
'PRT': 'particle',
'PNP': 'prepositional noun phrase',
# Dependency Labels (English)
# ClearNLP / Universal Dependencies
# https://github.com/clir/clearnlp-guidelines/blob/master/md/specifications/dependency_labels.md
'acomp': 'adjectival complement',
'advcl': 'adverbial clause modifier',
'advmod': 'adverbial modifier',
'agent': 'agent',
'amod': 'adjectival modifier',
'appos': 'appositional modifier',
'attr': 'attribute',
'aux': 'auxiliary',
'auxpass': 'auxiliary (passive)',
'cc': 'coordinating conjunction',
'ccomp': 'clausal complement',
'complm': 'complementizer',
'conj': 'conjunct',
'cop': 'copula',
'csubj': 'clausal subject',
'csubjpass': 'clausal subject (passive)',
'dep': 'unclassified dependent',
'det': 'determiner',
'dobj': 'direct object',
'expl': 'expletive',
'hmod': 'modifier in hyphenation',
'hyph': 'hyphen',
'infmod': 'infinitival modifier',
'intj': 'interjection',
'iobj': 'indirect object',
'mark': 'marker',
'meta': 'meta modifier',
'neg': 'negation modifier',
'nmod': 'modifier of nominal',
'nn': 'noun compound modifier',
'npadvmod': 'noun phrase as adverbial modifier',
'nsubj': 'nominal subject',
'nsubjpass': 'nominal subject (passive)',
'num': 'number modifier',
'number': 'number compound modifier',
'oprd': 'object predicate',
'obj': 'object',
'obl': 'oblique nominal',
'parataxis': 'parataxis',
    'partmod': 'participial modifier',
'pcomp': 'complement of preposition',
'pobj': 'object of preposition',
'poss': 'possession modifier',
'possessive': 'possessive modifier',
'preconj': 'pre-correlative conjunction',
'prep': 'prepositional modifier',
'prt': 'particle',
'punct': 'punctuation',
'quantmod': 'modifier of quantifier',
'rcmod': 'relative clause modifier',
'root': 'root',
'xcomp': 'open clausal complement',
# Dependency labels (German)
# TIGER Treebank
# http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/annotation/tiger_introduction.pdf
# currently missing: 'cc' (comparative complement) because of conflict
# with English labels
'ac': 'adpositional case marker',
'adc': 'adjective component',
'ag': 'genitive attribute',
'ams': 'measure argument of adjective',
'app': 'apposition',
'avc': 'adverbial phrase component',
'cd': 'coordinating conjunction',
'cj': 'conjunct',
'cm': 'comparative conjunction',
'cp': 'complementizer',
'cvc': 'collocational verb construction',
'da': 'dative',
'dh': 'discourse-level head',
'dm': 'discourse marker',
'ep': 'expletive es',
'hd': 'head',
'ju': 'junctor',
'mnr': 'postnominal modifier',
'mo': 'modifier',
'ng': 'negation',
'nk': 'noun kernel element',
'nmc': 'numerical component',
    'oa': 'accusative object',
    'oa2': 'second accusative object',
'oc': 'clausal object',
'og': 'genitive object',
'op': 'prepositional object',
'par': 'parenthetical element',
'pd': 'predicate',
'pg': 'phrasal genitive',
'ph': 'placeholder',
'pm': 'morphological particle',
'pnc': 'proper noun component',
'rc': 'relative clause',
're': 'repeated element',
'rs': 'reported speech',
'sb': 'subject',
# Named Entity Recognition
# OntoNotes 5
# https://catalog.ldc.upenn.edu/docs/LDC2013T19/OntoNotes-Release-5.0.pdf
'PERSON': 'People, including fictional',
'NORP': 'Nationalities or religious or political groups',
'FACILITY': 'Buildings, airports, highways, bridges, etc.',
'ORG': 'Companies, agencies, institutions, etc.',
'GPE': 'Countries, cities, states',
'LOC': 'Non-GPE locations, mountain ranges, bodies of water',
'PRODUCT': 'Objects, vehicles, foods, etc. (not services)',
'EVENT': 'Named hurricanes, battles, wars, sports events, etc.',
'WORK_OF_ART': 'Titles of books, songs, etc.',
'LANGUAGE': 'Any named language',
'DATE': 'Absolute or relative dates or periods',
'TIME': 'Times smaller than a day',
'PERCENT': 'Percentage, including "%"',
'MONEY': 'Monetary values, including unit',
'QUANTITY': 'Measurements, as of weight or distance',
'ORDINAL': '"first", "second", etc.',
'CARDINAL': 'Numerals that do not fall under another type'
}
|
|
'''
Created on 05/nov/2013
@author: <[email protected]>
'''
import markdown
import Constants
from PyQt4 import QtCore
from PyQt4.QtCore import pyqtSlot, SIGNAL
from subprocess import call
class Controller():
    '''
    Controller that wires the view's Qt signals to the model: file
    handling, Markdown rendering, preferences and recent documents.
    '''
def __init__(self, view, model):
'''
Constructor
'''
self.VIEW = view
self.MODEL = model
self.CONTROLLER = self
self.VIEW.newAction.triggered.connect(self.new_file)
self.VIEW.openAction.triggered.connect(self.open_file)
self.VIEW.saveAction.triggered.connect(self.save_file)
self.VIEW.exportHTMLAction.triggered.connect(self.export_html)
self.VIEW.viewInBrowserAction.triggered.connect(self.preview_in_browser)
self.VIEW.showInFolderAction.triggered.connect(self.open_folder)
self.VIEW.preferencesAction.triggered.connect(self.show_preferences)
self.VIEW.syntaxAction.triggered.connect(self.open_references)
self.VIEW.browserButton.clicked.connect(self.select_browser)
uis = self.VIEW.add_tab( Constants.EMPTY_TAB_TITLE )
inputEdit = uis[0]
inputEdit.connect(inputEdit,SIGNAL("textChanged()"),self.renderInput)
#inputEdit.css = self.MODEL.get_css()
self.VIEW.tabs.connect(self.VIEW.tabs,SIGNAL("currentChanged(int)"),self.tabChangedSlot)
self.VIEW.tabs.connect(self.VIEW.tabs,SIGNAL("tabCloseRequested(int)"),self.tabCloseRequestedSlot)
self.VIEW.mapper.mapped['QString'].connect(self.open_file_path)
self.VIEW.themesMapper.mapped['QString'].connect(self.change_theme)
self.refresh_recent_documents()
self.load_themes()
@pyqtSlot()
def renderInput(self):
plainText = self.VIEW.active_input().toPlainText()
html = markdown.markdown( unicode(plainText) )
self.VIEW.saveAction.setDisabled(False)
preview = self.VIEW.active_preview()
y = preview.page().mainFrame().scrollPosition().y()
data = QtCore.QString("<style>")
data.append(QtCore.QString(self.MODEL.base_css))
data.append("</style>")
data.append(QtCore.QString(html))
preview.setContent( data.toUtf8(), "text/html; charset=utf-8" )
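        # Editorial note: the three calls below are successive attempts to
        # restore the scroll position that setContent() resets; the frame's
        # scrollPosition is read back again once they have run.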
preview.scroll(0, y)
preview.page().mainFrame().scroll(0, y)
preview.page().mainFrame().setScrollPosition(QtCore.QPoint(0, y))
y = preview.page().mainFrame().scrollPosition().y()
#preview.reload()
@pyqtSlot(int)
def tabChangedSlot(self,argTabIndex):
self.MODEL.set_active_tab(argTabIndex)
if self.MODEL.FILE_PATH == "":
self.VIEW.exportHTMLAction.setDisabled(True)
self.VIEW.viewInBrowserAction.setDisabled(True)
self.VIEW.showInFolderAction.setDisabled(True)
else:
self.VIEW.exportHTMLAction.setDisabled(False)
self.VIEW.viewInBrowserAction.setDisabled(False)
self.VIEW.showInFolderAction.setDisabled(False)
@pyqtSlot(int)
def tabCloseRequestedSlot(self,argTabIndex):
self.MODEL.remove_tab(argTabIndex)
self.VIEW.remove_tab(argTabIndex)
def show_preferences(self):
self.VIEW.show_preferences()
browser_name = self.MODEL.get_browser_name()
self.VIEW.browserLineEdit.setText(browser_name)
def open_folder(self):
self.VIEW.open_folder(self.MODEL.get_file_folder( self.MODEL.FILE_PATH ))
def select_browser(self):
self.VIEW.prefs.close()
browser_path = self.VIEW.select_browser()
if browser_path is not None and browser_path != "" and browser_path is not False:
self.MODEL.save_in_config("browser", str(browser_path))
browser_name = self.MODEL.get_browser_name()
self.VIEW.browserLineEdit.setText(browser_name)
self.VIEW.prefs.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.VIEW.prefs.show()
else:
self.VIEW.prefs.show()
def change_theme(self, theme_index):
self.MODEL.set_css(theme_index)
self.renderInput()
def load_themes(self):
for theme in self.MODEL.THEMES:
action = self.VIEW.add_theme_menu_item( theme["name"], theme["id"] )
action.triggered.connect(self.VIEW.themesMapper.map)
def refresh_recent_documents(self):
if len(self.MODEL.RECENT_DOCUMENTS) > 0:
self.VIEW.recentDocumentMenu.setDisabled(False)
self.VIEW.recentDocumentMenu.clear()
for item in self.MODEL.RECENT_DOCUMENTS:
action = self.VIEW.add_recent_document(str(item))
action.triggered.connect(self.VIEW.mapper.map)
def new_file(self):
self.MODEL.append_document("")
uis = self.VIEW.add_tab( Constants.EMPTY_TAB_TITLE )
inputEdit = uis[0]
inputEdit.connect(inputEdit,SIGNAL("textChanged()"),self.renderInput)
inputEdit.css = self.MODEL.get_css()
self.VIEW.change_active_tab( self.MODEL.ACTIVE_TAB )
def open_file_path(self, file_path):
file_content = self.MODEL.get_file_content_utf8(file_path)
if file_content is False:
self.VIEW.no_file_alert()
return False
doc_ix = self.MODEL.is_document_present(file_path)
if doc_ix != -1:
self.MODEL.ACTIVE_TAB = doc_ix
self.MODEL.add_recent_document(file_path)
self.VIEW.change_active_tab( self.MODEL.ACTIVE_TAB )
else:
self.MODEL.append_document(file_path)
self.MODEL.add_recent_document(file_path)
uis = self.VIEW.add_tab( self.MODEL.get_file_name(file_path) )
inputEdit = uis[0]
inputEdit.connect(inputEdit,SIGNAL("textChanged()"),self.renderInput)
inputEdit.css = self.MODEL.get_css()
self.VIEW.change_active_tab( self.MODEL.ACTIVE_TAB )
self.VIEW.set_document(unicode(file_content, 'utf-8'))
self.VIEW.saveAction.setDisabled(False)
self.VIEW.exportHTMLAction.setDisabled(False)
self.VIEW.viewInBrowserAction.setDisabled(False)
self.VIEW.showInFolderAction.setDisabled(False)
self.refresh_recent_documents()
return file_path
def open_file(self):
file_path = self.VIEW.select_file()
if file_path != False:
self.open_file_path(file_path)
def save_file(self):
current_document = self.VIEW.get_current_document_content()
if self.MODEL.FILE_PATH == '':
file_path = self.VIEW.save_file_picker()
if file_path != False:
self.MODEL.FILE_PATH = file_path
self.MODEL.save_document_path(file_path)
self.MODEL.write_file_content_utf8( self.MODEL.FILE_PATH, current_document )
self.MODEL.add_recent_document(file_path)
self.VIEW.update_status('Document saved to ' + self.MODEL.FILE_PATH)
self.VIEW.exportHTMLAction.setDisabled(False)
self.VIEW.viewInBrowserAction.setDisabled(False)
self.VIEW.showInFolderAction.setDisabled(False)
self.VIEW.tabs.setTabText( self.MODEL.ACTIVE_TAB, self.MODEL.get_file_name( file_path ) )
self.refresh_recent_documents()
else:
self.MODEL.write_file_content_utf8( self.MODEL.FILE_PATH, current_document )
self.VIEW.update_status('Document saved to ' + self.MODEL.FILE_PATH)
def export_html(self):
export_path = self.MODEL.FILE_PATH.replace(".md", ".html")
current_document = self.VIEW.get_current_document_content()
html_document = "<!doctype html><html><body>"
html_document += "<style type=\"text/css\">" + self.MODEL.base_css + "</style>"
html_document += markdown.markdown( current_document )
html_document += "</body></html>"
result = self.MODEL.write_file_content(export_path, html_document)
if result == True:
self.VIEW.update_status('File exported to ' + export_path)
return export_path
else:
self.VIEW.update_status('An error occurred...')
return None
def preview_in_browser(self):
browser_path = self.MODEL.get_browser_path()
if browser_path == "":
self.select_browser()
else:
path = self.export_html()
path = str(path).replace(":/", ":\\\\").replace("/", "\\")
call([str(browser_path), path])
def open_references(self):
self.open_file_path( Constants.HELP_FILE )
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import ast
import json
import copy
import threading
from collections import defaultdict
from typing import Dict, Optional, List, Tuple, Set, Iterable, NamedTuple, Sequence, TYPE_CHECKING, Union
import binascii
from . import util, bitcoin
from .util import profiler, WalletFileException, multisig_type, TxMinedInfo, bfh
from .invoices import Invoice
from .keystore import bip44_derivation
from .transaction import Transaction, TxOutpoint, tx_from_any, PartialTransaction, PartialTxOutput
from .logging import Logger
from .lnutil import LOCAL, REMOTE, FeeUpdate, UpdateAddHtlc, LocalConfig, RemoteConfig, ChannelType
from .lnutil import ImportedChannelBackupStorage, OnchainChannelBackupStorage
from .lnutil import ChannelConstraints, Outpoint, ShachainElement
from .json_db import StoredDict, JsonDB, locked, modifier
from .plugin import run_hook, plugin_loaders
from .paymentrequest import PaymentRequest
from .submarine_swaps import SwapData
if TYPE_CHECKING:
from .storage import WalletStorage
# seed_version is now used for the version of the wallet file
OLD_SEED_VERSION = 4 # electrum versions < 2.0
NEW_SEED_VERSION = 11 # electrum versions >= 2.0
FINAL_SEED_VERSION = 44 # electrum >= 2.7 will set this to prevent
# old versions from overwriting new format
class TxFeesValue(NamedTuple):
fee: Optional[int] = None
is_calculated_by_us: bool = False
num_inputs: Optional[int] = None
class WalletDB(JsonDB):
def __init__(self, raw, *, manual_upgrades: bool):
JsonDB.__init__(self, {})
self._manual_upgrades = manual_upgrades
self._called_after_upgrade_tasks = False
if raw: # loading existing db
self.load_data(raw)
self.load_plugins()
else: # creating new db
self.put('seed_version', FINAL_SEED_VERSION)
self._after_upgrade_tasks()
def load_data(self, s):
try:
self.data = json.loads(s)
        except Exception:
try:
d = ast.literal_eval(s)
labels = d.get('labels', {})
except Exception as e:
raise WalletFileException("Cannot read wallet file. (parsing failed)")
self.data = {}
for key, value in d.items():
try:
json.dumps(key)
json.dumps(value)
                except Exception:
self.logger.info(f'Failed to convert label to json format: {key}')
continue
self.data[key] = value
if not isinstance(self.data, dict):
raise WalletFileException("Malformed wallet file (not dict)")
if not self._manual_upgrades and self.requires_split():
raise WalletFileException("This wallet has multiple accounts and must be split")
if not self.requires_upgrade():
self._after_upgrade_tasks()
elif not self._manual_upgrades:
self.upgrade()
def requires_split(self):
d = self.get('accounts', {})
return len(d) > 1
def get_split_accounts(self):
result = []
# backward compatibility with old wallets
d = self.get('accounts', {})
if len(d) < 2:
return
wallet_type = self.get('wallet_type')
if wallet_type == 'old':
assert len(d) == 2
data1 = copy.deepcopy(self.data)
data1['accounts'] = {'0': d['0']}
data1['suffix'] = 'deterministic'
data2 = copy.deepcopy(self.data)
data2['accounts'] = {'/x': d['/x']}
data2['seed'] = None
data2['seed_version'] = None
data2['master_public_key'] = None
data2['wallet_type'] = 'imported'
data2['suffix'] = 'imported'
result = [data1, data2]
elif wallet_type in ['bip44', 'trezor', 'keepkey', 'ledger', 'btchip', 'digitalbitbox', 'safe_t']:
mpk = self.get('master_public_keys')
for k in d.keys():
i = int(k)
x = d[k]
if x.get("pending"):
continue
xpub = mpk["x/%d'"%i]
new_data = copy.deepcopy(self.data)
# save account, derivation and xpub at index 0
new_data['accounts'] = {'0': x}
new_data['master_public_keys'] = {"x/0'": xpub}
new_data['derivation'] = bip44_derivation(k)
new_data['suffix'] = k
result.append(new_data)
else:
raise WalletFileException("This wallet has multiple accounts and must be split")
return result
def requires_upgrade(self):
return self.get_seed_version() < FINAL_SEED_VERSION
@profiler
def upgrade(self):
self.logger.info('upgrading wallet format')
if self._called_after_upgrade_tasks:
# we need strict ordering between upgrade() and after_upgrade_tasks()
raise Exception("'after_upgrade_tasks' must NOT be called before 'upgrade'")
self._convert_imported()
self._convert_wallet_type()
self._convert_account()
self._convert_version_13_b()
self._convert_version_14()
self._convert_version_15()
self._convert_version_16()
self._convert_version_17()
self._convert_version_18()
self._convert_version_19()
self._convert_version_20()
self._convert_version_21()
self._convert_version_22()
self._convert_version_23()
self._convert_version_24()
self._convert_version_25()
self._convert_version_26()
self._convert_version_27()
self._convert_version_28()
self._convert_version_29()
self._convert_version_30()
self._convert_version_31()
self._convert_version_32()
self._convert_version_33()
self._convert_version_34()
self._convert_version_35()
self._convert_version_36()
self._convert_version_37()
self._convert_version_38()
self._convert_version_39()
self._convert_version_40()
self._convert_version_41()
self._convert_version_42()
self._convert_version_43()
self._convert_version_44()
self.put('seed_version', FINAL_SEED_VERSION) # just to be sure
self._after_upgrade_tasks()
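    # Editorial note: each _convert_version_N defined below guards itself
    # with self._is_upgrade_method_needed(N - 1, N - 1) and returns early
    # when the stored seed_version is outside that range, so upgrade() is
    # safe to call on an already-upgraded file and every converter runs at
    # most once, bumping 'seed_version' as it finishes.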
def _after_upgrade_tasks(self):
self._called_after_upgrade_tasks = True
self._load_transactions()
def _convert_wallet_type(self):
if not self._is_upgrade_method_needed(0, 13):
return
wallet_type = self.get('wallet_type')
        if wallet_type == 'btchip':
            wallet_type = 'ledger'
if self.get('keystore') or self.get('x1/') or wallet_type=='imported':
return False
assert not self.requires_split()
seed_version = self.get_seed_version()
seed = self.get('seed')
xpubs = self.get('master_public_keys')
xprvs = self.get('master_private_keys', {})
mpk = self.get('master_public_key')
keypairs = self.get('keypairs')
key_type = self.get('key_type')
if seed_version == OLD_SEED_VERSION or wallet_type == 'old':
d = {
'type': 'old',
'seed': seed,
'mpk': mpk,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif key_type == 'imported':
d = {
'type': 'imported',
'keypairs': keypairs,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['xpub', 'standard']:
xpub = xpubs["x/"]
xprv = xprvs.get("x/")
d = {
'type': 'bip32',
'xpub': xpub,
'xprv': xprv,
'seed': seed,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['bip44']:
xpub = xpubs["x/0'"]
xprv = xprvs.get("x/0'")
d = {
'type': 'bip32',
'xpub': xpub,
'xprv': xprv,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['trezor', 'keepkey', 'ledger', 'digitalbitbox', 'safe_t']:
xpub = xpubs["x/0'"]
derivation = self.get('derivation', bip44_derivation(0))
d = {
'type': 'hardware',
'hw_type': wallet_type,
'xpub': xpub,
'derivation': derivation,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif (wallet_type == '2fa') or multisig_type(wallet_type):
for key in xpubs.keys():
d = {
'type': 'bip32',
'xpub': xpubs[key],
'xprv': xprvs.get(key),
}
if key == 'x1/' and seed:
d['seed'] = seed
self.put(key, d)
else:
raise WalletFileException('Unable to tell wallet type. Is this even a wallet file?')
# remove junk
self.put('master_public_key', None)
self.put('master_public_keys', None)
self.put('master_private_keys', None)
self.put('derivation', None)
self.put('seed', None)
self.put('keypairs', None)
self.put('key_type', None)
def _convert_version_13_b(self):
# version 13 is ambiguous, and has an earlier and a later structure
if not self._is_upgrade_method_needed(0, 13):
return
if self.get('wallet_type') == 'standard':
if self.get('keystore').get('type') == 'imported':
pubkeys = self.get('keystore').get('keypairs').keys()
d = {'change': []}
receiving_addresses = []
for pubkey in pubkeys:
addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
receiving_addresses.append(addr)
d['receiving'] = receiving_addresses
self.put('addresses', d)
self.put('pubkeys', None)
self.put('seed_version', 13)
def _convert_version_14(self):
# convert imported wallets for 3.0
if not self._is_upgrade_method_needed(13, 13):
return
if self.get('wallet_type') =='imported':
addresses = self.get('addresses')
if type(addresses) is list:
addresses = dict([(x, None) for x in addresses])
self.put('addresses', addresses)
elif self.get('wallet_type') == 'standard':
if self.get('keystore').get('type')=='imported':
addresses = set(self.get('addresses').get('receiving'))
pubkeys = self.get('keystore').get('keypairs').keys()
assert len(addresses) == len(pubkeys)
d = {}
for pubkey in pubkeys:
addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
assert addr in addresses
d[addr] = {
'pubkey': pubkey,
'redeem_script': None,
'type': 'p2pkh'
}
self.put('addresses', d)
self.put('pubkeys', None)
self.put('wallet_type', 'imported')
self.put('seed_version', 14)
def _convert_version_15(self):
if not self._is_upgrade_method_needed(14, 14):
return
if self.get('seed_type') == 'segwit':
# should not get here; get_seed_version should have caught this
raise Exception('unsupported derivation (development segwit, v14)')
self.put('seed_version', 15)
def _convert_version_16(self):
# fixes issue #3193 for Imported_Wallets with addresses
# also, previous versions allowed importing any garbage as an address
# which we now try to remove, see pr #3191
if not self._is_upgrade_method_needed(15, 15):
return
def remove_address(addr):
def remove_from_dict(dict_name):
d = self.get(dict_name, None)
if d is not None:
d.pop(addr, None)
self.put(dict_name, d)
def remove_from_list(list_name):
lst = self.get(list_name, None)
if lst is not None:
s = set(lst)
s -= {addr}
self.put(list_name, list(s))
# note: we don't remove 'addr' from self.get('addresses')
remove_from_dict('addr_history')
remove_from_dict('labels')
remove_from_dict('payment_requests')
remove_from_list('frozen_addresses')
if self.get('wallet_type') == 'imported':
addresses = self.get('addresses')
assert isinstance(addresses, dict)
addresses_new = dict()
for address, details in addresses.items():
if not bitcoin.is_address(address):
remove_address(address)
continue
if details is None:
addresses_new[address] = {}
else:
addresses_new[address] = details
self.put('addresses', addresses_new)
self.put('seed_version', 16)
def _convert_version_17(self):
# delete pruned_txo; construct spent_outpoints
if not self._is_upgrade_method_needed(16, 16):
return
self.put('pruned_txo', None)
transactions = self.get('transactions', {}) # txid -> raw_tx
spent_outpoints = defaultdict(dict)
for txid, raw_tx in transactions.items():
tx = Transaction(raw_tx)
for txin in tx.inputs():
if txin.is_coinbase_input():
continue
prevout_hash = txin.prevout.txid.hex()
prevout_n = txin.prevout.out_idx
spent_outpoints[prevout_hash][str(prevout_n)] = txid
self.put('spent_outpoints', spent_outpoints)
self.put('seed_version', 17)
def _convert_version_18(self):
# delete verified_tx3 as its structure changed
if not self._is_upgrade_method_needed(17, 17):
return
self.put('verified_tx3', None)
self.put('seed_version', 18)
def _convert_version_19(self):
# delete tx_fees as its structure changed
if not self._is_upgrade_method_needed(18, 18):
return
self.put('tx_fees', None)
self.put('seed_version', 19)
def _convert_version_20(self):
# store 'derivation' (prefix) and 'root_fingerprint' in all xpub-based keystores.
# store explicit None values if we cannot retroactively determine them
if not self._is_upgrade_method_needed(19, 19):
return
from .bip32 import BIP32Node, convert_bip32_intpath_to_strpath
# note: This upgrade method reimplements bip32.root_fp_and_der_prefix_from_xkey.
# This is done deliberately, to avoid introducing that method as a dependency to this upgrade.
for ks_name in ('keystore', *['x{}/'.format(i) for i in range(1, 16)]):
ks = self.get(ks_name, None)
if ks is None: continue
xpub = ks.get('xpub', None)
if xpub is None: continue
bip32node = BIP32Node.from_xkey(xpub)
# derivation prefix
derivation_prefix = ks.get('derivation', None)
if derivation_prefix is None:
assert bip32node.depth >= 0, bip32node.depth
if bip32node.depth == 0:
derivation_prefix = 'm'
elif bip32node.depth == 1:
child_number_int = int.from_bytes(bip32node.child_number, 'big')
derivation_prefix = convert_bip32_intpath_to_strpath([child_number_int])
ks['derivation'] = derivation_prefix
# root fingerprint
root_fingerprint = ks.get('ckcc_xfp', None)
if root_fingerprint is not None:
root_fingerprint = root_fingerprint.to_bytes(4, byteorder="little", signed=False).hex().lower()
if root_fingerprint is None:
if bip32node.depth == 0:
root_fingerprint = bip32node.calc_fingerprint_of_this_node().hex().lower()
elif bip32node.depth == 1:
root_fingerprint = bip32node.fingerprint.hex()
ks['root_fingerprint'] = root_fingerprint
ks.pop('ckcc_xfp', None)
self.put(ks_name, ks)
self.put('seed_version', 20)
def _convert_version_21(self):
if not self._is_upgrade_method_needed(20, 20):
return
channels = self.get('channels')
if channels:
for channel in channels:
channel['state'] = 'OPENING'
self.put('channels', channels)
self.put('seed_version', 21)
def _convert_version_22(self):
# construct prevouts_by_scripthash
if not self._is_upgrade_method_needed(21, 21):
return
from .bitcoin import script_to_scripthash
transactions = self.get('transactions', {}) # txid -> raw_tx
prevouts_by_scripthash = defaultdict(list)
for txid, raw_tx in transactions.items():
tx = Transaction(raw_tx)
for idx, txout in enumerate(tx.outputs()):
outpoint = f"{txid}:{idx}"
scripthash = script_to_scripthash(txout.scriptpubkey.hex())
prevouts_by_scripthash[scripthash].append((outpoint, txout.value))
self.put('prevouts_by_scripthash', prevouts_by_scripthash)
self.put('seed_version', 22)
def _convert_version_23(self):
if not self._is_upgrade_method_needed(22, 22):
return
channels = self.get('channels', [])
LOCAL = 1
REMOTE = -1
for c in channels:
# move revocation store from remote_config
r = c['remote_config'].pop('revocation_store')
c['revocation_store'] = r
# convert fee updates
log = c.get('log', {})
for sub in LOCAL, REMOTE:
l = log[str(sub)]['fee_updates']
d = {}
for i, fu in enumerate(l):
d[str(i)] = {
'rate':fu['rate'],
'ctn_local':fu['ctns'][str(LOCAL)],
'ctn_remote':fu['ctns'][str(REMOTE)]
}
log[str(int(sub))]['fee_updates'] = d
self.data['channels'] = channels
self.data['seed_version'] = 23
def _convert_version_24(self):
if not self._is_upgrade_method_needed(23, 23):
return
channels = self.get('channels', [])
for c in channels:
# convert revocation store to dict
r = c['revocation_store']
d = {}
for i in range(49):
v = r['buckets'][i]
if v is not None:
d[str(i)] = v
r['buckets'] = d
c['revocation_store'] = r
# convert channels to dict
self.data['channels'] = {x['channel_id']: x for x in channels}
# convert txi & txo
txi = self.get('txi', {})
for tx_hash, d in list(txi.items()):
d2 = {}
for addr, l in d.items():
d2[addr] = {}
for ser, v in l:
d2[addr][ser] = v
txi[tx_hash] = d2
self.data['txi'] = txi
txo = self.get('txo', {})
for tx_hash, d in list(txo.items()):
d2 = {}
for addr, l in d.items():
d2[addr] = {}
for n, v, cb in l:
d2[addr][str(n)] = (v, cb)
txo[tx_hash] = d2
self.data['txo'] = txo
self.data['seed_version'] = 24
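    # Editorial note on the version-24 reshaping above: per-address txi
    # lists of (ser, v) pairs become {ser: v} dicts, e.g.
    #     {'addr': [('txid:0', 100)]}  ->  {'addr': {'txid:0': 100}},
    # txo triples (n, v, cb) become {str(n): (v, cb)} keyed by output
    # index, and channels move from a list to a dict keyed by channel_id.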
def _convert_version_25(self):
if not self._is_upgrade_method_needed(24, 24):
return
# add 'type' field to onchain requests
PR_TYPE_ONCHAIN = 0
requests = self.data.get('payment_requests', {})
for k, r in list(requests.items()):
if r.get('address') == k:
requests[k] = {
'address': r['address'],
'amount': r.get('amount'),
'exp': r.get('exp'),
'id': r.get('id'),
'memo': r.get('memo'),
'time': r.get('time'),
'type': PR_TYPE_ONCHAIN,
}
# convert bip70 invoices
invoices = self.data.get('invoices', {})
for k, r in list(invoices.items()):
data = r.get("hex")
if data:
pr = PaymentRequest(bytes.fromhex(data))
if pr.id != k:
continue
invoices[k] = {
'type': PR_TYPE_ONCHAIN,
'amount': pr.get_amount(),
'bip70': data,
'exp': pr.get_expiration_date() - pr.get_time(),
'id': pr.id,
'message': pr.get_memo(),
'outputs': [x.to_legacy_tuple() for x in pr.get_outputs()],
'time': pr.get_time(),
'requestor': pr.get_requestor(),
}
self.data['seed_version'] = 25
def _convert_version_26(self):
if not self._is_upgrade_method_needed(25, 25):
return
channels = self.data.get('channels', {})
channel_timestamps = self.data.pop('lightning_channel_timestamps', {})
for channel_id, c in channels.items():
item = channel_timestamps.get(channel_id)
if item:
funding_txid, funding_height, funding_timestamp, closing_txid, closing_height, closing_timestamp = item
if funding_txid:
c['funding_height'] = funding_txid, funding_height, funding_timestamp
if closing_txid:
c['closing_height'] = closing_txid, closing_height, closing_timestamp
self.data['seed_version'] = 26
def _convert_version_27(self):
if not self._is_upgrade_method_needed(26, 26):
return
channels = self.data.get('channels', {})
for channel_id, c in channels.items():
c['local_config']['htlc_minimum_msat'] = 1
self.data['seed_version'] = 27
def _convert_version_28(self):
if not self._is_upgrade_method_needed(27, 27):
return
channels = self.data.get('channels', {})
for channel_id, c in channels.items():
c['local_config']['channel_seed'] = None
self.data['seed_version'] = 28
def _convert_version_29(self):
if not self._is_upgrade_method_needed(28, 28):
return
PR_TYPE_ONCHAIN = 0
requests = self.data.get('payment_requests', {})
invoices = self.data.get('invoices', {})
for d in [invoices, requests]:
for key, r in list(d.items()):
_type = r.get('type', 0)
item = {
'type': _type,
'message': r.get('message') or r.get('memo', ''),
'amount': r.get('amount'),
'exp': r.get('exp') or 0,
'time': r.get('time', 0),
}
if _type == PR_TYPE_ONCHAIN:
address = r.pop('address', None)
if address:
outputs = [(0, address, r.get('amount'))]
else:
outputs = r.get('outputs')
item.update({
'outputs': outputs,
'id': r.get('id'),
'bip70': r.get('bip70'),
'requestor': r.get('requestor'),
})
else:
item.update({
'rhash': r['rhash'],
'invoice': r['invoice'],
})
d[key] = item
self.data['seed_version'] = 29
def _convert_version_30(self):
if not self._is_upgrade_method_needed(29, 29):
return
PR_TYPE_ONCHAIN = 0
PR_TYPE_LN = 2
requests = self.data.get('payment_requests', {})
invoices = self.data.get('invoices', {})
for d in [invoices, requests]:
for key, item in list(d.items()):
_type = item['type']
if _type == PR_TYPE_ONCHAIN:
item['amount_sat'] = item.pop('amount')
elif _type == PR_TYPE_LN:
amount_sat = item.pop('amount')
item['amount_msat'] = 1000 * amount_sat if amount_sat is not None else None
item.pop('exp')
item.pop('message')
item.pop('rhash')
item.pop('time')
else:
raise Exception(f"unknown invoice type: {_type}")
self.data['seed_version'] = 30
def _convert_version_31(self):
if not self._is_upgrade_method_needed(30, 30):
return
PR_TYPE_ONCHAIN = 0
requests = self.data.get('payment_requests', {})
invoices = self.data.get('invoices', {})
for d in [invoices, requests]:
for key, item in list(d.items()):
if item['type'] == PR_TYPE_ONCHAIN:
item['amount_sat'] = item['amount_sat'] or 0
item['exp'] = item['exp'] or 0
item['time'] = item['time'] or 0
self.data['seed_version'] = 31
def _convert_version_32(self):
if not self._is_upgrade_method_needed(31, 31):
return
PR_TYPE_ONCHAIN = 0
invoices_old = self.data.get('invoices', {})
invoices_new = {k: item for k, item in invoices_old.items()
if not (item['type'] == PR_TYPE_ONCHAIN and item['outputs'] is None)}
self.data['invoices'] = invoices_new
self.data['seed_version'] = 32
def _convert_version_33(self):
if not self._is_upgrade_method_needed(32, 32):
return
PR_TYPE_ONCHAIN = 0
requests = self.data.get('payment_requests', {})
invoices = self.data.get('invoices', {})
for d in [invoices, requests]:
for key, item in list(d.items()):
if item['type'] == PR_TYPE_ONCHAIN:
item['height'] = item.get('height') or 0
self.data['seed_version'] = 33
def _convert_version_34(self):
if not self._is_upgrade_method_needed(33, 33):
return
channels = self.data.get('channels', {})
for key, item in channels.items():
item['local_config']['upfront_shutdown_script'] = \
item['local_config'].get('upfront_shutdown_script') or ""
item['remote_config']['upfront_shutdown_script'] = \
item['remote_config'].get('upfront_shutdown_script') or ""
self.data['seed_version'] = 34
def _convert_version_35(self):
# same as 32, but for payment_requests
if not self._is_upgrade_method_needed(34, 34):
return
PR_TYPE_ONCHAIN = 0
requests_old = self.data.get('payment_requests', {})
requests_new = {k: item for k, item in requests_old.items()
if not (item['type'] == PR_TYPE_ONCHAIN and item['outputs'] is None)}
self.data['payment_requests'] = requests_new
self.data['seed_version'] = 35
def _convert_version_36(self):
if not self._is_upgrade_method_needed(35, 35):
return
old_frozen_coins = self.data.get('frozen_coins', [])
new_frozen_coins = {coin: True for coin in old_frozen_coins}
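        # the flat list of frozen coins becomes a dict mapping each coin to True,
        # e.g. [c1, c2] -> {c1: True, c2: True}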
self.data['frozen_coins'] = new_frozen_coins
self.data['seed_version'] = 36
def _convert_version_37(self):
if not self._is_upgrade_method_needed(36, 36):
return
payments = self.data.get('lightning_payments', {})
for k, v in list(payments.items()):
amount_sat, direction, status = v
amount_msat = amount_sat * 1000 if amount_sat is not None else None
payments[k] = amount_msat, direction, status
self.data['lightning_payments'] = payments
self.data['seed_version'] = 37
def _convert_version_38(self):
if not self._is_upgrade_method_needed(37, 37):
return
PR_TYPE_ONCHAIN = 0
PR_TYPE_LN = 2
from .bitcoin import TOTAL_COIN_SUPPLY_LIMIT_IN_BTC, COIN
max_sats = TOTAL_COIN_SUPPLY_LIMIT_IN_BTC * COIN
requests = self.data.get('payment_requests', {})
invoices = self.data.get('invoices', {})
for d in [invoices, requests]:
for key, item in list(d.items()):
if item['type'] == PR_TYPE_ONCHAIN:
amount_sat = item['amount_sat']
if amount_sat == '!':
continue
if not (isinstance(amount_sat, int) and 0 <= amount_sat <= max_sats):
del d[key]
elif item['type'] == PR_TYPE_LN:
amount_msat = item['amount_msat']
if not amount_msat:
continue
if not (isinstance(amount_msat, int) and 0 <= amount_msat <= max_sats * 1000):
del d[key]
self.data['seed_version'] = 38
def _convert_version_39(self):
# this upgrade prevents initialization of lightning_privkey2 after lightning_xprv has been set
if not self._is_upgrade_method_needed(38, 38):
return
self.data['imported_channel_backups'] = self.data.pop('channel_backups', {})
self.data['seed_version'] = 39
def _convert_version_40(self):
# put 'seed_type' into keystores
if not self._is_upgrade_method_needed(39, 39):
return
for ks_name in ('keystore', *['x{}/'.format(i) for i in range(1, 16)]):
ks = self.data.get(ks_name, None)
if ks is None: continue
seed = ks.get('seed')
if not seed: continue
seed_type = None
xpub = ks.get('xpub') or None
if xpub:
assert isinstance(xpub, str)
if xpub[0:4] in ('xpub', 'tpub'):
seed_type = 'standard'
elif xpub[0:4] in ('zpub', 'Zpub', 'vpub', 'Vpub'):
seed_type = 'segwit'
elif ks.get('type') == 'old':
seed_type = 'old'
if seed_type is not None:
ks['seed_type'] = seed_type
self.data['seed_version'] = 40
def _convert_version_41(self):
# this is a repeat of upgrade 39, to fix wallet backup files (see #7339)
if not self._is_upgrade_method_needed(40, 40):
return
imported_channel_backups = self.data.pop('channel_backups', {})
imported_channel_backups.update(self.data.get('imported_channel_backups', {}))
self.data['imported_channel_backups'] = imported_channel_backups
self.data['seed_version'] = 41
def _convert_version_42(self):
# in OnchainInvoice['outputs'], convert values from None to 0
if not self._is_upgrade_method_needed(41, 41):
return
PR_TYPE_ONCHAIN = 0
requests = self.data.get('payment_requests', {})
invoices = self.data.get('invoices', {})
for d in [invoices, requests]:
for key, item in list(d.items()):
if item['type'] == PR_TYPE_ONCHAIN:
item['outputs'] = [(_type, addr, (val or 0))
for _type, addr, val in item['outputs']]
self.data['seed_version'] = 42
def _convert_version_43(self):
if not self._is_upgrade_method_needed(42, 42):
return
channels = self.data.pop('channels', {})
for k, c in channels.items():
log = c['log']
c['fail_htlc_reasons'] = log.pop('fail_htlc_reasons', {})
c['unfulfilled_htlcs'] = log.pop('unfulfilled_htlcs', {})
log["1"]['unacked_updates'] = log.pop('unacked_local_updates2', {})
self.data['channels'] = channels
self.data['seed_version'] = 43
def _convert_version_44(self):
if not self._is_upgrade_method_needed(43, 43):
return
channels = self.data.get('channels', {})
for key, item in channels.items():
if bool(item.get('static_remotekey_enabled')):
channel_type = ChannelType.OPTION_STATIC_REMOTEKEY
else:
channel_type = ChannelType(0)
item.pop('static_remotekey_enabled', None)
item['channel_type'] = channel_type
self.data['seed_version'] = 44
def _convert_imported(self):
if not self._is_upgrade_method_needed(0, 13):
return
# '/x' is the internal ID for imported accounts
d = self.get('accounts', {}).get('/x', {}).get('imported',{})
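        # d maps address -> (pubkey, privkey); entries without a privkey are treated
        # as plain addresses below (inferred from the loop that follows)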
if not d:
return False
addresses = []
keypairs = {}
for addr, v in d.items():
pubkey, privkey = v
if privkey:
keypairs[pubkey] = privkey
else:
addresses.append(addr)
if addresses and keypairs:
raise WalletFileException('mixed addresses and privkeys')
elif addresses:
self.put('addresses', addresses)
self.put('accounts', None)
elif keypairs:
self.put('wallet_type', 'standard')
self.put('key_type', 'imported')
self.put('keypairs', keypairs)
self.put('accounts', None)
else:
raise WalletFileException('no addresses or privkeys')
def _convert_account(self):
if not self._is_upgrade_method_needed(0, 13):
return
self.put('accounts', None)
def _is_upgrade_method_needed(self, min_version, max_version):
assert min_version <= max_version
cur_version = self.get_seed_version()
if cur_version > max_version:
return False
elif cur_version < min_version:
raise WalletFileException(
'storage upgrade: unexpected version {} (should be {}-{})'
.format(cur_version, min_version, max_version))
else:
return True
@locked
def get_seed_version(self):
seed_version = self.get('seed_version')
if not seed_version:
seed_version = OLD_SEED_VERSION if len(self.get('master_public_key','')) == 128 else NEW_SEED_VERSION
if seed_version > FINAL_SEED_VERSION:
raise WalletFileException('This version of Electrum is too old to open this wallet.\n'
'(highest supported storage version: {}, version of this file: {})'
.format(FINAL_SEED_VERSION, seed_version))
if seed_version==14 and self.get('seed_type') == 'segwit':
self._raise_unsupported_version(seed_version)
if seed_version >=12:
return seed_version
if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
self._raise_unsupported_version(seed_version)
return seed_version
def _raise_unsupported_version(self, seed_version):
msg = f"Your wallet has an unsupported seed version: {seed_version}."
if seed_version in [5, 7, 8, 9, 10, 14]:
msg += "\n\nTo open this wallet, try 'git checkout seed_v%d'"%seed_version
if seed_version == 6:
# version 1.9.8 created v6 wallets when an incorrect seed was entered in the restore dialog
msg += '\n\nThis file was created because of a bug in version 1.9.8.'
if self.get('master_public_keys') is None and self.get('master_private_keys') is None and self.get('imported_keys') is None:
# pbkdf2 (at that time an additional dependency) was not included with the binaries, and wallet creation aborted.
msg += "\nIt does not contain any keys, and can safely be removed."
else:
# creation was complete if electrum was run from source
msg += "\nPlease open this file with Electrum 1.9.8, and move your coins to a new wallet."
raise WalletFileException(msg)
@locked
def get_txi_addresses(self, tx_hash: str) -> List[str]:
"""Returns list of is_mine addresses that appear as inputs in tx."""
assert isinstance(tx_hash, str)
return list(self.txi.get(tx_hash, {}).keys())
@locked
def get_txo_addresses(self, tx_hash: str) -> List[str]:
"""Returns list of is_mine addresses that appear as outputs in tx."""
assert isinstance(tx_hash, str)
return list(self.txo.get(tx_hash, {}).keys())
@locked
def get_txi_addr(self, tx_hash: str, address: str) -> Iterable[Tuple[str, int]]:
"""Returns an iterable of (prev_outpoint, value)."""
assert isinstance(tx_hash, str)
assert isinstance(address, str)
d = self.txi.get(tx_hash, {}).get(address, {})
return list(d.items())
@locked
def get_txo_addr(self, tx_hash: str, address: str) -> Dict[int, Tuple[int, bool]]:
"""Returns a dict: output_index -> (value, is_coinbase)."""
assert isinstance(tx_hash, str)
assert isinstance(address, str)
d = self.txo.get(tx_hash, {}).get(address, {})
return {int(n): (v, cb) for (n, (v, cb)) in d.items()}
@modifier
def add_txi_addr(self, tx_hash: str, addr: str, ser: str, v: int) -> None:
assert isinstance(tx_hash, str)
assert isinstance(addr, str)
assert isinstance(ser, str)
assert isinstance(v, int)
if tx_hash not in self.txi:
self.txi[tx_hash] = {}
d = self.txi[tx_hash]
if addr not in d:
d[addr] = {}
d[addr][ser] = v
@modifier
def add_txo_addr(self, tx_hash: str, addr: str, n: Union[int, str], v: int, is_coinbase: bool) -> None:
n = str(n)
assert isinstance(tx_hash, str)
assert isinstance(addr, str)
assert isinstance(n, str)
assert isinstance(v, int)
assert isinstance(is_coinbase, bool)
if tx_hash not in self.txo:
self.txo[tx_hash] = {}
d = self.txo[tx_hash]
if addr not in d:
d[addr] = {}
d[addr][n] = (v, is_coinbase)
@locked
def list_txi(self) -> Sequence[str]:
return list(self.txi.keys())
@locked
def list_txo(self) -> Sequence[str]:
return list(self.txo.keys())
@modifier
def remove_txi(self, tx_hash: str) -> None:
assert isinstance(tx_hash, str)
self.txi.pop(tx_hash, None)
@modifier
def remove_txo(self, tx_hash: str) -> None:
assert isinstance(tx_hash, str)
self.txo.pop(tx_hash, None)
@locked
def list_spent_outpoints(self) -> Sequence[Tuple[str, str]]:
return [(h, n)
for h in self.spent_outpoints.keys()
for n in self.get_spent_outpoints(h)
]
@locked
def get_spent_outpoints(self, prevout_hash: str) -> Sequence[str]:
assert isinstance(prevout_hash, str)
return list(self.spent_outpoints.get(prevout_hash, {}).keys())
@locked
def get_spent_outpoint(self, prevout_hash: str, prevout_n: Union[int, str]) -> Optional[str]:
assert isinstance(prevout_hash, str)
prevout_n = str(prevout_n)
return self.spent_outpoints.get(prevout_hash, {}).get(prevout_n)
@modifier
def remove_spent_outpoint(self, prevout_hash: str, prevout_n: Union[int, str]) -> None:
assert isinstance(prevout_hash, str)
prevout_n = str(prevout_n)
self.spent_outpoints[prevout_hash].pop(prevout_n, None)
if not self.spent_outpoints[prevout_hash]:
self.spent_outpoints.pop(prevout_hash)
@modifier
def set_spent_outpoint(self, prevout_hash: str, prevout_n: Union[int, str], tx_hash: str) -> None:
assert isinstance(prevout_hash, str)
assert isinstance(tx_hash, str)
prevout_n = str(prevout_n)
if prevout_hash not in self.spent_outpoints:
self.spent_outpoints[prevout_hash] = {}
self.spent_outpoints[prevout_hash][prevout_n] = tx_hash
@modifier
def add_prevout_by_scripthash(self, scripthash: str, *, prevout: TxOutpoint, value: int) -> None:
assert isinstance(scripthash, str)
assert isinstance(prevout, TxOutpoint)
assert isinstance(value, int)
if scripthash not in self._prevouts_by_scripthash:
self._prevouts_by_scripthash[scripthash] = set()
self._prevouts_by_scripthash[scripthash].add((prevout.to_str(), value))
@modifier
def remove_prevout_by_scripthash(self, scripthash: str, *, prevout: TxOutpoint, value: int) -> None:
assert isinstance(scripthash, str)
assert isinstance(prevout, TxOutpoint)
assert isinstance(value, int)
self._prevouts_by_scripthash[scripthash].discard((prevout.to_str(), value))
if not self._prevouts_by_scripthash[scripthash]:
self._prevouts_by_scripthash.pop(scripthash)
@locked
def get_prevouts_by_scripthash(self, scripthash: str) -> Set[Tuple[TxOutpoint, int]]:
assert isinstance(scripthash, str)
prevouts_and_values = self._prevouts_by_scripthash.get(scripthash, set())
return {(TxOutpoint.from_str(prevout), value) for prevout, value in prevouts_and_values}
@modifier
def add_transaction(self, tx_hash: str, tx: Transaction) -> None:
assert isinstance(tx_hash, str)
assert isinstance(tx, Transaction), tx
# note that tx might be a PartialTransaction
# serialize and de-serialize tx now. this might e.g. convert a complete PartialTx to a Tx
tx = tx_from_any(str(tx))
if not tx_hash:
raise Exception("trying to add tx to db without txid")
if tx_hash != tx.txid():
raise Exception(f"trying to add tx to db with inconsistent txid: {tx_hash} != {tx.txid()}")
# don't allow overwriting complete tx with partial tx
tx_we_already_have = self.transactions.get(tx_hash, None)
if tx_we_already_have is None or isinstance(tx_we_already_have, PartialTransaction):
self.transactions[tx_hash] = tx
@modifier
def remove_transaction(self, tx_hash: str) -> Optional[Transaction]:
assert isinstance(tx_hash, str)
return self.transactions.pop(tx_hash, None)
@locked
def get_transaction(self, tx_hash: Optional[str]) -> Optional[Transaction]:
if tx_hash is None:
return None
assert isinstance(tx_hash, str)
return self.transactions.get(tx_hash)
@locked
def list_transactions(self) -> Sequence[str]:
return list(self.transactions.keys())
@locked
def get_history(self) -> Sequence[str]:
return list(self.history.keys())
def is_addr_in_history(self, addr: str) -> bool:
# does not mean history is non-empty!
assert isinstance(addr, str)
return addr in self.history
@locked
def get_addr_history(self, addr: str) -> Sequence[Tuple[str, int]]:
assert isinstance(addr, str)
return self.history.get(addr, [])
@modifier
def set_addr_history(self, addr: str, hist) -> None:
assert isinstance(addr, str)
self.history[addr] = hist
@modifier
def remove_addr_history(self, addr: str) -> None:
assert isinstance(addr, str)
self.history.pop(addr, None)
@locked
def list_verified_tx(self) -> Sequence[str]:
return list(self.verified_tx.keys())
@locked
def get_verified_tx(self, txid: str) -> Optional[TxMinedInfo]:
assert isinstance(txid, str)
if txid not in self.verified_tx:
return None
height, timestamp, txpos, header_hash = self.verified_tx[txid]
return TxMinedInfo(height=height,
conf=None,
timestamp=timestamp,
txpos=txpos,
header_hash=header_hash)
@modifier
def add_verified_tx(self, txid: str, info: TxMinedInfo):
assert isinstance(txid, str)
assert isinstance(info, TxMinedInfo)
self.verified_tx[txid] = (info.height, info.timestamp, info.txpos, info.header_hash)
@modifier
def remove_verified_tx(self, txid: str):
assert isinstance(txid, str)
self.verified_tx.pop(txid, None)
def is_in_verified_tx(self, txid: str) -> bool:
assert isinstance(txid, str)
return txid in self.verified_tx
@modifier
def add_tx_fee_from_server(self, txid: str, fee_sat: Optional[int]) -> None:
assert isinstance(txid, str)
# note: when called with (fee_sat is None), rm currently saved value
if txid not in self.tx_fees:
self.tx_fees[txid] = TxFeesValue()
tx_fees_value = self.tx_fees[txid]
if tx_fees_value.is_calculated_by_us:
return
self.tx_fees[txid] = tx_fees_value._replace(fee=fee_sat, is_calculated_by_us=False)
@modifier
def add_tx_fee_we_calculated(self, txid: str, fee_sat: Optional[int]) -> None:
assert isinstance(txid, str)
if fee_sat is None:
return
assert isinstance(fee_sat, int)
if txid not in self.tx_fees:
self.tx_fees[txid] = TxFeesValue()
self.tx_fees[txid] = self.tx_fees[txid]._replace(fee=fee_sat, is_calculated_by_us=True)
@locked
def get_tx_fee(self, txid: str, *, trust_server: bool = False) -> Optional[int]:
        """Returns tx_fee."""
        assert isinstance(txid, str)
tx_fees_value = self.tx_fees.get(txid)
if tx_fees_value is None:
return None
if not trust_server and not tx_fees_value.is_calculated_by_us:
return None
return tx_fees_value.fee
@modifier
def add_num_inputs_to_tx(self, txid: str, num_inputs: int) -> None:
assert isinstance(txid, str)
assert isinstance(num_inputs, int)
if txid not in self.tx_fees:
self.tx_fees[txid] = TxFeesValue()
self.tx_fees[txid] = self.tx_fees[txid]._replace(num_inputs=num_inputs)
@locked
def get_num_all_inputs_of_tx(self, txid: str) -> Optional[int]:
assert isinstance(txid, str)
tx_fees_value = self.tx_fees.get(txid)
if tx_fees_value is None:
return None
return tx_fees_value.num_inputs
@locked
def get_num_ismine_inputs_of_tx(self, txid: str) -> int:
assert isinstance(txid, str)
txins = self.txi.get(txid, {})
return sum([len(tupls) for addr, tupls in txins.items()])
@modifier
def remove_tx_fee(self, txid: str) -> None:
assert isinstance(txid, str)
self.tx_fees.pop(txid, None)
@locked
def get_dict(self, name) -> dict:
# Warning: interacts un-intuitively with 'put': certain parts
# of 'data' will have pointers saved as separate variables.
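        # (the dict returned below is the live object stored in self.data[name]:
        #  callers that keep a reference mutate the db contents in place)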
if name not in self.data:
self.data[name] = {}
return self.data[name]
@locked
def num_change_addresses(self) -> int:
return len(self.change_addresses)
@locked
def num_receiving_addresses(self) -> int:
return len(self.receiving_addresses)
@locked
def get_change_addresses(self, *, slice_start=None, slice_stop=None) -> List[str]:
# note: slicing makes a shallow copy
return self.change_addresses[slice_start:slice_stop]
@locked
def get_receiving_addresses(self, *, slice_start=None, slice_stop=None) -> List[str]:
# note: slicing makes a shallow copy
return self.receiving_addresses[slice_start:slice_stop]
@modifier
def add_change_address(self, addr: str) -> None:
assert isinstance(addr, str)
self._addr_to_addr_index[addr] = (1, len(self.change_addresses))
self.change_addresses.append(addr)
@modifier
def add_receiving_address(self, addr: str) -> None:
assert isinstance(addr, str)
self._addr_to_addr_index[addr] = (0, len(self.receiving_addresses))
self.receiving_addresses.append(addr)
@locked
def get_address_index(self, address: str) -> Optional[Sequence[int]]:
assert isinstance(address, str)
return self._addr_to_addr_index.get(address)
@modifier
def add_imported_address(self, addr: str, d: dict) -> None:
assert isinstance(addr, str)
self.imported_addresses[addr] = d
@modifier
def remove_imported_address(self, addr: str) -> None:
assert isinstance(addr, str)
self.imported_addresses.pop(addr)
@locked
def has_imported_address(self, addr: str) -> bool:
assert isinstance(addr, str)
return addr in self.imported_addresses
@locked
def get_imported_addresses(self) -> Sequence[str]:
return list(sorted(self.imported_addresses.keys()))
@locked
def get_imported_address(self, addr: str) -> Optional[dict]:
assert isinstance(addr, str)
return self.imported_addresses.get(addr)
def load_addresses(self, wallet_type):
""" called from Abstract_Wallet.__init__ """
if wallet_type == 'imported':
self.imported_addresses = self.get_dict('addresses') # type: Dict[str, dict]
else:
self.get_dict('addresses')
for name in ['receiving', 'change']:
if name not in self.data['addresses']:
self.data['addresses'][name] = []
self.change_addresses = self.data['addresses']['change']
self.receiving_addresses = self.data['addresses']['receiving']
self._addr_to_addr_index = {} # type: Dict[str, Sequence[int]] # key: address, value: (is_change, index)
for i, addr in enumerate(self.receiving_addresses):
self._addr_to_addr_index[addr] = (0, i)
for i, addr in enumerate(self.change_addresses):
self._addr_to_addr_index[addr] = (1, i)
@profiler
def _load_transactions(self):
self.data = StoredDict(self.data, self, [])
# references in self.data
# TODO make all these private
# txid -> address -> prev_outpoint -> value
self.txi = self.get_dict('txi') # type: Dict[str, Dict[str, Dict[str, int]]]
# txid -> address -> output_index -> (value, is_coinbase)
self.txo = self.get_dict('txo') # type: Dict[str, Dict[str, Dict[str, Tuple[int, bool]]]]
self.transactions = self.get_dict('transactions') # type: Dict[str, Transaction]
self.spent_outpoints = self.get_dict('spent_outpoints') # txid -> output_index -> next_txid
self.history = self.get_dict('addr_history') # address -> list of (txid, height)
self.verified_tx = self.get_dict('verified_tx3') # txid -> (height, timestamp, txpos, header_hash)
self.tx_fees = self.get_dict('tx_fees') # type: Dict[str, TxFeesValue]
# scripthash -> set of (outpoint, value)
self._prevouts_by_scripthash = self.get_dict('prevouts_by_scripthash') # type: Dict[str, Set[Tuple[str, int]]]
# remove unreferenced tx
for tx_hash in list(self.transactions.keys()):
if not self.get_txi_addresses(tx_hash) and not self.get_txo_addresses(tx_hash):
self.logger.info(f"removing unreferenced tx: {tx_hash}")
self.transactions.pop(tx_hash)
# remove unreferenced outpoints
for prevout_hash in self.spent_outpoints.keys():
d = self.spent_outpoints[prevout_hash]
for prevout_n, spending_txid in list(d.items()):
if spending_txid not in self.transactions:
self.logger.info("removing unreferenced spent outpoint")
d.pop(prevout_n)
@modifier
def clear_history(self):
self.txi.clear()
self.txo.clear()
self.spent_outpoints.clear()
self.transactions.clear()
self.history.clear()
self.verified_tx.clear()
self.tx_fees.clear()
self._prevouts_by_scripthash.clear()
def _convert_dict(self, path, key, v):
if key == 'transactions':
# note: for performance, "deserialize=False" so that we will deserialize these on-demand
v = dict((k, tx_from_any(x, deserialize=False)) for k, x in v.items())
if key == 'invoices':
v = dict((k, Invoice.from_json(x)) for k, x in v.items())
if key == 'payment_requests':
v = dict((k, Invoice.from_json(x)) for k, x in v.items())
elif key == 'adds':
v = dict((k, UpdateAddHtlc.from_tuple(*x)) for k, x in v.items())
elif key == 'fee_updates':
v = dict((k, FeeUpdate(**x)) for k, x in v.items())
elif key == 'submarine_swaps':
v = dict((k, SwapData(**x)) for k, x in v.items())
elif key == 'imported_channel_backups':
v = dict((k, ImportedChannelBackupStorage(**x)) for k, x in v.items())
elif key == 'onchain_channel_backups':
v = dict((k, OnchainChannelBackupStorage(**x)) for k, x in v.items())
elif key == 'tx_fees':
v = dict((k, TxFeesValue(*x)) for k, x in v.items())
elif key == 'prevouts_by_scripthash':
v = dict((k, {(prevout, value) for (prevout, value) in x}) for k, x in v.items())
elif key == 'buckets':
v = dict((k, ShachainElement(bfh(x[0]), int(x[1]))) for k, x in v.items())
elif key == 'data_loss_protect_remote_pcp':
v = dict((k, bfh(x)) for k, x in v.items())
# convert htlc_id keys to int
if key in ['adds', 'locked_in', 'settles', 'fails', 'fee_updates', 'buckets',
'unacked_updates', 'unfulfilled_htlcs', 'fail_htlc_reasons', 'onion_keys']:
v = dict((int(k), x) for k, x in v.items())
# convert keys to HTLCOwner
if key == 'log' or (path and path[-1] in ['locked_in', 'fails', 'settles']):
if "1" in v:
v[LOCAL] = v.pop("1")
v[REMOTE] = v.pop("-1")
return v
def _convert_value(self, path, key, v):
if key == 'local_config':
v = LocalConfig(**v)
elif key == 'remote_config':
v = RemoteConfig(**v)
elif key == 'constraints':
v = ChannelConstraints(**v)
elif key == 'funding_outpoint':
v = Outpoint(**v)
elif key == 'channel_type':
v = ChannelType(v)
return v
def _should_convert_to_stored_dict(self, key) -> bool:
if key == 'keystore':
return False
multisig_keystore_names = [('x%d/' % i) for i in range(1, 16)]
if key in multisig_keystore_names:
return False
return True
def write(self, storage: 'WalletStorage'):
with self.lock:
self._write(storage)
@profiler
def _write(self, storage: 'WalletStorage'):
if threading.current_thread().daemon:
self.logger.warning('daemon thread cannot write db')
return
if not self.modified():
return
json_str = self.dump(human_readable=not storage.is_encrypted())
storage.write(json_str)
self.set_modified(False)
def is_ready_to_be_used_by_wallet(self):
return not self.requires_upgrade() and self._called_after_upgrade_tasks
def split_accounts(self, root_path):
from .storage import WalletStorage
out = []
result = self.get_split_accounts()
for data in result:
path = root_path + '.' + data['suffix']
storage = WalletStorage(path)
db = WalletDB(json.dumps(data), manual_upgrades=False)
db._called_after_upgrade_tasks = False
db.upgrade()
db.write(storage)
out.append(path)
return out
def get_action(self):
action = run_hook('get_action', self)
return action
def load_plugins(self):
wallet_type = self.get('wallet_type')
if wallet_type in plugin_loaders:
plugin_loaders[wallet_type]()
def set_keystore_encryption(self, enable):
self.put('use_encryption', enable)
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
from migrate import exceptions as versioning_exceptions
from migrate import UniqueConstraint
from migrate.versioning import api as versioning_api
import mock
from oslo_db.sqlalchemy import utils as db_utils
import sqlalchemy
from nova import context
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import migration
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests import uuidsentinel
class TestNullInstanceUuidScanDB(test.TestCase):
# NOTE(mriedem): Copied from the 267 database migration.
def downgrade(self, migrate_engine):
UniqueConstraint('uuid',
table=db_utils.get_table(migrate_engine, 'instances'),
name='uniq_instances0uuid').drop()
for table_name in ('instances', 'shadow_instances'):
table = db_utils.get_table(migrate_engine, table_name)
table.columns.uuid.alter(nullable=True)
def setUp(self):
super(TestNullInstanceUuidScanDB, self).setUp()
self.engine = db_api.get_engine()
# When this test runs, we've already run the schema migration to make
# instances.uuid non-nullable, so we have to alter the table here
# so we can test against a real database.
self.downgrade(self.engine)
# Now create fake entries in the fixed_ips, consoles and
# instances table where (instance_)uuid is None for testing.
for table_name in ('fixed_ips', 'instances', 'consoles'):
table = db_utils.get_table(self.engine, table_name)
fake_record = {'id': 1}
table.insert().execute(fake_record)
def test_db_null_instance_uuid_scan_readonly(self):
results = migration.db_null_instance_uuid_scan(delete=False)
self.assertEqual(1, results.get('instances'))
self.assertEqual(1, results.get('consoles'))
# The fixed_ips table should be ignored.
self.assertNotIn('fixed_ips', results)
# Now pick a random table with an instance_uuid column and show it's
# in the results but with 0 hits.
self.assertEqual(0, results.get('instance_info_caches'))
# Make sure nothing was deleted.
for table_name in ('fixed_ips', 'instances', 'consoles'):
table = db_utils.get_table(self.engine, table_name)
record = table.select(table.c.id == 1).execute().first()
self.assertIsNotNone(record)
def test_db_null_instance_uuid_scan_delete(self):
results = migration.db_null_instance_uuid_scan(delete=True)
self.assertEqual(1, results.get('instances'))
self.assertEqual(1, results.get('consoles'))
# The fixed_ips table should be ignored.
self.assertNotIn('fixed_ips', results)
# Now pick a random table with an instance_uuid column and show it's
# in the results but with 0 hits.
self.assertEqual(0, results.get('instance_info_caches'))
        # Make sure fixed_ips wasn't touched, but the instances and consoles
        # records were deleted.
fixed_ips = db_utils.get_table(self.engine, 'fixed_ips')
record = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNotNone(record)
consoles = db_utils.get_table(self.engine, 'consoles')
record = consoles.select(consoles.c.id == 1).execute().first()
self.assertIsNone(record)
instances = db_utils.get_table(self.engine, 'instances')
record = instances.select(instances.c.id == 1).execute().first()
self.assertIsNone(record)
@mock.patch.object(migration, 'db_version', return_value=2)
@mock.patch.object(migration, '_find_migrate_repo', return_value='repo')
@mock.patch.object(versioning_api, 'upgrade')
@mock.patch.object(versioning_api, 'downgrade')
@mock.patch.object(migration, 'get_engine', return_value='engine')
class TestDbSync(test.NoDBTestCase):
def test_version_none(self, mock_get_engine, mock_downgrade, mock_upgrade,
mock_find_repo, mock_version):
database = 'fake'
migration.db_sync(database=database)
mock_version.assert_called_once_with(database, context=None)
mock_find_repo.assert_called_once_with(database)
mock_get_engine.assert_called_once_with(database, context=None)
mock_upgrade.assert_called_once_with('engine', 'repo', None)
self.assertFalse(mock_downgrade.called)
def test_downgrade(self, mock_get_engine, mock_downgrade, mock_upgrade,
mock_find_repo, mock_version):
database = 'fake'
migration.db_sync(1, database=database)
mock_version.assert_called_once_with(database, context=None)
mock_find_repo.assert_called_once_with(database)
mock_get_engine.assert_called_once_with(database, context=None)
mock_downgrade.assert_called_once_with('engine', 'repo', 1)
self.assertFalse(mock_upgrade.called)
@mock.patch.object(migration, '_find_migrate_repo', return_value='repo')
@mock.patch.object(versioning_api, 'db_version')
@mock.patch.object(migration, 'get_engine')
class TestDbVersion(test.NoDBTestCase):
def test_db_version(self, mock_get_engine, mock_db_version,
mock_find_repo):
database = 'fake'
mock_get_engine.return_value = 'engine'
migration.db_version(database)
mock_find_repo.assert_called_once_with(database)
mock_db_version.assert_called_once_with('engine', 'repo')
def test_not_controlled(self, mock_get_engine, mock_db_version,
mock_find_repo):
database = 'api'
mock_get_engine.side_effect = ['engine', 'engine', 'engine']
exc = versioning_exceptions.DatabaseNotControlledError()
mock_db_version.side_effect = [exc, '']
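        # the first db_version call raises DatabaseNotControlledError; the code under
        # test is then expected to version-control the repo at 0 and retry, hence the
        # call counts asserted below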
metadata = mock.MagicMock()
metadata.tables.return_value = []
with mock.patch.object(sqlalchemy, 'MetaData',
metadata), mock.patch.object(migration,
'db_version_control') as mock_version_control:
migration.db_version(database)
mock_version_control.assert_called_once_with(0,
database,
context=None)
db_version_calls = [mock.call('engine', 'repo')] * 2
self.assertEqual(db_version_calls, mock_db_version.call_args_list)
engine_calls = [mock.call(database, context=None)] * 3
self.assertEqual(engine_calls, mock_get_engine.call_args_list)
@mock.patch.object(migration, '_find_migrate_repo', return_value='repo')
@mock.patch.object(migration, 'get_engine', return_value='engine')
@mock.patch.object(versioning_api, 'version_control')
class TestDbVersionControl(test.NoDBTestCase):
def test_version_control(self, mock_version_control, mock_get_engine,
mock_find_repo):
database = 'fake'
migration.db_version_control(database=database)
mock_find_repo.assert_called_once_with(database)
mock_version_control.assert_called_once_with('engine', 'repo', None)
class TestGetEngine(test.NoDBTestCase):
def test_get_main_engine(self):
with mock.patch.object(db_api, 'get_engine',
return_value='engine') as mock_get_engine:
engine = migration.get_engine()
self.assertEqual('engine', engine)
mock_get_engine.assert_called_once_with(context=None)
def test_get_api_engine(self):
with mock.patch.object(db_api, 'get_api_engine',
return_value='api_engine') as mock_get_engine:
engine = migration.get_engine('api')
self.assertEqual('api_engine', engine)
mock_get_engine.assert_called_once_with()
class TestFlavorCheck(test.TestCase):
def setUp(self):
super(TestFlavorCheck, self).setUp()
self.context = context.get_admin_context()
self.migration = importlib.import_module(
'nova.db.sqlalchemy.migrate_repo.versions.'
'291_enforce_flavors_migrated')
self.engine = db_api.get_engine()
def test_upgrade_clean(self):
inst = objects.Instance(context=self.context,
uuid=uuidsentinel.fake,
user_id=self.context.user_id,
project_id=self.context.project_id,
system_metadata={'foo': 'bar'})
inst.create()
self.migration.upgrade(self.engine)
def test_upgrade_dirty(self):
inst = objects.Instance(context=self.context,
uuid=uuidsentinel.fake,
user_id=self.context.user_id,
project_id=self.context.project_id,
system_metadata={'foo': 'bar',
'instance_type_id': 'foo'})
inst.create()
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_upgrade_flavor_deleted_instances(self):
inst = objects.Instance(context=self.context,
uuid=uuidsentinel.fake,
user_id=self.context.user_id,
project_id=self.context.project_id,
system_metadata={'foo': 'bar',
'instance_type_id': 'foo'})
inst.create()
inst.destroy()
self.migration.upgrade(self.engine)
class TestNewtonCheck(test.TestCase):
def setUp(self):
super(TestNewtonCheck, self).setUp()
self.useFixture(nova_fixtures.DatabaseAtVersion(329))
self.context = context.get_admin_context()
self.migration = importlib.import_module(
'nova.db.sqlalchemy.migrate_repo.versions.'
'330_enforce_mitaka_online_migrations')
self.engine = db_api.get_engine()
def test_all_migrated(self):
cn = objects.ComputeNode(context=self.context,
vcpus=1, memory_mb=512, local_gb=10,
vcpus_used=0, memory_mb_used=256,
local_gb_used=5, hypervisor_type='HyperDanVM',
hypervisor_version='34', cpu_info='foo')
cn.create()
objects.Aggregate(context=self.context,
name='foo').create()
objects.PciDevice.create(self.context, {})
self.migration.upgrade(self.engine)
def test_cn_not_migrated(self):
cn = objects.ComputeNode(context=self.context,
vcpus=1, memory_mb=512, local_gb=10,
vcpus_used=0, memory_mb_used=256,
local_gb_used=5, hypervisor_type='HyperDanVM',
hypervisor_version='34', cpu_info='foo')
cn.create()
db_api.compute_node_update(self.context, cn.id, {'uuid': None})
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_aggregate_not_migrated(self):
agg = db_api.aggregate_create(self.context, {"name": "foobar"})
db_api.aggregate_update(self.context, agg.id, {'uuid': None})
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_pci_device_not_migrated(self):
db_api.pci_device_update(self.context, 1, 'foo:bar',
{'parent_addr': None,
'compute_node_id': 1,
'address': 'foo:bar',
'vendor_id': '123',
'product_id': '456',
'dev_type': 'foo',
'label': 'foobar',
'status': 'whatisthis?'})
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_deleted_not_migrated(self):
cn_values = dict(vcpus=1, memory_mb=512, local_gb=10,
vcpus_used=0, memory_mb_used=256,
local_gb_used=5, hypervisor_type='HyperDanVM',
hypervisor_version='34', cpu_info='foo')
cn = db_api.compute_node_create(self.context, cn_values)
agg_values = dict(name='foo')
agg = db_api.aggregate_create(self.context, agg_values)
pd = db_api.pci_device_update(self.context, 1, 'foo:bar',
{'parent_addr': None,
'compute_node_id': 1,
'address': 'foo:bar',
'vendor_id': '123',
'product_id': '456',
'dev_type': 'foo',
'label': 'foobar',
'status': 'whatisthis?'})
db_api.compute_node_delete(self.context, cn['id'])
db_api.aggregate_delete(self.context, agg['id'])
db_api.pci_device_destroy(self.context, pd['compute_node_id'],
pd['address'])
# blocker should not block on soft-deleted records
self.migration.upgrade(self.engine)
class TestOcataCheck(test.TestCase):
def setUp(self):
super(TestOcataCheck, self).setUp()
self.context = context.get_admin_context()
self.migration = importlib.import_module(
'nova.db.sqlalchemy.migrate_repo.versions.'
'345_require_online_migration_completion')
self.engine = db_api.get_engine()
self.flavor_values = {
'name': 'foo',
'memory_mb': 256,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 100,
'flavorid': 'bar',
'swap': 1,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'disabled': False,
'is_public': True,
}
self.keypair_values = {
'name': 'foo',
            'user_id': 'bar',
'fingerprint': 'baz',
'public_key': 'bat',
'type': 'ssh',
}
self.aggregate_values = {
'uuid': uuidsentinel.agg,
'name': 'foo',
}
self.ig_values = {
'user_id': 'foo',
'project_id': 'bar',
'uuid': uuidsentinel.ig,
'name': 'baz',
}
def test_upgrade_clean(self):
self.migration.upgrade(self.engine)
def test_upgrade_dirty_flavors(self):
db_api.flavor_create(self.context, self.flavor_values)
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_upgrade_with_deleted_flavors(self):
flavor = db_api.flavor_create(self.context, self.flavor_values)
db_api.flavor_destroy(self.context, flavor['flavorid'])
self.migration.upgrade(self.engine)
def test_upgrade_dirty_keypairs(self):
db_api.key_pair_create(self.context, self.keypair_values)
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_upgrade_with_deleted_keypairs(self):
keypair = db_api.key_pair_create(self.context, self.keypair_values)
db_api.key_pair_destroy(self.context,
keypair['user_id'], keypair['name'])
self.migration.upgrade(self.engine)
def test_upgrade_dirty_aggregates(self):
db_api.aggregate_create(self.context, self.aggregate_values)
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_upgrade_with_deleted_aggregates(self):
agg = db_api.aggregate_create(self.context, self.aggregate_values)
db_api.aggregate_delete(self.context, agg['id'])
self.migration.upgrade(self.engine)
def test_upgrade_dirty_instance_groups(self):
db_api.instance_group_create(self.context, self.ig_values)
self.assertRaises(exception.ValidationError,
self.migration.upgrade, self.engine)
def test_upgrade_with_deleted_instance_groups(self):
group = db_api.instance_group_create(self.context, self.ig_values)
db_api.instance_group_delete(self.context, group['uuid'])
self.migration.upgrade(self.engine)
class TestNewtonCellsCheck(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(TestNewtonCellsCheck, self).setUp()
self.useFixture(nova_fixtures.DatabaseAtVersion(28, 'api'))
self.context = context.get_admin_context()
self.migration = importlib.import_module(
'nova.db.sqlalchemy.api_migrations.migrate_repo.versions.'
'030_require_cell_setup')
self.engine = db_api.get_api_engine()
@mock.patch('nova.objects.Flavor._ensure_migrated')
def _flavor_me(self, _):
flavor = objects.Flavor(context=self.context,
name='foo', memory_mb=123,
vcpus=1, root_gb=1,
flavorid='m1.foo')
flavor.create()
def test_upgrade_with_no_cell_mappings(self):
self._flavor_me()
self.assertRaisesRegex(exception.ValidationError,
'Cell mappings',
self.migration.upgrade, self.engine)
def test_upgrade_with_only_cell0(self):
self._flavor_me()
cell0 = objects.CellMapping(context=self.context,
uuid=objects.CellMapping.CELL0_UUID,
name='cell0',
transport_url='fake',
database_connection='fake')
cell0.create()
self.assertRaisesRegex(exception.ValidationError,
'Cell mappings',
self.migration.upgrade, self.engine)
def test_upgrade_without_cell0(self):
self._flavor_me()
cell1 = objects.CellMapping(context=self.context,
uuid=uuidsentinel.cell1,
name='cell1',
transport_url='fake',
database_connection='fake')
cell1.create()
cell2 = objects.CellMapping(context=self.context,
uuid=uuidsentinel.cell2,
name='cell2',
transport_url='fake',
database_connection='fake')
cell2.create()
self.assertRaisesRegex(exception.ValidationError,
'Cell0',
self.migration.upgrade, self.engine)
def test_upgrade_with_no_host_mappings(self):
self._flavor_me()
cell0 = objects.CellMapping(context=self.context,
uuid=objects.CellMapping.CELL0_UUID,
name='cell0',
transport_url='fake',
database_connection='fake')
cell0.create()
cell1 = objects.CellMapping(context=self.context,
uuid=uuidsentinel.cell1,
name='cell1',
transport_url='fake',
database_connection='fake')
cell1.create()
with mock.patch.object(self.migration, 'LOG') as log:
self.migration.upgrade(self.engine)
self.assertTrue(log.warning.called)
def test_upgrade_with_required_mappings(self):
self._flavor_me()
cell0 = objects.CellMapping(context=self.context,
uuid=objects.CellMapping.CELL0_UUID,
name='cell0',
transport_url='fake',
database_connection='fake')
cell0.create()
cell1 = objects.CellMapping(context=self.context,
uuid=uuidsentinel.cell1,
name='cell1',
transport_url='fake',
database_connection='fake')
cell1.create()
hostmapping = objects.HostMapping(context=self.context,
cell_mapping=cell1,
host='foo')
hostmapping.create()
self.migration.upgrade(self.engine)
def test_upgrade_new_deploy(self):
self.migration.upgrade(self.engine)
'''
Translates a source file using a translation model.
'''
import argparse
import theano
import numpy
import cPickle as pkl
from nmt import (build_sampler, gen_sample, load_params,
init_params, init_tparams)
from multiprocessing import Process, Queue
def translate_model(queue, rqueue, mask_left, mask_right, write_mask, pid, model, options, k, normalize):
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# allocate model parameters
params = init_params(options)
# load model parameters and set theano shared variables
params = load_params(model, params)
tparams = init_tparams(params)
# word index
f_init, f_next = build_sampler(tparams, options, trng, use_noise)
def _translate(seq, left, right, write):
# sample given an input sequence and obtain scores
print left.shape, right.shape, write.shape, len(seq)
sample, score = gen_sample(tparams, f_init, f_next,
numpy.array(seq).reshape([len(seq), 1]),
left[:, :, None], right[:, :, None], write,
options, trng=trng, k=k, maxlen=200,
stochastic=False, argmax=False)
# normalize scores according to sequence lengths
if normalize:
lengths = numpy.array([len(s) for s in sample])
score = score / lengths
sidx = numpy.argmin(score)
return sample[sidx]
while True:
req = queue.get()
if req is None:
break
rem_l = mask_left.get()
rem_r = mask_right.get()
rem_w = write_mask.get()
idx, x = req[0], req[1]
l = rem_l[1]
r = rem_r[1]
w = rem_w[1]
print pid, '-', idx
seq = _translate(x, l, r, w)
rqueue.put((idx, seq))
return
def main(model, dictionary, dictionary_target, source_file, tree_file, saveto, k=5,
normalize=False, n_process=5, chr_level=False):
# load model model_options
with open('%s.pkl' % model, 'rb') as f:
options = pkl.load(f)
# load source dictionary and invert
with open(dictionary, 'rb') as f:
word_dict = pkl.load(f)
word_idict = dict()
for kk, vv in word_dict.iteritems():
word_idict[vv] = kk
word_idict[0] = '<eos>'
word_idict[1] = 'UNK'
# load target dictionary and invert
with open(dictionary_target, 'rb') as f:
word_dict_trg = pkl.load(f)
word_idict_trg = dict()
for kk, vv in word_dict_trg.iteritems():
word_idict_trg[vv] = kk
word_idict_trg[0] = '<eos>'
word_idict_trg[1] = 'UNK'
# create input and output queues for processes
queue = Queue()
# for tree structure
mask_left = Queue()
mask_right = Queue()
write_mask = Queue()
rqueue = Queue()
processes = [None] * n_process
for midx in xrange(n_process):
processes[midx] = Process(
target=translate_model,
args=(queue, rqueue, mask_left, mask_right, write_mask, midx, model, options, k, normalize))
processes[midx].start()
# utility function
def _seqs2words(caps):
capsw = []
for cc in caps:
ww = []
for w in cc:
if w == 0:
break
ww.append(word_idict_trg[w])
capsw.append(' '.join(ww))
return capsw
def _send_jobs(fname, tname):
len_x = []
with open(fname, 'r') as f:
for idx, line in enumerate(f):
if chr_level:
words = list(line.decode('utf-8').strip())
else:
words = line.strip().split()
x = map(lambda w: word_dict[w] if w in word_dict else 1, words)
x = map(lambda ii: ii if ii < options['n_words_src'] else 1, x)
x += [0]
len_x.append(len(x))
queue.put((idx, x))
with open(tname, 'r') as f:
for idx, line in enumerate(f):
tree_actions = line.strip().split()
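                # each action token evaluates to a (left, right, write) tuple; judging by
                # the indexing below, indices > 200 refer to nodes produced by earlier
                # actions (offset by 201 and laid out after the source tokens), while
                # indices <= 200 are 1-based source token positions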
mask_l = numpy.zeros((len(tree_actions), len_x[idx] + len(tree_actions))).astype('float32')
mask_r = numpy.zeros((len(tree_actions), len_x[idx] + len(tree_actions))).astype('float32')
wrt_mask = numpy.zeros((len(tree_actions), len_x[idx] + len(tree_actions))).astype('float32')
# print mask_l.shape
idx_act = 0
for tree_act in tree_actions:
wrt_mask[idx_act][(eval(tree_act)[2] - 201) + len_x[idx]] = 1.
if eval(tree_act)[0] > 200:
mask_l[idx_act][(eval(tree_act)[0] - 201) + len_x[idx]] = 1.
else:
mask_l[idx_act][eval(tree_act)[0] - 1] = 1.
if eval(tree_act)[1] > 200:
mask_r[idx_act][(eval(tree_act)[1] - 201) + len_x[idx]] = 1.
else:
mask_r[idx_act][eval(tree_act)[1] - 1] = 1.
idx_act += 1
# print idx_act
mask_left.put((idx, mask_l))
mask_right.put((idx, mask_r))
write_mask.put((idx, wrt_mask))
return idx+1
def _finish_processes():
for midx in xrange(n_process):
queue.put(None)
def _retrieve_jobs(n_samples):
trans = [None] * n_samples
for idx in xrange(n_samples):
resp = rqueue.get()
trans[resp[0]] = resp[1]
if numpy.mod(idx, 10) == 0:
print 'Sample ', (idx+1), '/', n_samples, ' Done'
return trans
print 'Translating ', source_file, '...'
n_samples = _send_jobs(source_file, tree_file) # return the number of sentences
trans = _seqs2words(_retrieve_jobs(n_samples))
_finish_processes()
with open(saveto, 'w') as f:
print >>f, '\n'.join(trans)
print 'Done'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-k', type=int, default=5)
parser.add_argument('-p', type=int, default=5)
parser.add_argument('-n', action="store_true", default=False)
parser.add_argument('-c', action="store_true", default=False)
parser.add_argument('model', type=str)
parser.add_argument('dictionary', type=str)
parser.add_argument('dictionary_target', type=str)
parser.add_argument('source', type=str)
parser.add_argument('source_tree', type=str)
parser.add_argument('saveto', type=str)
args = parser.parse_args()
main(args.model, args.dictionary, args.dictionary_target, args.source, args.source_tree,
args.saveto, k=args.k, normalize=args.n, n_process=args.p,
chr_level=args.c)
import numpy as np
from keras import backend as kB
import pandas as pd
from concise.effects.util import *
import copy
def predict_vals(input_data, mutated_positions, apply_function=None, output_concat_axis=0, batch_size=100, **kwargs):
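    """Run `apply_function` over `input_data` in batches of `batch_size` (batched along
    the first axis of `mutated_positions`) and concatenate the per-key results of the
    returned dicts. (Summary of the code below; `apply_function` is expected to return a dict.)
    """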
outputs = {}
# if type(input_data) not in [list, tuple, dict]:
# input_data = [input_data]
# assert(len(input_data)>0)
# for el in input_data:
# assert(el.shape[0] == mutated_positions.shape[0])
batch_idx = 0
for batch_idx in range(int(np.ceil(mutated_positions.shape[0]/batch_size))):
batched_input = get_batch(input_data, batch_size, batch_idx)
start_idx = (batch_idx) * batch_size
end_idx = min((batch_idx + 1) * batch_size, mutated_positions.shape[0])
#if batched_input is None:
# break
res = apply_function(input_data=batched_input, mutated_positions=mutated_positions[start_idx:end_idx,...], **kwargs)
batch_idx += 1
for k in res:
if k not in outputs:
outputs[k] = [res[k]]
else:
outputs[k].append(res[k])
for k in outputs:
outputs[k] = concatenate_by_input_type(input_data, outputs[k], output_concat_axis=output_concat_axis)
return outputs
def concatenate_by_input_type(input_data, to_concat, output_concat_axis=0):
if isinstance(input_data, (list, tuple)):
out_obj = []
for x in range(len(input_data)):
concat_els = []
for el in to_concat:
concat_els.append(el[x])
out_obj.append(np.concatenate(concat_els, axis=output_concat_axis))
if isinstance(input_data, tuple):
out_obj = tuple(out_obj)
return out_obj
elif isinstance(input_data, dict):
out_obj = {}
for k in input_data:
concat_els = []
for el in to_concat:
concat_els.append(el[k])
out_obj[k] = np.concatenate(concat_els, axis=output_concat_axis)
return out_obj
elif isinstance(input_data, np.ndarray):
return np.concatenate(to_concat, axis=output_concat_axis)
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
def get_batch(input_data, batchsize, batch_idx):
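    """Return the `batch_idx`-th slice of `input_data` (at most `batchsize` items along the
    first axis), preserving the container type; returns None once the batch start index
    lies past the end of the data. (Summary of the code below.)"""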
# yield the output object
return_obj = None
start_idx = (batch_idx) * batchsize
if isinstance(input_data, (list, tuple)):
out_obj = []
for x in range(len(input_data)):
end_idx = min((batch_idx + 1) * batchsize, input_data[x].shape[0])
if start_idx > end_idx:
return None
out_obj.append(input_data[x][start_idx:end_idx, ...])
if isinstance(input_data, tuple):
out_obj = tuple(out_obj)
return_obj = out_obj
elif isinstance(input_data, dict):
out_obj = {}
for k in input_data:
end_idx = min((batch_idx + 1) * batchsize, input_data[k].shape[0])
if start_idx > end_idx:
return None
out_obj[k] = (input_data[k][start_idx:end_idx, ...])
return_obj = out_obj
elif isinstance(input_data, np.ndarray):
end_idx = min((batch_idx + 1) * batchsize, input_data.shape[0])
if start_idx > end_idx:
return None
out_obj = (input_data[start_idx:end_idx, ...])
return_obj = out_obj
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
return return_obj
def general_diff(left, right):
if isinstance(left, (list, tuple)):
out = []
for l, r in zip(left, right):
out.append(l - r)
return out
elif isinstance(left, dict):
out = {}
for k in left:
out[k] = left[k] - right[k]
return out
elif isinstance(left, np.ndarray):
return left - right
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
def general_sel(remains, return_if_smaller_than):
# Generalisation of: sel = np.abs(diff_fwd) < np.abs(diff_rc)
if isinstance(remains, (list, tuple)):
return [np.abs(rem) < np.abs(test) for rem, test in zip(remains, return_if_smaller_than)]
elif isinstance(remains, dict):
out = {}
for k in remains:
out[k] = np.abs(remains[k]) < np.abs(return_if_smaller_than[k])
return out
elif isinstance(remains, np.ndarray):
return np.abs(remains) < np.abs(return_if_smaller_than)
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
def replace_by_sel(to_be_edited, alt_values, sel):
if isinstance(to_be_edited, (list, tuple)):
for t, a, s in zip(to_be_edited, alt_values, sel):
t[s] = a[s]
elif isinstance(to_be_edited, dict):
for k in to_be_edited:
to_be_edited[k][sel[k]] = alt_values[k][sel[k]]
elif isinstance(to_be_edited, np.ndarray):
to_be_edited[sel] = alt_values[sel]
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
def input_times_grad(input, gradient, positions):
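    """For each sequence, take gradient * input at its (single) mutated position and sum
    over the last (channel) axis; shapes follow from the indexing in
    `multiply_input_grad` below."""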
def multiply_input_grad(grad, inp, positions):
if positions.shape[0] != grad.shape[0]:
raise Exception("At the moment exactly one (mutational) position is allowed per input sequence!")
if not np.all(np.array(grad.shape) == np.array(inp.shape)):
raise Exception("Input sequence and gradient have to have the same dimensions!")
scores = grad[range(positions.shape[0]), positions, :]
input_at_mut = inp[range(positions.shape[0]), positions, :]
# Calculate gradient * input
return (scores * input_at_mut).sum(axis=1)
assert (len(positions.shape) == 1) # has to be 1-dimensional
positions = positions.astype(np.int)
if type(input) is not type(gradient):
raise Exception("Input sequence and gradient have to be the same type!")
if isinstance(input, (list, tuple)):
if not (len(input) == len(gradient)):
raise Exception("Internal Error: Input and gradient list objects have different lenghts!")
out_obj = []
for x in range(len(input)):
out_obj.append(multiply_input_grad(input[x], gradient[x], positions))
elif isinstance(input, dict):
if not np.all(np.in1d(input.keys(), gradient.keys())) or (len(input) != len(gradient)):
raise Exception("Internal Error: Input and gradient dict objects have different keys!")
out_obj = {}
for k in input:
out_obj[k] = multiply_input_grad(input[k], gradient[k], positions)
elif isinstance(input, np.ndarray):
out_obj = multiply_input_grad(input, gradient, positions)
else:
raise ValueError("Input can only be of type: list, dict or np.ndarray")
return out_obj
def __get_direct_saliencies__(input_data, score_func, mutated_positions, model):
all_scores = {}
method_name = "dGrad"
# Take first element as it is the one with gradients
if isinstance(input_data, list):
input = [el for el in input_data]
else:
input = [input_data]
if kB._BACKEND == "theano":
if model.output._uses_learning_phase:
input.append(0)
else:
input.append(0)
scores = score_func(input) # test phase, so learning_phase = 0
if isinstance(input_data, np.ndarray):
scores = scores[0]
scores = input_times_grad(input_data, scores, mutated_positions)
all_scores[method_name] = scores
return all_scores
def __generate_direct_saliency_functions__(model, out_annotation_all_outputs, out_annotation=None):
sal_funcs = {}
if out_annotation is not None:
sel_outputs = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
else:
sel_outputs = np.arange(out_annotation_all_outputs.shape[0])
for i in sel_outputs:
inp = copy.copy(model.inputs)
outp = model.layers[-1].output
max_outp = outp[:, i]
if kB._BACKEND == "theano":
saliency = kB.gradients(max_outp.sum(), inp)
if model.output._uses_learning_phase:
inp.append(kB.learning_phase())
else:
saliency = kB.gradients(max_outp, inp)
inp.append(kB.learning_phase())
sal_funcs[out_annotation_all_outputs[i]] = kB.function(inp, saliency)
return sal_funcs
def __generate_direct_saliency_functions_DEPRECATED__(model, out_annotation_all_outputs, out_annotation=None):
sal_funcs = {}
if out_annotation is not None:
sel_outputs = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
else:
sel_outputs = np.arange(out_annotation_all_outputs.shape[0])
for i in sel_outputs:
inp = model.layers[0].input
outp = model.layers[-1].output
max_outp = outp[:, i]
saliency = kB.gradients(max_outp, inp)
sal_funcs[out_annotation_all_outputs[i]] = kB.function([inp, kB.learning_phase()], saliency)
return sal_funcs
# The function called from outside
def gradient_pred(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
output_filter_mask=None, out_annotation=None):
"""Gradient-based (saliency) variant effect prediction
Based on the idea of [saliency maps](https://arxiv.org/pdf/1312.6034.pdf) the gradient-based prediction of
variant effects uses the `gradient` function of the Keras backend to estimate the importance of a variant
for a given output. This value is then multiplied by the input, as recommended by
[Shrikumar et al., 2017](https://arxiv.org/pdf/1605.01713.pdf).
# Arguments
model: Keras model
ref: Input sequence with the reference genotype in the mutation position
ref_rc: Reverse complement of the 'ref' argument
alt: Input sequence with the alternative genotype in the mutation position
alt_rc: Reverse complement of the 'alt' argument
mutation_positions: Position on which the mutation was placed in the forward sequences
out_annotation_all_outputs: Output labels of the model.
output_filter_mask: Mask of boolean values indicating which model outputs should be used.
Use this or 'out_annotation'
out_annotation: List of outputs labels for which of the outputs (in case of a multi-task model) the
predictions should be calculated.
# Returns
Dictionary with three different entries:
        - ref: Gradient * input at the mutation position using the reference sequence.
            The forward or reverse-complement sequence is chosen depending on which
            direction produced the bigger absolute difference ('diff').
        - alt: Gradient * input at the mutation position using the alternative sequence.
            The forward or reverse-complement sequence is chosen depending on which
            direction produced the bigger absolute difference ('diff').
        - diff: 'alt' - 'ref'. The forward or reverse-complement sequence is chosen
            depending on which direction produced the bigger absolute difference.
"""
seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
for k in seqs:
if not isinstance(seqs[k], (list, tuple, np.ndarray)):
raise Exception("At the moment only models with list, tuple or np.ndarray inputs are supported.")
assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
assert get_seq_len(ref)[0] == mutation_positions.shape[0]
assert len(mutation_positions.shape) == 1
# determine which outputs should be selected
if output_filter_mask is None:
if out_annotation is None:
output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
else:
output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
# make sure the labels are assigned correctly
out_annotation = out_annotation_all_outputs[output_filter_mask]
# Generate the necessary gradient functions
sal_funcs = __generate_direct_saliency_functions__(model, out_annotation_all_outputs, out_annotation)
# ANALOGOUS TO ISM:
# predict
preds = {}
for k in seqs:
preds[k] = {}
if "_rc" in k:
mutated_positions_here = get_seq_len(ref)[1] - 1 - mutation_positions
else:
mutated_positions_here = mutation_positions
for l in out_annotation:
preds[k][l] = predict_vals(input_data=seqs[k], apply_function=__get_direct_saliencies__,
                                       score_func=sal_funcs[l], mutated_positions=mutated_positions_here, model=model)
diff_ret_dGrad = {}
pred_out = {"ref": {}, "alt": {}}
for k in preds["ref"]:
# TODO make list (and dict)-ready
diff_fwd = general_diff(preds["alt"][k]["dGrad"], preds["ref"][k]["dGrad"])
diff_rc = general_diff(preds["alt_rc"][k]["dGrad"], preds["ref_rc"][k]["dGrad"])
sel = general_sel(diff_fwd, diff_rc)
replace_by_sel(diff_fwd, diff_rc, sel)
diff_ret_dGrad[k] = diff_fwd
# Overwrite the fwd values with rc values if rc was selected
replace_by_sel(preds["ref"][k]["dGrad"], preds["ref_rc"][k]["dGrad"], sel)
replace_by_sel(preds["alt"][k]["dGrad"], preds["alt_rc"][k]["dGrad"], sel)
pred_out["ref"][k] = preds["ref"][k]["dGrad"]
pred_out["alt"][k] = preds["alt"][k]["dGrad"]
return {"diff": pd.DataFrame(diff_ret_dGrad),
"ref": pd.DataFrame(pred_out["ref"]),
"alt": pd.DataFrame(pred_out["alt"])}
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import netaddr
from oslo_versionedobjects import exception as ovo_exc
import six
from nova.network import model as network_model
from nova.objects import fields
from nova import signature_utils
from nova import test
from nova import utils
class FakeFieldType(fields.FieldType):
def coerce(self, obj, attr, value):
return '*%s*' % value
def to_primitive(self, obj, attr, value):
return '!%s!' % value
def from_primitive(self, obj, attr, value):
return value[1:-1]
class FakeEnum(fields.Enum):
FROG = "frog"
PLATYPUS = "platypus"
ALLIGATOR = "alligator"
ALL = (FROG, PLATYPUS, ALLIGATOR)
def __init__(self, **kwargs):
super(FakeEnum, self).__init__(valid_values=FakeEnum.ALL,
**kwargs)
class FakeEnumAlt(fields.Enum):
FROG = "frog"
PLATYPUS = "platypus"
AARDVARK = "aardvark"
ALL = (FROG, PLATYPUS, AARDVARK)
def __init__(self, **kwargs):
super(FakeEnumAlt, self).__init__(valid_values=FakeEnumAlt.ALL,
**kwargs)
class FakeEnumField(fields.BaseEnumField):
AUTO_TYPE = FakeEnum()
class FakeEnumAltField(fields.BaseEnumField):
AUTO_TYPE = FakeEnumAlt()
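# Illustrative sketch (not part of the test suite): a BaseEnumField subclass
# coerces values against its AUTO_TYPE's valid values and rejects anything
# else, which is the behaviour the Test* classes below exercise.
def _example_fake_enum_coercion():
    field = FakeEnumField()
    coerced = field.coerce('obj', 'attr', 'frog')     # -> 'frog'
    try:
        field.coerce('obj', 'attr', 'wookie')         # not in FakeEnum.ALL
        rejected = False
    except (TypeError, ValueError):
        rejected = True
    return coerced, rejected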
class TestField(test.NoDBTestCase):
def setUp(self):
super(TestField, self).setUp()
self.field = fields.Field(FakeFieldType())
self.coerce_good_values = [('foo', '*foo*')]
self.coerce_bad_values = []
self.to_primitive_values = [('foo', '!foo!')]
self.from_primitive_values = [('!foo!', 'foo')]
def test_coerce_good_values(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val))
def test_coerce_bad_values(self):
for in_val in self.coerce_bad_values:
self.assertRaises((TypeError, ValueError),
self.field.coerce, 'obj', 'attr', in_val)
def test_to_primitive(self):
for in_val, prim_val in self.to_primitive_values:
self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr',
in_val))
def test_from_primitive(self):
class ObjectLikeThing(object):
_context = 'context'
for prim_val, out_val in self.from_primitive_values:
self.assertEqual(out_val, self.field.from_primitive(
ObjectLikeThing, 'attr', prim_val))
def test_stringify(self):
self.assertEqual('123', self.field.stringify(123))
class TestString(TestField):
def setUp(self):
super(TestString, self).setUp()
self.field = fields.StringField()
self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')]
if six.PY2:
self.coerce_good_values.append((int(1), '1'))
self.coerce_bad_values = [None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'123'", self.field.stringify(123))
class TestBaseEnum(TestField):
def setUp(self):
super(TestBaseEnum, self).setUp()
self.field = FakeEnumField()
self.coerce_good_values = [('frog', 'frog'),
('platypus', 'platypus'),
('alligator', 'alligator')]
self.coerce_bad_values = ['aardvark', 'wookie']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'platypus'", self.field.stringify('platypus'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'aardvark')
def test_fingerprint(self):
# Notes(yjiang5): make sure changing valid_value will be detected
# in test_objects.test_versions
field1 = FakeEnumField()
field2 = FakeEnumAltField()
self.assertNotEqual(str(field1), str(field2))
class TestEnum(TestField):
def setUp(self):
super(TestEnum, self).setUp()
self.field = fields.EnumField(
valid_values=['foo', 'bar', 1, 1, True])
self.coerce_good_values = [('foo', 'foo'), (1, '1'), (True, 'True')]
if six.PY2:
self.coerce_good_values.append((int(1), '1'))
self.coerce_bad_values = ['boo', 2, False]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'foo'", self.field.stringify('foo'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, '123')
def test_fingerprint(self):
# Notes(yjiang5): make sure changing valid_value will be detected
# in test_objects.test_versions
field1 = fields.EnumField(valid_values=['foo', 'bar'])
field2 = fields.EnumField(valid_values=['foo', 'bar1'])
self.assertNotEqual(str(field1), str(field2))
def test_without_valid_values(self):
self.assertRaises(ovo_exc.EnumValidValuesInvalidError,
fields.EnumField, 1)
def test_with_empty_values(self):
self.assertRaises(ovo_exc.EnumRequiresValidValuesError,
fields.EnumField, [])
class TestArchitecture(TestField):
def setUp(self):
super(TestArchitecture, self).setUp()
self.field = fields.ArchitectureField()
self.coerce_good_values = [('x86_64', 'x86_64'),
('amd64', 'x86_64'),
('I686', 'i686'),
('i386', 'i686')]
self.coerce_bad_values = ['x86_99']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'aarch64'", self.field.stringify('aarch64'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'ppc42')
class TestBlockDeviceDestinationType(TestField):
def setUp(self):
super(TestBlockDeviceDestinationType, self).setUp()
self.field = fields.BlockDeviceDestinationTypeField()
self.coerce_good_values = [('local', 'local'),
('volume', 'volume')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'volume'", self.field.stringify('volume'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestBlockDeviceSourceType(TestField):
def setUp(self):
super(TestBlockDeviceSourceType, self).setUp()
self.field = fields.BlockDeviceSourceTypeField()
self.coerce_good_values = [('blank', 'blank'),
('image', 'image'),
('snapshot', 'snapshot'),
('volume', 'volume')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'image'", self.field.stringify('image'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestBlockDeviceType(TestField):
def setUp(self):
super(TestBlockDeviceType, self).setUp()
self.field = fields.BlockDeviceTypeField()
self.coerce_good_values = [('cdrom', 'cdrom'),
('disk', 'disk'),
('floppy', 'floppy'),
('fs', 'fs'),
('lun', 'lun')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'disk'", self.field.stringify('disk'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestCPUMode(TestField):
def setUp(self):
super(TestCPUMode, self).setUp()
self.field = fields.CPUModeField()
self.coerce_good_values = [('host-model', 'host-model'),
('host-passthrough', 'host-passthrough'),
('custom', 'custom')]
self.coerce_bad_values = ['magic']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'custom'", self.field.stringify('custom'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'magic')
class TestCPUMatch(TestField):
def setUp(self):
super(TestCPUMatch, self).setUp()
self.field = fields.CPUMatchField()
self.coerce_good_values = [('exact', 'exact'),
('strict', 'strict'),
('minimum', 'minimum')]
self.coerce_bad_values = ['best']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'exact'", self.field.stringify('exact'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'best')
class TestCPUFeaturePolicy(TestField):
def setUp(self):
super(TestCPUFeaturePolicy, self).setUp()
self.field = fields.CPUFeaturePolicyField()
self.coerce_good_values = [('force', 'force'),
('require', 'require'),
('optional', 'optional'),
('disable', 'disable'),
('forbid', 'forbid')]
self.coerce_bad_values = ['disallow']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'forbid'", self.field.stringify('forbid'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'disallow')
class TestConfigDrivePolicy(TestField):
def setUp(self):
super(TestConfigDrivePolicy, self).setUp()
self.field = fields.ConfigDrivePolicyField()
self.coerce_good_values = [('optional', 'optional'),
('mandatory', 'mandatory')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'optional'", self.field.stringify('optional'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestCPUAllocationPolicy(TestField):
def setUp(self):
super(TestCPUAllocationPolicy, self).setUp()
self.field = fields.CPUAllocationPolicyField()
self.coerce_good_values = [('dedicated', 'dedicated'),
('shared', 'shared')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'shared'", self.field.stringify('shared'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestCPUThreadAllocationPolicy(TestField):
def setUp(self):
super(TestCPUThreadAllocationPolicy, self).setUp()
self.field = fields.CPUThreadAllocationPolicyField()
self.coerce_good_values = [('prefer', 'prefer'),
('isolate', 'isolate'),
('require', 'require')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'prefer'", self.field.stringify('prefer'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestPciDeviceType(TestField):
def setUp(self):
super(TestPciDeviceType, self).setUp()
self.field = fields.PciDeviceTypeField()
self.coerce_good_values = [('type-PCI', 'type-PCI'),
('type-PF', 'type-PF'),
('type-VF', 'type-VF')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'type-VF'", self.field.stringify('type-VF'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestDiskBus(TestField):
def setUp(self):
super(TestDiskBus, self).setUp()
self.field = fields.DiskBusField()
self.coerce_good_values = [('fdc', 'fdc'),
('ide', 'ide'),
('sata', 'sata'),
('scsi', 'scsi'),
('usb', 'usb'),
('virtio', 'virtio'),
('xen', 'xen'),
('lxc', 'lxc'),
('uml', 'uml')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'ide'", self.field.stringify('ide'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestHVType(TestField):
def setUp(self):
super(TestHVType, self).setUp()
self.field = fields.HVTypeField()
self.coerce_good_values = [('baremetal', 'baremetal'),
('bhyve', 'bhyve'),
('fake', 'fake'),
('kvm', 'kvm'),
('xapi', 'xen')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'xen'", self.field.stringify('xen'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestImageSignatureTypes(TestField):
# Ensure that the object definition is updated
# in step with the signature_utils module
def setUp(self):
super(TestImageSignatureTypes, self).setUp()
self.hash_field = fields.ImageSignatureHashType()
self.key_type_field = fields.ImageSignatureKeyType()
def test_hashes(self):
for hash_name in list(signature_utils.HASH_METHODS.keys()):
self.assertIn(hash_name, self.hash_field.hashes)
def test_key_types(self):
key_type_dict = signature_utils.SignatureKeyType._REGISTERED_TYPES
key_types = list(key_type_dict.keys())
for key_type in key_types:
self.assertIn(key_type, self.key_type_field.key_types)
class TestOSType(TestField):
def setUp(self):
super(TestOSType, self).setUp()
self.field = fields.OSTypeField()
self.coerce_good_values = [('linux', 'linux'),
('windows', 'windows'),
('WINDOWS', 'windows')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'linux'", self.field.stringify('linux'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestResourceClass(TestField):
def setUp(self):
super(TestResourceClass, self).setUp()
self.field = fields.ResourceClassField()
self.coerce_good_values = [
('VCPU', 'VCPU'),
('MEMORY_MB', 'MEMORY_MB'),
('DISK_GB', 'DISK_GB'),
('PCI_DEVICE', 'PCI_DEVICE'),
('SRIOV_NET_VF', 'SRIOV_NET_VF'),
('NUMA_SOCKET', 'NUMA_SOCKET'),
('NUMA_CORE', 'NUMA_CORE'),
('NUMA_THREAD', 'NUMA_THREAD'),
('NUMA_MEMORY_MB', 'NUMA_MEMORY_MB'),
('IPV4_ADDRESS', 'IPV4_ADDRESS'),
]
self.expected_indexes = [
('VCPU', 0),
('MEMORY_MB', 1),
('DISK_GB', 2),
('PCI_DEVICE', 3),
('SRIOV_NET_VF', 4),
('NUMA_SOCKET', 5),
('NUMA_CORE', 6),
('NUMA_THREAD', 7),
('NUMA_MEMORY_MB', 8),
('IPV4_ADDRESS', 9),
]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'VCPU'", self.field.stringify(
fields.ResourceClass.VCPU))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'cow')
def test_index(self):
for name, index in self.expected_indexes:
self.assertEqual(index, self.field.index(name))
def test_index_invalid(self):
self.assertRaises(ValueError, self.field.index, 'cow')
def test_from_index(self):
for name, index in self.expected_indexes:
self.assertEqual(name, self.field.from_index(index))
def test_from_index_invalid(self):
self.assertRaises(IndexError, self.field.from_index, 999)
class TestRNGModel(TestField):
def setUp(self):
super(TestRNGModel, self).setUp()
self.field = fields.RNGModelField()
self.coerce_good_values = [('virtio', 'virtio'), ]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'virtio'", self.field.stringify('virtio'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestSCSIModel(TestField):
def setUp(self):
super(TestSCSIModel, self).setUp()
self.field = fields.SCSIModelField()
self.coerce_good_values = [('buslogic', 'buslogic'),
('ibmvscsi', 'ibmvscsi'),
('lsilogic', 'lsilogic'),
('lsisas1068', 'lsisas1068'),
('lsisas1078', 'lsisas1078'),
('virtio-scsi', 'virtio-scsi'),
('vmpvscsi', 'vmpvscsi'),
('lsilogicsas', 'lsisas1068'),
('paravirtual', 'vmpvscsi')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'vmpvscsi'", self.field.stringify('vmpvscsi'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestVideoModel(TestField):
def setUp(self):
super(TestVideoModel, self).setUp()
self.field = fields.VideoModelField()
self.coerce_good_values = [('cirrus', 'cirrus'),
('qxl', 'qxl'),
('vga', 'vga'),
('vmvga', 'vmvga'),
('xen', 'xen')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'cirrus'", self.field.stringify('cirrus'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestVIFModel(TestField):
def setUp(self):
super(TestVIFModel, self).setUp()
self.field = fields.VIFModelField()
self.coerce_good_values = [('virtio', 'virtio'),
('ne2k_pci', 'ne2k_pci'),
('pcnet', 'pcnet'),
('rtl8139', 'rtl8139'),
('e1000', 'e1000'),
('e1000e', 'e1000e'),
('netfront', 'netfront'),
('spapr-vlan', 'spapr-vlan'),
('VirtualE1000', 'e1000'),
('VirtualE1000e', 'e1000e'),
('VirtualPCNet32', 'pcnet'),
('VirtualSriovEthernetCard', 'sriov'),
('VirtualVmxnet', 'vmxnet'),
('VirtualVmxnet3', 'vmxnet3'),
]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'e1000'", self.field.stringify('e1000'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestVMMode(TestField):
def setUp(self):
super(TestVMMode, self).setUp()
self.field = fields.VMModeField()
self.coerce_good_values = [('hvm', 'hvm'),
('xen', 'xen'),
('uml', 'uml'),
('exe', 'exe'),
('pv', 'xen'),
('hv', 'hvm'),
('baremetal', 'hvm')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'hvm'", self.field.stringify('hvm'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestWatchdogAction(TestField):
def setUp(self):
super(TestWatchdogAction, self).setUp()
self.field = fields.WatchdogActionField()
self.coerce_good_values = [('none', 'none'),
('pause', 'pause'),
('poweroff', 'poweroff'),
('reset', 'reset')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'reset'", self.field.stringify('reset'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestMonitorMetricType(TestField):
def setUp(self):
super(TestMonitorMetricType, self).setUp()
self.field = fields.MonitorMetricTypeField()
self.coerce_good_values = [('cpu.frequency', 'cpu.frequency'),
('cpu.user.time', 'cpu.user.time'),
('cpu.kernel.time', 'cpu.kernel.time'),
('cpu.idle.time', 'cpu.idle.time'),
('cpu.iowait.time', 'cpu.iowait.time'),
('cpu.user.percent', 'cpu.user.percent'),
('cpu.kernel.percent',
'cpu.kernel.percent'),
('cpu.idle.percent', 'cpu.idle.percent'),
('cpu.iowait.percent',
'cpu.iowait.percent'),
('cpu.percent', 'cpu.percent')]
self.coerce_bad_values = ['cpu.typo']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'cpu.frequency'",
self.field.stringify('cpu.frequency'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'cpufrequency')
class TestDiskFormat(TestField):
def setUp(self):
super(TestDiskFormat, self).setUp()
self.field = fields.DiskFormatField()
self.coerce_good_values = [('qcow2', 'qcow2'),
('raw', 'raw'),
('lvm', 'lvm'),
('rbd', 'rbd'),
('ploop', 'ploop'),
('vhd', 'vhd'),
('vmdk', 'vmdk'),
('vdi', 'vdi'),
('iso', 'iso')]
self.coerce_bad_values = ['acme']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'rbd'", self.field.stringify('rbd'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'acme')
class TestInteger(TestField):
def setUp(self):
super(TestInteger, self).setUp()
self.field = fields.IntegerField()
self.coerce_good_values = [(1, 1), ('1', 1)]
self.coerce_bad_values = ['foo', None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
class TestNonNegativeInteger(TestInteger):
def setUp(self):
super(TestNonNegativeInteger, self).setUp()
self.field = fields.Field(fields.NonNegativeInteger())
self.coerce_bad_values.extend(['-2', '4.2'])
class TestFloat(TestField):
def setUp(self):
super(TestFloat, self).setUp()
self.field = fields.FloatField()
self.coerce_good_values = [(1.1, 1.1), ('1.1', 1.1)]
self.coerce_bad_values = ['foo', None]
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
class TestNonNegativeFloat(TestFloat):
def setUp(self):
super(TestNonNegativeFloat, self).setUp()
self.field = fields.Field(fields.NonNegativeFloat())
self.coerce_bad_values.extend(['-4.2'])
class TestBoolean(TestField):
def setUp(self):
super(TestBoolean, self).setUp()
self.field = fields.BooleanField()
self.coerce_good_values = [(True, True), (False, False), (1, True),
('foo', True), (0, False), ('', False)]
self.coerce_bad_values = []
self.to_primitive_values = self.coerce_good_values[0:2]
self.from_primitive_values = self.coerce_good_values[0:2]
class TestDateTime(TestField):
def setUp(self):
super(TestDateTime, self).setUp()
self.dt = datetime.datetime(1955, 11, 5, tzinfo=iso8601.iso8601.Utc())
self.field = fields.DateTimeField()
self.coerce_good_values = [(self.dt, self.dt),
(utils.isotime(self.dt), self.dt)]
self.coerce_bad_values = [1, 'foo']
self.to_primitive_values = [(self.dt, utils.isotime(self.dt))]
self.from_primitive_values = [(utils.isotime(self.dt), self.dt)]
def test_stringify(self):
self.assertEqual(
'1955-11-05T18:00:00Z',
self.field.stringify(
datetime.datetime(1955, 11, 5, 18, 0, 0,
tzinfo=iso8601.iso8601.Utc())))
class TestIPAddress(TestField):
def setUp(self):
super(TestIPAddress, self).setUp()
self.field = fields.IPAddressField()
self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')),
('::1', netaddr.IPAddress('::1')),
(netaddr.IPAddress('::1'),
netaddr.IPAddress('::1'))]
self.coerce_bad_values = ['1-2', 'foo']
self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4'),
(netaddr.IPAddress('::1'), '::1')]
self.from_primitive_values = [('1.2.3.4',
netaddr.IPAddress('1.2.3.4')),
('::1',
netaddr.IPAddress('::1'))]
class TestIPAddressV4(TestField):
def setUp(self):
super(TestIPAddressV4, self).setUp()
self.field = fields.IPV4AddressField()
self.coerce_good_values = [('1.2.3.4', netaddr.IPAddress('1.2.3.4')),
(netaddr.IPAddress('1.2.3.4'),
netaddr.IPAddress('1.2.3.4'))]
self.coerce_bad_values = ['1-2', 'foo', '::1']
self.to_primitive_values = [(netaddr.IPAddress('1.2.3.4'), '1.2.3.4')]
self.from_primitive_values = [('1.2.3.4',
netaddr.IPAddress('1.2.3.4'))]
class TestIPAddressV6(TestField):
def setUp(self):
super(TestIPAddressV6, self).setUp()
self.field = fields.IPV6AddressField()
self.coerce_good_values = [('::1', netaddr.IPAddress('::1')),
(netaddr.IPAddress('::1'),
netaddr.IPAddress('::1'))]
self.coerce_bad_values = ['1.2', 'foo', '1.2.3.4']
self.to_primitive_values = [(netaddr.IPAddress('::1'), '::1')]
self.from_primitive_values = [('::1',
netaddr.IPAddress('::1'))]
class TestDict(TestField):
def setUp(self):
super(TestDict, self).setUp()
self.field = fields.Field(fields.Dict(FakeFieldType()))
self.coerce_good_values = [({'foo': 'bar'}, {'foo': '*bar*'}),
({'foo': 1}, {'foo': '*1*'})]
self.coerce_bad_values = [{1: 'bar'}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': '!bar!'})]
self.from_primitive_values = [({'foo': '!bar!'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{key=val}", self.field.stringify({'key': 'val'}))
class TestDictOfStrings(TestField):
def setUp(self):
super(TestDictOfStrings, self).setUp()
self.field = fields.DictOfStringsField()
self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
({'foo': 1}, {'foo': '1'})]
self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{key='val'}", self.field.stringify({'key': 'val'}))
class TestDictOfIntegers(TestField):
def setUp(self):
super(TestDictOfIntegers, self).setUp()
self.field = fields.DictOfIntegersField()
self.coerce_good_values = [({'foo': '42'}, {'foo': 42}),
({'foo': 4.2}, {'foo': 4})]
self.coerce_bad_values = [{1: 'bar'}, {'foo': 'boo'},
'foo', {'foo': None}]
self.to_primitive_values = [({'foo': 42}, {'foo': 42})]
self.from_primitive_values = [({'foo': 42}, {'foo': 42})]
def test_stringify(self):
self.assertEqual("{key=42}", self.field.stringify({'key': 42}))
class TestDictOfStringsNone(TestField):
def setUp(self):
super(TestDictOfStringsNone, self).setUp()
self.field = fields.DictOfNullableStringsField()
self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
({'foo': 1}, {'foo': '1'}),
({'foo': None}, {'foo': None})]
self.coerce_bad_values = [{1: 'bar'}, 'foo']
self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
def test_stringify(self):
self.assertEqual("{k2=None,key='val'}",
self.field.stringify({'k2': None,
'key': 'val'}))
class TestListOfDictOfNullableStringsField(TestField):
def setUp(self):
super(TestListOfDictOfNullableStringsField, self).setUp()
self.field = fields.ListOfDictOfNullableStringsField()
self.coerce_good_values = [([{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}],
[{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]),
([{'f': 1}, {'f1': 'b1'}],
[{'f': '1'}, {'f1': 'b1'}]),
([{'foo': None}], [{'foo': None}])]
self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']]
self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}],
[{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])]
self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'},
{'f2': None}],
[{'f': 'b'}, {'f1': 'b1'},
{'f2': None}])]
def test_stringify(self):
self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]",
self.field.stringify(
[{'f': None, 'f1': 'b1'}, {'f2': 'b2'}]))
class TestList(TestField):
def setUp(self):
super(TestList, self).setUp()
self.field = fields.Field(fields.List(FakeFieldType()))
self.coerce_good_values = [(['foo', 'bar'], ['*foo*', '*bar*'])]
self.coerce_bad_values = ['foo']
self.to_primitive_values = [(['foo'], ['!foo!'])]
self.from_primitive_values = [(['!foo!'], ['foo'])]
def test_stringify(self):
self.assertEqual('[123]', self.field.stringify([123]))
class TestListOfStrings(TestField):
def setUp(self):
super(TestListOfStrings, self).setUp()
self.field = fields.ListOfStringsField()
self.coerce_good_values = [(['foo', 'bar'], ['foo', 'bar'])]
self.coerce_bad_values = ['foo']
self.to_primitive_values = [(['foo'], ['foo'])]
self.from_primitive_values = [(['foo'], ['foo'])]
def test_stringify(self):
self.assertEqual("['abc']", self.field.stringify(['abc']))
class TestSet(TestField):
def setUp(self):
super(TestSet, self).setUp()
self.field = fields.Field(fields.Set(FakeFieldType()))
self.coerce_good_values = [(set(['foo', 'bar']),
set(['*foo*', '*bar*']))]
self.coerce_bad_values = [['foo'], {'foo': 'bar'}]
self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))]
self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))]
def test_stringify(self):
self.assertEqual('set([123])', self.field.stringify(set([123])))
class TestSetOfIntegers(TestField):
def setUp(self):
super(TestSetOfIntegers, self).setUp()
self.field = fields.SetOfIntegersField()
self.coerce_good_values = [(set(['1', 2]),
set([1, 2]))]
self.coerce_bad_values = [set(['foo'])]
self.to_primitive_values = [(set([1]), tuple([1]))]
self.from_primitive_values = [(tuple([1]), set([1]))]
def test_stringify(self):
self.assertEqual('set([1,2])', self.field.stringify(set([1, 2])))
class TestListOfSetsOfIntegers(TestField):
def setUp(self):
super(TestListOfSetsOfIntegers, self).setUp()
self.field = fields.ListOfSetsOfIntegersField()
self.coerce_good_values = [([set(['1', 2]), set([3, '4'])],
[set([1, 2]), set([3, 4])])]
self.coerce_bad_values = [[set(['foo'])]]
self.to_primitive_values = [([set([1])], [tuple([1])])]
self.from_primitive_values = [([tuple([1])], [set([1])])]
def test_stringify(self):
self.assertEqual('[set([1,2])]', self.field.stringify([set([1, 2])]))
class TestNetworkModel(TestField):
def setUp(self):
super(TestNetworkModel, self).setUp()
model = network_model.NetworkInfo()
self.field = fields.Field(fields.NetworkModel())
self.coerce_good_values = [(model, model), (model.json(), model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, model.json())]
self.from_primitive_values = [(model.json(), model)]
def test_stringify(self):
networkinfo = network_model.NetworkInfo()
networkinfo.append(network_model.VIF(id=123))
networkinfo.append(network_model.VIF(id=456))
self.assertEqual('NetworkModel(123,456)',
self.field.stringify(networkinfo))
class TestIPNetwork(TestField):
def setUp(self):
super(TestIPNetwork, self).setUp()
self.field = fields.Field(fields.IPNetwork())
good = ['192.168.1.0/24', '0.0.0.0/0', '::1/128', '::1/64', '::1/0']
self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
'::1/129', '192.168.0.0/-1']
self.to_primitive_values = [(netaddr.IPNetwork(x), x)
for x in good]
self.from_primitive_values = [(x, netaddr.IPNetwork(x))
for x in good]
class TestIPV4Network(TestField):
def setUp(self):
super(TestIPV4Network, self).setUp()
self.field = fields.Field(fields.IPV4Network())
good = ['192.168.1.0/24', '0.0.0.0/0']
self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
'::1/129', '192.168.0.0/-1']
self.to_primitive_values = [(netaddr.IPNetwork(x), x)
for x in good]
self.from_primitive_values = [(x, netaddr.IPNetwork(x))
for x in good]
class TestIPV6Network(TestField):
def setUp(self):
super(TestIPV6Network, self).setUp()
self.field = fields.Field(fields.IPV6Network())
good = ['::1/128', '::1/64', '::1/0']
self.coerce_good_values = [(x, netaddr.IPNetwork(x)) for x in good]
self.coerce_bad_values = ['192.168.0.0/f', '192.168.0.0/foo',
'::1/129', '192.168.0.0/-1']
self.to_primitive_values = [(netaddr.IPNetwork(x), x)
for x in good]
self.from_primitive_values = [(x, netaddr.IPNetwork(x))
for x in good]
class TestNotificationPriority(TestField):
def setUp(self):
super(TestNotificationPriority, self).setUp()
self.field = fields.NotificationPriorityField()
self.coerce_good_values = [('audit', 'audit'),
('critical', 'critical'),
('debug', 'debug'),
('error', 'error'),
('sample', 'sample'),
('warn', 'warn')]
self.coerce_bad_values = ['warning']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'warn'", self.field.stringify('warn'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'warning')
class TestNotificationPhase(TestField):
def setUp(self):
super(TestNotificationPhase, self).setUp()
self.field = fields.NotificationPhaseField()
self.coerce_good_values = [('start', 'start'),
('end', 'end'),
('error', 'error')]
self.coerce_bad_values = ['begin']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'error'", self.field.stringify('error'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'begin')
class TestNotificationAction(TestField):
def setUp(self):
super(TestNotificationAction, self).setUp()
self.field = fields.NotificationActionField()
self.coerce_good_values = [('update', 'update')]
self.coerce_bad_values = ['magic']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'update'", self.field.stringify('update'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'magic')
|
|
import json
import sys
import os
import yaml
from cloudmesh_base.Shell import Shell
from cloudmesh_database.dbconn import get_mongo_dbname_from_collection
from cloudmesh_base.ConfigDict import ConfigDict
from cloudmesh_base.util import path_expand
from cmd3.console import Console
from passlib.hash import sha256_crypt
from bson.objectid import ObjectId
from texttable import Texttable
from mongoengine import fields
from util import requires_roles
# from cloudmesh_management.base_classes import SubUser, Project
from cloudmesh_management.base_user import User
from cloudmesh_management.base_project import Project
STATUS = ('pending', 'approved', 'blocked', 'denied', 'active', 'suspended')
ROLE = ('user', 'admin', 'reviewer')
def implement():
print "IMPLEMENT ME"
def update_document(document, data_dict):
def field_value(field, values):
if field.__class__ in (fields.ListField, fields.SortedListField):
if values:
return str(values).split(", ")
else:
return []
if field.__class__ in (
fields.EmbeddedDocumentField,
fields.GenericEmbeddedDocumentField,
fields.ReferenceField,
fields.GenericReferenceField
):
pass
# return field.document_type(**value)
else:
return values
[setattr(
document, key,
field_value(document._fields[key], value)
) for key, value in data_dict.items()]
return document
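# Usage sketch (hypothetical field names; assumes `firstname` and `lastname`
# are plain string fields and `projects` is a ListField on the User document):
#
#   user = User()
#   update_document(user, {"firstname": "Jane",
#                          "lastname": "Doe",
#                          "projects": "fg101, fg202"})
#   # user.firstname == "Jane"; user.projects == ["fg101", "fg202"]
#
# Note that embedded/reference fields currently fall through the helper and
# end up set to None, because the corresponding branch only contains `pass`.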
'''
def generate_password_hash(password)
# maybe using passlib https://pypi.python.org/pypi/passlib
salt = uuid.uuid4().hex
hashed_password = hashlib.sha512(password + salt).hexdigest()
return hashed_password'''
def read_user(filename):
"""
Reads user data from a yaml file
:param filename: The file name
    :type filename: str (path to the yaml file)
"""
stream = open(filename, 'r')
    data = yaml.safe_load(stream)
user = User(
status=data["status"],
username=data["username"],
title=data["title"],
firstname=data["firstname"],
lastname=data["lastname"],
email=data["email"],
url=data["url"],
citizenship=data["citizenship"],
bio=data["bio"],
password=data["password"],
userid=data["userid"],
phone=data["phone"],
projects=data["projects"],
institution=data["institution"],
department=data["department"],
address=data["address"],
country=data["country"],
advisor=data["advisor"],
message=data["message"],
)
return user
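# Example input (sketch): read_user expects a flat YAML mapping that contains
# every key accessed above (missing keys raise a KeyError). Values are made up:
#
#   username: jdoe
#   status: pending
#   firstname: Jane
#   lastname: Doe
#   email: jdoe@example.com
#   ...                      # plus the remaining keys read above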
# noinspection PyBroadException
class Users(object):
"""
Convenience object to manage several users
"""
def __init__(self):
# config = ConfigDict(filename=config_file("/cloudmesh_server.yaml"))
# port = config['cloudmesh']['server']['mongo']['port']
# db = connect('manage', port=port)
self.users = User.objects()
db_name = get_mongo_dbname_from_collection("manage")
if db_name:
meta = {'db_alias': db_name}
# get_mongo_db("manage", DBConnFactory.TYPE_MONGOENGINE)
@classmethod
def objects(cls):
"""
Returns the users
"""
return cls.users
@classmethod
def get_unique_username(cls, proposal):
"""
        Gets a unique username from a proposal by appending a number at the end if needed.
:param proposal: the proposed username
:type proposal: String
"""
new_proposal = proposal.lower()
num = 1
username = User.objects(username=new_proposal)
while username.count() > 0:
new_proposal = proposal + str(num)
username = User.objects(username=new_proposal)
num += 1
return new_proposal
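    # Example (illustrative): with no existing "jdoe" the proposal is returned
    # unchanged; if "jdoe" is taken the loop tries "jdoe1", then "jdoe2", and
    # so on until an unused name is found.
    #
    #   Users.get_unique_username("jdoe")   # -> "jdoe", "jdoe1", "jdoe2", ...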
@classmethod
@requires_roles('user', 'admin', 'reviewer')
def create_config(cls, username):
dir_path = path_expand("~/.cloudmesh/{0}".format("accounts"))
if not os.path.exists(dir_path):
Shell.mkdir(dir_path)
filename = path_expand("~/.cloudmesh/{0}/{1}".format("accounts", ".config"))
data = dict(user=username)
with open(filename, 'w') as outfile:
outfile.write(yaml.dump(data, default_flow_style=True))
@classmethod
@requires_roles('admin')
def add(cls, user):
"""
Adds a user
:param user: the username
:type user: User object
"""
user.username = cls.get_unique_username(user.username)
user.roles = ['user']
user.set_date_deactivate()
if cls.validate_email(user.email):
user.save()
else:
Console.error("A user with the e-mail `{0}` already exists".format(user.email))
@classmethod
@requires_roles('admin')
def delete_user(cls, user_name=None):
if user_name:
try:
user = User.objects(username=user_name)
if user:
user.delete()
if user_name != "super":
Console.info("User " + user_name + " removed from the database.")
else:
Console.error("User with the name '{0}' does not exist.".format(user_name))
except:
Console.error("Oops! Something went wrong while trying to remove a user")
else:
Console.error("Please specify the user to be removed")
@classmethod
@requires_roles('admin')
def amend_user_status(cls, user_name=None, new_status=None):
current_status = ""
if user_name:
try:
current_status = cls.get_user_status(user_name)
except:
Console.error("Oops! Something went wrong while trying to get user status")
if new_status == "approved":
if current_status in ["pending", "denied"]:
cls.set_user_status(user_name, new_status)
else:
Console.error("Cannot approve user. User not in pending status.")
elif new_status == "active":
if current_status in ["approved", "suspended", "blocked"]:
cls.set_user_status(user_name, new_status)
else:
Console.error("Cannot activate user. User not in approved or suspended status.")
elif new_status == "suspended":
if current_status == "active":
cls.set_user_status(user_name, new_status)
else:
Console.error("Cannot suspend user. User not in active status.")
elif new_status == "blocked":
if current_status == "active":
cls.set_user_status(user_name, new_status)
else:
Console.error("Cannot block user. User not in active status.")
elif new_status == "denied":
if current_status in ["approved", "pending"]:
cls.set_user_status(user_name, new_status)
else:
Console.error("Cannot deny user. User not in approved or pending status.")
else:
Console.error("Please specify the user to be amended")
@classmethod
@requires_roles('admin')
def set_user_status(cls, user_name, status):
if user_name:
try:
User.objects(username=user_name).update_one(set__status=status)
except:
Console.error("Oops! Something went wrong while trying to amend user status")
else:
Console.error("Please specify the user to be amended")
@classmethod
@requires_roles('admin')
def get_user_status(cls, user_name):
if user_name:
try:
user = User.objects(username=user_name).only('status')
if user:
for entry in user:
return entry.status
else:
return
except:
Console.error("Oops! Something went wrong while trying to get user status")
else:
Console.error("Please specify the user get status")
@classmethod
@requires_roles('admin')
def validate_email(cls, email):
"""
        Verifies that the given email address is not already used by an existing user.
:param email: email id of the user
:return: true or false
"""
user = User.objects(email=email)
valid = user.count() == 0
return valid
@classmethod
@requires_roles('admin')
def find(cls, email=None):
"""
Returns the users based on the given query.
If no email is specified all users are returned.
If the email is specified we search for the user with the given e-mail.
:param email: email
:type email: email address
"""
if email is None:
return User.objects()
else:
found = User.objects(email=email)
if found.count() > 0:
                return found[0]
else:
return None
@classmethod
@requires_roles('admin')
def find_user(cls, username):
"""
Returns a user based on the username
:param username:
:type username:
"""
        return User.objects(username=username)
@classmethod
@requires_roles('admin')
def clear(cls):
"""
        Removes all user elements from the mongo db
"""
try:
for user in User.objects:
user.delete()
Console.info("Users cleared from the database.")
except:
Console.error("Oops! Something went wrong while trying to clear the users from database")
@classmethod
@requires_roles('admin')
def list_users(cls, display_fmt=None, username=None, status=None):
# req_fields = ["username", "title", "firstname", "lastname",
# "email", "phone", "url", "citizenship",
# "institution", "institutionrole", "department",
# "advisor", "address", "status", "projects"]
req_fields = ["username", "firstname", "lastname",
"email", "phone", "institution", "institutionrole",
"advisor", "address", "status", "projects", "roles"]
try:
if username is None:
if status:
if status not in STATUS:
Console.info("Invalid status requested.. Displaying all users..")
user_json = User.objects.only(*req_fields).to_json()
else:
user_json = User.objects(status=status).only(*req_fields).to_json()
else:
user_json = User.objects.only(*req_fields).to_json()
user_dict = json.loads(user_json)
if user_dict:
if display_fmt != 'json':
cls.display(user_dict, username)
else:
cls.display_json(user_dict, username)
else:
Console.info("No users in the database.")
else:
user_json = User.objects(username=username).to_json()
users_list = json.loads(user_json)
for item in users_list:
users_dict = item
if users_dict:
if display_fmt != 'json':
cls.display_two_columns(users_dict)
else:
cls.display_json(users_dict, username)
else:
Console.error("User not in the database.")
except:
Console.error("Oops.. Something went wrong in the list users method " + sys.exc_info()[0])
@classmethod
@requires_roles('admin')
def list_users_json(cls):
# req_fields = ["username", "title", "firstname", "lastname",
# "email", "phone", "url", "citizenship",
# "institution", "institutionrole", "department",
# "advisor", "address", "status", "projects"]
req_fields = ["username", "firstname", "lastname",
"email", "phone", "institution", "institutionrole",
"advisor", "address", "status", "projects"]
try:
user_json = User.objects().to_json()
return user_json
except:
Console.error("Oops.. Something went wrong in the list users method " + sys.exc_info()[0])
@classmethod
@requires_roles('admin')
def list_projects(cls, user_name=None):
required_fields = ["username", "firstname", "lastname", "projects"]
try:
if user_name:
user_json = User.objects.only(*required_fields).to_json()
user_dict = json.loads(user_json)
if user_dict:
cls.display(user_dict, user_name)
else:
Console.info("No user details available in the database.")
except:
Console.error("Please provide a username.")
@classmethod
@requires_roles('admin')
def display(cls, user_dicts=None, user_name=None):
if bool(user_dicts):
values = []
table = Texttable(max_width=180)
for entry in user_dicts:
items = []
headers = []
for key, value in entry.iteritems():
if key == "projects":
project_entry = ""
if value:
for itm in value:
user_project = Project.objects(id=ObjectId(itm.get('$oid'))).only('title',
'project_id').first()
project_entry = project_entry + user_project.title + ", "
items.append(project_entry)
elif key == "roles":
role_entry = ""
if value:
for itm in value:
role_entry = role_entry+itm + ", "
role_entry = role_entry.rstrip(', ')
items.append(role_entry)
else:
items.append(value)
headers.append(key.replace('_', ' ').title())
values.append(items)
table.add_row(items)
table.header(headers)
print table.draw()
else:
if user_name:
Console.error("No user in the system with name '{0}'".format(user_name))
@classmethod
@requires_roles('admin')
def display_two_columns(cls, table_dict=None):
if table_dict:
ignore_fields = ['_cls', '_id', 'date_modified', 'date_created', 'password', 'confirm']
table = Texttable(max_width=100)
rows = [['Property', 'Value']]
for key, value in table_dict.iteritems():
if key not in ignore_fields:
items = [key.replace('_', ' ').title()]
if isinstance(value, list):
if value:
if key == "projects":
project_entry = ""
for itm in value:
user_project = Project.objects(id=ObjectId(itm.get('$oid'))) \
.only('title', 'project_id').first()
project_entry = project_entry + user_project.title + ", "
                                project_entry = project_entry.strip(', ')
items.append(project_entry)
else:
items.append(' , '.join(value))
else:
items.append('None')
else:
items.append(value)
rows.append(items)
try:
if rows:
table.add_rows(rows)
except:
print sys.exc_info()[0]
print table.draw()
pass
@classmethod
@requires_roles('admin')
def display_json(cls, user_dict=None, user_name=None):
if bool(user_dict):
print json.dumps(user_dict, indent=4)
else:
if user_name:
Console.error("No user in the system with name '{0}'".format(user_name))
@classmethod
@requires_roles('admin')
def create_user_from_file(cls, file_path):
try:
filename = path_expand(file_path)
file_config = ConfigDict(filename=filename)
except:
Console.error("Could not load file, please check filename and its path")
return
try:
user_config = file_config.get("cloudmesh", "user")
user_name = user_config['username']
user = User()
update_document(user, user_config)
except:
Console.error("Could not get user information from yaml file, "
"please check you yaml file, users information must be "
"under 'cloudmesh' -> 'users' -> user1...")
return
try:
if cls.check_exists(user_name) is False:
cls.add(user)
Console.info("User created in the database.")
else:
Console.error("User with user name " + user_name + " already exists.")
return
except:
Console.error("User creation in database failed, " + str(sys.exc_info()))
return
@classmethod
@requires_roles('admin')
def check_exists(cls, user_name):
return len(User.objects(username=user_name)) > 0
@classmethod
@requires_roles('user', 'admin')
def set_password(cls, user_name, passwd):
pass_hash = sha256_crypt.encrypt(passwd)
try:
User.objects(username=user_name).update_one(set__password=pass_hash)
Console.info("User password updated.")
except:
Console.error("Oops! Something went wrong while trying to set user password")
@classmethod
@requires_roles('admin', 'reviewer')
def set_role(cls, user_name, role):
try:
if role in ROLE:
User.objects(username=user_name).update_one(push__roles=role)
Console.info("Role {0} added to user {1}".format(role, user_name))
else:
Console.error("Please specify a valid role. Role {0} is invalid.".format(role))
except:
Console.error("Oops! Something went wrong while trying to set user role.")
def verified_email_domain(email):
"""
not yet implemented. Returns true if the e-mail is in a specified domain.
:param email:
:type email:
"""
domains = ["indiana.edu"]
for domain in domains:
        if email.endswith(domain):
return True
return False
|
|
# winout.py
#
# generic "output window"
#
# This Window will detect itself closing, and recreate next time output is
# written to it.
# This has the option of writing output at idle time (by hooking the
# idle message, and queueing output) or writing as each
# write is executed.
# Updating the window directly gives a jerky appearance as many writes
# take place between commands, and the window scrolls and updates etc.
# Updating at idle-time may defer all output of a long process, giving the
# appearance that nothing is happening.
# There is a compromise "line" mode, which will output whenever
# a complete line is available.
# behaviour depends on self.writeQueueing
# This module is thread safe - output can originate from any thread. If any thread
# other than the main thread attempts to print, it is always queued until next idle time
import sys, string, re
from pywin.mfc import docview
from pywin.framework import app, window
import win32ui, win32api, win32con
import queue
debug = lambda msg: None
##debug=win32ui.OutputDebugString
##import win32trace;win32trace.InitWrite() # for debugging - delete me!
##debug = win32trace.write
class flags:
# queueing of output.
WQ_NONE = 0
WQ_LINE = 1
WQ_IDLE = 2
#WindowOutputDocumentParent=docview.RichEditDoc
#WindowOutputDocumentParent=docview.Document
import pywin.scintilla.document
from pywin.scintilla import scintillacon
from pywin import default_scintilla_encoding
WindowOutputDocumentParent=pywin.scintilla.document.CScintillaDocument
class WindowOutputDocument(WindowOutputDocumentParent):
def SaveModified(self):
return 1 # say it is OK to destroy my document
def OnSaveDocument( self, fileName ):
win32ui.SetStatusText("Saving file...",1)
try:
self.SaveFile(fileName)
except IOError as details:
win32ui.MessageBox("Error - could not save file\r\n\r\n%s"%details)
return 0
win32ui.SetStatusText("Ready")
return 1
class WindowOutputFrame(window.MDIChildWnd):
def __init__(self, wnd = None):
window.MDIChildWnd.__init__(self, wnd)
self.HookMessage(self.OnSizeMove, win32con.WM_SIZE)
self.HookMessage(self.OnSizeMove, win32con.WM_MOVE)
def LoadFrame( self, idResource, style, wndParent, context ):
self.template = context.template
return self._obj_.LoadFrame(idResource, style, wndParent, context)
def PreCreateWindow(self, cc):
cc = self._obj_.PreCreateWindow(cc)
if self.template.defSize and self.template.defSize[0] != self.template.defSize[1]:
rect = app.RectToCreateStructRect(self.template.defSize)
cc = cc[0], cc[1], cc[2], cc[3], rect, cc[5], cc[6], cc[7], cc[8]
return cc
def OnSizeMove(self, msg):
# so recreate maintains position.
# Need to map coordinates from the
# frame windows first child.
mdiClient = self.GetParent()
self.template.defSize = mdiClient.ScreenToClient(self.GetWindowRect())
def OnDestroy(self, message):
self.template.OnFrameDestroy(self)
return 1
class WindowOutputViewImpl:
def __init__(self):
        self.patErrorMessage = re.compile(r'\W*File "(.*)", line ([0-9]+)')
self.template = self.GetDocument().GetDocTemplate()
def HookHandlers(self):
# Hook for the right-click menu.
self.HookMessage(self.OnRClick,win32con.WM_RBUTTONDOWN)
def OnDestroy(self, msg):
self.template.OnViewDestroy(self)
def OnInitialUpdate(self):
self.RestoreKillBuffer()
self.SetSel(-2) # end of buffer
def GetRightMenuItems(self):
ret = []
flags=win32con.MF_STRING|win32con.MF_ENABLED
ret.append((flags, win32ui.ID_EDIT_COPY, '&Copy'))
ret.append((flags, win32ui.ID_EDIT_SELECT_ALL, '&Select all'))
return ret
#
# Windows command handlers, virtuals, etc.
#
def OnRClick(self,params):
paramsList = self.GetRightMenuItems()
menu = win32ui.CreatePopupMenu()
for appendParams in paramsList:
if type(appendParams)!=type(()):
appendParams = (appendParams,)
menu.AppendMenu(*appendParams)
menu.TrackPopupMenu(params[5]) # track at mouse position.
return 0
    # As this is often used as an output window, exceptions will often
    # be printed.  Therefore, we support this functionality at this level.
    # Returns TRUE if the current line is an error message line, and will
    # jump to it.  FALSE if no error (and no action taken).
def HandleSpecialLine(self):
from . import scriptutils
line = self.GetLine()
if line[:11]=="com_error: ":
# An OLE Exception - pull apart the exception
# and try and locate a help file.
try:
import win32api, win32con
det = eval(line[line.find(":")+1:].strip())
win32ui.SetStatusText("Opening help file on OLE error...");
from . import help
help.OpenHelpFile(det[2][3],win32con.HELP_CONTEXT, det[2][4])
return 1
except win32api.error as details:
win32ui.SetStatusText("The help file could not be opened - %s" % details.strerror)
return 1
except:
win32ui.SetStatusText("Line is a COM error, but no WinHelp details can be parsed");
# Look for a Python traceback.
matchResult = self.patErrorMessage.match(line)
if matchResult is None:
# No match - try the previous line
lineNo = self.LineFromChar()
if lineNo > 0:
line = self.GetLine(lineNo-1)
matchResult = self.patErrorMessage.match(line)
if matchResult is not None:
# we have an error line.
fileName = matchResult.group(1)
if fileName[0]=="<":
win32ui.SetStatusText("Can not load this file")
return 1 # still was an error message.
else:
lineNoString = matchResult.group(2)
# Attempt to locate the file (in case it is a relative spec)
fileNameSpec = fileName
fileName = scriptutils.LocatePythonFile(fileName)
if fileName is None:
					# Don't force update, so it replaces the idle prompt.
					win32ui.SetStatusText("Can't locate the file '%s'" % (fileNameSpec), 0)
return 1
win32ui.SetStatusText("Jumping to line "+lineNoString+" of file "+fileName,1)
if not scriptutils.JumpToDocument(fileName, int(lineNoString)):
win32ui.SetStatusText("Could not open %s" % fileName)
return 1 # still was an error message.
return 1
return 0 # not an error line
def write(self, msg):
return self.template.write(msg)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
self.template.flush()
class WindowOutputViewRTF(docview.RichEditView, WindowOutputViewImpl):
def __init__(self, doc):
docview.RichEditView.__init__(self, doc)
WindowOutputViewImpl.__init__(self)
def OnInitialUpdate(self):
WindowOutputViewImpl.OnInitialUpdate(self)
return docview.RichEditView.OnInitialUpdate(self)
def OnDestroy(self, msg):
WindowOutputViewImpl.OnDestroy(self, msg)
docview.RichEditView.OnDestroy(self, msg)
def HookHandlers(self):
WindowOutputViewImpl.HookHandlers(self)
# Hook for finding and locating error messages
self.HookMessage(self.OnLDoubleClick,win32con.WM_LBUTTONDBLCLK)
# docview.RichEditView.HookHandlers(self)
def OnLDoubleClick(self,params):
if self.HandleSpecialLine():
return 0 # dont pass on
return 1 # pass it on by default.
def RestoreKillBuffer(self):
if len(self.template.killBuffer):
self.StreamIn(win32con.SF_RTF, self._StreamRTFIn)
self.template.killBuffer = []
def SaveKillBuffer(self):
self.StreamOut(win32con.SF_RTFNOOBJS, self._StreamRTFOut)
def _StreamRTFOut(self, data):
self.template.killBuffer.append(data)
return 1 # keep em coming!
def _StreamRTFIn(self, bytes):
try:
item = self.template.killBuffer[0]
self.template.killBuffer.remove(item)
if bytes < len(item):
print("Warning - output buffer not big enough!")
return item
except IndexError:
return None
def dowrite(self, str):
self.SetSel(-2)
self.ReplaceSel(str)
import pywin.scintilla.view
class WindowOutputViewScintilla(pywin.scintilla.view.CScintillaView, WindowOutputViewImpl):
def __init__(self, doc):
pywin.scintilla.view.CScintillaView.__init__(self, doc)
WindowOutputViewImpl.__init__(self)
def OnInitialUpdate(self):
pywin.scintilla.view.CScintillaView.OnInitialUpdate(self)
self.SCISetMarginWidth(3)
WindowOutputViewImpl.OnInitialUpdate(self)
def OnDestroy(self, msg):
WindowOutputViewImpl.OnDestroy(self, msg)
pywin.scintilla.view.CScintillaView.OnDestroy(self, msg)
def HookHandlers(self):
WindowOutputViewImpl.HookHandlers(self)
pywin.scintilla.view.CScintillaView.HookHandlers(self)
self.GetParent().HookNotify(self.OnScintillaDoubleClick, scintillacon.SCN_DOUBLECLICK)
## self.HookMessage(self.OnLDoubleClick,win32con.WM_LBUTTONDBLCLK)
def OnScintillaDoubleClick(self, std, extra):
self.HandleSpecialLine()
## def OnLDoubleClick(self,params):
## return 0 # never dont pass on
def RestoreKillBuffer(self):
assert len(self.template.killBuffer) in [0,1], "Unexpected killbuffer contents"
if self.template.killBuffer:
self.SCIAddText(self.template.killBuffer[0])
self.template.killBuffer = []
def SaveKillBuffer(self):
self.template.killBuffer = [self.GetTextRange(0,-1)]
def dowrite(self, str):
end = self.GetTextLength()
atEnd = end==self.GetSel()[0]
self.SCIInsertText(str, end)
if atEnd:
self.SetSel(self.GetTextLength())
def SetWordWrap(self, bWrapOn = 1):
if bWrapOn:
wrap_mode = scintillacon.SC_WRAP_WORD
else:
wrap_mode = scintillacon.SC_WRAP_NONE
self.SCISetWrapMode(wrap_mode)
def _MakeColorizer(self):
return None # No colorizer for me!
WindowOutputView = WindowOutputViewScintilla
# The WindowOutput class is actually an MFC template. This is a convenient way of
# making sure that my state can exist beyond the life of the windows themselves.
# This is primarily to support the functionality of a WindowOutput window automatically
# being recreated if necessary when written to.
class WindowOutput(docview.DocTemplate):
""" Looks like a general Output Window - text can be written by the 'write' method.
Will auto-create itself on first write, and also on next write after being closed """
softspace=1
def __init__(self, title=None, defSize=None, queueing = flags.WQ_LINE, \
bAutoRestore = 1, style=None,
makeDoc = None, makeFrame = None, makeView = None):
""" init the output window -
Params
title=None -- What is the title of the window
defSize=None -- What is the default size for the window - if this
is a string, the size will be loaded from the ini file.
queueing = flags.WQ_LINE -- When should output be written
bAutoRestore=1 -- Should a minimized window be restored.
style -- Style for Window, or None for default.
makeDoc, makeFrame, makeView -- Classes for frame, view and window respectively.
"""
if makeDoc is None: makeDoc = WindowOutputDocument
if makeFrame is None: makeFrame = WindowOutputFrame
if makeView is None: makeView = WindowOutputViewScintilla
docview.DocTemplate.__init__(self, win32ui.IDR_PYTHONTYPE, \
makeDoc, makeFrame, makeView)
self.SetDocStrings("\nOutput\n\nText Documents (*.txt)\n.txt\n\n\n")
win32ui.GetApp().AddDocTemplate(self)
self.writeQueueing = queueing
self.errorCantRecreate = 0
self.killBuffer=[]
self.style = style
self.bAutoRestore = bAutoRestore
self.title = title
self.bCreating = 0
self.interruptCount = 0
if type(defSize)==type(''): # is a string - maintain size pos from ini file.
self.iniSizeSection = defSize
self.defSize = app.LoadWindowSize(defSize)
self.loadedSize = self.defSize
else:
self.iniSizeSection = None
self.defSize=defSize
self.currentView = None
self.outputQueue = queue.Queue(-1)
self.mainThreadId = win32api.GetCurrentThreadId()
self.idleHandlerSet = 0
self.SetIdleHandler()
def __del__(self):
self.Close()
def Create(self, title=None, style = None):
self.bCreating = 1
if title: self.title = title
if style: self.style = style
doc=self.OpenDocumentFile()
if doc is None: return
self.currentView = doc.GetFirstView()
self.bCreating = 0
if self.title: doc.SetTitle(self.title)
def Close(self):
self.RemoveIdleHandler()
try:
parent = self.currentView.GetParent()
except (AttributeError, win32ui.error): # Already closed
return
parent.DestroyWindow()
def SetTitle(self, title):
self.title = title
if self.currentView: self.currentView.GetDocument().SetTitle(self.title)
def OnViewDestroy(self, view):
self.currentView.SaveKillBuffer()
self.currentView = None
def OnFrameDestroy(self, frame):
if self.iniSizeSection:
# use GetWindowPlacement(), as it works even when min'd or max'd
newSize = frame.GetWindowPlacement()[4]
if self.loadedSize!=newSize:
app.SaveWindowSize(self.iniSizeSection, newSize)
def SetIdleHandler(self):
if not self.idleHandlerSet:
debug("Idle handler set\n")
win32ui.GetApp().AddIdleHandler(self.QueueIdleHandler)
self.idleHandlerSet = 1
def RemoveIdleHandler(self):
if self.idleHandlerSet:
debug("Idle handler reset\n")
if (win32ui.GetApp().DeleteIdleHandler(self.QueueIdleHandler)==0):
debug('Error deleting idle handler\n')
self.idleHandlerSet = 0
def RecreateWindow(self):
if self.errorCantRecreate:
debug("Error = not trying again")
return 0
try:
# This will fail if app shutting down
win32ui.GetMainFrame().GetSafeHwnd()
self.Create()
return 1
except (win32ui.error, AttributeError):
self.errorCantRecreate = 1
debug("Winout can not recreate the Window!\n")
return 0
# this handles the idle message, and does the printing.
def QueueIdleHandler(self,handler,count):
try:
bEmpty = self.QueueFlush(20)
# If the queue is empty, then we are back to idle and restart interrupt logic.
if bEmpty: self.interruptCount = 0
except KeyboardInterrupt:
# First interrupt since idle we just pass on.
# later ones we dump the queue and give up.
self.interruptCount = self.interruptCount + 1
if self.interruptCount > 1:
# Drop the queue quickly as the user is already annoyed :-)
self.outputQueue = queue.Queue(-1)
print("Interrupted.")
bEmpty = 1
else:
raise # re-raise the error so the users exception filters up.
return not bEmpty # More to do if not empty.
# Returns true if the Window needs to be recreated.
def NeedRecreateWindow(self):
try:
if self.currentView is not None and self.currentView.IsWindow():
return 0
except (win32ui.error, AttributeError): # Attribute error if the win32ui object has died.
pass
return 1
	# Returns true if the Window is OK (either because it was, or because it was recreated).
def CheckRecreateWindow(self):
if self.bCreating: return 1
if not self.NeedRecreateWindow():
return 1
if self.bAutoRestore:
if self.RecreateWindow():
return 1
return 0
def QueueFlush(self, max = None):
# Returns true if the queue is empty after the flush
# debug("Queueflush - %d, %d\n" % (max, self.outputQueue.qsize()))
if self.bCreating: return 1
items = []
rc = 0
while max is None or max > 0:
try:
item = self.outputQueue.get_nowait()
items.append(item)
except queue.Empty:
rc = 1
break
if max is not None:
max = max - 1
if len(items) != 0:
if not self.CheckRecreateWindow():
debug(":Recreate failed!\n")
return 1 # In trouble - so say we have nothing to do.
win32ui.PumpWaitingMessages() # Pump paint messages
self.currentView.dowrite(''.join(items))
return rc
def HandleOutput(self,message):
# debug("QueueOutput on thread %d, flags %d with '%s'...\n" % (win32api.GetCurrentThreadId(), self.writeQueueing, message ))
self.outputQueue.put(message)
if win32api.GetCurrentThreadId() != self.mainThreadId:
pass
# debug("not my thread - ignoring queue options!\n")
elif self.writeQueueing==flags.WQ_LINE:
pos = message.rfind('\n')
if pos>=0:
# debug("Line queueing - forcing flush\n")
self.QueueFlush()
return
elif self.writeQueueing==flags.WQ_NONE:
# debug("WQ_NONE - flushing!\n")
self.QueueFlush()
return
# Let our idle handler get it - wake it up
try:
win32ui.GetMainFrame().PostMessage(win32con.WM_USER) # Kick main thread off.
except win32ui.error:
# This can happen as the app is shutting down, so we send it to the C++ debugger
win32api.OutputDebugString(message)
# delegate certain fns to my view.
def writelines(self, lines):
for line in lines:
self.write(line)
def write(self,message):
self.HandleOutput(message)
def flush(self):
self.QueueFlush()
def HandleSpecialLine(self):
self.currentView.HandleSpecialLine()
def RTFWindowOutput(*args, **kw):
kw['makeView'] = WindowOutputViewRTF
return WindowOutput(*args, **kw)
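# A brief usage sketch (never called; the title and ini-section name below are
# arbitrary examples). It illustrates the constructor options the __init__
# docstring above describes: a window whose size is remembered in the ini file
# and which flushes queued output once per line.
def _window_output_example():
	w = WindowOutput(title="Build Output", defSize="Build Output Window",
		queueing=flags.WQ_LINE)
	w.write("hello from _window_output_example\n")	# auto-creates the window
	return w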
def thread_test(o):
for i in range(5):
o.write("Hi from thread %d\n" % (win32api.GetCurrentThreadId()))
win32api.Sleep(100)
def test():
w = WindowOutput(queueing=flags.WQ_IDLE)
w.write("First bit of text\n")
import _thread
for i in range(5):
w.write("Hello from the main thread\n")
_thread.start_new(thread_test, (w,))
for i in range(2):
w.write("Hello from the main thread\n")
win32api.Sleep(50)
return w
if __name__=='__main__':
test()
|
|
try: # py3
from shlex import quote
except ImportError: # py2
from pipes import quote
import hashlib
import logging
import os
import subprocess
import sys
import time
from threading import Thread
from getpass import getuser
from ray.autoscaler.tags import TAG_RAY_NODE_STATUS, TAG_RAY_RUNTIME_CONFIG, \
STATUS_UP_TO_DATE, STATUS_UPDATE_FAILED, STATUS_WAITING_FOR_SSH, \
STATUS_SETTING_UP, STATUS_SYNCING_FILES
from ray.autoscaler.log_timer import LogTimer
logger = logging.getLogger(__name__)
# How long to wait for a node to start, in seconds
NODE_START_WAIT_S = 300
READY_CHECK_INTERVAL = 5
HASH_MAX_LENGTH = 10
KUBECTL_RSYNC = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "kubernetes/kubectl-rsync.sh")
def with_interactive(cmd):
force_interactive = ("true && source ~/.bashrc && "
"export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && ")
return ["bash", "--login", "-c", "-i", quote(force_interactive + cmd)]
class KubernetesCommandRunner:
def __init__(self, log_prefix, namespace, node_id, auth_config,
process_runner):
self.log_prefix = log_prefix
self.process_runner = process_runner
self.node_id = node_id
self.namespace = namespace
self.kubectl = ["kubectl", "-n", self.namespace]
def run(self,
cmd,
timeout=120,
allocate_tty=False,
exit_on_fail=False,
port_forward=None):
logger.info(self.log_prefix + "Running {}...".format(cmd))
if port_forward:
if not isinstance(port_forward, list):
port_forward = [port_forward]
port_forward_cmd = self.kubectl + [
"port-forward",
self.node_id,
] + [str(fwd) for fwd in port_forward]
port_forward_process = subprocess.Popen(port_forward_cmd)
# Give port-forward a grace period to run and print output before
# running the actual command. This is a little ugly, but it should
# work in most scenarios and nothing should go very wrong if the
# command starts running before the port forward starts.
time.sleep(1)
final_cmd = self.kubectl + [
"exec",
"-it" if allocate_tty else "-i",
self.node_id,
"--",
] + with_interactive(cmd)
try:
self.process_runner.check_call(" ".join(final_cmd), shell=True)
except subprocess.CalledProcessError:
if exit_on_fail:
quoted_cmd = " ".join(final_cmd[:-1] + [quote(final_cmd[-1])])
logger.error(self.log_prefix +
"Command failed: \n\n {}\n".format(quoted_cmd))
sys.exit(1)
else:
raise
finally:
# Clean up the port forward process. First, try to let it exit
            # gracefully with SIGTERM. If that doesn't work after 1s, send
# SIGKILL.
if port_forward:
port_forward_process.terminate()
for _ in range(10):
time.sleep(0.1)
port_forward_process.poll()
if port_forward_process.returncode:
break
logger.info(self.log_prefix +
"Waiting for port forward to die...")
else:
logger.warning(self.log_prefix +
"Killing port forward with SIGKILL.")
port_forward_process.kill()
def run_rsync_up(self, source, target):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call([
KUBECTL_RSYNC,
"-avz",
source,
"{}@{}:{}".format(self.node_id, self.namespace, target),
])
except Exception as e:
logger.warning(self.log_prefix +
"rsync failed: '{}'. Falling back to 'kubectl cp'"
.format(e))
self.process_runner.check_call(self.kubectl + [
"cp", source, "{}/{}:{}".format(self.namespace, self.node_id,
target)
])
def run_rsync_down(self, source, target):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call([
KUBECTL_RSYNC,
"-avz",
"{}@{}:{}".format(self.node_id, self.namespace, source),
target,
])
except Exception as e:
logger.warning(self.log_prefix +
"rsync failed: '{}'. Falling back to 'kubectl cp'"
.format(e))
self.process_runner.check_call(self.kubectl + [
"cp", "{}/{}:{}".format(self.namespace, self.node_id, source),
target
])
def remote_shell_command_str(self):
return "{} exec -it {} bash".format(" ".join(self.kubectl),
self.node_id)
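# Minimal usage sketch (not executed; the pod name "ray-head" and namespace
# "ray" are made-up placeholders, and plain subprocess stands in for the
# process runner). It shows how the autoscaler drives the runner above,
# including the optional port forward that run() starts before the exec.
def _kubernetes_runner_example():
    runner = KubernetesCommandRunner(
        "NodeUpdater: ray-head: ", "ray", "ray-head", auth_config={},
        process_runner=subprocess)
    # Forwards local port 8265 for the duration of the command and allocates
    # a TTY for the interactive bash login shell built by with_interactive().
    runner.run("ray status", allocate_tty=True, port_forward=8265)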
class SSHCommandRunner:
def __init__(self, log_prefix, node_id, provider, auth_config,
cluster_name, process_runner, use_internal_ip):
ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(
ssh_user_hash[:HASH_MAX_LENGTH],
ssh_control_hash[:HASH_MAX_LENGTH])
self.log_prefix = log_prefix
self.process_runner = process_runner
self.node_id = node_id
self.use_internal_ip = use_internal_ip
self.provider = provider
self.ssh_private_key = auth_config["ssh_private_key"]
self.ssh_user = auth_config["ssh_user"]
self.ssh_control_path = ssh_control_path
self.ssh_ip = None
def get_default_ssh_options(self, connect_timeout):
OPTS = [
("ConnectTimeout", "{}s".format(connect_timeout)),
("StrictHostKeyChecking", "no"),
("ControlMaster", "auto"),
("ControlPath", "{}/%C".format(self.ssh_control_path)),
("ControlPersist", "10s"),
("IdentitiesOnly", "yes"),
]
return ["-i", self.ssh_private_key] + [
x for y in (["-o", "{}={}".format(k, v)] for k, v in OPTS)
for x in y
]
def get_node_ip(self):
if self.use_internal_ip:
return self.provider.internal_ip(self.node_id)
else:
return self.provider.external_ip(self.node_id)
def wait_for_ip(self, deadline):
while time.time() < deadline and \
not self.provider.is_terminated(self.node_id):
logger.info(self.log_prefix + "Waiting for IP...")
ip = self.get_node_ip()
if ip is not None:
return ip
time.sleep(10)
return None
def set_ssh_ip_if_required(self):
if self.ssh_ip is not None:
return
# We assume that this never changes.
# I think that's reasonable.
deadline = time.time() + NODE_START_WAIT_S
with LogTimer(self.log_prefix + "Got IP"):
ip = self.wait_for_ip(deadline)
assert ip is not None, "Unable to find IP of node"
self.ssh_ip = ip
# This should run before any SSH commands and therefore ensure that
# the ControlPath directory exists, allowing SSH to maintain
# persistent sessions later on.
try:
self.process_runner.check_call(
["mkdir", "-p", self.ssh_control_path])
except subprocess.CalledProcessError as e:
logger.warning(e)
try:
self.process_runner.check_call(
["chmod", "0700", self.ssh_control_path])
except subprocess.CalledProcessError as e:
logger.warning(self.log_prefix + str(e))
def run(self,
cmd,
timeout=120,
allocate_tty=False,
exit_on_fail=False,
port_forward=None):
self.set_ssh_ip_if_required()
logger.info(self.log_prefix +
"Running {} on {}...".format(cmd, self.ssh_ip))
ssh = ["ssh"]
if allocate_tty:
ssh.append("-tt")
if port_forward:
if not isinstance(port_forward, list):
port_forward = [port_forward]
for fwd in port_forward:
ssh += ["-L", "{}:localhost:{}".format(fwd, fwd)]
final_cmd = ssh + self.get_default_ssh_options(timeout) + [
"{}@{}".format(self.ssh_user, self.ssh_ip)
] + with_interactive(cmd)
try:
self.process_runner.check_call(final_cmd)
except subprocess.CalledProcessError:
if exit_on_fail:
quoted_cmd = " ".join(final_cmd[:-1] + [quote(final_cmd[-1])])
logger.error(self.log_prefix +
"Command failed: \n\n {}\n".format(quoted_cmd))
sys.exit(1)
else:
raise
def run_rsync_up(self, source, target):
self.set_ssh_ip_if_required()
self.process_runner.check_call([
"rsync", "--rsh",
" ".join(["ssh"] + self.get_default_ssh_options(120)), "-avz",
source, "{}@{}:{}".format(self.ssh_user, self.ssh_ip, target)
])
def run_rsync_down(self, source, target):
self.set_ssh_ip_if_required()
self.process_runner.check_call([
"rsync", "--rsh",
" ".join(["ssh"] + self.get_default_ssh_options(120)), "-avz",
"{}@{}:{}".format(self.ssh_user, self.ssh_ip, source), target
])
def remote_shell_command_str(self):
return "ssh -o IdentitiesOnly=yes -i {} {}@{}\n".format(
self.ssh_private_key, self.ssh_user, self.ssh_ip)
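# Hedged usage sketch (not executed): the node id, key path, user and cluster
# name are placeholders, and the node provider is passed in rather than
# constructed. It shows how the ControlMaster/ControlPath options above end
# up in a real ssh invocation once run() has resolved the node's IP.
def _ssh_runner_example(provider):
    auth = {"ssh_private_key": "~/.ssh/ray.pem", "ssh_user": "ubuntu"}
    runner = SSHCommandRunner("NodeUpdater: i-1234: ", "i-1234", provider,
                              auth, "demo-cluster", subprocess,
                              use_internal_ip=False)
    # The first call waits for the IP, creates the ControlPath directory, then
    # execs: ssh -tt -i ~/.ssh/ray.pem -o ControlMaster=auto ... ubuntu@<ip>
    runner.run("uptime", allocate_tty=True)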
class NodeUpdater:
"""A process for syncing files and running init commands on a node."""
def __init__(self,
node_id,
provider_config,
provider,
auth_config,
cluster_name,
file_mounts,
initialization_commands,
setup_commands,
ray_start_commands,
runtime_hash,
process_runner=subprocess,
use_internal_ip=False):
self.log_prefix = "NodeUpdater: {}: ".format(node_id)
if provider_config["type"] == "kubernetes":
self.cmd_runner = KubernetesCommandRunner(
self.log_prefix, provider.namespace, node_id, auth_config,
process_runner)
else:
use_internal_ip = (use_internal_ip or provider_config.get(
"use_internal_ips", False))
self.cmd_runner = SSHCommandRunner(
self.log_prefix, node_id, provider, auth_config, cluster_name,
process_runner, use_internal_ip)
self.daemon = True
self.process_runner = process_runner
self.node_id = node_id
self.provider = provider
self.file_mounts = {
remote: os.path.expanduser(local)
for remote, local in file_mounts.items()
}
self.initialization_commands = initialization_commands
self.setup_commands = setup_commands
self.ray_start_commands = ray_start_commands
self.runtime_hash = runtime_hash
def run(self):
logger.info(self.log_prefix +
"Updating to {}".format(self.runtime_hash))
try:
with LogTimer(self.log_prefix +
"Applied config {}".format(self.runtime_hash)):
self.do_update()
except Exception as e:
error_str = str(e)
if hasattr(e, "cmd"):
error_str = "(Exit Status {}) {}".format(
e.returncode, " ".join(e.cmd))
logger.error(self.log_prefix +
"Error updating {}".format(error_str))
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED})
raise e
self.provider.set_node_tags(
self.node_id, {
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_RUNTIME_CONFIG: self.runtime_hash
})
self.exitcode = 0
def sync_file_mounts(self, sync_cmd):
# Rsync file mounts
for remote_path, local_path in self.file_mounts.items():
assert os.path.exists(local_path), local_path
if os.path.isdir(local_path):
if not local_path.endswith("/"):
local_path += "/"
if not remote_path.endswith("/"):
remote_path += "/"
with LogTimer(self.log_prefix +
"Synced {} to {}".format(local_path, remote_path)):
self.cmd_runner.run("mkdir -p {}".format(
os.path.dirname(remote_path)))
sync_cmd(local_path, remote_path)
def wait_ready(self, deadline):
with LogTimer(self.log_prefix + "Got remote shell"):
logger.info(self.log_prefix + "Waiting for remote shell...")
while time.time() < deadline and \
not self.provider.is_terminated(self.node_id):
try:
logger.debug(self.log_prefix +
"Waiting for remote shell...")
self.cmd_runner.run("uptime", timeout=5)
logger.debug("Uptime succeeded.")
return True
except Exception as e:
retry_str = str(e)
if hasattr(e, "cmd"):
retry_str = "(Exit Status {}): {}".format(
e.returncode, " ".join(e.cmd))
logger.debug(self.log_prefix +
"Node not up, retrying: {}".format(retry_str))
time.sleep(READY_CHECK_INTERVAL)
assert False, "Unable to connect to node"
def do_update(self):
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_WAITING_FOR_SSH})
deadline = time.time() + NODE_START_WAIT_S
self.wait_ready(deadline)
node_tags = self.provider.node_tags(self.node_id)
logger.debug("Node tags: {}".format(str(node_tags)))
if node_tags.get(TAG_RAY_RUNTIME_CONFIG) == self.runtime_hash:
logger.info(self.log_prefix +
"{} already up-to-date, skip to ray start".format(
self.node_id))
else:
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SYNCING_FILES})
self.sync_file_mounts(self.rsync_up)
# Run init commands
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SETTING_UP})
with LogTimer(self.log_prefix +
"Initialization commands completed"):
for cmd in self.initialization_commands:
self.cmd_runner.run(cmd)
with LogTimer(self.log_prefix + "Setup commands completed"):
for cmd in self.setup_commands:
self.cmd_runner.run(cmd)
with LogTimer(self.log_prefix + "Ray start commands completed"):
for cmd in self.ray_start_commands:
self.cmd_runner.run(cmd)
def rsync_up(self, source, target):
logger.info(self.log_prefix +
"Syncing {} to {}...".format(source, target))
self.cmd_runner.run_rsync_up(source, target)
def rsync_down(self, source, target):
logger.info(self.log_prefix +
"Syncing {} from {}...".format(source, target))
self.cmd_runner.run_rsync_down(source, target)
class NodeUpdaterThread(NodeUpdater, Thread):
def __init__(self, *args, **kwargs):
Thread.__init__(self)
NodeUpdater.__init__(self, *args, **kwargs)
self.exitcode = -1
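# Sketch of how the autoscaler typically drives the classes above (the
# provider objects and node id are placeholders supplied by the caller; the
# file mounts and commands are arbitrary examples). Defined but never called
# in this module.
def _node_updater_example(provider, provider_config, auth_config):
    updater = NodeUpdaterThread(
        node_id="i-1234",
        provider_config=provider_config,
        provider=provider,
        auth_config=auth_config,
        cluster_name="demo-cluster",
        file_mounts={"/home/ubuntu/app": "~/app"},  # remote path -> local path
        initialization_commands=[],
        setup_commands=["pip install -U ray"],
        ray_start_commands=["ray start --head"],
        runtime_hash="abc123")
    updater.start()  # runs NodeUpdater.run() on a background thread
    updater.join()
    return updater.exitcode  # 0 if the update succeeded, -1 otherwise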
|
|
import cProfile
import logging
import time
import traceback
from typing import Any, AnyStr, Callable, Dict, \
Iterable, List, MutableMapping, Optional, Text
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import DisallowedHost
from django.db import connection
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.shortcuts import redirect, render
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import cookie_date
from django.utils.translation import ugettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from zerver.lib.bugdown import get_bugdown_requests, get_bugdown_time
from zerver.lib.cache import get_remote_cache_requests, get_remote_cache_time
from zerver.lib.debug import maybe_tracemalloc_listen
from zerver.lib.exceptions import ErrorCode, JsonableError, RateLimited
from zerver.lib.queue import queue_json_publish
from zerver.lib.response import json_error, json_response_from_error
from zerver.lib.subdomains import get_subdomain
from zerver.lib.utils import statsd
from zerver.models import Realm, flush_per_request_caches, get_realm
logger = logging.getLogger('zulip.requests')
def record_request_stop_data(log_data: MutableMapping[str, Any]) -> None:
log_data['time_stopped'] = time.time()
log_data['remote_cache_time_stopped'] = get_remote_cache_time()
log_data['remote_cache_requests_stopped'] = get_remote_cache_requests()
log_data['bugdown_time_stopped'] = get_bugdown_time()
log_data['bugdown_requests_stopped'] = get_bugdown_requests()
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
def async_request_stop(request: HttpRequest) -> None:
record_request_stop_data(request._log_data)
def record_request_restart_data(log_data: MutableMapping[str, Any]) -> None:
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].enable()
log_data['time_restarted'] = time.time()
log_data['remote_cache_time_restarted'] = get_remote_cache_time()
log_data['remote_cache_requests_restarted'] = get_remote_cache_requests()
log_data['bugdown_time_restarted'] = get_bugdown_time()
log_data['bugdown_requests_restarted'] = get_bugdown_requests()
def async_request_restart(request: HttpRequest) -> None:
if "time_restarted" in request._log_data:
# Don't destroy data when being called from
# finish_current_handler
return
record_request_restart_data(request._log_data)
def record_request_start_data(log_data: MutableMapping[str, Any]) -> None:
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"] = cProfile.Profile()
log_data["prof"].enable()
log_data['time_started'] = time.time()
log_data['remote_cache_time_start'] = get_remote_cache_time()
log_data['remote_cache_requests_start'] = get_remote_cache_requests()
log_data['bugdown_time_start'] = get_bugdown_time()
log_data['bugdown_requests_start'] = get_bugdown_requests()
def timedelta_ms(timedelta: float) -> float:
return timedelta * 1000
def format_timedelta(timedelta: float) -> str:
if (timedelta >= 1):
return "%.1fs" % (timedelta)
return "%.0fms" % (timedelta_ms(timedelta),)
def is_slow_query(time_delta: float, path: Text) -> bool:
if time_delta < 1.2:
return False
is_exempt = \
path in ["/activity", "/json/report/error",
"/api/v1/deployments/report_error"] \
or path.startswith("/realm_activity/") \
or path.startswith("/user_activity/")
if is_exempt:
return time_delta >= 5
if 'webathena_kerberos' in path:
return time_delta >= 10
return True
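# Illustrative checks of the thresholds above (never called): ordinary
# requests count as slow past 1.2s, the exempt paths past 5s, and
# webathena_kerberos requests past 10s.
def _is_slow_query_examples() -> None:
    assert not is_slow_query(1.1, '/some/random/url')
    assert is_slow_query(2, '/some/random/url')
    assert not is_slow_query(2, '/activity')
    assert is_slow_query(5.1, '/activity')
    assert not is_slow_query(2, '/api/v1/external/webathena_kerberos_login')
    assert is_slow_query(11, '/api/v1/external/webathena_kerberos_login')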
def write_log_line(log_data: MutableMapping[str, Any], path: Text, method: str, remote_ip: str, email: Text,
client_name: Text, status_code: int=200, error_content: Optional[AnyStr]=None,
error_content_iter: Optional[Iterable[AnyStr]]=None) -> None:
assert error_content is None or error_content_iter is None
if error_content is not None:
error_content_iter = (error_content,)
# For statsd timer name
if path == '/':
statsd_path = u'webreq'
else:
statsd_path = u"webreq.%s" % (path[1:].replace('/', '.'),)
# Remove non-ascii chars from path (there should be none, if there are it's
# because someone manually entered a nonexistent path), as UTF-8 chars make
# statsd sad when it sends the key name over the socket
statsd_path = statsd_path.encode('ascii', errors='ignore').decode("ascii")
blacklisted_requests = ['do_confirm', 'send_confirm',
'eventslast_event_id', 'webreq.content', 'avatar', 'user_uploads',
'password.reset', 'static', 'json.bots', 'json.users', 'json.streams',
'accounts.unsubscribe', 'apple-touch-icon', 'emoji', 'json.bots',
'upload_file', 'realm_activity', 'user_activity']
suppress_statsd = any((blacklisted in statsd_path for blacklisted in blacklisted_requests))
time_delta = -1
# A time duration of -1 means the StartLogRequests middleware
# didn't run for some reason
optional_orig_delta = ""
if 'time_started' in log_data:
time_delta = time.time() - log_data['time_started']
if 'time_stopped' in log_data:
orig_time_delta = time_delta
time_delta = ((log_data['time_stopped'] - log_data['time_started']) +
(time.time() - log_data['time_restarted']))
optional_orig_delta = " (lp: %s)" % (format_timedelta(orig_time_delta),)
remote_cache_output = ""
if 'remote_cache_time_start' in log_data:
remote_cache_time_delta = get_remote_cache_time() - log_data['remote_cache_time_start']
remote_cache_count_delta = get_remote_cache_requests() - log_data['remote_cache_requests_start']
if 'remote_cache_requests_stopped' in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
remote_cache_time_delta += (log_data['remote_cache_time_stopped'] -
log_data['remote_cache_time_restarted'])
remote_cache_count_delta += (log_data['remote_cache_requests_stopped'] -
log_data['remote_cache_requests_restarted'])
if (remote_cache_time_delta > 0.005):
remote_cache_output = " (mem: %s/%s)" % (format_timedelta(remote_cache_time_delta),
remote_cache_count_delta)
if not suppress_statsd:
statsd.timing("%s.remote_cache.time" % (statsd_path,), timedelta_ms(remote_cache_time_delta))
statsd.incr("%s.remote_cache.querycount" % (statsd_path,), remote_cache_count_delta)
startup_output = ""
if 'startup_time_delta' in log_data and log_data["startup_time_delta"] > 0.005:
startup_output = " (+start: %s)" % (format_timedelta(log_data["startup_time_delta"]))
bugdown_output = ""
if 'bugdown_time_start' in log_data:
bugdown_time_delta = get_bugdown_time() - log_data['bugdown_time_start']
bugdown_count_delta = get_bugdown_requests() - log_data['bugdown_requests_start']
if 'bugdown_requests_stopped' in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
bugdown_time_delta += (log_data['bugdown_time_stopped'] -
log_data['bugdown_time_restarted'])
bugdown_count_delta += (log_data['bugdown_requests_stopped'] -
log_data['bugdown_requests_restarted'])
if (bugdown_time_delta > 0.005):
bugdown_output = " (md: %s/%s)" % (format_timedelta(bugdown_time_delta),
bugdown_count_delta)
if not suppress_statsd:
statsd.timing("%s.markdown.time" % (statsd_path,), timedelta_ms(bugdown_time_delta))
statsd.incr("%s.markdown.count" % (statsd_path,), bugdown_count_delta)
# Get the amount of time spent doing database queries
db_time_output = ""
queries = connection.connection.queries if connection.connection is not None else []
if len(queries) > 0:
query_time = sum(float(query.get('time', 0)) for query in queries)
db_time_output = " (db: %s/%sq)" % (format_timedelta(query_time),
len(queries))
if not suppress_statsd:
# Log ms, db ms, and num queries to statsd
statsd.timing("%s.dbtime" % (statsd_path,), timedelta_ms(query_time))
statsd.incr("%s.dbq" % (statsd_path,), len(queries))
statsd.timing("%s.total" % (statsd_path,), timedelta_ms(time_delta))
if 'extra' in log_data:
extra_request_data = " %s" % (log_data['extra'],)
else:
extra_request_data = ""
logger_client = "(%s via %s)" % (email, client_name)
logger_timing = ('%5s%s%s%s%s%s %s' %
(format_timedelta(time_delta), optional_orig_delta,
remote_cache_output, bugdown_output,
db_time_output, startup_output, path))
logger_line = ('%-15s %-7s %3d %s%s %s' %
(remote_ip, method, status_code,
logger_timing, extra_request_data, logger_client))
if (status_code in [200, 304] and method == "GET" and path.startswith("/static")):
logger.debug(logger_line)
else:
logger.info(logger_line)
if (is_slow_query(time_delta, path)):
queue_json_publish("slow_queries", "%s (%s)" % (logger_line, email))
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
profile_path = "/tmp/profile.data.%s.%s" % (path.split("/")[-1], int(time_delta * 1000),)
log_data["prof"].dump_stats(profile_path)
# Log some additional data whenever we return certain 40x errors
if 400 <= status_code < 500 and status_code not in [401, 404, 405]:
assert error_content_iter is not None
error_content_list = list(error_content_iter)
        if not error_content_list:
error_data = u''
elif isinstance(error_content_list[0], Text):
error_data = u''.join(error_content_list)
elif isinstance(error_content_list[0], bytes):
error_data = repr(b''.join(error_content_list))
if len(error_data) > 100:
error_data = u"[content more than 100 characters]"
logger.info('status=%3d, data=%s, uid=%s' % (status_code, error_data, email))
class LogRequests(MiddlewareMixin):
# We primarily are doing logging using the process_view hook, but
# for some views, process_view isn't run, so we call the start
# method here too
def process_request(self, request: HttpRequest) -> None:
maybe_tracemalloc_listen()
request._log_data = dict()
record_request_start_data(request._log_data)
if connection.connection is not None:
connection.connection.queries = []
def process_view(self, request: HttpRequest, view_func: Callable[..., HttpResponse],
args: List[str], kwargs: Dict[str, Any]) -> None:
# process_request was already run; we save the initialization
# time (i.e. the time between receiving the request and
# figuring out which view function to call, which is primarily
# importing modules on the first start)
request._log_data["startup_time_delta"] = time.time() - request._log_data["time_started"]
# And then completely reset our tracking to only cover work
# done as part of this request
record_request_start_data(request._log_data)
if connection.connection is not None:
connection.connection.queries = []
def process_response(self, request: HttpRequest,
response: StreamingHttpResponse) -> StreamingHttpResponse:
# The reverse proxy might have sent us the real external IP
remote_ip = request.META.get('HTTP_X_REAL_IP')
if remote_ip is None:
remote_ip = request.META['REMOTE_ADDR']
# Get the requestor's email address and client, if available.
try:
email = request._email
except Exception:
email = "unauth"
try:
client = request.client.name
except Exception:
client = "?"
if response.streaming:
content_iter = response.streaming_content
content = None
else:
content = response.content
content_iter = None
write_log_line(request._log_data, request.path, request.method,
remote_ip, email, client, status_code=response.status_code,
error_content=content, error_content_iter=content_iter)
return response
class JsonErrorHandler(MiddlewareMixin):
def process_exception(self, request: HttpRequest, exception: Exception) -> Optional[HttpResponse]:
if isinstance(exception, JsonableError):
return json_response_from_error(exception)
if request.error_format == "JSON":
logging.error(traceback.format_exc())
return json_error(_("Internal server error"), status=500)
return None
class TagRequests(MiddlewareMixin):
def process_view(self, request: HttpRequest, view_func: Callable[..., HttpResponse],
args: List[str], kwargs: Dict[str, Any]) -> None:
self.process_request(request)
def process_request(self, request: HttpRequest) -> None:
if request.path.startswith("/api/") or request.path.startswith("/json/"):
request.error_format = "JSON"
else:
request.error_format = "HTML"
class CsrfFailureError(JsonableError):
http_status_code = 403
code = ErrorCode.CSRF_FAILED
data_fields = ['reason']
def __init__(self, reason: Text) -> None:
self.reason = reason # type: Text
@staticmethod
def msg_format() -> Text:
return _("CSRF Error: {reason}")
def csrf_failure(request: HttpRequest, reason: Text="") -> HttpResponse:
if request.error_format == "JSON":
return json_response_from_error(CsrfFailureError(reason))
else:
return html_csrf_failure(request, reason)
class RateLimitMiddleware(MiddlewareMixin):
def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
if not settings.RATE_LIMITING:
return response
from zerver.lib.rate_limiter import max_api_calls, RateLimitedUser
# Add X-RateLimit-*** headers
if hasattr(request, '_ratelimit_applied_limits'):
entity = RateLimitedUser(request.user)
response['X-RateLimit-Limit'] = str(max_api_calls(entity))
if hasattr(request, '_ratelimit_secs_to_freedom'):
response['X-RateLimit-Reset'] = str(int(time.time() + request._ratelimit_secs_to_freedom))
if hasattr(request, '_ratelimit_remaining'):
response['X-RateLimit-Remaining'] = str(request._ratelimit_remaining)
return response
def process_exception(self, request: HttpRequest, exception: Exception) -> Optional[HttpResponse]:
if isinstance(exception, RateLimited):
resp = json_error(
_("API usage exceeded rate limit"),
data={'retry-after': request._ratelimit_secs_to_freedom},
status=429
)
resp['Retry-After'] = request._ratelimit_secs_to_freedom
return resp
return None
class FlushDisplayRecipientCache(MiddlewareMixin):
def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
# We flush the per-request caches after every request, so they
# are not shared at all between requests.
flush_per_request_caches()
return response
class SessionHostDomainMiddleware(SessionMiddleware):
def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
try:
request.get_host()
except DisallowedHost:
# If we get a DisallowedHost exception trying to access
# the host, (1) the request is failed anyway and so the
# below code will do nothing, and (2) the below will
# trigger a recursive exception, breaking things, so we
# just return here.
return response
if (not request.path.startswith("/static/") and not request.path.startswith("/api/") and
not request.path.startswith("/json/")):
subdomain = get_subdomain(request)
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
realm = get_realm(subdomain)
if (realm is None):
return render(request, "zerver/invalid_realm.html")
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
except AttributeError:
pass
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if modified or settings.SESSION_SAVE_EVERY_REQUEST:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
request.session.save()
host = request.get_host().split(':')[0]
# The subdomains feature overrides the
# SESSION_COOKIE_DOMAIN setting, since the setting
# is a fixed value and with subdomains enabled,
# the session cookie domain has to vary with the
# subdomain.
session_cookie_domain = host
response.set_cookie(settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=session_cookie_domain,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
return response
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
"""
Middleware that sets REMOTE_ADDR based on the HTTP_X_FORWARDED_FOR.
This middleware replicates Django's former SetRemoteAddrFromForwardedFor middleware.
Because Zulip sits behind a NGINX reverse proxy, if the HTTP_X_FORWARDED_FOR
is set in the request, then it has properly been set by NGINX.
Therefore HTTP_X_FORWARDED_FOR's value is trusted.
"""
def process_request(self, request: HttpRequest) -> None:
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# For NGINX reverse proxy servers, the client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
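# Illustrative sketch of the header handling above (never called, and not how
# Django builds real requests; the addresses are documentation examples).
# Given "X-Forwarded-For: 203.0.113.7, 10.0.0.1", only the first, client,
# address is kept as REMOTE_ADDR.
def _forwarded_for_example() -> str:
    request = HttpRequest()
    request.META['HTTP_X_FORWARDED_FOR'] = '203.0.113.7, 10.0.0.1'
    SetRemoteAddrFromForwardedFor().process_request(request)
    return request.META['REMOTE_ADDR']  # '203.0.113.7'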
|
|
import logging
from ..requests import requests
from .common import access_token
from itchatmp.config import COMPANY_URL
from itchatmp.content import (
IMAGE, VOICE, VIDEO, MUSIC, TEXT, NEWS, CARD)
from itchatmp.utils import retry, encode_send_dict
from itchatmp.returnvalues import ReturnValue
logger = logging.getLogger('itchatmp')
@access_token
def authorize_user(userId, accessToken=None):
params = {
'access_token': accessToken,
'userid': userId, }
r = requests.get('%s/cgi-bin/user/authsucc' % COMPANY_URL, params=params).json()
return ReturnValue(r)
@access_token
def create_department(name, parentId=1, order=None, id=None, accessToken=None):
data = {
'name': name,
'parentid': parentId, }
if order is not None: data['order'] = order
if id is not None: data['id'] = id
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/department/create?access_token=%s' %
(COMPANY_URL, accessToken), data=data).json()
return ReturnValue(r)
@access_token
def update_department(id, name=None, parentId=None, order=None, accessToken=None):
data = {'id': id}
if name is not None: data['name'] = name
if parentId is not None: data['parentid'] = parentId
if order is not None: data['order'] = order
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/department/update?access_token=%s' %
(COMPANY_URL, accessToken), data=data).json()
return ReturnValue(r)
@access_token
def delete_department(id, accessToken=None):
params = {
'access_token': accessToken,
'id': id, }
r = requests.get('%s/cgi-bin/department/delete' % COMPANY_URL, params=params).json()
return ReturnValue(r)
@access_token
def get_departments(parentId, accessToken=None):
params = {
'access_token': accessToken,
'id': parentId, }
r = requests.get('%s/cgi-bin/department/list' % COMPANY_URL, params=params).json()
return ReturnValue(r)
@access_token
def create_user(userId, name, departmentIdList,
position=None, mobile=None, gender=None, email=None,
weixinId=None, headImgId=None, extAttr=None,
accessToken=None):
data = {
'userid': userId , 'name': name ,
'department': departmentIdList , 'position': position ,
'mobile': mobile , 'gender': gender ,
'email': email , 'weixinid': weixinId ,
'avatar_mediaid': headImgId , 'extattr': extAttr , }
for k in list(data):
if data[k] is None: del data[k]
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/user/create?access_token=%s' %
(COMPANY_URL, accessToken), data=data).json()
return ReturnValue(r)
@access_token
def update_user(userId, name=None, departmentIdList=None,
position=None, mobile=None, gender=None, email=None,
weixinId=None, headImgId=None, extAttr=None,
accessToken=None):
data = {
'userid': userId , 'name': name ,
'department': departmentIdList , 'position': position ,
'mobile': mobile , 'gender': gender ,
'email': email , 'weixinid': weixinId ,
'avatar_mediaid': headImgId , 'extattr': extAttr , }
for k in list(data):
if data[k] is None: del data[k]
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/user/update?access_token=%s' %
(COMPANY_URL, accessToken), data=data).json()
return ReturnValue(r)
@access_token
def delete_users(userId, accessToken=None):
''' delete user using userId
* userId can be a list or only one userId
'''
if isinstance(userId, list):
data = {'useridlist': userId}
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
        url = '%s/cgi-bin/user/batchdelete?access_token=%s' % (COMPANY_URL, accessToken)
r = requests.post(url, data=data).json()
else:
params = {
'access_token': accessToken,
'userid': userId, }
        url = '%s/cgi-bin/user/delete'
r = requests.get(url % COMPANY_URL, params=params).json()
return ReturnValue(r)
def get_user_info(userId):
''' get info of a user or a list of users
* userId can be a list or only one userId
     * this is for creating api similar to the massive platform
'''
@access_token
    def _get_user_info(userId, accessToken=None):
params = {
'access_token': accessToken,
'userid': userId, }
r = requests.get('%s/cgi-bin/user/get' % COMPANY_URL, params=params).json()
return ReturnValue(r)
if isinstance(userId, list):
userDict = {'user_info_list': []}
for id in userId:
r = _get_user_info(id)
if r:
userDict['user_info_list'].append(r)
else:
userDict['errcode'] = r['errcode']
userDict['errmsg'] = r['errmsg']
break
return ReturnValue(userDict)
else:
return _get_user_info(userId)
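# Usage sketch (never called; the user ids are made-up placeholders) showing
# the two calling conventions the docstring above describes.
def _get_user_info_example():
    single = get_user_info('zhangsan')            # one ReturnValue for one user
    batch = get_user_info(['zhangsan', 'lisi'])   # aggregated under 'user_info_list'
    return single, batch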
@access_token
def get_users(nextOpenId='', departmentId=None, fetchChild=False, status=4, accessToken=None):
''' get users of the department
* nextOpenId is for mp api
'''
if departmentId is None:
return ReturnValue({'errcode': 40035, 'errmsg': 'departmentId must be set',})
params = {
'access_token' : accessToken,
'department_id' : departmentId,
'fetch_child' : int(fetchChild),
'status' : status, }
    r = requests.get('%s/cgi-bin/user/simplelist' % COMPANY_URL, params=params).json()
return ReturnValue(r)
@access_token
def get_detailed_users(nextOpenId='',
departmentId=None, fetchChild=False, status=4, accessToken=None):
''' get users of the department
* nextOpenId is for mp api
'''
if departmentId is None:
return ReturnValue({'errcode': 40035, 'errmsg': 'departmentId must be set',})
params = {
'access_token' : accessToken,
'department_id' : departmentId,
'fetch_child' : int(fetchChild),
'status' : status, }
    r = requests.get('%s/cgi-bin/user/list' % COMPANY_URL, params=params).json()
return ReturnValue(r)
@access_token
def create_tag(name, id=None, accessToken=None):
data = {'tagname': name}
if id is not None: data['tagid'] = id
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tag/create?access_token=%s'
% (COMPANY_URL, accessToken), data=data).json()
return ReturnValue(r)
@access_token
def get_tags(accessToken=None):
r = requests.get('%s/cgi-bin/tag/list?access_token=%s'
        % (COMPANY_URL, accessToken)).json()
return ReturnValue(r)
@access_token
def update_tag(id, name, accessToken=None):
data = encode_send_dict({'tagid': id, 'tagname': name, })
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tag/update?access_token=%s'
% (COMPANY_URL, accessToken), data=data).json()
return ReturnValue(r)
@access_token
def delete_tag(id, accessToken=None):
params = {
'access_token': accessToken,
'tagid': id, }
r = requests.get('%s/cgi-bin/tag/delete' % COMPANY_URL, params=params).json()
return ReturnValue(r)
@access_token
def get_users_of_tag(id, nextOpenId='', accessToken=None):
params = {
'access_token': accessToken,
'tagid': id, }
r = requests.get('%s/cgi-bin/tag/get' % COMPANY_URL, params=params).json()
return ReturnValue(r)
@access_token
def add_users_into_tag(id, userIdList=None, partyList=None, accessToken=None):
if not (userIdList or partyList):
return ReturnValue({'errcode': 40035,
'errmsg': 'either userId or partyList should be set'})
data = {'tagid': id}
if userIdList:
data['userlist'] = userIdList
if partyList:
data['partylist'] = partyList
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tag/addtagusers?access_token=%s'
% (COMPANY_URL, accessToken), data=data).json()
if 'invalidlist' in r or 'invalidparty' in r: r['errcode'] = 40070
return ReturnValue(r)
@access_token
def delete_users_of_tag(id, userIdList=None, partyList=None, accessToken=None):
if not (userIdList or partyList):
return ReturnValue({'errcode': 40035,
'errmsg': 'either userId or partyList should be set'})
data = {'tagid': id}
if userIdList:
data['userlist'] = userIdList
if partyList:
data['partylist'] = partyList
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
r = requests.post('%s/cgi-bin/tag/deltagusers?access_token=%s'
% (COMPANY_URL, accessToken), data=data).json()
if 'invalidlist' in r or 'invalidparty' in r: r['errcode'] = 40070
return ReturnValue(r)
# __server
def upload_contract(csvMediaId, callbackUrl, method='syncuser'):
''' update users with uploaded csv
* method can be syncuser, replaceuser, replaceparty
'''
if method not in ('syncuser', 'replaceuser', 'replaceparty'):
return ReturnValue({'errcode': -10003, 'errmsg':
'method should be syncuser, replaceuser, replaceparty'})
data = {'media_id': csvMediaId,
'callback': {
'url': callbackUrl,
'token': '__server.config.token',
'encodingaeskey': '__server.config.encodingAesKey', }}
data = encode_send_dict(data)
if data is None: return ReturnValue({'errcode': -10001})
@access_token
def upload(method, accessToken=None):
url = '%s/cgi-bin/batch/%s?access_token=%s' % \
(COMPANY_URL, method, accessToken)
r = requests.post(url, data=data).json()
return ReturnValue(r)
return upload(method)
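# Usage sketch (never called; the media id and callback url are placeholders)
# for the batch flow above: start a sync job, then poll its result by job id.
# Whether the reply actually carries 'jobid' depends on the WeChat API response.
def _batch_sync_example():
    job = upload_contract('MEDIA_ID_FROM_UPLOAD', 'https://example.com/callback',
                          method='syncuser')
    if job:
        return get_result(job.get('jobid'))
    return job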
@access_token
def get_result(jobId, accessToken=None):
params = {
'access_token': accessToken,
'jobid': jobId, }
r = requests.get('%s/cgi-bin/batch/getresult' % COMPANY_URL,
params=params).json()
return ReturnValue(r)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import argparse
import codecs
import json
import logging
import os
import ssl
import sys
import time
import signal
from datetime import timedelta
from getpass import getpass
from pgoapi.exceptions import NotLoggedInException, ServerSideRequestThrottlingException, ServerBusyOrOfflineException
from geopy.exc import GeocoderQuotaExceeded
from pokemongo_bot import PokemonGoBot, TreeConfigBuilder
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.health_record import BotEvent
from pokemongo_bot.plugin_loader import PluginLoader
from pokemongo_bot.api_wrapper import PermaBannedException
try:
from demjson import jsonlint
except ImportError:
# Run `pip install -r requirements.txt` to fix this
jsonlint = None
if sys.version_info >= (2, 7, 9):
ssl._create_default_https_context = ssl._create_unverified_context
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(name)10s] [%(levelname)s] %(message)s')
logger = logging.getLogger('cli')
logger.setLevel(logging.INFO)
class SIGINTRecieved(Exception): pass
def main():
bot = False
def handle_sigint(*args):
raise SIGINTRecieved
signal.signal(signal.SIGINT, handle_sigint)
try:
logger.info('PokemonGO Bot v1.0')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
config = init_config()
if not config:
return
logger.info('Configuration initialized')
health_record = BotEvent(config)
health_record.login_success()
finished = False
while not finished:
try:
bot = PokemonGoBot(config)
bot.start()
tree = TreeConfigBuilder(bot, config.raw_tasks).build()
bot.workers = tree
bot.metrics.capture_stats()
bot.health_record = health_record
bot.event_manager.emit(
'bot_start',
sender=bot,
level='info',
formatted='Starting bot...'
)
while True:
bot.tick()
except KeyboardInterrupt:
bot.event_manager.emit(
'bot_exit',
sender=bot,
level='info',
formatted='Exiting bot.'
)
finished = True
report_summary(bot)
except NotLoggedInException:
wait_time = config.reconnecting_timeout * 60
bot.event_manager.emit(
'api_error',
sender=bot,
level='info',
                    formatted='Not logged in, reconnecting in {:.0f} seconds'.format(wait_time)
)
time.sleep(wait_time)
except ServerBusyOrOfflineException:
bot.event_manager.emit(
'api_error',
sender=bot,
level='info',
formatted='Server busy or offline'
)
except ServerSideRequestThrottlingException:
bot.event_manager.emit(
'api_error',
sender=bot,
level='info',
formatted='Server is throttling, reconnecting in 30 seconds'
)
time.sleep(30)
except PermaBannedException:
bot.event_manager.emit(
'api_error',
sender=bot,
level='info',
formatted='Probably permabanned, Game Over ! Play again at https://club.pokemon.com/us/pokemon-trainer-club/sign-up/'
)
except GeocoderQuotaExceeded:
raise Exception("Google Maps API key over requests limit.")
except SIGINTRecieved:
if bot:
bot.event_manager.emit(
'bot_interrupted',
sender=bot,
level='info',
formatted='Bot caught SIGINT. Shutting down.'
)
report_summary(bot)
except Exception as e:
# always report session summary and then raise exception
if bot:
report_summary(bot)
raise
finally:
# Cache here on SIGTERM, or Exception. Check data is available and worth caching.
if bot:
if bot.recent_forts[-1] is not None and bot.config.forts_cache_recent_forts:
cached_forts_path = os.path.join(
_base_dir, 'data', 'recent-forts-%s.json' % bot.config.username
)
try:
with open(cached_forts_path, 'w') as outfile:
json.dump(bot.recent_forts, outfile)
bot.event_manager.emit(
'cached_fort',
sender=bot,
level='debug',
formatted='Forts cached.',
)
except IOError as e:
bot.event_manager.emit(
'error_caching_forts',
sender=bot,
level='debug',
formatted='Error caching forts for {path}',
data={'path': cached_forts_path}
)
def report_summary(bot):
if bot.metrics.start_time is None:
return # Bot didn't actually start, no metrics to show.
metrics = bot.metrics
metrics.capture_stats()
logger.info('')
logger.info('Ran for {}'.format(metrics.runtime()))
logger.info('Total XP Earned: {} Average: {:.2f}/h'.format(metrics.xp_earned(), metrics.xp_per_hour()))
logger.info('Travelled {:.2f}km'.format(metrics.distance_travelled()))
logger.info('Visited {} stops'.format(metrics.visits['latest'] - metrics.visits['start']))
logger.info('Encountered {} pokemon, {} caught, {} released, {} evolved, {} never seen before'
.format(metrics.num_encounters(), metrics.num_captures(), metrics.releases,
metrics.num_evolutions(), metrics.num_new_mons()))
logger.info('Threw {} pokeball{}'.format(metrics.num_throws(), '' if metrics.num_throws() == 1 else 's'))
logger.info('Earned {} Stardust'.format(metrics.earned_dust()))
logger.info('')
if metrics.highest_cp is not None:
logger.info('Highest CP Pokemon: {}'.format(metrics.highest_cp['desc']))
if metrics.most_perfect is not None:
logger.info('Most Perfect Pokemon: {}'.format(metrics.most_perfect['desc']))
def init_config():
parser = argparse.ArgumentParser()
config_file = os.path.join(_base_dir, 'configs', 'config.json')
web_dir = "web"
# If config file exists, load variables from json
load = {}
def _json_loader(filename):
try:
with open(filename, 'rb') as data:
load.update(json.load(data))
except ValueError:
if jsonlint:
with open(filename, 'rb') as data:
lint = jsonlint()
rc = lint.main(['-v', filename])
logger.critical('Error with configuration file')
sys.exit(-1)
# Select a config file code
parser.add_argument("-cf", "--config", help="Config File to use")
config_arg = parser.parse_known_args() and parser.parse_known_args()[0].config or None
if config_arg and os.path.isfile(config_arg):
_json_loader(config_arg)
elif os.path.isfile(config_file):
logger.info('No config argument specified, checking for /configs/config.json')
_json_loader(config_file)
else:
logger.info('Error: No /configs/config.json or specified config')
# Read passed in Arguments
    required = lambda x: x not in load
add_config(
parser,
load,
short_flag="-a",
long_flag="--auth_service",
help="Auth Service ('ptc' or 'google')",
required=required("auth_service"),
default=None
)
add_config(
parser,
load,
short_flag="-u",
long_flag="--username",
help="Username",
default=None
)
add_config(
parser,
load,
short_flag="-ws",
long_flag="--websocket.server_url",
help="Connect to websocket server at given url",
default=False
)
add_config(
parser,
load,
short_flag="-wss",
long_flag="--websocket.start_embedded_server",
help="Start embedded websocket server",
default=False
)
add_config(
parser,
load,
short_flag="-wsr",
long_flag="--websocket.remote_control",
help="Enable remote control through websocket (requires websocekt server url)",
default=False
)
add_config(
parser,
load,
short_flag="-p",
long_flag="--password",
help="Password",
default=None
)
add_config(
parser,
load,
short_flag="-l",
long_flag="--location",
help="Location",
type=parse_unicode_str,
default=''
)
add_config(
parser,
load,
short_flag="-lc",
long_flag="--location_cache",
help="Bot will start at last known location",
type=bool,
default=False
)
add_config(
parser,
load,
long_flag="--forts.spin",
help="Enable Spinning Pokestops",
type=bool,
default=True,
)
add_config(
parser,
load,
short_flag="-w",
long_flag="--walk",
help=
"Walk instead of teleport with given speed (meters per second, e.g. 2.5)",
type=float,
default=2.5
)
add_config(
parser,
load,
short_flag="-k",
long_flag="--gmapkey",
help="Set Google Maps API KEY",
type=str,
default=None
)
add_config(
parser,
load,
short_flag="-e",
long_flag="--show_events",
help="Show events",
type=bool,
default=False
)
add_config(
parser,
load,
short_flag="-d",
long_flag="--debug",
help="Debug Mode",
type=bool,
default=False
)
add_config(
parser,
load,
short_flag="-t",
long_flag="--test",
help="Only parse the specified location",
type=bool,
default=False
)
add_config(
parser,
load,
short_flag="-du",
long_flag="--distance_unit",
help="Set the unit to display distance in (e.g, km for kilometers, mi for miles, ft for feet)",
type=str,
default='km'
)
add_config(
parser,
load,
short_flag="-rt",
long_flag="--reconnecting_timeout",
help="Timeout between reconnecting if error occured (in minutes, e.g. 15)",
type=float,
default=15.0
)
add_config(
parser,
load,
short_flag="-hr",
long_flag="--health_record",
help="Send anonymous bot event to GA for bot health record. Set \"health_record\":false if you need disable it.",
type=bool,
default=True
)
add_config(
parser,
load,
short_flag="-ac",
long_flag="--forts.avoid_circles",
help="Avoids circles (pokestops) of the max size set in max_circle_size flag",
type=bool,
default=False,
)
add_config(
parser,
load,
short_flag="-mcs",
long_flag="--forts.max_circle_size",
help="If avoid_circles flag is set, this flag specifies the maximum size of circles (pokestops) avoided",
type=int,
default=10,
)
add_config(
parser,
load,
short_flag="-crf",
long_flag="--forts.cache_recent_forts",
help="Caches recent forts used by max_circle_size",
type=bool,
default=True,
)
add_config(
parser,
load,
long_flag="--map_object_cache_time",
help="Amount of seconds to keep the map object in cache (bypass Niantic throttling)",
type=float,
default=5.0
)
add_config(
parser,
load,
long_flag="--logging_color",
help="If logging_color is set to true, colorized logging handler will be used",
type=bool,
default=True
)
add_config(
parser,
load,
short_flag="-cte",
long_flag="--catch_throw_parameters.excellent_rate",
help="Define the odd of performing an excellent throw",
type=float,
default=1
)
add_config(
parser,
load,
short_flag="-ctg",
long_flag="--catch_throw_parameters.great_rate",
help="Define the odd of performing a great throw",
type=float,
default=0
)
add_config(
parser,
load,
short_flag="-ctn",
long_flag="--catch_throw_parameters.nice_rate",
help="Define the odd of performing a nice throw",
type=float,
default=0
)
add_config(
parser,
load,
short_flag="-ctm",
long_flag="--catch_throw_parameters.normal_rate",
help="Define the odd of performing a normal throw",
type=float,
default=0
)
add_config(
parser,
load,
short_flag="-cts",
long_flag="--catch_throw_parameters.spin_success_rate",
help="Define the odds of performing a spin throw (Value between 0 (never) and 1 (always))",
type=float,
default=1
)
# Start to parse other attrs
config = parser.parse_args()
if not config.username and 'username' not in load:
config.username = raw_input("Username: ")
if not config.password and 'password' not in load:
config.password = getpass("Password: ")
config.encrypt_location = load.get('encrypt_location','')
config.catch = load.get('catch', {})
config.release = load.get('release', {})
config.action_wait_max = load.get('action_wait_max', 4)
config.action_wait_min = load.get('action_wait_min', 1)
config.plugins = load.get('plugins', [])
config.raw_tasks = load.get('tasks', [])
config.min_ultraball_to_keep = load.get('min_ultraball_to_keep', None)
config.save_pokemon_spawn = load.get('save_pokemon_spawn', False)
config.vips = load.get('vips', {})
if config.map_object_cache_time < 0.0:
parser.error("--map_object_cache_time is out of range! (should be >= 0.0)")
return None
if len(config.raw_tasks) == 0:
logging.error("No tasks are configured. Did you mean to configure some behaviors? Read https://github.com/PokemonGoF/PokemonGo-Bot/wiki/Configuration-files#configuring-tasks for more information")
return None
if config.auth_service not in ['ptc', 'google']:
logging.error("Invalid Auth service specified! ('ptc' or 'google')")
return None
def task_configuration_error(flag_name):
parser.error("""
\"{}\" was removed from the configuration options.
You can now change the behavior of the bot by modifying the \"tasks\" key.
Read https://github.com/PokemonGoF/PokemonGo-Bot/wiki/Configuration-files#configuring-tasks for more information.
""".format(flag_name))
old_flags = ['mode', 'catch_pokemon', 'spin_forts', 'forts_spin', 'hatch_eggs', 'release_pokemon', 'softban_fix',
'longer_eggs_first', 'evolve_speed', 'use_lucky_egg', 'item_filter', 'evolve_all', 'evolve_cp_min', 'max_steps']
for flag in old_flags:
if flag in load:
task_configuration_error(flag)
return None
nested_old_flags = [('forts', 'spin'), ('forts', 'move_to_spin'), ('navigator', 'path_mode'), ('navigator', 'path_file'), ('navigator', 'type')]
for outer, inner in nested_old_flags:
if load.get(outer, {}).get(inner, None):
task_configuration_error('{}.{}'.format(outer, inner))
return None
if "evolve_captured" in load:
logger.warning('The evolve_captured argument is no longer supported. Please use the EvolvePokemon task instead')
if not (config.location or config.location_cache):
parser.error("Needs either --use-location-cache or --location.")
return None
plugin_loader = PluginLoader()
for plugin in config.plugins:
plugin_loader.load_plugin(plugin)
# create web dir if not exists
try:
os.makedirs(web_dir)
except OSError:
if not os.path.isdir(web_dir):
raise
fix_nested_config(config)
return config
def add_config(parser, json_config, short_flag=None, long_flag=None, **kwargs):
if not long_flag:
raise Exception('add_config calls require the long_flag parameter!')
full_attribute_path = long_flag.split('--')[1]
attribute_name = full_attribute_path.split('.')[-1]
if '.' in full_attribute_path: # embedded config!
embedded_in = full_attribute_path.split('.')[0: -1]
for level in embedded_in:
json_config = json_config.get(level, {})
if 'default' in kwargs:
kwargs['default'] = json_config.get(attribute_name, kwargs['default'])
if short_flag:
args = (short_flag, long_flag)
else:
args = (long_flag,)
parser.add_argument(*args, **kwargs)
def fix_nested_config(config):
config_dict = config.__dict__
for key, value in config_dict.iteritems():
if '.' in key:
new_key = key.replace('.', '_')
config_dict[new_key] = value
del config_dict[key]
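# Illustration of how the two helpers above cooperate on dotted flags (the
# values below are made up for this example, not taken from a real config):
#
#   add_config(parser, {"forts": {"spin": False}}, long_flag="--forts.spin",
#              type=bool, default=True)
#
# walks the nested json_config to pull the default from load["forts"]["spin"]
# (False here, overriding the hard-coded True) and registers "--forts.spin"
# with argparse. After parsing, fix_nested_config() renames the resulting
# "forts.spin" attribute to "forts_spin" so it can be read as config.forts_spin.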
def parse_unicode_str(string):
try:
return string.decode('utf8')
except UnicodeEncodeError:
return string
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
"""
This module is for generating random, valid web navigator's
configs & User-Agent HTTP headers.
Functions:
* generate_user_agent: generates User-Agent HTTP header
* generate_navigator: generates web navigator's config
* generate_navigator_js: generates web navigator's config with keys
identical to those used in the navigator object
FIXME:
* add Edge, Safari and Opera support
* add random config i.e. windows is more common than linux
Specs:
* https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent/Firefox
* http://msdn.microsoft.com/en-us/library/ms537503(VS.85).aspx
* https://developer.chrome.com/multidevice/user-agent
* http://www.javascriptkit.com/javatutors/navigator.shtml
Release history:
* https://en.wikipedia.org/wiki/Firefox_release_history
* https://en.wikipedia.org/wiki/Google_Chrome_release_history
* https://en.wikipedia.org/wiki/Internet_Explorer_version_history
* https://en.wikipedia.org/wiki/Android_version_history
Lists of user agents:
* http://www.useragentstring.com/
* http://www.user-agents.org/
* http://www.webapps-online.com/online-tools/user-agent-strings
"""
# pylint: enable=line-too-long
from random import SystemRandom
from datetime import datetime, timedelta
from itertools import product
import six
from .warning import warn
# pylint: disable=unused-import
from .device import SMARTPHONE_DEV_IDS, TABLET_DEV_IDS
# pylint: enable=unused-import
from .error import InvalidOption
__all__ = ['generate_user_agent', 'generate_navigator',
'generate_navigator_js']
randomizer = SystemRandom()
DEVICE_TYPE_OS = {
'desktop': ('win', 'mac', 'linux'),
'smartphone': ('android',),
'tablet': ('android',),
}
OS_DEVICE_TYPE = {
'win': ('desktop',),
'linux': ('desktop',),
'mac': ('desktop',),
'android': ('smartphone', 'tablet'),
}
DEVICE_TYPE_NAVIGATOR = {
'desktop': ('chrome', 'firefox', 'ie'),
'smartphone': ('firefox', 'chrome'),
'tablet': ('firefox', 'chrome'),
}
NAVIGATOR_DEVICE_TYPE = {
'ie': ('desktop',),
'chrome': ('desktop', 'smartphone', 'tablet'),
'firefox': ('desktop', 'smartphone', 'tablet'),
}
OS_PLATFORM = {
'win': (
'Windows NT 5.1', # Windows XP
'Windows NT 6.1', # Windows 7
'Windows NT 6.2', # Windows 8
'Windows NT 6.3', # Windows 8.1
'Windows NT 10.0', # Windows 10
),
'mac': (
'Macintosh; Intel Mac OS X 10.8',
'Macintosh; Intel Mac OS X 10.9',
'Macintosh; Intel Mac OS X 10.10',
'Macintosh; Intel Mac OS X 10.11',
'Macintosh; Intel Mac OS X 10.12',
),
'linux': (
'X11; Linux',
'X11; Ubuntu; Linux',
),
'android': (
'Android 4.4', # 2013-10-31
'Android 4.4.1', # 2013-12-05
'Android 4.4.2', # 2013-12-09
'Android 4.4.3', # 2014-06-02
'Android 4.4.4', # 2014-06-19
'Android 5.0', # 2014-11-12
'Android 5.0.1', # 2014-12-02
'Android 5.0.2', # 2014-12-19
'Android 5.1', # 2015-03-09
'Android 5.1.1', # 2015-04-21
'Android 6.0', # 2015-10-05
'Android 6.0.1', # 2015-12-07
#'Android 7.0', # 2016-08-22
#'Android 7.1', # 2016-10-04
#'Android 7.1.1', # 2016-12-05
),
}
OS_CPU = {
'win': (
'', # 32bit
'Win64; x64', # 64bit
'WOW64', # 32bit process on 64bit system
),
'linux': (
'i686', # 32bit
'x86_64', # 64bit
'i686 on x86_64', # 32bit process on 64bit system
),
'mac': (
'',
),
'android': (
'armv7l', # 32bit
'armv8l', # 64bit
),
}
OS_NAVIGATOR = {
'win': ('chrome', 'firefox', 'ie'),
'mac': ('firefox', 'chrome'),
'linux': ('chrome', 'firefox'),
'android': ('firefox', 'chrome'),
}
NAVIGATOR_OS = {
'chrome': ('win', 'linux', 'mac', 'android'),
'firefox': ('win', 'linux', 'mac', 'android'),
'ie': ('win',),
}
FIREFOX_VERSION = (
('45.0', datetime(2016, 3, 8)),
('46.0', datetime(2016, 4, 26)),
('47.0', datetime(2016, 6, 7)),
('48.0', datetime(2016, 8, 2)),
('49.0', datetime(2016, 9, 20)),
('50.0', datetime(2016, 11, 15)),
('51.0', datetime(2017, 1, 24)),
)
# Top chrome builds from website access log
# for september, october 2020
CHROME_BUILD = '''
80.0.3987.132
80.0.3987.149
80.0.3987.99
81.0.4044.117
81.0.4044.138
83.0.4103.101
83.0.4103.106
83.0.4103.96
84.0.4147.105
84.0.4147.111
84.0.4147.125
84.0.4147.135
84.0.4147.89
85.0.4183.101
85.0.4183.102
85.0.4183.120
85.0.4183.121
85.0.4183.127
85.0.4183.81
85.0.4183.83
86.0.4240.110
86.0.4240.111
86.0.4240.114
86.0.4240.183
86.0.4240.185
86.0.4240.75
86.0.4240.78
86.0.4240.80
86.0.4240.96
86.0.4240.99
'''.strip().splitlines()
IE_VERSION = (
# (numeric ver, string ver, trident ver) # release year
(8, 'MSIE 8.0', '4.0'), # 2009
(9, 'MSIE 9.0', '5.0'), # 2011
(10, 'MSIE 10.0', '6.0'), # 2012
(11, 'MSIE 11.0', '7.0'), # 2013
)
USER_AGENT_TEMPLATE = {
'firefox': (
'Mozilla/5.0'
' ({system[ua_platform]}; rv:{app[build_version]})'
' Gecko/{app[geckotrail]}'
' Firefox/{app[build_version]}'
),
'chrome': (
'Mozilla/5.0'
' ({system[ua_platform]}) AppleWebKit/537.36'
' (KHTML, like Gecko)'
' Chrome/{app[build_version]} Safari/537.36'
),
'chrome_smartphone': (
'Mozilla/5.0'
' ({system[ua_platform]}) AppleWebKit/537.36'
' (KHTML, like Gecko)'
' Chrome/{app[build_version]} Mobile Safari/537.36'
),
'chrome_tablet': (
'Mozilla/5.0'
' ({system[ua_platform]}) AppleWebKit/537.36'
' (KHTML, like Gecko)'
' Chrome/{app[build_version]} Safari/537.36'
),
'ie_less_11': (
'Mozilla/5.0'
' (compatible; {app[build_version]}; {system[ua_platform]};'
' Trident/{app[trident_version]})'
),
'ie_11': (
'Mozilla/5.0'
' ({system[ua_platform]}; Trident/{app[trident_version]};'
' rv:11.0) like Gecko'
),
}
def get_firefox_build():
build_ver, date_from = randomizer.choice(FIREFOX_VERSION)
try:
idx = FIREFOX_VERSION.index((build_ver, date_from))
_, date_to = FIREFOX_VERSION[idx + 1]
except IndexError:
date_to = date_from + timedelta(days=1)
sec_range = (date_to - date_from).total_seconds() - 1
build_rnd_time = (
date_from + timedelta(seconds=randomizer.randint(0, int(sec_range)))
)
return build_ver, build_rnd_time.strftime('%Y%m%d%H%M%S')
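# Example return value (randomly picked between a version's release date and
# the next release): ('47.0', '20160619083045'); the second element is later
# exposed as the navigator's build_id.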
def get_chrome_build():
return randomizer.choice(CHROME_BUILD)
def get_ie_build():
"""
Return a random IE version as a tuple
(numeric_version, ua-string component, trident_version)
Example: (8, 'MSIE 8.0', '4.0')
"""
return randomizer.choice(IE_VERSION)
MACOSX_CHROME_BUILD_RANGE = {
# https://en.wikipedia.org/wiki/MacOS#Release_history
'10.8': (0, 8),
'10.9': (0, 5),
'10.10': (0, 5),
'10.11': (0, 6),
'10.12': (0, 2)
}
def fix_chrome_mac_platform(platform):
"""
Chrome on Mac OS adds minor version number and uses underscores instead
of dots. E.g. platform for Firefox will be: 'Intel Mac OS X 10.11'
but for Chrome it will be 'Intel Mac OS X 10_11_6'.
:param platform: - string like "Macintosh; Intel Mac OS X 10.8"
:return: platform with version number including minor number and formatted
with underscores, e.g. "Macintosh; Intel Mac OS X 10_8_2"
"""
ver = platform.split('OS X ')[1]
build_range = range(*MACOSX_CHROME_BUILD_RANGE[ver])
build = randomizer.choice(build_range)
mac_ver = ver.replace('.', '_') + '_' + str(build)
return 'Macintosh; Intel Mac OS X %s' % mac_ver
def build_system_components(device_type, os_id, navigator_id):
"""
For given os_id build random platform and oscpu
components
Returns dict {platform_version, platform, ua_platform, oscpu}
platform_version is OS name used in different places
ua_platform goes to navigator.platform
platform is used in building navigator.userAgent
oscpu goes to navigator.oscpu
"""
if os_id == 'win':
platform_version = randomizer.choice(OS_PLATFORM['win'])
cpu = randomizer.choice(OS_CPU['win'])
if cpu:
platform = '%s; %s' % (platform_version, cpu)
else:
platform = platform_version
res = {
'platform_version': platform_version,
'platform': platform,
'ua_platform': platform,
'oscpu': platform,
}
elif os_id == 'linux':
cpu = randomizer.choice(OS_CPU['linux'])
platform_version = randomizer.choice(OS_PLATFORM['linux'])
platform = '%s %s' % (platform_version, cpu)
res = {
'platform_version': platform_version,
'platform': platform,
'ua_platform': platform,
'oscpu': 'Linux %s' % cpu,
}
elif os_id == 'mac':
cpu = randomizer.choice(OS_CPU['mac'])
platform_version = randomizer.choice(OS_PLATFORM['mac'])
platform = platform_version
if navigator_id == 'chrome':
platform = fix_chrome_mac_platform(platform)
res = {
'platform_version': platform_version,
'platform': 'MacIntel',
'ua_platform': platform,
'oscpu': 'Intel Mac OS X %s' % platform.split(' ')[-1],
}
elif os_id == 'android':
assert navigator_id in ('firefox', 'chrome')
assert device_type in ('smartphone', 'tablet')
platform_version = randomizer.choice(OS_PLATFORM['android'])
if navigator_id == 'firefox':
if device_type == 'smartphone':
ua_platform = '%s; Mobile' % platform_version
elif device_type == 'tablet':
ua_platform = '%s; Tablet' % platform_version
elif navigator_id == 'chrome':
    if device_type == 'tablet':
        device_id = randomizer.choice(TABLET_DEV_IDS)
    else:
        device_id = randomizer.choice(SMARTPHONE_DEV_IDS)
    ua_platform = 'Linux; %s; %s' % (platform_version, device_id)
oscpu = 'Linux %s' % randomizer.choice(OS_CPU['android'])
res = {
'platform_version': platform_version,
'ua_platform': ua_platform,
'platform': oscpu,
'oscpu': oscpu,
}
return res
def build_app_components(os_id, navigator_id):
"""
For given navigator_id build app features
Returns dict {name, product_sub, vendor, build_version, build_id}
"""
if navigator_id == 'firefox':
build_version, build_id = get_firefox_build()
if os_id in ('win', 'linux', 'mac'):
geckotrail = '20100101'
else:
geckotrail = build_version
res = {
'name': 'Netscape',
'product_sub': '20100101',
'vendor': '',
'build_version': build_version,
'build_id': build_id,
'geckotrail': geckotrail,
}
elif navigator_id == 'chrome':
res = {
'name': 'Netscape',
'product_sub': '20030107',
'vendor': 'Google Inc.',
'build_version': get_chrome_build(),
'build_id': None,
}
elif navigator_id == 'ie':
num_ver, build_version, trident_version = get_ie_build()
if num_ver >= 11:
app_name = 'Netscape'
else:
app_name = 'Microsoft Internet Explorer'
res = {
'name': app_name,
'product_sub': None,
'vendor': '',
'build_version': build_version,
'build_id': None,
'trident_version': trident_version,
}
return res
def get_option_choices(opt_name, opt_value, default_value, all_choices):
"""
Generate possible choices for the option `opt_name`
limited to `opt_value` value with default value
as `default_value`
"""
choices = []
if isinstance(opt_value, six.string_types):
choices = [opt_value]
elif isinstance(opt_value, (list, tuple)):
choices = list(opt_value)
elif opt_value is None:
choices = default_value
else:
raise InvalidOption('Option %s has invalid'
' value: %s' % (opt_name, opt_value))
if 'all' in choices:
choices = all_choices
for item in choices:
if item not in all_choices:
raise InvalidOption('Choices of option %s contains invalid'
' item: %s' % (opt_name, item))
return choices
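# Examples of how option values are normalised (illustrative calls):
#   get_option_choices('os', 'win', ['win'], ['win', 'linux', 'mac'])
#       -> ['win']            # a plain string becomes a one-item list
#   get_option_choices('os', None, ['win', 'linux'], ['win', 'linux', 'mac'])
#       -> ['win', 'linux']   # None falls back to the default choices
#   get_option_choices('os', 'all', ['win'], ['win', 'linux', 'mac'])
#       -> ['win', 'linux', 'mac']   # 'all' expands to every known choice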
def pick_config_ids(device_type, os, navigator):
"""
Select one random (device_type, os_id, navigator_id) triple from
all possible combinations matching the given os and
navigator filters.
:param os: allowed os(es)
:type os: string or list/tuple or None
:param navigator: allowed browser engine(s)
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
"desktop", "smartphone", "tablet", "all"
"""
if os is None:
default_dev_types = ['desktop']
else:
default_dev_types = list(DEVICE_TYPE_OS.keys())
dev_type_choices = get_option_choices(
'device_type', device_type, default_dev_types,
list(DEVICE_TYPE_OS.keys())
)
os_choices = get_option_choices('os', os, list(OS_NAVIGATOR.keys()),
list(OS_NAVIGATOR.keys()))
nav_choices = get_option_choices('navigator', navigator,
list(NAVIGATOR_OS.keys()),
list(NAVIGATOR_OS.keys()))
variants = []
for dev, os, nav in product(dev_type_choices, os_choices,
nav_choices):
if (os in DEVICE_TYPE_OS[dev]
and nav in DEVICE_TYPE_NAVIGATOR[dev]
and nav in OS_NAVIGATOR[os]):
variants.append((dev, os, nav))
if not variants:
raise InvalidOption('Options device_type, os and navigator'
' conflict with each other')
device_type, os_id, navigator_id = randomizer.choice(variants)
assert os_id in OS_PLATFORM
assert navigator_id in NAVIGATOR_OS
assert device_type in DEVICE_TYPE_OS
return device_type, os_id, navigator_id
def choose_ua_template(device_type, navigator_id, app):
tpl_name = navigator_id
if navigator_id == 'ie':
tpl_name = ('ie_11' if app['build_version'] == 'MSIE 11.0'
else 'ie_less_11')
if navigator_id == 'chrome':
if device_type == 'smartphone':
tpl_name = 'chrome_smartphone'
if device_type == 'tablet':
tpl_name = 'chrome_tablet'
return USER_AGENT_TEMPLATE[tpl_name]
def build_navigator_app_version(os_id, navigator_id,
platform_version, user_agent):
if navigator_id in ('chrome', 'ie'):
assert user_agent.startswith('Mozilla/')
app_version = user_agent.split('Mozilla/', 1)[1]
elif navigator_id == 'firefox':
if os_id == 'android':
app_version = '5.0 (%s)' % platform_version
else:
os_token = {
'win': 'Windows',
'mac': 'Macintosh',
'linux': 'X11',
}[os_id]
app_version = '5.0 (%s)' % os_token
return app_version
def generate_navigator(os=None, navigator=None, platform=None,
device_type=None):
"""
Generates web navigator's config
:param os: limit list of oses for generation
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
"desktop", "smartphone", "tablet", "all"
:return: User-Agent config
:rtype: dict with keys (os, name, platform, oscpu, build_version,
build_id, app_version, app_name, app_code_name,
product, product_sub, vendor, vendor_sub,
user_agent)
:raises InvalidOption: if could not generate user-agent for
any combination of allowed platforms and navigators
:raise InvalidOption: if any of passed options is invalid
"""
if platform is not None:
os = platform
warn('The `platform` option is deprecated.'
' Use `os` option instead.', stacklevel=3)
device_type, os_id, navigator_id = (
pick_config_ids(device_type, os, navigator)
)
system = build_system_components(
device_type, os_id, navigator_id)
app = build_app_components(os_id, navigator_id)
ua_template = choose_ua_template(
device_type, navigator_id, app)
user_agent = ua_template.format(system=system, app=app)
app_version = build_navigator_app_version(
os_id, navigator_id, system['platform_version'], user_agent)
return {
# ids
'os_id': os_id,
'navigator_id': navigator_id,
# system components
'platform': system['platform'],
'oscpu': system['oscpu'],
# app components
'build_version': app['build_version'],
'build_id': app['build_id'],
'app_version': app_version,
'app_name': app['name'],
'app_code_name': 'Mozilla',
'product': 'Gecko',
'product_sub': app['product_sub'],
'vendor': app['vendor'],
'vendor_sub': '',
# compiled user agent
'user_agent': user_agent,
}
def generate_user_agent(os=None, navigator=None, platform=None,
device_type=None):
"""
Generates HTTP User-Agent header
:param os: limit list of os for generation
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
"desktop", "smartphone", "tablet", "all"
:return: User-Agent string
:rtype: string
:raises InvalidOption: if could not generate user-agent for
any combination of allowed oses and navigators
:raise InvalidOption: if any of passed options is invalid
"""
return generate_navigator(os=os, navigator=navigator,
platform=platform,
device_type=device_type)['user_agent']
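# Usage sketch (illustrative; the output is randomised on every call):
#   generate_user_agent()
#       e.g. 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36'
#            ' (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
#   generate_user_agent(os='linux', navigator='firefox')
#       e.g. 'Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0'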
def generate_navigator_js(os=None, navigator=None, platform=None,
device_type=None):
"""
Generates web navigator's config with keys corresponding
to keys of the `window.navigator` JavaScript object.
:param os: limit list of oses for generation
:type os: string or list/tuple or None
:param navigator: limit list of browser engines for generation
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
"desktop", "smartphone", "tablet", "all"
:return: User-Agent config
:rtype: dict with keys (TODO)
:raises InvalidOption: if could not generate user-agent for
any combination of allowed oses and navigators
:raise InvalidOption: if any of passed options is invalid
"""
config = generate_navigator(os=os, navigator=navigator,
platform=platform,
device_type=device_type)
return {
'appCodeName': config['app_code_name'],
'appName': config['app_name'],
'appVersion': config['app_version'],
'platform': config['platform'],
'userAgent': config['user_agent'],
'oscpu': config['oscpu'],
'product': config['product'],
'productSub': config['product_sub'],
'vendor': config['vendor'],
'vendorSub': config['vendor_sub'],
'buildID': config['build_id'],
}
|
|
# This is a Python implementation of the following jsonLogic JS library:
# https://github.com/jwadhams/json-logic-js
from __future__ import unicode_literals
import sys
from six.moves import reduce
import logging
logger = logging.getLogger(__name__)
try:
unicode
except NameError:
pass
else:
# Python 2 fallback.
str = unicode
def if_(*args):
"""Implements the 'if' operator with support for multiple elseif-s."""
for i in range(0, len(args) - 1, 2):
if args[i]:
return args[i + 1]
if len(args) % 2:
return args[-1]
else:
return None
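# Examples:
#   if_(False, 1, True, 2, 3)   -> 2   (first truthy condition wins)
#   if_(False, 1, False, 2, 3)  -> 3   (trailing odd argument acts as the 'else')
#   if_(False, 1)               -> None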
def soft_equals(a, b):
"""Implements the '==' operator, which does type JS-style coertion."""
if isinstance(a, str) or isinstance(b, str):
return str(a) == str(b)
if isinstance(a, bool) or isinstance(b, bool):
return bool(a) is bool(b)
return a == b
def hard_equals(a, b):
"""Implements the '===' operator."""
if type(a) != type(b):
return False
return a == b
def less(a, b, *args):
"""Implements the '<' operator with JS-style type coertion."""
types = set([type(a), type(b)])
if float in types or int in types:
try:
a, b = float(a), float(b)
except TypeError:
# NaN
return False
return a < b and (not args or less(b, *args))
def less_or_equal(a, b, *args):
"""Implements the '<=' operator with JS-style type coertion."""
return (
less(a, b) or soft_equals(a, b)
) and (not args or less_or_equal(b, *args))
def to_numeric(arg):
"""
Converts a string either to int or to float.
This matters because operators receive raw strings, e.g. in
{"!==": [{"+": "0"}, 0.0]} the "+" must turn "0" into a number first.
"""
if isinstance(arg, str):
if '.' in arg:
return float(arg)
else:
return int(arg)
return arg
def plus(*args):
"""Sum converts either to ints or to floats."""
return sum(to_numeric(arg) for arg in args)
def minus(*args):
"""Also, converts either to ints or to floats."""
if len(args) == 1:
return -to_numeric(args[0])
return to_numeric(args[0]) - to_numeric(args[1])
def merge(*args):
"""Implements the 'merge' operator for merging lists."""
ret = []
for arg in args:
if isinstance(arg, list) or isinstance(arg, tuple):
ret += list(arg)
else:
ret.append(arg)
return ret
def get_var(data, var_name, not_found=None):
"""Gets variable value from data dictionary."""
try:
for key in str(var_name).split('.'):
try:
data = data[key]
except TypeError:
data = data[int(key)]
except (KeyError, TypeError, ValueError):
return not_found
else:
return data
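# Examples of dotted-path lookups (illustrative data):
#   get_var({'a': {'b': [10, 20]}}, 'a.b.1')        -> 20
#   get_var({'a': 1}, 'missing_key', not_found=0)   -> 0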
def missing(data, *args):
"""Implements the missing operator for finding missing variables."""
not_found = object()
if args and isinstance(args[0], list):
args = args[0]
ret = []
for arg in args:
if get_var(data, arg, not_found) is not_found:
ret.append(arg)
return ret
def missing_some(data, min_required, args):
"""Implements the missing_some operator for finding missing variables."""
if min_required < 1:
return []
found = 0
not_found = object()
ret = []
for arg in args:
if get_var(data, arg, not_found) is not_found:
ret.append(arg)
else:
found += 1
if found >= min_required:
return []
return ret
operations = {
"==": soft_equals,
"===": hard_equals,
"!=": lambda a, b: not soft_equals(a, b),
"!==": lambda a, b: not hard_equals(a, b),
">": lambda a, b: less(b, a),
">=": lambda a, b: less(b, a) or soft_equals(a, b),
"<": less,
"<=": less_or_equal,
"!": lambda a: not a,
"!!": bool,
"%": lambda a, b: a % b,
"and": lambda *args: reduce(lambda total, arg: total and arg, args, True),
"or": lambda *args: reduce(lambda total, arg: total or arg, args, False),
"?:": lambda a, b, c: b if a else c,
"if": if_,
"log": lambda a: logger.info(a) or a,
"in": lambda a, b: a in b if "__contains__" in dir(b) else False,
"cat": lambda *args: "".join(str(arg) for arg in args),
"+": plus,
"*": lambda *args: reduce(lambda total, arg: total * float(arg), args, 1),
"-": minus,
"/": lambda a, b=None: a if b is None else float(a) / float(b),
"min": lambda *args: min(args),
"max": lambda *args: max(args),
"merge": merge,
"count": lambda *args: sum(1 if a else 0 for a in args),
}
def jsonLogic(tests, data=None):
"""Executes the json-logic with given data."""
# You've recursed to a primitive, stop!
if tests is None or not isinstance(tests, dict):
return tests
data = data or {}
operator = list(tests.keys())[0]
values = tests[operator]
# Easy syntax for unary operators, like {"var": "x"} instead of strict
# {"var": ["x"]}
if not isinstance(values, list) and not isinstance(values, tuple):
values = [values]
# Recursion!
values = [jsonLogic(val, data) for val in values]
if operator == 'var':
return get_var(data, *values)
if operator == 'missing':
return missing(data, *values)
if operator == 'missing_some':
return missing_some(data, *values)
if operator not in operations:
raise ValueError("Unrecognized operation %s" % operator)
return operations[operator](*values)
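# Minimal usage sketch (not part of the original json-logic port); the rule and
# data below are illustrative values chosen for this example only.
if __name__ == '__main__':
    rule = {"and": [
        {"<": [{"var": "temp"}, 110]},
        {"==": [{"var": "pie.filling"}, "apple"]},
    ]}
    data = {"temp": 100, "pie": {"filling": "apple"}}
    print(jsonLogic(rule, data))                            # True
    print(jsonLogic({"var": "pie.filling"}, data))          # apple
    print(jsonLogic({"missing": ["temp", "oven"]}, data))   # ['oven']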
|
|
from exceptions import CommandError, ParameterError
import time
import copy # deepcopy
from CommandHandler import CommandHandler
"""
Parsers
They are used to validate every arg before sending it to the Expenses functions
"""
@CommandHandler.addCommand("add")
def parseAdd(expenseInstance, args):
"""
Prepares and validates the arguments for the 'add <sum> <category>'
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: number of affected entries
Raises: ParameterError if any of the arguments can't be validated
"""
if len(args) < 2:
raise ParameterError("[command_add]: Not enough parameters received")
if not isInt(args[0]):
raise ParameterError("[command_add]: Sum must be an integer")
return expenseInstance.addToday(int(args[0]), args[1])
@CommandHandler.addCommand("insert")
def parseInsert(expenseInstance, args):
"""
Prepares and validates the arguments for 'insert <day> <sum> <category>'
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: number of affected entries
Raises: ParameterError if any of the arguments can't be validated
"""
if len(args) < 3:
raise ParameterError("[command_insert]: Not enough parameters received")
if not isInt(args[0]):
raise ParameterError("[command_insert]: Day must be an integer")
day = int(args[0])
if day < 1 or day > 31:
raise ParameterError("[command_insert]: Day is not in range(1, 31)")
if not isInt(args[1]):
raise ParameterError("[command_insert]: Sum must be an integer")
return expenseInstance.insertExpense(day, int(args[1]), args[2])
@CommandHandler.addCommand("remove")
def parseRemove(expenseInstance, args):
"""
Prepares and validates the arguments for:
'remove <day>'
'remove <startDay> to <endDay>'
'remove <category>'
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: number of affected entries
Raises: ParameterError if any of the arguments can't be validated
"""
if len(args) < 1:
raise ParameterError("[command_remove]: Not enough parameters received")
if len(args) == 1:
if isInt(args[0]):
# case remove day
day = int(args[0])
if day < 1 or day > 31:
raise ParameterError("[command_remove]: Day is not in range(1, 31)")
return expenseInstance.removeAllFromDay(day)
else:
# this should be category case
return expenseInstance.removeAllFromCategory(args[0])
elif len(args) == 3:
if args[1] != 'to':
raise ParameterError("[command_remove]: Expected 'to', got '{}'".format(args[1]))
if isInt(args[0]) and isInt(args[2]):
day1 = int(args[0])
day2 = int(args[2])
if day1 < 1 or day1 > 31:
raise ParameterError("[command_remove]: startDay is not in range(1, 31)")
if day2 < 1 or day2 > 31:
raise ParameterError("[command_remove]: endDay is not in range(1, 31)")
return expenseInstance.removeBetweenDays(day1, day2)
else:
raise ParameterError("[command_remove]: Invalid number of parameters received")
@CommandHandler.addCommand("list")
def parseList(expenseInstance, args):
"""
Prepares and validates the arguments for:
'list '
'list <category>'
'list <category> [<, =, >] <value>'
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: formatted string containing a table with the result
Raises: ParameterError if any of the arguments can't be validated
"""
if len(args) == 0:
# list all
result = expenseInstance.listAll()
string = ""
string += '-'*34
string += ('\n|{: ^10}|{: ^10}|{: ^10}|\n'.format("Day", "Category", "Value"))
string += ('|{:-^10}|{:-^10}|{:-^10}|\n'.format("", "", ""))
for element in result:
string += ('|{: ^10}|{: ^10}|{: ^10}|\n'.format("Day {}".format(element['day']), element['category'], element['value']))
string += ('-'*34)
return string
elif len(args) == 1:
# list by category
result = expenseInstance.listCategory(args[0])
string = ""
string += '{:-^23}\n'.format(args[0])
for element in result:
string += ('|{: ^10}|{: ^10}|\n'.format("Day {}".format(element['day']), element['value']))
string += ('-'*23)
return string
elif len(args) == 3:
# list by category and comparator
if args[1] in ['<', '>', "="]:
if isInt(args[2]):
result = expenseInstance.listComplex(args[0], args[1], int(args[2]))
string = ""
string += '{:-^23}\n'.format(args[0])
for element in result:
string += ('|{: ^10}|{: ^10}|\n'.format("Day {}".format(element['day']), element['value']))
string += ('-'*23)
return string
else:
raise ParameterError("[command_list]: n must be an integer")
else:
raise ParameterError("[command_list]: Expected '<' or '>' or '=', got '{}'".format(args[1]))
else:
raise ParameterError("[command_list]: Invalid number of parameters received")
@CommandHandler.addCommand("sum")
def parseSum(expenseInstance, args):
"""
Prepares and validates the arguments for the sum command
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: formatted string containing the result
Raises: ParameterError if any of the arguments can't be validated
"""
if len(args) < 1:
raise ParameterError("[command_sum]: Not enough parameters received")
result = expenseInstance.sumCategory(args[0])
return ('The sum for \'{}\' category is: {}'.format(args[0], result))
@CommandHandler.addCommand("max")
def parseMax(expenseInstance, args):
"""
Prepares and validates the arguments for the max command
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: formatted string containing the result
Raises: ParameterError if any of the arguments can't be validated
"""
result = expenseInstance.maxDay()
return 'Day {} with {} spent on: {}'.format(result[0], result[2], result[1])
@CommandHandler.addCommand("sort")
def parseSort(expenseInstance, args):
"""
Prepares and validates the arguments for:
'sort day'
'sort <category>'
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: formatted string containing the result
Raises: ParameterError if any of the arguments can't be validated
"""
if len(args) < 1:
raise ParameterError("[command_sort]: Not enough parameters received")
if args[0] == 'day':
result = expenseInstance.sortByDay()
string = ""
string += ('-'*23) + '\n'
for element in result:
string += ('|{: ^10}|{: ^10}|\n'.format("Day {}".format(element['day']), element['total']))
string += ('-'*23)
return string
else:
result = expenseInstance.sortCategory(args[0])
string = ""
string += '{:-^23}\n'.format(args[0])
for element in result:
string += ('|{: ^10}|{: ^10}|\n'.format("Day {}".format(element['day']), element['total']))
string += ('-'*23)
return string
@CommandHandler.addCommand("filter")
def parseFilter(expenseInstance, args):
"""
Prepares and validates the arguments for:
'filter <category>'
'filter <category> [<, =, >] <value>'
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: number of affected entries
Raises: ParameterError if any of the arguments can't be validated
"""
if len(args) < 1:
raise ParameterError("[command_filter]: Not enough parameters received")
elif len(args) == 1:
# this should be category listing
return expenseInstance.filterByCategory(args[0])
elif len(args) == 3:
if args[1] in ['<', '>', "="]:
if isInt(args[2]):
return expenseInstance.filterComplex(args[0], args[1], int(args[2]))
else:
raise ParameterError("[command_filter]: n must be an integer")
else:
raise ParameterError("[command_filter]: Expected '<' or '>' or '=', got '{}'".format(args[1]))
else:
raise ParameterError("[command_filter]: Invalid number of parameters received")
@CommandHandler.addCommand("undo")
def parseUndo(expenseInstance, args):
"""
Used to undo the last operations
Input: expenseInstance - instance of the Expenses class
args - array of arguments to be validated
Output: None
"""
expenseInstance.undoHistory()
@CommandHandler.addCommand("debug")
def parseDebug(expenseInstance, args):
result = expenseInstance.debug()
string = ""
string += '-' * 45
string += ('\n|{: ^10}|{: ^10}|{: ^10}|{: ^10}|\n'.format("Action", "Category", "Value", "Day"))
string += ('|{:-^10}|{:-^10}|{:-^10}|{:-^10}|\n'.format("", "", "", ""))
for pack in result:
for element in pack:
string += ('|{: ^10}|{: ^10}|{: ^10}|{: ^10}|\n'.format(element['action'], element['category'], element['amount'], element['day']))
string += ('|{:-^10}|{:-^10}|{:-^10}|{:-^10}|\n'.format("", "", "", ""))
return string
@CommandHandler.addCommand("help")
def doHelp(expenseInstance, args):
"""
Creates a string containing all the commands
Input: None
Output: a string with the commands
"""
helpLines = [
"help - shows this",
"add <sum:int> <category> - add to the current day an expense of <sum> RON for <category>",
"insert <day:int> <sum:int> <category> - insert to day <day> an expense of <sum> RON for food",
"remove <day:int> - remove all expenses for <day>",
"remove <start day:int> to <end day:int> - remove all expenses for between <start day> and <end day>",
"remove <category> - remove all the expenses for <category> from the current month",
"list - write the entire list of expenses",
"list <category> - write all the expenses for <category>",
"list <category> <comparator> <value:int> - writes all expenses for <category> with an amount of money <comparator> <value>. Ex: list internet = 44. Valid Comparators: <,=,>",
"sum <category> - write the total expense for category <category>",
"max day - write the day with the maximum expense",
"sort day - write the total daily expenses in ascending order by amount of money spent",
"sort <category> - write the daily expenses for category <category> in ascending order by amount of money ",
"filter <category> - keep only expenses in category <category> ",
"filter <category> <comparator> <value:int> - keep only expenses for <category> with amount of money <comparator> <value>. Ex: filter internet = 44. Valid Comparators: <,=,>",
"undo - undoes the last editing operation"
]
string = ""
for i in helpLines:
string += '{}\n'.format(i)
string += '*'*30
return string
def isInt(value):
    """
    Checks if a value is numeric
    Input: value - string to be checked
    Output: True/False depending on whether the string was numeric
    """
    try:
        int(value)
        return True
    except ValueError:
        return False
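# Dispatch sketch (illustrative, not part of the original module): the parsers
# above are registered on CommandHandler via @CommandHandler.addCommand and
# expect an instance of the project's Expenses class as their first argument.
# The exact Expenses constructor and CommandHandler lookup API are assumptions
# here, so the lines below are shown as comments only:
#
#   expenses = Expenses()
#   print(parseAdd(expenses, ["10", "food"]))   # add 10 RON to 'food' for today
#   print(parseList(expenses, []))              # table with all expenses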
|
|
"""The tests for the Season sensor platform."""
# pylint: disable=protected-access
import unittest
from datetime import datetime
from homeassistant.setup import setup_component
import homeassistant.components.sensor.season as season
from tests.common import get_test_home_assistant
HEMISPHERE_NORTHERN = {
'homeassistant': {
'latitude': '48.864716',
'longitude': '2.349014',
},
'sensor': {
'platform': 'season',
'type': 'astronomical',
}
}
HEMISPHERE_SOUTHERN = {
'homeassistant': {
'latitude': '-33.918861',
'longitude': '18.423300',
},
'sensor': {
'platform': 'season',
'type': 'astronomical',
}
}
HEMISPHERE_EQUATOR = {
'homeassistant': {
'latitude': '0',
'longitude': '-51.065100',
},
'sensor': {
'platform': 'season',
'type': 'astronomical',
}
}
HEMISPHERE_EMPTY = {
'homeassistant': {
},
'sensor': {
'platform': 'season',
'type': 'meteorological',
}
}
# pylint: disable=invalid-name
class TestSeason(unittest.TestCase):
"""Test the season platform."""
DEVICE = None
CONFIG_ASTRONOMICAL = {'type': 'astronomical'}
CONFIG_METEOROLOGICAL = {'type': 'meteorological'}
def add_entities(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICE = device
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_season_should_be_summer_northern_astronomical(self):
"""Test that season should be summer."""
# A known day in summer
summer_day = datetime(2017, 9, 3, 0, 0)
current_season = season.get_season(summer_day, season.NORTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_SUMMER == \
current_season
def test_season_should_be_summer_northern_meteorological(self):
"""Test that season should be summer."""
# A known day in summer
summer_day = datetime(2017, 8, 13, 0, 0)
current_season = season.get_season(summer_day, season.NORTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_SUMMER == \
current_season
def test_season_should_be_autumn_northern_astronomical(self):
"""Test that season should be autumn."""
# A known day in autumn
autumn_day = datetime(2017, 9, 23, 0, 0)
current_season = season.get_season(autumn_day, season.NORTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_AUTUMN == \
current_season
def test_season_should_be_autumn_northern_meteorological(self):
"""Test that season should be autumn."""
# A known day in autumn
autumn_day = datetime(2017, 9, 3, 0, 0)
current_season = season.get_season(autumn_day, season.NORTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_AUTUMN == \
current_season
def test_season_should_be_winter_northern_astronomical(self):
"""Test that season should be winter."""
# A known day in winter
winter_day = datetime(2017, 12, 25, 0, 0)
current_season = season.get_season(winter_day, season.NORTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_WINTER == \
current_season
def test_season_should_be_winter_northern_meteorological(self):
"""Test that season should be winter."""
# A known day in winter
winter_day = datetime(2017, 12, 3, 0, 0)
current_season = season.get_season(winter_day, season.NORTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_WINTER == \
current_season
def test_season_should_be_spring_northern_astronomical(self):
"""Test that season should be spring."""
# A known day in spring
spring_day = datetime(2017, 4, 1, 0, 0)
current_season = season.get_season(spring_day, season.NORTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_SPRING == \
current_season
def test_season_should_be_spring_northern_meteorological(self):
"""Test that season should be spring."""
# A known day in spring
spring_day = datetime(2017, 3, 3, 0, 0)
current_season = season.get_season(spring_day, season.NORTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_SPRING == \
current_season
def test_season_should_be_winter_southern_astronomical(self):
"""Test that season should be winter."""
# A known day in winter
winter_day = datetime(2017, 9, 3, 0, 0)
current_season = season.get_season(winter_day, season.SOUTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_WINTER == \
current_season
def test_season_should_be_winter_southern_meteorological(self):
"""Test that season should be winter."""
# A known day in winter
winter_day = datetime(2017, 8, 13, 0, 0)
current_season = season.get_season(winter_day, season.SOUTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_WINTER == \
current_season
def test_season_should_be_spring_southern_astronomical(self):
"""Test that season should be spring."""
# A known day in spring
spring_day = datetime(2017, 9, 23, 0, 0)
current_season = season.get_season(spring_day, season.SOUTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_SPRING == \
current_season
def test_season_should_be_spring_southern_meteorological(self):
"""Test that season should be spring."""
# A known day in spring
spring_day = datetime(2017, 9, 3, 0, 0)
current_season = season.get_season(spring_day, season.SOUTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_SPRING == \
current_season
def test_season_should_be_summer_southern_astronomical(self):
"""Test that season should be summer."""
# A known day in summer
summer_day = datetime(2017, 12, 25, 0, 0)
current_season = season.get_season(summer_day, season.SOUTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_SUMMER == \
current_season
def test_season_should_be_summer_southern_meteorological(self):
"""Test that season should be summer."""
# A known day in summer
summer_day = datetime(2017, 12, 3, 0, 0)
current_season = season.get_season(summer_day, season.SOUTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_SUMMER == \
current_season
def test_season_should_be_autumn_southern_astronomical(self):
"""Test that season should be spring."""
# A known day in spring
autumn_day = datetime(2017, 4, 1, 0, 0)
current_season = season.get_season(autumn_day, season.SOUTHERN,
season.TYPE_ASTRONOMICAL)
assert season.STATE_AUTUMN == \
current_season
def test_season_should_be_autumn_southern_meteorological(self):
"""Test that season should be autumn."""
# A known day in autumn
autumn_day = datetime(2017, 3, 3, 0, 0)
current_season = season.get_season(autumn_day, season.SOUTHERN,
season.TYPE_METEOROLOGICAL)
assert season.STATE_AUTUMN == \
current_season
def test_on_equator_results_in_none(self):
"""Test that season should be unknown."""
# A known day in summer if astronomical and northern
summer_day = datetime(2017, 9, 3, 0, 0)
current_season = season.get_season(summer_day,
season.EQUATOR,
season.TYPE_ASTRONOMICAL)
assert current_season is None
def test_setup_hemisphere_northern(self):
"""Test platform setup of northern hemisphere."""
self.hass.config.latitude = HEMISPHERE_NORTHERN[
'homeassistant']['latitude']
assert setup_component(self.hass, 'sensor', HEMISPHERE_NORTHERN)
assert self.hass.config.as_dict()['latitude'] == \
HEMISPHERE_NORTHERN['homeassistant']['latitude']
state = self.hass.states.get('sensor.season')
assert state.attributes.get('friendly_name') == 'Season'
def test_setup_hemisphere_southern(self):
"""Test platform setup of southern hemisphere."""
self.hass.config.latitude = HEMISPHERE_SOUTHERN[
'homeassistant']['latitude']
assert setup_component(self.hass, 'sensor', HEMISPHERE_SOUTHERN)
assert self.hass.config.as_dict()['latitude'] == \
HEMISPHERE_SOUTHERN['homeassistant']['latitude']
state = self.hass.states.get('sensor.season')
assert state.attributes.get('friendly_name') == 'Season'
def test_setup_hemisphere_equator(self):
"""Test platform setup of equator."""
self.hass.config.latitude = HEMISPHERE_EQUATOR[
'homeassistant']['latitude']
assert setup_component(self.hass, 'sensor', HEMISPHERE_EQUATOR)
assert self.hass.config.as_dict()['latitude'] == \
HEMISPHERE_EQUATOR['homeassistant']['latitude']
state = self.hass.states.get('sensor.season')
assert state.attributes.get('friendly_name') == 'Season'
def test_setup_hemisphere_empty(self):
"""Test platform setup of missing latlong."""
self.hass.config.latitude = None
assert setup_component(self.hass, 'sensor', HEMISPHERE_EMPTY)
assert self.hass.config.as_dict()['latitude'] is None
|
|
from mmtf.codecs import encode_array
import msgpack
from mmtf.utils import constants
def make_entity_dict(chain_indices,sequence,description,entity_type):
out_d = {}
out_d["description"] = description
out_d["type"] = entity_type
out_d["chainIndexList"] = chain_indices
out_d["sequence"] = sequence
return out_d
class Group(object):
def __eq__(self, other):
"""Function to define equality"""
if self.atom_name_list != other.atom_name_list:
return False
if self.charge_list != other.charge_list:
return False
if self.element_list != other.element_list:
return False
if self.group_type != other.group_type:
return False
if self.group_name != other.group_name:
return False
if self.single_letter_code != other.single_letter_code:
return False
if self.bond_atom_list != other.bond_atom_list:
return False
if self.bond_order_list != other.bond_order_list:
return False
return True
def __init__(self):
self.atom_name_list = []
self.bond_order_list = []
self.bond_atom_list = []
self.charge_list = []
self.element_list = []
self.group_name = constants.UNKOWN_GROUP_NAME
self.group_type = constants.UNKOWN_GROUP_TYPE
self.single_letter_code = constants.UNKNOWN_SL
def convert_to_dict(self):
"""Convert the group object to an appropriate DICT"""
out_dict = {}
out_dict["groupName"] = self.group_name
out_dict["atomNameList"] = self.atom_name_list
out_dict["elementList"] = self.element_list
out_dict["bondOrderList"] = self.bond_order_list
out_dict["bondAtomList"] = self.bond_atom_list
out_dict["formalChargeList"] = self.charge_list
out_dict["singleLetterCode"] = self.single_letter_code
out_dict["chemCompType"] = self.group_type
return out_dict
def get_unique_groups(input_list):
"""Function to get a unique list of groups."""
out_list = []
for item in input_list:
if item not in out_list:
out_list.append(item)
return out_list
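# Example: two freshly built Group objects compare equal (identical default
# fields), so only the first one is kept:
#   get_unique_groups([Group(), Group()])  ->  [<single Group instance>]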
class TemplateEncoder(object):
"""Template class to be used by third parties to pass data into other data structures."""
def init_structure(self, total_num_bonds, total_num_atoms,
total_num_groups, total_num_chains, total_num_models,
structure_id):
"""Initialise the structure object.
:param total_num_bonds: the number of bonds in the structure
:param total_num_atoms: the number of atoms in the structure
:param total_num_groups: the number of groups in the structure
:param total_num_chains: the number of chains in the structure
:param total_num_models: the number of models in the structure
:param structure_id: the id of the structure (e.g. PDB id)
"""
raise NotImplementedError
def set_atom_info(self, atom_name, serial_number, alternative_location_id,
x, y, z, occupancy, temperature_factor, element, charge):
"""Create an atom object an set the information.
:param atom_name: the atom name, e.g. CA for this atom
:param serial_number: the serial id of the atom (e.g. 1)
:param alternative_location_id: the alternative location id for the atom, if present
:param x: the x coordinate of the atom
:param y: the y coordinate of the atom
:param z: the z coordinate of the atom
:param occupancy: the occupancy of the atom
:param temperature_factor: the temperature factor of the atom
:param element: the element of the atom, e.g. C for carbon. According to IUPAC. Calcium is Ca
:param charge: the formal atomic charge of the atom
"""
raise NotImplementedError
def set_chain_info(self, chain_id, chain_name, num_groups):
"""Set the chain information.
:param chain_id: the asym chain id from mmCIF
:param chain_name: the auth chain id from mmCIF
:param num_groups: the number of groups this chain has
"""
raise NotImplementedError
def set_entity_info(self, chain_indices, sequence, description, entity_type):
"""Set the entity level information for the structure.
:param chain_indices: the indices of the chains for this entity
:param sequence: the one letter code sequence for this entity
:param description: the description for this entity
:param entity_type: the entity type (polymer,non-polymer,water)
"""
raise NotImplementedError
def set_group_info(self, group_name, group_number, insertion_code,
group_type, atom_count, bond_count, single_letter_code,
sequence_index, secondary_structure_type):
"""Set the information for a group
:param group_name: the name of this group,e.g. LYS
:param group_number: the residue number of this group
:param insertion_code: the insertion code for this group
:param group_type: a string indicating the type of group (as found in the chemcomp dictionary).
Empty string if none available.
:param atom_count: the number of atoms in the group
:param bond_count: the number of unique bonds in the group
:param single_letter_code: the single letter code of the group
:param sequence_index: the index of this group in the sequence defined by the entity
:param secondary_structure_type: the type of secondary structure used (types are according to DSSP and
number to type mappings are defined in the specification)
"""
raise NotImplementedError
def set_model_info(self, model_id, chain_count):
# FIXME model_id here is meaningless and potentially misleading.
"""Set the information for a model.
:param model_id: the index for the model
:param chain_count: the number of chains in the model
"""
raise NotImplementedError
def set_xtal_info(self, space_group, unit_cell):
"""Set the crystallographic information for the structure
:param space_group: the space group name, e.g. "P 21 21 21"
:param unit_cell: an array of length 6 with the unit cell parameters in order: a, b, c, alpha, beta, gamma
"""
raise NotImplementedError
def set_header_info(self, r_free, r_work, resolution, title,
deposition_date, release_date, experimental_methods):
"""Sets the header information.
:param r_free: the measured R-Free for the structure
:param r_work: the measure R-Work for the structure
:param resolution: the resolution of the structure
:param title: the title of the structure
:param deposition_date: the deposition date of the structure
:param release_date: the release date of the structure
:param experimental_methods: the list of experimental methods in the structure
"""
raise NotImplementedError
def set_bio_assembly_trans(self, bio_assembly_index, input_chain_indices, input_transform):
"""Set the Bioassembly transformation information. A single bioassembly can have multiple transforms,
:param bio_assembly_index: the integer index of the bioassembly
:param input_chain_indices: the list of integer indices for the chains of this bioassembly
:param input_transform: the list of doubles for this bioassembly transform"""
raise NotImplementedError
def finalize_structure(self):
"""Any functions needed to cleanup the structure."""
raise NotImplementedError
def set_group_bond(self, atom_index_one, atom_index_two, bond_order):
"""Add bonds within a group.
:param atom_index_one: the integer atom index (in the group) of the first partner in the bond
:param atom_index_two: the integer atom index (in the group) of the second partner in the bond
:param bond_order: the integer bond order
"""
raise NotImplementedError
def set_inter_group_bond(self, atom_index_one, atom_index_two, bond_order):
"""Add bonds between groups.
:param atom_index_one: the integer atom index (in the structure) of the first partner in the bond
:param atom_index_two: the integer atom index (in the structure) of the second partner in the bond
:param bond_order: the integer bond order
"""
raise NotImplementedError
class MMTFEncoder(TemplateEncoder):
def encode_data(self):
"""Encode the data back into a dict."""
output_data = {}
output_data["groupTypeList"] = encode_array(self.group_type_list, 4, 0)
output_data["xCoordList"] = encode_array(self.x_coord_list, 10, 1000)
output_data["yCoordList"] = encode_array(self.y_coord_list, 10, 1000)
output_data["zCoordList"] = encode_array(self.z_coord_list, 10, 1000)
output_data["bFactorList"] = encode_array(self.b_factor_list, 10, 100)
output_data["occupancyList"] = encode_array(self.occupancy_list, 9, 100)
output_data["atomIdList"] = encode_array(self.atom_id_list, 8, 0)
output_data["altLocList"] = encode_array(self.alt_loc_list, 6, 0)
output_data["insCodeList"] = encode_array(self.ins_code_list, 6, 0)
output_data["groupIdList"] = encode_array(self.group_id_list, 8, 0)
output_data["groupList"] = self.group_list
output_data["sequenceIndexList"] = encode_array(self.sequence_index_list, 8, 0)
output_data["chainNameList"] = encode_array(self.chain_name_list, 5, 4)
output_data["chainIdList"] = encode_array(self.chain_id_list, 5, 4)
output_data["bondAtomList"] = encode_array(self.bond_atom_list, 4, 0)
output_data["bondOrderList"] = encode_array(self.bond_order_list, 2, 0)
output_data["secStructList"] = encode_array(self.sec_struct_list, 2, 0)
output_data["chainsPerModel"] = self.chains_per_model
output_data["groupsPerChain"] = self.groups_per_chain
output_data["spaceGroup"] = self.space_group
output_data["mmtfVersion"] = self.mmtf_version
output_data["mmtfProducer"] = self.mmtf_producer
output_data["structureId"] = self.structure_id
output_data["entityList"] = self.entity_list
output_data["bioAssemblyList"] = self.bio_assembly
output_data["rFree"] = self.r_free
output_data["rWork"] = self.r_work
output_data["resolution"] = self.resolution
output_data["title"] = self.title
output_data["experimentalMethods"] = self.experimental_methods
output_data["depositionDate"] = self.deposition_date
output_data["releaseDate"] = self.release_date
output_data["unitCell"] = self.unit_cell
output_data["numBonds"] = self.num_bonds
output_data["numChains"] = self.num_chains
output_data["numModels"] = self.num_models
output_data["numAtoms"] = self.num_atoms
output_data["numGroups"] = self.num_groups
return output_data
def get_msgpack(self):
"""Get the msgpack of the encoded data."""
return msgpack.packb(self.encode_data(), use_bin_type=True)
def write_file(self, file_path):
with open(file_path, "wb") as out_f:
out_f.write(self.get_msgpack())
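# Typical call sequence for this encoder, kept as a comment sketch because it
# sits inside the class body. Counts and values are illustrative only, and
# set_model_info()/finalize_structure() are assumed to be implemented further
# down exactly as declared on TemplateEncoder:
#
#   enc = MMTFEncoder()
#   enc.init_structure(total_num_bonds=0, total_num_atoms=1, total_num_groups=1,
#                      total_num_chains=1, total_num_models=1, structure_id="1ABC")
#   enc.set_model_info(model_id=0, chain_count=1)
#   enc.set_chain_info(chain_id="A", chain_name="A", num_groups=1)
#   enc.set_entity_info(chain_indices=[0], sequence="", description="water",
#                       entity_type="water")
#   enc.set_group_info("HOH", 1, "", "non-polymer", 1, 0, "?", -1, 0)
#   enc.set_atom_info("O", 1, "", 1.0, 2.0, 3.0, 1.0, 0.0, "O", 0)
#   enc.finalize_structure()
#   enc.write_file("example.mmtf")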
def init_structure(self, total_num_bonds, total_num_atoms,
total_num_groups, total_num_chains, total_num_models,
structure_id):
"""Initialise the structure object.
:param total_num_bonds: the number of bonds in the structure
:param total_num_atoms: the number of atoms in the structure
:param total_num_groups: the number of groups in the structure
:param total_num_chains: the number of chains in the structure
:param total_num_models: the number of models in the structure
:param structure_id: the id of the structure (e.g. PDB id)
"""
self.mmtf_version = constants.MMTF_VERSION
self.mmtf_producer = constants.PRODUCER
self.num_atoms = total_num_atoms
self.num_bonds = total_num_bonds
self.num_groups = total_num_groups
self.num_chains = total_num_chains
self.num_models = total_num_models
self.structure_id = structure_id
# initialise the arrays
self.x_coord_list = []
self.y_coord_list = []
self.z_coord_list = []
self.group_type_list = []
self.entity_list = []
self.b_factor_list = []
self.occupancy_list = []
self.atom_id_list = []
self.alt_loc_list = []
self.ins_code_list = []
self.group_id_list = []
self.sequence_index_list = []
self.group_list = []
self.chain_name_list = []
self.chain_id_list = []
self.bond_atom_list = []
self.bond_order_list = []
self.sec_struct_list = []
self.chains_per_model = []
self.groups_per_chain = []
self.current_group = None
self.bio_assembly = []
def set_atom_info(self, atom_name, serial_number, alternative_location_id,
x, y, z, occupancy, temperature_factor, element, charge):
"""Create an atom object an set the information.
:param atom_name: the atom name, e.g. CA for this atom
:param serial_number: the serial id of the atom (e.g. 1)
:param alternative_location_id: the alternative location id for the atom, if present
:param x: the x coordinate of the atom
:param y: the y coordinate of the atom
:param z: the z coordinate of the atom
:param occupancy: the occupancy of the atom
:param temperature_factor: the temperature factor of the atom
:param element: the element of the atom, e.g. C for carbon. According to IUPAC. Calcium is Ca
:param charge: the formal atomic charge of the atom
"""
self.x_coord_list.append(x)
self.y_coord_list.append(y)
self.z_coord_list.append(z)
self.atom_id_list.append(serial_number)
self.alt_loc_list.append(alternative_location_id)
self.occupancy_list.append(occupancy)
self.b_factor_list.append(temperature_factor)
## Now add the group level data
self.current_group.atom_name_list.append(atom_name)
self.current_group.charge_list.append(charge)
self.current_group.element_list.append(element)
def set_chain_info(self, chain_id, chain_name, num_groups):
"""Set the chain information.
:param chain_id: the asym chain id from mmCIF
:param chain_name: the auth chain id from mmCIF
:param num_groups: the number of groups this chain has
"""
self.chain_id_list.append(chain_id)
self.chain_name_list.append(chain_name)
self.groups_per_chain.append(num_groups)
def set_entity_info(self, chain_indices, sequence, description, entity_type):
"""Set the entity level information for the structure.
:param chain_indices: the indices of the chains for this entity
:param sequence: the one letter code sequence for this entity
:param description: the description for this entity
:param entity_type: the entity type (polymer,non-polymer,water)
"""
self.entity_list.append(make_entity_dict(chain_indices,sequence,description,entity_type))
def set_group_info(self, group_name, group_number, insertion_code,
group_type, atom_count, bond_count, single_letter_code,
sequence_index, secondary_structure_type):
"""Set the information for a group
:param group_name: the name of this group, e.g. LYS
:param group_number: the residue number of this group
:param insertion_code: the insertion code for this group
:param group_type: a string indicating the type of group (as found in the chemcomp dictionary).
Empty string if none available.
:param atom_count: the number of atoms in the group
:param bond_count: the number of unique bonds in the group
:param single_letter_code: the single letter code of the group
:param sequence_index: the index of this group in the sequence defined by the entity
:param secondary_structure_type: the type of secondary structure used (types are according to DSSP and
number to type mappings are defined in the specification)
"""
# Add the group to the overall list - unless it's the first time round
if self.current_group is not None:
self.group_list.append(self.current_group)
# Add the group level information
self.group_id_list.append(group_number)
self.ins_code_list.append(insertion_code)
self.sequence_index_list.append(sequence_index)
self.sec_struct_list.append(secondary_structure_type)
self.current_group = Group()
self.current_group.group_name = group_name
self.current_group.group_type = group_type
self.current_group.single_letter_code = single_letter_code
def set_model_info(self, model_id, chain_count):
# FIXME model_id here is meaningless and potentially misleading.
"""Set the information for a model.
:param model_id: the index for the model
:param chain_count: the number of chains in the model
"""
self.chains_per_model.append(chain_count)
def set_xtal_info(self, space_group, unit_cell):
"""Set the crystallographic information for the structure
:param space_group: the space group name, e.g. "P 21 21 21"
:param unit_cell: an array of length 6 with the unit cell parameters in order: a, b, c, alpha, beta, gamma
"""
self.space_group = space_group
self.unit_cell = unit_cell
def set_header_info(self, r_free, r_work, resolution, title,
deposition_date, release_date, experimental_methods):
"""Sets the header information.
:param r_free: the measured R-Free for the structure
:param r_work: the measured R-Work for the structure
:param resolution: the resolution of the structure
:param title: the title of the structure
:param deposition_date: the deposition date of the structure
:param release_date: the release date of the structure
:param experimental_methods: the list of experimental methods in the structure
"""
self.r_free = r_free
self.r_work = r_work
self.resolution = resolution
self.title = title
self.deposition_date = deposition_date
self.release_date = release_date
self.experimental_methods = experimental_methods
def set_bio_assembly_trans(self, bio_assembly_index, input_chain_indices, input_transform):
"""Set the Bioassembly transformation information. A single bioassembly can have multiple transforms,
:param bio_assembly_index: the integer index of the bioassembly
:param input_chain_indices: the list of integer indices for the chains of this bioassembly
:param input_transform: the list of doubles for the transform of this bioassembly transform"""
this_bioass = None
for bioass in self.bio_assembly:
if bioass['name'] == str(bio_assembly_index):
this_bioass = bioass
break
if not this_bioass:
this_bioass = {"name": str(bio_assembly_index), 'transformList': []}
else:
self.bio_assembly.remove(this_bioass)
this_bioass['transformList'].append({'chainIndexList':input_chain_indices,'matrix': input_transform})
self.bio_assembly.append(this_bioass)
def finalize_structure(self):
"""Any functions needed to cleanup the structure."""
self.group_list.append(self.current_group)
group_set = get_unique_groups(self.group_list)
for item in self.group_list:
self.group_type_list.append(group_set.index(item))
self.group_list = [x.convert_to_dict() for x in group_set]
def set_group_bond(self, atom_index_one, atom_index_two, bond_order):
"""Add bonds within a group.
:param atom_index_one: the integer atom index (in the group) of the first partner in the bond
:param atom_index_two: the integer atom index (in the group) of the second partner in the bond
:param bond_order: the integer bond order
"""
self.current_group.bond_atom_list.append(atom_index_one)
self.current_group.bond_atom_list.append(atom_index_two)
self.current_group.bond_order_list.append(bond_order)
def set_inter_group_bond(self, atom_index_one, atom_index_two, bond_order):
"""Add bonds between groups.
:param atom_index_one: the integer atom index (in the structure) of the first partner in the bond
:param atom_index_two: the integer atom index (in the structure) of the second partner in the bond
:param bond_order: the integer bond order
"""
self.bond_atom_list.append(atom_index_one)
self.bond_atom_list.append(atom_index_two)
self.bond_order_list.append(bond_order)
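# ---------------------------------------------------------------------------
# Minimal usage sketch for the encoder methods above, building a made-up
# one-atom structure and writing it out. The class name and import path
# (MMTFEncoder in mmtf.api.mmtf_writer) are assumptions based on mmtf-python;
# all structural values below are illustrative, not real PDB content.
#
#     from mmtf.api.mmtf_writer import MMTFEncoder  # assumed import path
#
#     enc = MMTFEncoder()
#     enc.init_structure(total_num_bonds=0, total_num_atoms=1, total_num_groups=1,
#                        total_num_chains=1, total_num_models=1, structure_id="1XXX")
#     enc.set_header_info(r_free=None, r_work=None, resolution=None, title="demo",
#                         deposition_date=None, release_date=None,
#                         experimental_methods=["THEORETICAL MODEL"])
#     enc.set_xtal_info(space_group="P 1",
#                       unit_cell=[1.0, 1.0, 1.0, 90.0, 90.0, 90.0])
#     enc.set_model_info(model_id=0, chain_count=1)
#     enc.set_chain_info(chain_id="A", chain_name="A", num_groups=1)
#     enc.set_entity_info(chain_indices=[0], sequence="G",
#                         description="demo entity", entity_type="polymer")
#     enc.set_group_info(group_name="GLY", group_number=1, insertion_code="",
#                        group_type="L-PEPTIDE LINKING", atom_count=1, bond_count=0,
#                        single_letter_code="G", sequence_index=0,
#                        secondary_structure_type=-1)  # -1: not assigned
#     enc.set_atom_info(atom_name="CA", serial_number=1, alternative_location_id="",
#                       x=0.0, y=0.0, z=0.0, occupancy=1.0, temperature_factor=0.0,
#                       element="C", charge=0)
#     enc.finalize_structure()
#     enc.write_file("demo.mmtf")
# ---------------------------------------------------------------------------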
|
|
import datetime
import logging
from marcottimls.etl import PersonIngest, SeasonalDataIngest
from marcottimls.models import (Countries, Players, PlayerSalaries, PartialTenures, AcquisitionPaths,
AcquisitionType, PlayerDrafts, Competitions, CompetitionSeasons, Clubs,
Years, Seasons)
logger = logging.getLogger(__name__)
class AcquisitionIngest(PersonIngest):
BATCH_SIZE = 200
def parse_file(self, rows):
inserts = 0
insertion_list = []
logger.info("Ingesting Player Acquisition Paths...")
for keys in rows:
person_dict = self.get_person_data(**keys)
country_name = self.column_unicode("Country", **keys)
path = self.column("Acquisition", **keys)
acquisition_year = self.column("Year", **keys)
try:
acquisition_path = AcquisitionType.from_string(path)
except ValueError:
acquisition_path = None
country_id = self.get_id(Countries, name=country_name)
if country_id is None:
logger.error(u"Cannot insert Acquisition record for {}: "
u"Database error involving Country record {}".format(person_dict, country_name))
continue
year_id = self.get_id(Years, yr=acquisition_year)
if year_id is None:
logger.error(u"Cannot insert Acquisition record for {}: "
u"Database error involving Year record {}".format(person_dict, acquisition_year))
continue
player_dict = dict(country_id=country_id, **person_dict)
player_id = self.get_id(Players, **player_dict)
if player_id is None:
logger.error(u"Cannot insert Acquisition record for {}: "
u"Database error involving Player record".format(player_dict))
continue
acquisition_dict = dict(player_id=player_id, year_id=year_id, path=acquisition_path)
if not self.record_exists(AcquisitionPaths, **acquisition_dict):
acquisition_record = AcquisitionPaths(**acquisition_dict)
if acquisition_path in [AcquisitionType.college_draft, AcquisitionType.inaugural_draft,
AcquisitionType.super_draft, AcquisitionType.supplemental_draft]:
acquisition_record = self.parse_draft_data(acquisition_dict, keys)
if acquisition_record is not None:
insertion_list.append(acquisition_record)
inserted, insertion_list = self.bulk_insert(insertion_list, AcquisitionIngest.BATCH_SIZE)
inserts += inserted
if inserted and not inserts % AcquisitionIngest.BATCH_SIZE:
logger.info("{} records inserted".format(inserts))
self.session.add_all(insertion_list)
self.session.commit()
inserts += len(insertion_list)
logger.info("Total {} Acquisition records inserted and committed to database".format(inserts))
logger.info("Acquisition Ingestion complete.")
def parse_draft_data(self, acq_tuple, keys):
draft_round = self.column_int("Round", **keys)
draft_selection = self.column_int("Pick", **keys)
is_generation_adidas = self.column_bool("Gen Adidas", **keys)
drafting_club = self.column_unicode("Acquiring Club", **keys)
club_id = self.get_id(Clubs, name=drafting_club)
if club_id is None:
logger.error(u"Cannot insert {p[Acquisition]} record for {p[First Name]} {p[Last Name]}: "
u"Club {p[Acquiring Club]} not in database".format(p=keys))
return None
return PlayerDrafts(round=draft_round, selection=draft_selection,
gen_adidas=is_generation_adidas, club_id=club_id, **acq_tuple)
class PlayerSalaryIngest(SeasonalDataIngest):
BATCH_SIZE = 100
def parse_file(self, rows):
inserts = 0
insertion_list = []
logger.info("Ingesting Player Salaries...")
for keys in rows:
competition_name = self.column_unicode("Competition", **keys)
season_name = self.column("Season", **keys)
club_symbol = self.column("Club Symbol", **keys)
last_name = self.column_unicode("Last Name", **keys)
first_name = self.column_unicode("First Name", **keys)
base_salary = int(self.column_float("Base", **keys) * 100)
guar_salary = int(self.column_float("Guaranteed", **keys) * 100)
competition_id = self.get_id(Competitions, name=competition_name)
if competition_id is None:
logger.error(u"Cannot insert Salary record for {} {}: "
u"Competition {} not in database".format(first_name, last_name, competition_name))
continue
season_id = self.get_id(Seasons, name=season_name)
if season_id is None:
logger.error(u"Cannot insert Salary record for {} {}: "
u"Season {} not in database".format(first_name, last_name, season_name))
continue
club_id = self.get_id(Clubs, symbol=club_symbol)
if club_id is None:
logger.error(u"Cannot insert Salary record for {} {}: "
u"Club {} not in database".format(first_name, last_name, club_symbol))
continue
player_id = self.get_player_from_name(first_name, last_name)
if player_id is None:
logger.error(u"Cannot insert Salary record for {} {}: "
u"Player not in database".format(first_name, last_name))
continue
salary_dict = dict(player_id=player_id, club_id=club_id,
competition_id=competition_id, season_id=season_id)
if not self.record_exists(PlayerSalaries, **salary_dict):
insertion_list.append(PlayerSalaries(base_salary=base_salary,
avg_guaranteed=guar_salary,
**salary_dict))
inserted, insertion_list = self.bulk_insert(insertion_list, PlayerSalaryIngest.BATCH_SIZE)
inserts += inserted
if inserted and not inserts % PlayerSalaryIngest.BATCH_SIZE:
logger.info("{} records inserted".format(inserts))
self.session.add_all(insertion_list)
self.session.commit()
inserts += len(insertion_list)
logger.info("Total {} Player Salary records inserted and committed to database".format(inserts))
logger.info("Player Salary Ingestion complete.")
class PartialTenureIngest(SeasonalDataIngest):
BATCH_SIZE = 10
def season_week(self, competition_id, season_id, **kwargs):
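"""Return the 1-based week of a competition season for an ISO 'YYYY-MM-DD'
date passed as 'start' or 'end'; a missing start date maps to week 1 and a
missing end date maps to the final week of the season.
"""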
compseason = self.session.query(CompetitionSeasons).filter_by(
competition_id=competition_id, season_id=season_id).one()
if 'start' in kwargs:
ref_date_string = kwargs.get('start')
if ref_date_string is None:
return 1
elif 'end' in kwargs:
ref_date_string = kwargs.get('end')
if ref_date_string is None:
date_delta = compseason.end_date - compseason.start_date
return date_delta.days // 7 + 1
else:
logger.error("No 'start' or 'end' parameter in season_week call")
year, month, day = [int(x) for x in ref_date_string.split('-')]
ref_date = datetime.date(year, month, day)
date_delta = ref_date - compseason.start_date
return date_delta.days // 7 + 1
def parse_file(self, rows):
inserts = 0
insertion_list = []
logger.info("Ingesting Partial Tenure records...")
for keys in rows:
competition_name = self.column_unicode("Competition", **keys)
season_name = self.column("Season", **keys)
club_symbol = self.column("Club Symbol", **keys)
last_name = self.column_unicode("Last Name", **keys)
first_name = self.column_unicode("First Name", **keys)
start_week = self.column_int("Start Term", **keys)
end_week = self.column_int("End Term", **keys)
start_date_iso = self.column("Start Date", **keys) if "Start Date" in keys else None
end_date_iso = self.column("End Date", **keys) if "End Date" in keys else None
competition_id = self.get_id(Competitions, name=competition_name)
if competition_id is None:
logger.error(u"Cannot insert Partial Tenure record for {} {}: "
u"Competition {} not in database".format(first_name, last_name, competition_name))
continue
season_id = self.get_id(Seasons, name=season_name)
if season_id is None:
logger.error(u"Cannot insert Partial Tenure record for {} {}: "
u"Season {} not in database".format(first_name, last_name, season_name))
continue
club_id = self.get_id(Clubs, symbol=club_symbol)
if club_id is None:
logger.error(u"Cannot insert Partial Tenure record for {} {}: "
u"Club {} not in database".format(first_name, last_name, club_symbol))
continue
player_id = self.get_player_from_name(first_name, last_name)
if player_id is None:
logger.error(u"Cannot insert Partial Tenure record for {} {}: "
u"Player not in database".format(first_name, last_name))
continue
start_week = start_week or self.season_week(competition_id, season_id, start=start_date_iso)
end_week = end_week or self.season_week(competition_id, season_id, end=end_date_iso)
partials_dict = dict(player_id=player_id, club_id=club_id,
competition_id=competition_id,
season_id=season_id)
if not self.record_exists(PartialTenures, **partials_dict):
insertion_list.append(PartialTenures(start_week=start_week,
end_week=end_week,
**partials_dict))
inserted, insertion_list = self.bulk_insert(insertion_list, PartialTenureIngest.BATCH_SIZE)
inserts += inserted
self.session.add_all(insertion_list)
self.session.commit()
inserts += len(insertion_list)
logger.info("Total {} Partial Tenure records inserted and committed to database".format(inserts))
logger.info("Partial Tenure Ingestion complete.")
|
|
from __future__ import division, absolute_import, print_function
import warnings
import sys
import os
import itertools
import textwrap
import pytest
import weakref
import numpy as np
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_less, build_err_msg, raises,
assert_raises, assert_warns, assert_no_warnings, assert_allclose,
assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
class _GenericTest(object):
def _test_equal(self, a, b):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
with assert_raises(AssertionError):
self._assert_func(a, b)
def test_array_rank1_eq(self):
"""Test two equal array of rank 1 are found equal."""
a = np.array([1, 2])
b = np.array([1, 2])
self._test_equal(a, b)
def test_array_rank1_noteq(self):
"""Test two different array of rank 1 are found not equal."""
a = np.array([1, 2])
b = np.array([2, 2])
self._test_not_equal(a, b)
def test_array_rank2_eq(self):
"""Test two equal array of rank 2 are found equal."""
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 2], [3, 4]])
self._test_equal(a, b)
def test_array_diffshape(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array([1, 2])
b = np.array([[1, 2], [1, 2]])
self._test_not_equal(a, b)
def test_objarray(self):
"""Test object arrays."""
a = np.array([1, 1], dtype=object)
self._test_equal(a, 1)
def test_array_likes(self):
self._test_equal([1, 2, 3], (1, 2, 3))
class TestArrayEqual(_GenericTest):
def setup(self):
self._assert_func = assert_array_equal
def test_generic_rank1(self):
"""Test rank 1 array for all dtypes."""
def foo(t):
a = np.empty(2, t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_generic_rank3(self):
"""Test rank 3 array for all dtypes."""
def foo(t):
a = np.empty((4, 2, 3), t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_nan_array(self):
"""Test arrays with nan values in them."""
a = np.array([1, 2, np.nan])
b = np.array([1, 2, np.nan])
self._test_equal(a, b)
c = np.array([1, 2, 3])
self._test_not_equal(c, b)
def test_string_arrays(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array(['floupi', 'floupa'])
b = np.array(['floupi', 'floupa'])
self._test_equal(a, b)
c = np.array(['floupipi', 'floupa'])
self._test_not_equal(c, b)
def test_recarrays(self):
"""Test record arrays."""
a = np.empty(2, [('floupi', float), ('floupa', float)])
a['floupi'] = [1, 2]
a['floupa'] = [1, 2]
b = a.copy()
self._test_equal(a, b)
c = np.empty(2, [('floupipi', float), ('floupa', float)])
c['floupipi'] = a['floupi'].copy()
c['floupa'] = a['floupa'].copy()
with suppress_warnings() as sup:
l = sup.record(FutureWarning, message="elementwise == ")
self._test_not_equal(c, b)
assert_equal(len(l), 1)
def test_masked_nan_inf(self):
# Regression test for gh-11121
a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
b = np.array([3., np.nan, 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
b = np.array([np.inf, 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
def test_subclass_that_overrides_eq(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return bool(np.equal(self, other).all())
def __ne__(self, other):
return not self == other
a = np.array([1., 2.]).view(MyArray)
b = np.array([2., 3.]).view(MyArray)
assert_(type(a == a), bool)
assert_(a == a)
assert_(a != b)
self._test_equal(a, a)
self._test_not_equal(a, b)
self._test_not_equal(b, a)
@pytest.mark.skipif(
not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')
def test_subclass_that_does_not_implement_npall(self):
class MyArray(np.ndarray):
def __array_function__(self, *args, **kwargs):
return NotImplemented
a = np.array([1., 2.]).view(MyArray)
b = np.array([2., 3.]).view(MyArray)
with assert_raises(TypeError):
np.all(a)
self._test_equal(a, a)
self._test_not_equal(a, b)
self._test_not_equal(b, a)
class TestBuildErrorMessage(object):
def test_build_err_msg_defaults(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg)
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
'2.00003, 3.00004])')
assert_equal(a, b)
def test_build_err_msg_no_verbose(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, verbose=False)
b = '\nItems are not equal: There is a mismatch'
assert_equal(a, b)
def test_build_err_msg_custom_names(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
'1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
'3.00004])')
assert_equal(a, b)
def test_build_err_msg_custom_precision(self):
x = np.array([1.000000001, 2.00002, 3.00003])
y = np.array([1.000000002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, precision=10)
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array(['
'1.000000002, 2.00003 , 3.00004 ])')
assert_equal(a, b)
class TestEqual(TestArrayEqual):
def setup(self):
self._assert_func = assert_equal
def test_nan_items(self):
self._assert_func(np.nan, np.nan)
self._assert_func([np.nan], [np.nan])
self._test_not_equal(np.nan, [np.nan])
self._test_not_equal(np.nan, 1)
def test_inf_items(self):
self._assert_func(np.inf, np.inf)
self._assert_func([np.inf], [np.inf])
self._test_not_equal(np.inf, [np.inf])
def test_datetime(self):
self._test_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-01", "s")
)
self._test_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-01", "m")
)
# gh-10081
self._test_not_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-02", "s")
)
self._test_not_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-02", "m")
)
def test_nat_items(self):
# not a datetime
nadt_no_unit = np.datetime64("NaT")
nadt_s = np.datetime64("NaT", "s")
nadt_d = np.datetime64("NaT", "ns")
# not a timedelta
natd_no_unit = np.timedelta64("NaT")
natd_s = np.timedelta64("NaT", "s")
natd_d = np.timedelta64("NaT", "ns")
dts = [nadt_no_unit, nadt_s, nadt_d]
tds = [natd_no_unit, natd_s, natd_d]
for a, b in itertools.product(dts, dts):
self._assert_func(a, b)
self._assert_func([a], [b])
self._test_not_equal([a], b)
for a, b in itertools.product(tds, tds):
self._assert_func(a, b)
self._assert_func([a], [b])
self._test_not_equal([a], b)
for a, b in itertools.product(tds, dts):
self._test_not_equal(a, b)
self._test_not_equal(a, [b])
self._test_not_equal([a], [b])
self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
self._test_not_equal([a], np.timedelta64(123, "s"))
self._test_not_equal([b], np.timedelta64(123, "s"))
def test_non_numeric(self):
self._assert_func('ab', 'ab')
self._test_not_equal('ab', 'abb')
def test_complex_item(self):
self._assert_func(complex(1, 2), complex(1, 2))
self._assert_func(complex(1, np.nan), complex(1, np.nan))
self._test_not_equal(complex(1, np.nan), complex(1, 2))
self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
def test_negative_zero(self):
self._test_not_equal(np.PZERO, np.NZERO)
def test_complex(self):
x = np.array([complex(1, 2), complex(1, np.nan)])
y = np.array([complex(1, 2), complex(1, 2)])
self._assert_func(x, x)
self._test_not_equal(x, y)
def test_error_message(self):
with pytest.raises(AssertionError) as exc_info:
self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
msg = str(exc_info.value)
msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
msg_reference = textwrap.dedent("""\
Arrays are not equal
(shapes (2,), (1, 2) mismatch)
x: array([1, 2])
y: array([[1, 2]])""")
try:
assert_equal(msg, msg_reference)
except AssertionError:
assert_equal(msg2, msg_reference)
def test_object(self):
#gh-12942
import datetime
a = np.array([datetime.datetime(2000, 1, 1),
datetime.datetime(2000, 1, 2)])
self._test_not_equal(a, a[::-1])
class TestArrayAlmostEqual(_GenericTest):
def setup(self):
self._assert_func = assert_array_almost_equal
def test_closeness(self):
# Note that in the course of time we ended up with
# `abs(x - y) < 1.5 * 10**(-decimal)`
# instead of the previously documented
# `abs(x - y) < 0.5 * 10**(-decimal)`
# so this check serves to preserve the wrongness.
# test scalars
self._assert_func(1.499999, 0.0, decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func(1.5, 0.0, decimal=0))
# test arrays
self._assert_func([1.499999], [0.0], decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func([1.5], [0.0], decimal=0))
def test_simple(self):
x = np.array([1234.2222])
y = np.array([1234.2223])
self._assert_func(x, y, decimal=3)
self._assert_func(x, y, decimal=4)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, decimal=5))
def test_nan(self):
anan = np.array([np.nan])
aone = np.array([1])
ainf = np.array([np.inf])
self._assert_func(anan, anan)
assert_raises(AssertionError,
lambda: self._assert_func(anan, aone))
assert_raises(AssertionError,
lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError,
lambda: self._assert_func(ainf, anan))
def test_inf(self):
a = np.array([[1., 2.], [3., 4.]])
b = a.copy()
a[0, 0] = np.inf
assert_raises(AssertionError,
lambda: self._assert_func(a, b))
b[0, 0] = -np.inf
assert_raises(AssertionError,
lambda: self._assert_func(a, b))
def test_subclass(self):
a = np.array([[1., 2.], [3., 4.]])
b = np.ma.masked_array([[1., 2.], [0., 4.]],
[[False, False], [True, False]])
self._assert_func(a, b)
self._assert_func(b, a)
self._assert_func(b, b)
# Test fully masked as well (see gh-11123).
a = np.ma.MaskedArray(3.5, mask=True)
b = np.array([3., 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.masked
b = np.array([3., 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
b = np.array([1., 2., 3.])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
b = np.array(1.)
self._test_equal(a, b)
self._test_equal(b, a)
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return super(MyArray, self).__eq__(other).view(np.ndarray)
def __lt__(self, other):
return super(MyArray, self).__lt__(other).view(np.ndarray)
def all(self, *args, **kwargs):
raise NotImplementedError
a = np.array([1., 2.]).view(MyArray)
self._assert_func(a, a)
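# A standalone restatement of the closeness rule documented in the
# test_closeness methods above and below, abs(x - y) < 1.5 * 10**(-decimal);
# the numbers are illustrative only:
#
#     from numpy.testing import assert_almost_equal, assert_raises
#     assert_almost_equal(1.499999, 0.0, decimal=0)   # 1.499999 < 1.5, passes
#     assert_raises(AssertionError,
#                   assert_almost_equal, 1.5, 0.0, decimal=0)  # 1.5 is not < 1.5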
class TestAlmostEqual(_GenericTest):
def setup(self):
self._assert_func = assert_almost_equal
def test_closeness(self):
# Note that in the course of time we ended up with
# `abs(x - y) < 1.5 * 10**(-decimal)`
# instead of the previously documented
# `abs(x - y) < 0.5 * 10**(-decimal)`
# so this check serves to preserve the wrongness.
# test scalars
self._assert_func(1.499999, 0.0, decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func(1.5, 0.0, decimal=0))
# test arrays
self._assert_func([1.499999], [0.0], decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func([1.5], [0.0], decimal=0))
def test_nan_item(self):
self._assert_func(np.nan, np.nan)
assert_raises(AssertionError,
lambda: self._assert_func(np.nan, 1))
assert_raises(AssertionError,
lambda: self._assert_func(np.nan, np.inf))
assert_raises(AssertionError,
lambda: self._assert_func(np.inf, np.nan))
def test_inf_item(self):
self._assert_func(np.inf, np.inf)
self._assert_func(-np.inf, -np.inf)
assert_raises(AssertionError,
lambda: self._assert_func(np.inf, 1))
assert_raises(AssertionError,
lambda: self._assert_func(-np.inf, np.inf))
def test_simple_item(self):
self._test_not_equal(1, 2)
def test_complex_item(self):
self._assert_func(complex(1, 2), complex(1, 2))
self._assert_func(complex(1, np.nan), complex(1, np.nan))
self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
self._test_not_equal(complex(1, np.nan), complex(1, 2))
self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
def test_complex(self):
x = np.array([complex(1, 2), complex(1, np.nan)])
z = np.array([complex(1, 2), complex(np.nan, 1)])
y = np.array([complex(1, 2), complex(1, 2)])
self._assert_func(x, x)
self._test_not_equal(x, y)
self._test_not_equal(x, z)
def test_error_message(self):
"""Check the message is formatted correctly for the decimal value.
Also check the message when input includes inf or nan (gh12200)"""
x = np.array([1.00000000001, 2.00000000002, 3.00003])
y = np.array([1.00000000002, 2.00000000003, 3.00004])
# Test with a different amount of decimal digits
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y, decimal=12)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatch: 100%')
assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
assert_equal(
msgs[6],
' x: array([1.00000000001, 2.00000000002, 3.00003 ])')
assert_equal(
msgs[7],
' y: array([1.00000000002, 2.00000000003, 3.00004 ])')
# With the default value of decimal digits, only the 3rd element
# differs. Note that we only check for the formatting of the arrays
# themselves.
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatch: 33.3%')
assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')
# Check the error message when input includes inf
x = np.array([np.inf, 0])
y = np.array([np.inf, 1])
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatch: 50%')
assert_equal(msgs[4], 'Max absolute difference: 1.')
assert_equal(msgs[5], 'Max relative difference: 1.')
assert_equal(msgs[6], ' x: array([inf, 0.])')
assert_equal(msgs[7], ' y: array([inf, 1.])')
# Check the error message when dividing by zero
x = np.array([1, 2])
y = np.array([0, 0])
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatch: 100%')
assert_equal(msgs[4], 'Max absolute difference: 2')
assert_equal(msgs[5], 'Max relative difference: inf')
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return super(MyArray, self).__eq__(other).view(np.ndarray)
def __lt__(self, other):
return super(MyArray, self).__lt__(other).view(np.ndarray)
def all(self, *args, **kwargs):
raise NotImplementedError
a = np.array([1., 2.]).view(MyArray)
self._assert_func(a, a)
class TestApproxEqual(object):
def setup(self):
self._assert_func = assert_approx_equal
def test_simple_arrays(self):
x = np.array([1234.22])
y = np.array([1234.23])
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_simple_items(self):
x = 1234.22
y = 1234.23
self._assert_func(x, y, significant=4)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_nan_array(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
def test_nan_items(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
class TestArrayAssertLess(object):
def setup(self):
self._assert_func = assert_array_less
def test_simple_arrays(self):
x = np.array([1.1, 2.2])
y = np.array([1.2, 2.3])
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([1.0, 2.3])
assert_raises(AssertionError, lambda: self._assert_func(x, y))
assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_rank2(self):
x = np.array([[1.1, 2.2], [3.3, 4.4]])
y = np.array([[1.2, 2.3], [3.4, 4.5]])
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([[1.0, 2.3], [3.4, 4.5]])
assert_raises(AssertionError, lambda: self._assert_func(x, y))
assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_rank3(self):
x = np.ones(shape=(2, 2, 2))
y = np.ones(shape=(2, 2, 2))+1
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y[0, 0, 0] = 0
assert_raises(AssertionError, lambda: self._assert_func(x, y))
assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_simple_items(self):
x = 1.1
y = 2.2
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([2.2, 3.3])
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([1.0, 3.3])
assert_raises(AssertionError, lambda: self._assert_func(x, y))
def test_nan_noncompare(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
def test_nan_noncompare_array(self):
x = np.array([1.1, 2.2, 3.3])
anan = np.array(np.nan)
assert_raises(AssertionError, lambda: self._assert_func(x, anan))
assert_raises(AssertionError, lambda: self._assert_func(anan, x))
x = np.array([1.1, 2.2, np.nan])
assert_raises(AssertionError, lambda: self._assert_func(x, anan))
assert_raises(AssertionError, lambda: self._assert_func(anan, x))
y = np.array([1.0, 2.0, np.nan])
self._assert_func(y, x)
assert_raises(AssertionError, lambda: self._assert_func(x, y))
def test_inf_compare(self):
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(aone, ainf)
self._assert_func(-ainf, aone)
self._assert_func(-ainf, ainf)
assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
def test_inf_compare_array(self):
x = np.array([1.1, 2.2, np.inf])
ainf = np.array(np.inf)
assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
self._assert_func(-ainf, x)
@pytest.mark.skip(reason="The raises decorator depends on Nose")
class TestRaises(object):
def setup(self):
class MyException(Exception):
pass
self.e = MyException
def raises_exception(self, e):
raise e
def does_not_raise_exception(self):
pass
def test_correct_catch(self):
raises(self.e)(self.raises_exception)(self.e) # raises?
def test_wrong_exception(self):
try:
raises(self.e)(self.raises_exception)(RuntimeError) # raises?
except RuntimeError:
return
else:
raise AssertionError("should have caught RuntimeError")
def test_catch_no_raise(self):
try:
raises(self.e)(self.does_not_raise_exception)() # raises?
except AssertionError:
return
else:
raise AssertionError("should have raised an AssertionError")
class TestWarns(object):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
before_filters = sys.modules['warnings'].filters[:]
assert_equal(assert_warns(UserWarning, f), 3)
after_filters = sys.modules['warnings'].filters
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
# Check that the warnings state is unchanged
assert_equal(before_filters, after_filters,
"assert_warns does not preserver warnings state")
def test_context_manager(self):
before_filters = sys.modules['warnings'].filters[:]
with assert_warns(UserWarning):
warnings.warn("yo")
after_filters = sys.modules['warnings'].filters
def no_warnings():
with assert_no_warnings():
warnings.warn("yo")
assert_raises(AssertionError, no_warnings)
assert_equal(before_filters, after_filters,
"assert_warns does not preserver warnings state")
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
try:
# Should raise a DeprecationWarning
assert_warns(UserWarning, f)
failed = True
except DeprecationWarning:
pass
if failed:
raise AssertionError("wrong warning caught by assert_warn")
class TestAssertAllclose(object):
def test_simple(self):
x = 1e-3
y = 1e-9
assert_allclose(x, y, atol=1)
assert_raises(AssertionError, assert_allclose, x, y)
a = np.array([x, y, x, y])
b = np.array([x, y, x, x])
assert_allclose(a, b, atol=1)
assert_raises(AssertionError, assert_allclose, a, b)
b[-1] = y * (1 + 1e-8)
assert_allclose(a, b)
assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
assert_allclose(6, 10, rtol=0.5)
assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
def test_min_int(self):
a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
# Should not raise:
assert_allclose(a, a)
def test_report_fail_percentage(self):
a = np.array([1, 1, 1, 1])
b = np.array([1, 1, 1, 2])
with pytest.raises(AssertionError) as exc_info:
assert_allclose(a, b)
msg = str(exc_info.value)
assert_('Mismatch: 25%\nMax absolute difference: 1\n'
'Max relative difference: 0.5' in msg)
def test_equal_nan(self):
a = np.array([np.nan])
b = np.array([np.nan])
# Should not raise:
assert_allclose(a, b, equal_nan=True)
def test_not_equal_nan(self):
a = np.array([np.nan])
b = np.array([np.nan])
assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
def test_equal_nan_default(self):
# Make sure equal_nan default behavior remains unchanged. (All
# of these functions use assert_array_compare under the hood.)
# None of these should raise.
a = np.array([np.nan])
b = np.array([np.nan])
assert_array_equal(a, b)
assert_array_almost_equal(a, b)
assert_array_less(a, b)
assert_allclose(a, b)
def test_report_max_relative_error(self):
a = np.array([0, 1])
b = np.array([0, 2])
with pytest.raises(AssertionError) as exc_info:
assert_allclose(a, b)
msg = str(exc_info.value)
assert_('Max relative difference: 0.5' in msg)
class TestArrayAlmostEqualNulp(object):
def test_float64_pass(self):
# The number of units of least precision
# In this case, use a few places above the lowest level (i.e. nulp=1)
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
# Addition
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
# Subtraction
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
def test_float64_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
def test_float32_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
def test_float32_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
def test_float16_pass(self):
nulp = 5
x = np.linspace(-4, 4, 10, dtype=np.float16)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
def test_float16_fail(self):
nulp = 5
x = np.linspace(-4, 4, 10, dtype=np.float16)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
def test_complex128_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
# The test condition needs to be at least a factor of sqrt(2) smaller
# because the real and imaginary parts both change
y = x + x*eps*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x - x*epsneg*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
def test_complex128_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
# The test condition needs to be at least a factor of sqrt(2) smaller
# because the real and imaginary parts both change
y = x + x*eps*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
y = x - x*epsneg*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
def test_complex64_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x + x*eps*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x - x*epsneg*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
def test_complex64_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
y = x + x*eps*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
y = x - x*epsneg*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
class TestULP(object):
def test_equal(self):
x = np.random.randn(10)
assert_array_max_ulp(x, x, maxulp=0)
def test_single(self):
# Generate 1 + small deviation, check that adding eps gives a few ULP
x = np.ones(10).astype(np.float32)
x += 0.01 * np.random.randn(10).astype(np.float32)
eps = np.finfo(np.float32).eps
assert_array_max_ulp(x, x+eps, maxulp=20)
def test_double(self):
# Generate 1 + small deviation, check that adding eps gives a few ULP
x = np.ones(10).astype(np.float64)
x += 0.01 * np.random.randn(10).astype(np.float64)
eps = np.finfo(np.float64).eps
assert_array_max_ulp(x, x+eps, maxulp=200)
def test_inf(self):
for dt in [np.float32, np.float64]:
inf = np.array([np.inf]).astype(dt)
big = np.array([np.finfo(dt).max])
assert_array_max_ulp(inf, big, maxulp=200)
def test_nan(self):
# Test that nan is 'far' from small, tiny, inf, max and min
for dt in [np.float32, np.float64]:
if dt == np.float32:
maxulp = 1e6
else:
maxulp = 1e12
inf = np.array([np.inf]).astype(dt)
nan = np.array([np.nan]).astype(dt)
big = np.array([np.finfo(dt).max])
tiny = np.array([np.finfo(dt).tiny])
zero = np.array([np.PZERO]).astype(dt)
nzero = np.array([np.NZERO]).astype(dt)
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, inf,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, big,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, tiny,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, zero,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, nzero,
maxulp=maxulp))
class TestStringEqual(object):
def test_simple(self):
assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline")
with pytest.raises(AssertionError) as exc_info:
assert_string_equal("foo\nbar", "hello\nbar")
msg = str(exc_info.value)
assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
assert_raises(AssertionError,
lambda: assert_string_equal("foo", "hello"))
def test_regex(self):
assert_string_equal("a+*b", "a+*b")
assert_raises(AssertionError,
lambda: assert_string_equal("aaa", "a+b"))
def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
try:
mod_warns = mod.__warningregistry__
except AttributeError:
# the lack of a __warningregistry__
# attribute means that no warning has
# occurred; this can be triggered in
# a parallel test scenario, while in
# a serial test scenario an initial
# warning (and therefore the attribute)
# are always created first
mod_warns = {}
num_warns = len(mod_warns)
# Python 3.4 appears to clear any pre-existing warnings of the same type,
# when raising warnings inside a catch_warnings block. So, there is a
# warning generated by the tests within the context manager, but no
# previous warnings.
if 'version' in mod_warns:
# Python 3 adds a 'version' entry to the registry,
# do not count it.
num_warns -= 1
# Behavior of warnings is Python version dependent. Adjust the
# expected result to compensate. In particular, Python 3.7 does
# not make an entry for ignored warnings.
if sys.version_info[:2] >= (3, 7):
if py37 is not None:
n_in_context = py37
elif sys.version_info[:2] >= (3, 4):
if py34 is not None:
n_in_context = py34
assert_equal(num_warns, n_in_context)
def test_warn_len_equal_call_scenarios():
# assert_warn_len_equal is called under
# varying circumstances depending on serial
# vs. parallel test scenarios; this test
# simply aims to probe both code paths and
# check that no assertion is uncaught
# parallel scenario -- no warning issued yet
class mod(object):
pass
mod_inst = mod()
assert_warn_len_equal(mod=mod_inst,
n_in_context=0)
# serial test scenario -- the __warningregistry__
# attribute should be present
class mod(object):
def __init__(self):
self.__warningregistry__ = {'warning1':1,
'warning2':2}
mod_inst = mod()
assert_warn_len_equal(mod=mod_inst,
n_in_context=2)
def _get_fresh_mod():
# Get this module, with warning registry empty
my_mod = sys.modules[__name__]
try:
my_mod.__warningregistry__.clear()
except AttributeError:
# will not have a __warningregistry__ unless warning has been
# raised in the module at some point
pass
return my_mod
def test_clear_and_catch_warnings():
# Initial state of module, no warnings
my_mod = _get_fresh_mod()
assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
with clear_and_catch_warnings(modules=[my_mod]):
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_equal(my_mod.__warningregistry__, {})
# Without specified modules, don't clear warnings during context
# Python 3.7 catch_warnings doesn't make an entry for 'ignore'.
with clear_and_catch_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 1, py37=0)
# Confirm that specifying module keeps old warning, does not add new
with clear_and_catch_warnings(modules=[my_mod]):
warnings.simplefilter('ignore')
warnings.warn('Another warning')
assert_warn_len_equal(my_mod, 1, py37=0)
# Another warning, no module spec does add to warnings dict, except on
# Python 3.4 (see comments in `assert_warn_len_equal`)
# Python 3.7 catch_warnings doesn't make an entry for 'ignore'.
with clear_and_catch_warnings():
warnings.simplefilter('ignore')
warnings.warn('Another warning')
assert_warn_len_equal(my_mod, 2, py34=1, py37=0)
def test_suppress_warnings_module():
# Initial state of module, no warnings
my_mod = _get_fresh_mod()
assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
def warn_other_module():
# Apply along axis is implemented in python; stacklevel=2 means
# we end up inside its module, not ours.
def warn(arr):
warnings.warn("Some warning 2", stacklevel=2)
return arr
np.apply_along_axis(warn, 0, [0])
# Test module based warning suppression:
assert_warn_len_equal(my_mod, 0)
with suppress_warnings() as sup:
sup.record(UserWarning)
# Suppress warnings from the other module (the file may have a .pyc ending);
# this will have to be changed if apply_along_axis is moved.
sup.filter(module=np.lib.shape_base)
warnings.warn("Some warning")
warn_other_module()
# Check that the suppression did test the file correctly (this module
# got filtered)
assert_equal(len(sup.log), 1)
assert_equal(sup.log[0].message.args[0], "Some warning")
assert_warn_len_equal(my_mod, 0, py37=0)
sup = suppress_warnings()
# Will have to be changed if apply_along_axis is moved:
sup.filter(module=my_mod)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# And test repeat works:
sup.filter(module=my_mod)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# Without specified modules, don't clear warnings during context
# Python 3.7 does not add ignored warnings.
with suppress_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 1, py37=0)
def test_suppress_warnings_type():
# Initial state of module, no warnings
my_mod = _get_fresh_mod()
assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
# Test module based warning suppression:
with suppress_warnings() as sup:
sup.filter(UserWarning)
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
sup = suppress_warnings()
sup.filter(UserWarning)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# And test repeat works:
sup.filter(module=my_mod)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# Without specified modules, don't clear warnings during context
# Python 3.7 does not add ignored warnings.
with suppress_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 1, py37=0)
def test_suppress_warnings_decorate_no_record():
sup = suppress_warnings()
sup.filter(UserWarning)
@sup
def warn(category):
warnings.warn('Some warning', category)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
warn(UserWarning) # should be suppressed
warn(RuntimeWarning)
assert_equal(len(w), 1)
def test_suppress_warnings_record():
sup = suppress_warnings()
log1 = sup.record()
with sup:
log2 = sup.record(message='Some other warning 2')
sup.filter(message='Some warning')
warnings.warn('Some warning')
warnings.warn('Some other warning')
warnings.warn('Some other warning 2')
assert_equal(len(sup.log), 2)
assert_equal(len(log1), 1)
assert_equal(len(log2),1)
assert_equal(log2[0].message.args[0], 'Some other warning 2')
# Do it again, with the same context to see if some warnings survived:
with sup:
log2 = sup.record(message='Some other warning 2')
sup.filter(message='Some warning')
warnings.warn('Some warning')
warnings.warn('Some other warning')
warnings.warn('Some other warning 2')
assert_equal(len(sup.log), 2)
assert_equal(len(log1), 1)
assert_equal(len(log2), 1)
assert_equal(log2[0].message.args[0], 'Some other warning 2')
# Test nested:
with suppress_warnings() as sup:
sup.record()
with suppress_warnings() as sup2:
sup2.record(message='Some warning')
warnings.warn('Some warning')
warnings.warn('Some other warning')
assert_equal(len(sup2.log), 1)
assert_equal(len(sup.log), 1)
def test_suppress_warnings_forwarding():
def warn_other_module():
# Apply along axis is implemented in python; stacklevel=2 means
# we end up inside its module, not ours.
def warn(arr):
warnings.warn("Some warning", stacklevel=2)
return arr
np.apply_along_axis(warn, 0, [0])
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("always"):
for i in range(2):
warnings.warn("Some warning")
assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("location"):
for i in range(2):
warnings.warn("Some warning")
warnings.warn("Some warning")
assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("module"):
for i in range(2):
warnings.warn("Some warning")
warnings.warn("Some warning")
warn_other_module()
assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("once"):
for i in range(2):
warnings.warn("Some warning")
warnings.warn("Some other warning")
warn_other_module()
assert_equal(len(sup.log), 2)
def test_tempdir():
with tempdir() as tdir:
fpath = os.path.join(tdir, 'tmp')
with open(fpath, 'w'):
pass
assert_(not os.path.isdir(tdir))
raised = False
try:
with tempdir() as tdir:
raise ValueError()
except ValueError:
raised = True
assert_(raised)
assert_(not os.path.isdir(tdir))
def test_temppath():
with temppath() as fpath:
with open(fpath, 'w'):
pass
assert_(not os.path.isfile(fpath))
raised = False
try:
with temppath() as fpath:
raise ValueError()
except ValueError:
raised = True
assert_(raised)
assert_(not os.path.isfile(fpath))
class my_cacw(clear_and_catch_warnings):
class_modules = (sys.modules[__name__],)
def test_clear_and_catch_warnings_inherit():
# Test can subclass and add default modules
my_mod = _get_fresh_mod()
with my_cacw():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_equal(my_mod.__warningregistry__, {})
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestAssertNoGcCycles(object):
""" Test assert_no_gc_cycles """
def test_passes(self):
def no_cycle():
b = []
b.append([])
return b
with assert_no_gc_cycles():
no_cycle()
assert_no_gc_cycles(no_cycle)
def test_asserts(self):
def make_cycle():
a = []
a.append(a)
a.append(a)
return a
with assert_raises(AssertionError):
with assert_no_gc_cycles():
make_cycle()
with assert_raises(AssertionError):
assert_no_gc_cycles(make_cycle)
@pytest.mark.slow
def test_fails(self):
"""
Test that in cases where the garbage cannot be collected, we raise an
error, instead of hanging forever trying to clear it.
"""
class ReferenceCycleInDel(object):
"""
An object that not only contains a reference cycle, but creates new
cycles whenever it's garbage-collected and its __del__ runs
"""
make_cycle = True
def __init__(self):
self.cycle = self
def __del__(self):
# break the current cycle so that `self` can be freed
self.cycle = None
if ReferenceCycleInDel.make_cycle:
# but create a new one so that the garbage collector has more
# work to do.
ReferenceCycleInDel()
try:
w = weakref.ref(ReferenceCycleInDel())
try:
with assert_raises(RuntimeError):
                    # this will be unable to acquire a baseline of zero garbage
assert_no_gc_cycles(lambda: None)
except AssertionError:
# the above test is only necessary if the GC actually tried to free
# our object anyway, which python 2.7 does not.
if w() is not None:
pytest.skip("GC does not call __del__ on cyclic objects")
raise
finally:
# make sure that we stop creating reference cycles
ReferenceCycleInDel.make_cycle = False
|
|
# coding=utf8
"""
github.py - Willie Github Module
Copyright 2012, Dimitri Molenaars http://tyrope.nl/
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net/
"""
from __future__ import unicode_literals
from datetime import datetime
import sys
if sys.version_info.major < 3:
from urllib2 import HTTPError
else:
from urllib.error import HTTPError
import json
from willie import web, tools
from willie.module import commands, rule, NOLIMIT
import os
import re
from willie.logger import get_logger
LOGGER = get_logger(__name__)
issueURL = (r'https?://(?:www\.)?github\.com/'
            r'([A-Za-z0-9-]+/[A-Za-z0-9-]+)/'
            r'(?:issues|pull)/'
            r'(\d+)')
regex = re.compile(issueURL)
def checkConfig(bot):
if not bot.config.has_option('github', 'oauth_token') or not bot.config.has_option('github', 'repo'):
return False
else:
return [bot.config.github.oauth_token, bot.config.github.repo]
def configure(config):
"""
| [github] | example | purpose |
| -------- | ------- | ------- |
| oauth_token | 5868e7af57496cc3ae255868e7af57496cc3ae25 | The OAuth token to connect to your github repo |
| repo | embolalia/willie | The GitHub repo you're working from. |
"""
chunk = ''
if config.option('Configuring github issue reporting and searching module', False):
config.interactive_add('github', 'oauth_token', 'Github API Oauth2 token', '')
config.interactive_add('github', 'repo', 'Github repository', 'embolalia/willie')
return chunk
def setup(bot):
if not bot.memory.contains('url_callbacks'):
bot.memory['url_callbacks'] = tools.WillieMemory()
bot.memory['url_callbacks'][regex] = issue_info
def shutdown(bot):
del bot.memory['url_callbacks'][regex]
@commands('makeissue', 'makebug')
def issue(bot, trigger):
"""Create a GitHub issue, also known as a bug report. Syntax: .makeissue Title of the bug report"""
# check input
if not trigger.group(2):
return bot.say('Please title the issue')
    # Are the OAuth token and repo available?
gitAPI = checkConfig(bot)
if not gitAPI:
return bot.say('Git module not configured, make sure github.oauth_token and github.repo are defined')
# parse input
now = ' '.join(str(datetime.utcnow()).split(' ')).split('.')[0] + ' UTC'
body = 'Submitted by: %s\nFrom channel: %s\nAt %s' % (trigger.nick, trigger.sender, now)
data = {"title": trigger.group(2), "body": body}
# submit
try:
raw = web.post('https://api.github.com/repos/' + gitAPI[1] + '/issues?access_token=' + gitAPI[0], json.dumps(data))
except HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
data = json.loads(raw)
bot.say('Issue #%s posted. %s' % (data['number'], data['html_url']))
LOGGER.warning('Issue #%s created in %s', data['number'], trigger.sender)
@commands('addtrace', 'addtraceback')
def add_traceback(bot, trigger):
"""Add a traceback to a GitHub issue.
This pulls the traceback from the exceptions log file. To use, put .addtrace
followed by the issue number to add the comment to, then the signature of
    the error (the message shown to the channel when the error occurred). This
command will only work for errors from unhandled exceptions."""
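    # Hypothetical example: ".addtrace 42 ValueError: invalid literal for int()"
    # would look up the traceback whose logged signature matches that message
    # and attach it as a comment on issue #42.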
# Make sure the API is set up
gitAPI = checkConfig(bot)
if not gitAPI:
return bot.say('GitHub module not configured, make sure github.oauth_token and github.repo are defined')
if not trigger.group(2):
bot.say('Please give both the issue number and the error message.')
return
# Make sure the input is valid
args = trigger.group(2).split(None, 1)
if len(args) != 2:
bot.say('Please give both the issue number and the error message.')
return
number, trace = args
# Make sure the given issue number exists
issue_data = web.get('https://api.github.com/repos/%s/issues/%s' % (gitAPI[1], number))
issue_data = json.loads(issue_data)
if 'message' in issue_data and issue_data['message'] == 'Not Found':
return bot.say("That issue doesn't exist.")
# Find the relevant lines from the log file
post = ''
logfile = os.path.join(bot.config.logdir, 'exceptions.log')
with open(logfile) as log:
in_trace = False
for data in log:
if data == 'Signature: ' + trace + '\n':
post = data
in_trace = True
elif data == '----------------------------------------\n':
in_trace = False
elif in_trace:
post += data
# Give an error if we didn't find the traceback
if not post:
return bot.say("I don't remember getting that error. Please post it "
"yourself at https://github.com/%s/issues/%s"
% (gitAPI[1], number))
# Make the comment
try:
raw = web.post('https://api.github.com/repos/' + gitAPI[1] + '/issues/'
+ number + '/comments?access_token=' + gitAPI[0],
json.dumps({'body': '``\n' + post + '``'}))
    except HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
data = json.loads(raw)
bot.say('Added traceback to issue #%s. %s' % (number, data['html_url']))
LOGGER.warning('Traceback added to #%s in %s.', number, trigger.sender)
@commands('findissue', 'findbug')
def findIssue(bot, trigger):
"""Search for a GitHub issue by keyword or ID. usage: .findissue search keywords/ID (optional) You can specify the first keyword as "CLOSED" to search closed issues."""
if not trigger.group(2):
return bot.reply('What are you searching for?')
    # Are the OAuth token and repo available?
gitAPI = checkConfig(bot)
if not gitAPI:
return bot.say('Git module not configured, make sure github.oauth_token and github.repo are defined')
firstParam = trigger.group(2).split(' ')[0]
if firstParam.isdigit():
URL = 'https://api.github.com/repos/%s/issues/%s' % (gitAPI[1], firstParam)
elif firstParam == 'CLOSED':
if '%20'.join(trigger.group(2).split(' ')[1:]) not in ('', '\x02', '\x03'):
URL = 'https://api.github.com/legacy/issues/search/' + gitAPI[1] + '/closed/' + '%20'.join(trigger.group(2).split(' ')[1:])
else:
return bot.reply('What are you searching for?')
else:
URL = 'https://api.github.com/legacy/issues/search/%s/open/%s' % (gitAPI[1], web.quote(trigger.group(2)))
try:
raw = web.get(URL)
except HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
try:
if firstParam.isdigit():
data = json.loads(raw)
else:
data = json.loads(raw)['issues'][-1]
except (KeyError, IndexError):
return bot.say('No search results.')
try:
if len(data['body'].split('\n')) > 1:
body = data['body'].split('\n')[0] + '...'
else:
body = data['body'].split('\n')[0]
except (KeyError):
LOGGER.exception('API returned an invalid result on query request %s',
trigger.group(2))
bot.say('Invalid result, please try again later.')
return NOLIMIT
bot.reply('[#%s]\x02title:\x02 %s \x02|\x02 %s' % (data['number'], data['title'], body))
bot.say(data['html_url'])
@rule('.*%s.*' % issueURL)
def issue_info(bot, trigger, match=None):
match = match or trigger
URL = 'https://api.github.com/repos/%s/issues/%s' % (match.group(1), match.group(2))
try:
raw = web.get(URL)
except HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
data = json.loads(raw)
try:
if len(data['body'].split('\n')) > 1:
body = data['body'].split('\n')[0] + '...'
else:
body = data['body'].split('\n')[0]
except (KeyError):
bot.say('The API says this is an invalid issue. Please report this if you know it\'s a correct link!')
return NOLIMIT
bot.say('[#%s]\x02title:\x02 %s \x02|\x02 %s' % (data['number'], data['title'], body))
|
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GCI Organization Admins to propose winners for their orgs."""
from google.appengine.ext import db
from google.appengine.ext import ndb
from django import forms
from django.utils.translation import ugettext
from melange.request import access
from melange.request import exception
from soc.modules.gci.views.base import GCIRequestHandler
from soc.views.helper import lists
from soc.views.helper import url_patterns
from soc.modules.gci.logic import organization as org_logic
from soc.modules.gci.logic import org_score as org_score_logic
from soc.modules.gci.models import organization as organization_model
from soc.modules.gci.models import profile as profile_model
from soc.modules.gci.templates import org_list
from soc.modules.gci.views import forms as gci_forms
from soc.modules.gci.views.helper import url_patterns as gci_url_patterns
from soc.modules.gci.views.helper import url_names
DEF_WINNER_MORE_THAN_ONCE_ERROR = ugettext(
'You cannot set the same winner more than once')
class ProposeWinnersForm(gci_forms.GCIModelForm):
"""Django form to propose the grand prize winners."""
EMPTY_CHOICE = 'EMPTY_CHOICE'
class Meta:
model = None
def __init__(self, request_data=None, **kwargs):
super(ProposeWinnersForm, self).__init__(**kwargs)
self.request_data = request_data
choices = [(ProposeWinnersForm.EMPTY_CHOICE, '---')]
possible_winners = org_score_logic.getPossibleWinners(request_data.url_org)
for possible_winner in possible_winners:
choices.append(self._getChoiceOption(possible_winner))
self.fields.get('first_proposed_winner').choices = choices
self.fields.get('second_proposed_winner').choices = choices
self.fields.get('backup_proposed_winner').choices = choices
# check if at least one grand prize winner is already set
if len(self.request_data.url_org.proposed_winners) > 0:
self.fields.get('first_proposed_winner').initial = str(
self.request_data.url_org.proposed_winners[0])
# check if both grand prize winners are already set
if len(self.request_data.url_org.proposed_winners) > 1:
self.fields.get('second_proposed_winner').initial = str(
self.request_data.organization.proposed_winners[1])
# check if backup winner is already set
backup_winner_key = (organization_model.GCIOrganization.backup_winner
.get_value_for_datastore(self.request_data.url_org))
if backup_winner_key:
self.fields.get('backup_proposed_winner').initial = str(backup_winner_key)
first_proposed_winner = forms.ChoiceField(label='First Grand Prize Winner')
second_proposed_winner = forms.ChoiceField(label='Second Grand Prize Winner')
backup_proposed_winner = forms.ChoiceField(label='Backup Grand Prize Winner')
def clean(self):
first_proposed_winner = self.cleaned_data.get(
'first_proposed_winner', ProposeWinnersForm.EMPTY_CHOICE)
second_proposed_winner = self.cleaned_data.get(
'second_proposed_winner', ProposeWinnersForm.EMPTY_CHOICE)
backup_proposed_winner = self.cleaned_data.get(
'backup_proposed_winner', ProposeWinnersForm.EMPTY_CHOICE)
# TODO (daniel): the logic below should be simplified
key_names = set([
first_proposed_winner, second_proposed_winner, backup_proposed_winner])
if len(key_names) == 3:
# there are three different key_names, so everything is OK
pass
elif len(key_names) == 2:
# it is not OK, because at least one of the fields is duplicated
self._errors['__all__'] = DEF_WINNER_MORE_THAN_ONCE_ERROR
elif list(key_names)[0] != ProposeWinnersForm.EMPTY_CHOICE:
      # all three choices are identical, which is only acceptable if they are all empty
self._errors['__all__'] = DEF_WINNER_MORE_THAN_ONCE_ERROR
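    # A possible simplification for the TODO above (untested sketch; unlike the
    # current code it would accept the case where only one winner is selected):
    #   chosen = [key for key in (first_proposed_winner, second_proposed_winner,
    #                             backup_proposed_winner)
    #             if key != ProposeWinnersForm.EMPTY_CHOICE]
    #   if len(chosen) != len(set(chosen)):
    #       self._errors['__all__'] = DEF_WINNER_MORE_THAN_ONCE_ERROR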
def _getChoiceOption(self, student):
return (str(student.key), self._formatPossibleWinner(student))
def _formatPossibleWinner(self, student):
return '%s' % student.public_name
class ProposeWinnersPage(GCIRequestHandler):
"""Page to propose winners by organization admins"""
def templatePath(self):
return 'modules/gci/propose_winners/base.html'
def djangoURLPatterns(self):
return [
gci_url_patterns.url(r'propose_winners/%s$' % url_patterns.ORG, self,
name=url_names.GCI_ORG_PROPOSE_WINNERS),
]
def checkAccess(self, data, check, mutator):
check.isOrgAdminForOrganization(ndb.Key.from_old_key(data.url_org.key()))
if not data.timeline.allReviewsStopped():
raise exception.Forbidden(
          message='This page may only be accessed after the review period is over')
def context(self, data, check, mutator):
form = ProposeWinnersForm(request_data=data, data=data.POST or None)
context = {
'page_name': 'Propose winners for %s' % data.url_org.name,
'forms': [form]
}
return context
def post(self, data, check, mutator):
"""Handles POST requests."""
form = ProposeWinnersForm(request_data=data, data=data.POST)
if not form.is_valid():
# TODO(nathaniel): problematic self-call.
return self.get(data, check, mutator)
first_key_str = data.POST.get(
'first_proposed_winner', ProposeWinnersForm.EMPTY_CHOICE)
second_key_str = data.POST.get(
'second_proposed_winner', ProposeWinnersForm.EMPTY_CHOICE)
backup_key_str = data.POST.get(
'backup_proposed_winner', ProposeWinnersForm.EMPTY_CHOICE)
proposed_winners = self._getProposedWinnersList(
first_key_str, second_key_str)
backup_winner = self._getBackupWinner(backup_key_str)
def txn():
organization = organization_model.GCIOrganization.get(
data.organization.key())
organization.proposed_winners = proposed_winners
organization.backup_winner = backup_winner
organization.put()
db.run_in_transaction(txn)
data.redirect.organization()
return data.redirect.to(url_names.GCI_ORG_PROPOSE_WINNERS)
def _getProfileByKeyStr(self, key_str):
"""Returns the GCIProfile entity based on the specified string
representation of db.Key.
"""
try:
key = ndb.Key(key_str)
# TODO(daniel): find out what actual exception class is
except Exception:
return None
return key.get()
def _getBackupWinner(self, backup_key_str):
"""Returns the GCIProfile entity belonging to the backup winner chosen
by the organization.
Args:
backup_key_str: the string representation of the key associated with
the profile proposed by the organization.
Returns:
the GCIProfile entity associated with the specified argument or None
if it does not point to any existing profile
"""
return self._getProfileByKeyStr(backup_key_str)
def _getProposedWinnersList(self, first_key_str, second_key_str):
"""Returns the list which contains the keys of the GCIProfile entities
belonging to students proposed by the organization.
Args:
first_key_str: the string representation of the first key associated
with the profile proposed by the organization.
second_key_str: the string representation of the second key associated
with the profile proposed by the organization.
Returns:
a list with the keys of GCIProfile entity that correspond to
the specified arguments.
"""
proposed_winners = []
profile = self._getProfileByKeyStr(first_key_str)
if profile:
proposed_winners.append(profile.key)
profile = self._getProfileByKeyStr(second_key_str)
if profile:
proposed_winners.append(profile.key)
return proposed_winners
class OrganizationsForProposeWinnersList(org_list.OrgList):
"""Lists all organizations for which the current user may propose the Grand
Prize Winner and the row action takes their to ProposeWinnersPage for
the corresponding organization.
"""
def _getDescription(self):
return ugettext('Choose an organization for which to propose the '
'Grand Prize Winners.')
def _getRedirect(self):
def redirect(e, *args):
# TODO(nathaniel): make this .organization call unnecessary.
self.data.redirect.organization(organization=e)
return self.data.redirect.urlOf(url_names.GCI_ORG_PROPOSE_WINNERS)
return redirect
def _getListConfig(self):
"""Returns ListConfiguration object for the list.
"""
list_config = lists.ListConfiguration()
list_config.addPlainTextColumn('name', 'Name',
lambda e, *args: e.name.strip())
list_config.addSimpleColumn('link_id', 'Link ID', hidden=True)
list_config.setRowAction(self._getRedirect())
return list_config
def _getQuery(self):
"""Returns Query object to fetch entities for the list.
"""
return org_logic.queryForOrgAdminAndStatus(
self.data.profile, ['new', 'active'])
class ChooseOrganizationForProposeWinnersPage(GCIRequestHandler):
"""View with a list of organizations. When a user clicks on one of them,
he or she is moved to the propose winner page for this organization.
"""
access_checker = access.NON_STUDENT_PROFILE_ACCESS_CHECKER
def templatePath(self):
return 'modules/gci/org_list/base.html'
def djangoURLPatterns(self):
return [
gci_url_patterns.url(
r'org_choose_for_propose_winners/%s$' % url_patterns.PROGRAM, self,
name=url_names.GCI_ORG_CHOOSE_FOR_PROPOSE_WINNNERS),
]
def jsonContext(self, data, check, mutator):
list_content = OrganizationsForProposeWinnersList(data).getListData()
if list_content:
return list_content.content()
else:
raise exception.Forbidden(message='You do not have access to this data')
def context(self, data, check, mutator):
return {
'page_name': "Choose an organization for which to display scores.",
'org_list': OrganizationsForProposeWinnersList(data),
#'program_select': ProgramSelect(self.data, 'gci_accepted_orgs'),
}
class ProposedWinnersForOrgsList(org_list.OrgList):
"""Lists all organizations for which the current user may propose the Grand
Prize Winner and the row action takes their to ProposeWinnersPage for
the corresponding organization.
"""
def _getDescription(self):
return ugettext('Proposed Grand Prize Winners')
def _getRedirect(self):
def redirect(e, *args):
# TODO(nathaniel): make this .organization call unnecessary.
self.data.redirect.organization(organization=e)
return self.data.redirect.urlOf(url_names.GCI_ORG_PROPOSE_WINNERS)
return redirect
def _getListConfig(self):
"""Returns ListConfiguration object for the list.
"""
def proposedWinnersFunc(organization, *args):
profiles = profile_model.GCIProfile.get(organization.proposed_winners)
return ', '.join([p.public_name for p in profiles if p])
list_config = lists.ListConfiguration()
list_config.addPlainTextColumn('name', 'Name',
lambda e, *args: e.name.strip())
list_config.addPlainTextColumn('proposed_winners', 'Proposed Winners',
proposedWinnersFunc)
list_config.addPlainTextColumn('backup_winner', 'Backup Winner',
lambda e, *args: e.backup_winner.public_name if e.backup_winner else '')
list_config.addSimpleColumn('profile_id', 'Username', hidden=True)
list_config.setRowAction(self._getRedirect())
return list_config
def _getQuery(self):
"""Returns Query object to fetch entities for the list.
"""
return org_logic.queryForProgramAndStatus(
self.data.program, ['new', 'active'])
class ViewProposedWinnersPage(GCIRequestHandler):
"""View with a list of organizations with the proposed Grand Prize Winners.
"""
access_checker = access.PROGRAM_ADMINISTRATOR_ACCESS_CHECKER
def templatePath(self):
return 'modules/gci/org_list/base.html'
def djangoURLPatterns(self):
return [
gci_url_patterns.url(
r'view_proposed_winners/%s$' % url_patterns.PROGRAM, self,
name=url_names.GCI_VIEW_PROPOSED_WINNERS),
]
def jsonContext(self, data, check, mutator):
list_content = ProposedWinnersForOrgsList(data).getListData()
if list_content:
return list_content.content()
else:
raise exception.Forbidden(message='You do not have access to this data')
def context(self, data, check, mutator):
return {
'page_name': "Proposed Grand Prize Winners.",
'org_list': ProposedWinnersForOrgsList(data),
#'program_select': ProgramSelect(self.data, 'gci_accepted_orgs'),
}
|
|
"""Modules for I2C bus on the Opsis.
FIXME: Refactor this properly...
"""
from migen.fhdl import *
from migen.fhdl.specials import TSTriple
from migen.genlib.cdc import MultiReg
from migen.genlib.fsm import FSM, NextState
from migen.genlib.misc import chooser
from migen.genlib.misc import split, displacer, chooser
from litex.soc.cores.gpio import GPIOIn, GPIOOut
from litex.soc.interconnect.csr import *
from gateware import i2c
class I2CShiftReg(Module, AutoCSR):
def __init__(self, pads):
STATUS_FULL = 1
STATUS_EMPTY = 2
self.shift_reg = shift_reg = CSRStorage(8, write_from_dev=True)
self.status = status = CSRStorage(2, reset=STATUS_EMPTY, write_from_dev=True)
self.slave_addr = slave_addr = CSRStorage(7)
###
scl_raw = Signal()
sda_i = Signal()
sda_raw = Signal()
sda_drv = Signal()
scl_drv = Signal()
_sda_drv_reg = Signal()
self._sda_i_async = _sda_i_async = Signal()
self._scl_i_async = _scl_i_async = Signal()
_scl_drv_reg = Signal()
self.sync += _sda_drv_reg.eq(sda_drv)
self.sync += _scl_drv_reg.eq(scl_drv)
self.comb += [
pads.scl.w.eq(0),
pads.scl.oe.eq(_scl_drv_reg),
_scl_i_async.eq(pads.scl.r),
pads.sda.w.eq(0),
pads.sda.oe.eq(_sda_drv_reg),
_sda_i_async.eq(pads.sda.r),
]
self.specials += [
MultiReg(_scl_i_async, scl_raw),
MultiReg(_sda_i_async, sda_raw),
]
# for debug
self.scl = scl_raw
self.sda_i = sda_i
self.sda_o = Signal()
self.comb += self.sda_o.eq(~_sda_drv_reg)
self.sda_oe = _sda_drv_reg
shift_reg_full = Signal()
shift_reg_empty = Signal()
scl_i = Signal()
samp_count = Signal(3)
samp_carry = Signal()
self.sync += [
Cat(samp_count, samp_carry).eq(samp_count + 1),
If(samp_carry,
scl_i.eq(scl_raw),
sda_i.eq(sda_raw)
)
]
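        # samp_count is a free-running 3-bit divider: samp_carry pulses once
        # every 8 system clock cycles, so scl_i/sda_i above are decimated
        # (lightly filtered) copies of the synchronized SCL/SDA inputs.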
scl_r = Signal()
sda_r = Signal()
scl_rising = Signal()
scl_falling = Signal()
sda_rising = Signal()
sda_falling = Signal()
self.sync += [
scl_r.eq(scl_i),
sda_r.eq(sda_i)
]
self.comb += [
shift_reg_full.eq(status.storage[0]),
shift_reg_empty.eq(status.storage[1]),
scl_rising.eq(scl_i & ~scl_r),
scl_falling.eq(~scl_i & scl_r),
sda_rising.eq(sda_i & ~sda_r),
sda_falling.eq(~sda_i & sda_r)
]
start = Signal()
self.comb += start.eq(scl_i & sda_falling)
din = Signal(8)
counter = Signal(max=9)
counter_reset = Signal()
self.sync += [
If(start | counter_reset, counter.eq(0)),
If(scl_rising,
If(counter == 8,
counter.eq(0)
).Else(
counter.eq(counter + 1),
din.eq(Cat(sda_i, din[:7]))
)
)
]
self.din = din
self.counter = counter
is_read = Signal()
update_is_read = Signal()
self.sync += If(update_is_read, is_read.eq(din[0]))
data_bit = Signal()
zero_drv = Signal()
data_drv = Signal()
pause_drv = Signal()
self.comb += scl_drv.eq(pause_drv)
self.comb += If(zero_drv, sda_drv.eq(1)).Elif(data_drv,
sda_drv.eq(~data_bit))
data_drv_en = Signal()
data_drv_stop = Signal()
self.sync += If(data_drv_en, data_drv.eq(1)).Elif(data_drv_stop,
data_drv.eq(0))
self.sync += If(data_drv_en, chooser(shift_reg.storage,
counter, data_bit, 8,
reverse=True))
self.submodules.fsm = fsm = FSM()
fsm.act("WAIT_START",
data_drv_stop.eq(1),
)
fsm.act("RCV_ADDRESS",
data_drv_stop.eq(1),
If(counter == 8,
If(din[1:] == slave_addr.storage,
update_is_read.eq(1),
NextState("ACK_ADDRESS0"),
).Else(
NextState("WAIT_START"),
)
)
)
fsm.act("ACK_ADDRESS0",
counter_reset.eq(1),
If(~scl_i, NextState("ACK_ADDRESS1")),
)
fsm.act("ACK_ADDRESS1",
counter_reset.eq(1),
zero_drv.eq(1),
If(scl_i, NextState("ACK_ADDRESS2")),
)
fsm.act("ACK_ADDRESS2",
counter_reset.eq(1),
zero_drv.eq(1),
If(~scl_i,
NextState("PAUSE")
)
)
fsm.act("PAUSE",
counter_reset.eq(1),
pause_drv.eq(1),
If(~shift_reg_empty & is_read,
counter_reset.eq(1),
NextState("DO_READ"),
).Elif(~shift_reg_full & ~is_read,
NextState("DO_WRITE"),
)
)
fsm.act("DO_READ",
If(~scl_i,
If(counter == 8,
data_drv_stop.eq(1),
status.we.eq(1),
status.dat_w.eq(STATUS_EMPTY),
NextState("ACK_READ0"),
).Else(
data_drv_en.eq(1),
)
)
)
fsm.act("ACK_READ0",
counter_reset.eq(1),
If(scl_rising,
If(sda_i,
NextState("WAIT_START"),
).Else(
NextState("ACK_READ1"),
)
)
)
fsm.act("ACK_READ1",
counter_reset.eq(1),
If(scl_falling,
NextState("PAUSE"),
)
)
fsm.act("DO_WRITE",
If(counter == 8,
shift_reg.dat_w.eq(din),
shift_reg.we.eq(1),
NextState("ACK_WRITE0"),
)
)
fsm.act("ACK_WRITE0",
counter_reset.eq(1),
If(~scl_i, NextState("ACK_WRITE1")),
)
fsm.act("ACK_WRITE1",
counter_reset.eq(1),
zero_drv.eq(1),
If(scl_i, NextState("ACK_WRITE2")),
)
fsm.act("ACK_WRITE2",
counter_reset.eq(1),
zero_drv.eq(1),
If(~scl_i,
NextState("PAUSE"),
status.we.eq(1),
status.dat_w.eq(STATUS_FULL),
)
)
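        # Global transitions applied to every FSM state below: a START condition
        # (SDA falling while SCL is high) restarts address reception, and a
        # write to the slave address CSR forces the FSM back to WAIT_START.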
for state in fsm.actions.keys():
fsm.act(state, If(start, NextState("RCV_ADDRESS")))
for state in fsm.actions.keys():
fsm.act(state, If(self.slave_addr.re, NextState("WAIT_START")))
class OpsisI2C(Module, AutoCSR):
"""I2C bus on the Opsis.
    Used for:
* Small EEPROM which contains FX2 firmware + MAC address.
* Loading firmware onto the FX2.
"""
def __init__(self, platform):
self.submodules.mux = i2c.I2CMux(platform.request("opsis_i2c"))
self.submodules.master = i2c.I2C(self.mux.get_i2c_pads())
# Use a proper Tristate for the FX2 reset so the "pull up" works.
fx2_reset = TSTriple(1)
self.comb += [
fx2_reset.o.eq(0),
]
self.specials += [
fx2_reset.get_tristate(platform.request("fx2_reset")),
]
self.submodules.fx2_reset = GPIOOut(fx2_reset.oe)
self.submodules.fx2_hack = I2CShiftReg(self.mux.get_i2c_pads())
|
|
import logging
import os
from contextlib import contextmanager
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
TextIO,
Tuple,
Union,
cast,
)
from funcy import compact, lremove
from rich.rule import Rule
from rich.syntax import Syntax
from dvc.exceptions import DvcException
from dvc.stage import PipelineStage
from dvc.stage.serialize import to_pipeline_file
from dvc.types import OptStr
from dvc.utils.serialize import dumps_yaml
if TYPE_CHECKING:
from dvc.repo import Repo
from dvc.dvcfile import DVCFile
from dvc.stage import Stage
from rich.tree import Tree
from dvc.ui import ui
PROMPTS = {
"cmd": "[b]Command[/b] to execute",
"code": "Path to a [b]code[/b] file/directory",
"data": "Path to a [b]data[/b] file/directory",
"models": "Path to a [b]model[/b] file/directory",
"params": "Path to a [b]parameters[/b] file",
"metrics": "Path to a [b]metrics[/b] file",
"plots": "Path to a [b]plots[/b] file/directory",
"live": "Path to log [b]dvclive[/b] outputs",
}
def _prompts(
keys: Iterable[str],
defaults: Dict[str, str] = None,
validator: Callable[[str, str], Union[str, Tuple[str, str]]] = None,
allow_omission: bool = True,
stream: Optional[TextIO] = None,
) -> Dict[str, OptStr]:
from dvc.ui.prompt import Prompt
defaults = defaults or {}
return {
key: Prompt.prompt_(
PROMPTS[key],
console=ui.error_console,
default=defaults.get(key),
validator=partial(validator, key) if validator else None,
allow_omission=allow_omission,
stream=stream,
)
for key in keys
}
@contextmanager
def _disable_logging(highest_level=logging.CRITICAL):
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
def build_workspace_tree(workspace: Dict[str, str]) -> "Tree":
from rich.tree import Tree
tree = Tree(
"DVC assumes the following workspace structure:",
highlight=True,
)
for value in sorted(workspace.values()):
tree.add(f"[green]{value}[/green]")
return tree
PIPELINE_FILE_LINK = (
"https://dvc.org/doc/user-guide/project-structure/pipelines-files"
)
def init_interactive(
name: str,
defaults: Dict[str, str],
provided: Dict[str, str],
validator: Callable[[str, str], Union[str, Tuple[str, str]]] = None,
live: bool = False,
stream: Optional[TextIO] = None,
) -> Dict[str, str]:
command = provided.pop("cmd", None)
primary = lremove(provided.keys(), ["code", "data", "models", "params"])
secondary = lremove(
provided.keys(), ["live"] if live else ["metrics", "plots"]
)
prompts = primary + secondary
workspace = {**defaults, **provided}
if not live and "live" not in provided:
workspace.pop("live", None)
for key in ("plots", "metrics"):
if live and key not in provided:
workspace.pop(key, None)
ret: Dict[str, str] = {}
if command:
ret["cmd"] = command
if not prompts and command:
return ret
ui.error_write(
f"This command will guide you to set up a [bright_blue]{name}[/]",
"stage in [green]dvc.yaml[/].",
f"\nSee [repr.url]{PIPELINE_FILE_LINK}[/].\n",
styled=True,
)
if not command:
ret.update(
compact(_prompts(["cmd"], allow_omission=False, stream=stream))
)
if prompts:
ui.error_write(styled=True)
if not prompts:
return ret
ui.error_write(
"Enter the paths for dependencies and outputs of the command.",
styled=True,
)
if workspace:
ui.error_write(build_workspace_tree(workspace), styled=True)
ui.error_write(styled=True)
ret.update(
compact(
_prompts(prompts, defaults, validator=validator, stream=stream)
)
)
return ret
def _check_stage_exists(
dvcfile: "DVCFile", name: str, force: bool = False
) -> None:
if not force and dvcfile.exists() and name in dvcfile.stages:
from dvc.stage.exceptions import DuplicateStageName
hint = "Use '--force' to overwrite."
raise DuplicateStageName(
f"Stage '{name}' already exists in 'dvc.yaml'. {hint}"
)
def loadd_params(path: str) -> Dict[str, List[str]]:
from dvc.utils.serialize import LOADERS
_, ext = os.path.splitext(path)
return {path: list(LOADERS[ext](path))}
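# For example (hypothetical file), loadd_params("params.yaml") returns something
# like {"params.yaml": ["lr", "epochs"]} -- the top-level keys of the loaded file.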
def validate_prompts(key: str, value: str) -> Union[Any, Tuple[Any, str]]:
from dvc.ui.prompt import InvalidResponse
if key == "params":
assert isinstance(value, str)
msg_format = (
"[prompt.invalid]'{0}' {1}. "
"Please retry with an existing parameters file."
)
if not os.path.exists(value):
raise InvalidResponse(msg_format.format(value, "does not exist"))
if os.path.isdir(value):
raise InvalidResponse(msg_format.format(value, "is a directory"))
elif key in ("code", "data"):
if not os.path.exists(value):
return value, (
f"[yellow]'{value}' does not exist in the workspace. "
'"exp run" may fail.[/]'
)
return value
def init(
repo: "Repo",
name: str = None,
type: str = "default", # pylint: disable=redefined-builtin
defaults: Dict[str, str] = None,
overrides: Dict[str, str] = None,
interactive: bool = False,
force: bool = False,
stream: Optional[TextIO] = None,
) -> "Stage":
from dvc.dvcfile import make_dvcfile
dvcfile = make_dvcfile(repo, "dvc.yaml")
name = name or type
_check_stage_exists(dvcfile, name, force=force)
defaults = defaults.copy() if defaults else {}
overrides = overrides.copy() if overrides else {}
with_live = type == "live"
if interactive:
defaults = init_interactive(
name,
validator=validate_prompts,
defaults=defaults,
live=with_live,
provided=overrides,
stream=stream,
)
else:
if with_live:
            # Suppress the `metrics`/`plots` defaults if live is selected,
            # unless they are also provided via overrides/cli.
            # The live output is also created as a checkpoint (see
            # `checkpoint_out` below).
defaults.pop("metrics", None)
defaults.pop("plots", None)
else:
defaults.pop("live", None) # suppress live otherwise
context: Dict[str, str] = {**defaults, **overrides}
assert "cmd" in context
params_kv = []
params = context.get("params")
if params:
params_kv.append(loadd_params(params))
checkpoint_out = bool(context.get("live"))
models = context.get("models")
stage = repo.stage.create(
name=name,
cmd=context["cmd"],
deps=compact([context.get("code"), context.get("data")]),
params=params_kv,
metrics_no_cache=compact([context.get("metrics")]),
plots_no_cache=compact([context.get("plots")]),
live=context.get("live"),
force=force,
**{"checkpoints" if checkpoint_out else "outs": compact([models])},
)
if interactive:
ui.error_write(Rule(style="green"), styled=True)
_yaml = dumps_yaml(to_pipeline_file(cast(PipelineStage, stage)))
syn = Syntax(_yaml, "yaml", theme="ansi_dark")
ui.error_write(syn, styled=True)
from dvc.ui.prompt import Confirm
if not interactive or Confirm.ask(
"Do you want to add the above contents to dvc.yaml?",
console=ui.error_console,
default=True,
stream=stream,
):
with _disable_logging(), repo.scm_context(autostage=True, quiet=True):
stage.dump(update_lock=False)
stage.ignore_outs()
if params:
repo.scm_context.track_file(params)
else:
raise DvcException("Aborting ...")
return stage
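# A minimal usage sketch (hypothetical values; assumes an existing Repo instance):
#
#   stage = init(
#       repo,
#       name="train",
#       overrides={"cmd": "python train.py", "data": "data/", "params": "params.yaml"},
#   )
#
# In the non-interactive case the merged context must contain "cmd" (see the
# assert above), so it has to come from `defaults` or `overrides`.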
|
|
""" Tests for geo spatial data types"""
import numpy as np
import pytest
from numpy import testing
from pytest import param
import ibis
pytestmark = pytest.mark.geospatial
# TODO find a way to just run for the backends that support geo, without
# skipping if dependencies are missing
pytest.importorskip('geoalchemy2')
pytest.importorskip('geopandas')
shapely = pytest.importorskip('shapely')
# geo literals declaration
point_0 = ibis.literal((0, 0), type='point').name('tmp')
point_0_4326 = ibis.literal((0, 0), type='point;4326').name('tmp')
point_geom_0 = ibis.literal((0, 0), type='point;4326:geometry').name('p')
point_geom_1 = ibis.literal((1, 1), type='point;4326:geometry').name('p')
point_geom_0_srid0 = ibis.literal((0, 0), type='point;0:geometry').name('p')
point_geom_1_srid0 = ibis.literal((1, 1), type='point;0:geometry').name('p')
point_geom_2 = ibis.literal((2, 2), type='point;4326:geometry').name('p')
point_geog_0 = ibis.literal((0, 0), type='point;4326:geography').name('p')
point_geog_1 = ibis.literal((1, 1), type='point;4326:geography').name('p')
point_geog_2 = ibis.literal((2, 2), type='point;4326:geography').name('p')
polygon_0 = ibis.literal(
(
((1, 0), (0, 1), (-1, 0), (0, -1), (1, 0)),
((0.1, 0), (0, 0.1), (-0.1, 0), (0, -0.1), (0.1, 0)),
),
type='polygon;4326:geometry',
).name('p')
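# The literal type strings above follow the pattern 'shape[;srid][:geo_type]',
# e.g. 'point;4326:geography' is a point with SRID 4326 stored as geography
# (the same convention is exercised generically in test_geo_literals_smoke below).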
# test input data with shapely geometries
shp_point_0 = shapely.geometry.Point(0, 0)
shp_point_1 = shapely.geometry.Point(1, 1)
shp_point_2 = shapely.geometry.Point(2, 2)
shp_linestring_0 = shapely.geometry.LineString(
[shp_point_0, shp_point_1, shp_point_2]
)
shp_linestring_1 = shapely.geometry.LineString(
[shp_point_2, shp_point_1, shp_point_0]
)
shp_polygon_0 = shapely.geometry.Polygon(shp_linestring_0)
shp_multilinestring_0 = shapely.geometry.MultiLineString(
[shp_linestring_0, shp_linestring_1]
)
shp_multipoint_0 = shapely.geometry.MultiPoint(
[shp_point_0, shp_point_1, shp_point_2]
)
shp_multipolygon_0 = shapely.geometry.MultiPolygon([shp_polygon_0])
@pytest.mark.parametrize(
('expr', 'expected'),
[
(point_0, {'postgres': "'POINT (0 0)'"}),
(point_0_4326, {'postgres': "'SRID=4326;POINT (0 0)'"}),
(point_geom_0, {'postgres': "'SRID=4326;POINT (0 0)'::geometry"}),
(point_geom_1, {'postgres': "'SRID=4326;POINT (1 1)'::geometry"}),
(point_geom_2, {'postgres': "'SRID=4326;POINT (2 2)'::geometry"}),
(point_geog_0, {'postgres': "'SRID=4326;POINT (0 0)'::geography"}),
(point_geog_1, {'postgres': "'SRID=4326;POINT (1 1)'::geography"}),
(point_geog_2, {'postgres': "'SRID=4326;POINT (2 2)'::geography"}),
],
)
def test_literal_geospatial_explicit(con, expr, expected):
result = str(con.compile(expr))
result_expected = f"SELECT {expected['postgres']} AS tmp"
    # Use the `in` operator: when a name is given, OmniSciDB does not compile
    # with an alias while PostgreSQL does; when no name is given, OmniSciDB
    # falls back to a `tmp` alias while PostgreSQL uses no alias at all.
assert result in result_expected
@pytest.mark.parametrize(
('shp', 'expected'),
[
(shp_point_0, {'postgres': "'POINT (0 0)'"}),
(shp_point_1, {'postgres': "'POINT (1 1)'"}),
(shp_point_2, {'postgres': "'POINT (2 2)'"}),
(shp_linestring_0, {'postgres': "'LINESTRING (0 0, 1 1, 2 2)'"}),
(shp_linestring_1, {'postgres': "'LINESTRING (2 2, 1 1, 0 0)'"}),
(shp_polygon_0, {'postgres': "'POLYGON ((0 0, 1 1, 2 2, 0 0))'"}),
(
shp_multipolygon_0,
{'postgres': "'MULTIPOLYGON (((0 0, 1 1, 2 2, 0 0)))'"},
),
],
)
def test_literal_geospatial_inferred(con, shp, expected):
result = str(con.compile(ibis.literal(shp)))
result_expected = f"SELECT {expected['postgres']} AS tmp"
    # OmniSciDB uses `tmp` as a default alias, but PostgreSQL does not use one.
assert result in result_expected
@pytest.mark.parametrize(
('shp', 'expected'),
[
(
shp_multilinestring_0,
{
'postgres': (
"'MULTILINESTRING ((0 0, 1 1, 2 2), (2 2, 1 1, 0 0))'"
)
},
),
(shp_multipoint_0, {'postgres': "'MULTIPOINT (0 0, 1 1, 2 2)'"}),
],
)
def test_literal_multi_geospatial_inferred(con, shp, expected):
result = str(con.compile(ibis.literal(shp)))
result_expected = f"SELECT {expected['postgres']} AS tmp"
assert result in result_expected
@pytest.mark.parametrize(
('expr_fn', 'expected'),
[
param(lambda t: t['geo_linestring'].length(), [1.41] * 5, id='length'),
param(lambda t: t['geo_point'].x(), [0, 1, 2, 3, 4], id='x'),
param(lambda t: t['geo_point'].y(), [0, 1, 2, 3, 4], id='y'),
param(
lambda t: t['geo_linestring'].x_min(),
[0, 1, 2, 3, 4],
id='x_min',
marks=pytest.mark.notimpl(["postgres"]),
),
param(
lambda t: t['geo_linestring'].x_max(),
[1, 2, 3, 4, 5],
id='x_max',
marks=pytest.mark.notimpl(["postgres"]),
),
param(
lambda t: t['geo_linestring'].y_min(),
[0, 1, 2, 3, 4],
id='y_min',
marks=pytest.mark.notimpl(["postgres"]),
),
param(
lambda t: t['geo_linestring'].y_max(),
[1, 2, 3, 4, 5],
id='y_max',
marks=pytest.mark.notimpl(["postgres"]),
),
param(
lambda t: t['geo_multipolygon'].n_rings(),
[2, 3, 1, 1, 1],
id='n_rings',
marks=pytest.mark.notimpl(["postgres"]),
),
param(
lambda t: t['geo_polygon'].set_srid(4326).perimeter(),
[96.34, 114.36, 10.24, 10.24, 10.24],
id='perimeter',
marks=pytest.mark.notimpl(
['postgres'], reason='TODO: fix different results issue'
),
),
param(
lambda t: t['geo_multipolygon'].n_points(),
[7, 11, 5, 5, 5],
id='n_points',
marks=pytest.mark.notimpl(
['postgres'], reason='TODO: fix different results issue'
),
),
],
)
def test_geo_spatial_unops(geotable, expr_fn, expected):
"""Testing for geo spatial unary operations."""
expr = expr_fn(geotable)
result = expr.execute()
testing.assert_almost_equal(result, expected, decimal=2)
@pytest.mark.parametrize(
('expr_fn', 'expected'),
[
param(
lambda t: t['geo_linestring'].contains(point_geom_1_srid0),
{
# does not contain the border
'postgres': [False]
* 5,
},
id='contains',
),
param(
lambda t: t['geo_linestring'].disjoint(point_geom_0_srid0),
{'postgres': [False, True, True, True, True]},
id='disjoint',
),
param(
lambda t: t['geo_point'].d_within(point_geom_1_srid0, 2.0),
{'postgres': [True, True, True, False, False]},
id='d_within',
),
param(
lambda t: t['geo_point'].d_fully_within(t['geo_linestring'], 2.0),
{'postgres': [True, True, True, True, True]},
id='d_fully_within',
),
param(
lambda t: t['geo_linestring'].intersects(point_geom_0_srid0),
{'postgres': [True, False, False, False, False]},
id='intersects',
),
param(
lambda t: t['geo_linestring'].distance(point_geom_0_srid0),
{'postgres': [0.0, 1.41, 2.82, 4.24, 5.66]},
id='distance',
),
param(
lambda t: t['geo_linestring'].max_distance(point_geom_0_srid0),
{'postgres': [1.41, 2.82, 4.24, 5.66, 7.08]},
id='max_distance',
marks=pytest.mark.notimpl(["postgres"]),
),
param(
lambda t: t.geo_polygon.contains(ibis.geo_point(30, 10)),
{'postgres': [True, False, False, False, False]},
id='point',
marks=pytest.mark.notimpl(["postgres"]),
),
],
)
def test_geo_spatial_binops(geotable, expr_fn, expected):
"""Testing for geo spatial binary operations."""
expr = expr_fn(geotable)
result = expr.execute()
testing.assert_almost_equal(result, expected["postgres"], decimal=2)
@pytest.mark.parametrize(
('expr_fn', 'expected'),
[
param(
lambda t: t['geo_linestring'].end_point(),
[True, True, True, True, True],
id='end_point',
),
param(
lambda t: t['geo_linestring'].point_n(1),
[True, True, True, True, True],
id='point_n',
marks=pytest.mark.notimpl(["postgres"]),
),
param(
lambda t: t['geo_linestring'].start_point(),
[True, True, True, True, True],
id='start_point',
),
],
)
def test_get_point(geotable, expr_fn, expected):
"""Testing for geo spatial get point operations."""
arg = expr_fn(geotable)
# Note: there is a difference in how OmnisciDB and PostGIS consider
# boundaries with the contains predicate. Work around this by adding a
# small buffer.
expr = geotable['geo_linestring'].buffer(0.01).contains(arg)
result = geotable[geotable, expr.name('tmp')].execute()['tmp']
testing.assert_almost_equal(result, expected, decimal=2)
@pytest.mark.parametrize(('arg', 'expected'), [(polygon_0, [1.98] * 5)])
def test_area(geotable, arg, expected):
"""Testing for geo spatial area operation."""
expr = geotable[geotable.id, arg.area().name('tmp')]
result = expr.execute()['tmp']
testing.assert_almost_equal(result, expected, decimal=2)
@pytest.mark.parametrize(
('condition', 'expected'),
[
(lambda t: point_geom_2.srid(), {'postgres': 4326}),
(lambda t: point_geom_0.srid(), {'postgres': 4326}),
(lambda t: t.geo_point.srid(), {'postgres': 0}),
(lambda t: t.geo_linestring.srid(), {'postgres': 0}),
(lambda t: t.geo_polygon.srid(), {'postgres': 0}),
(lambda t: t.geo_multipolygon.srid(), {'postgres': 0}),
],
)
def test_srid(geotable, condition, expected):
"""Testing for geo spatial srid operation."""
expr = geotable[geotable.id, condition(geotable).name('tmp')]
result = expr.execute()['tmp'][[0]]
assert np.all(result == expected["postgres"])
@pytest.mark.parametrize(
('condition', 'expected'),
[
(lambda t: point_geom_0.set_srid(4326).srid(), 4326),
(lambda t: point_geom_0.set_srid(4326).set_srid(0).srid(), 0),
(lambda t: t.geo_point.set_srid(4326).srid(), 4326),
(lambda t: t.geo_linestring.set_srid(4326).srid(), 4326),
(lambda t: t.geo_polygon.set_srid(4326).srid(), 4326),
(lambda t: t.geo_multipolygon.set_srid(4326).srid(), 4326),
],
)
def test_set_srid(geotable, condition, expected):
"""Testing for geo spatial set_srid operation."""
expr = geotable[geotable.id, condition(geotable).name('tmp')]
result = expr.execute()['tmp'][[0]]
assert np.all(result == expected)
@pytest.mark.parametrize(
('condition', 'expected'),
[
(lambda t: point_geom_0.transform(900913).srid(), 900913),
(lambda t: point_geom_2.transform(900913).srid(), 900913),
(
lambda t: t.geo_point.set_srid(4326).transform(900913).srid(),
900913,
),
(
lambda t: t.geo_linestring.set_srid(4326).transform(900913).srid(),
900913,
),
(
lambda t: t.geo_polygon.set_srid(4326).transform(900913).srid(),
900913,
),
(
lambda t: t.geo_multipolygon.set_srid(4326)
.transform(900913)
.srid(),
900913,
),
],
)
def test_transform(geotable, condition, expected):
"""Testing for geo spatial transform operation."""
expr = geotable[geotable.id, condition(geotable).name('tmp')]
result = expr.execute()['tmp'][[0]]
assert np.all(result == expected)
@pytest.mark.parametrize(
'expr_fn',
[
param(lambda t: t.geo_point.set_srid(4326), id='geom_geo_point'),
param(lambda t: point_geom_0, id='point_geom_0'),
param(lambda t: point_geom_1, id='point_geom_1'),
param(lambda t: point_geom_2, id='point_geom_2'),
param(lambda t: point_geog_0, id='point_geog_0'),
param(lambda t: point_geog_1, id='point_geog_1'),
param(lambda t: point_geog_2, id='point_geog_2'),
],
)
def test_cast_geography(geotable, expr_fn):
"""Testing for geo spatial transform operation."""
p = expr_fn(geotable).cast('geography')
expr = geotable[geotable.id, p.distance(p).name('tmp')]
result = expr.execute()['tmp'][[0]]
# distance from a point to a same point should be 0
assert np.all(result == 0)
@pytest.mark.parametrize(
'expr_fn',
[
param(lambda t: t.geo_point.set_srid(4326), id='t_geo_point'),
param(lambda t: point_geom_0, id='point_geom_0'),
param(lambda t: point_geom_1, id='point_geom_1'),
param(lambda t: point_geom_2, id='point_geom_2'),
param(lambda t: point_geog_0, id='point_geog_0'),
param(lambda t: point_geog_1, id='point_geog_1'),
param(lambda t: point_geog_2, id='point_geog_2'),
],
)
def test_cast_geometry(geotable, expr_fn):
"""Testing for geo spatial transform operation."""
p = expr_fn(geotable).cast('geometry')
expr = geotable[geotable.id, p.distance(p).name('tmp')]
result = expr.execute()['tmp'][[0]]
# distance from a point to a same point should be 0
assert np.all(result == 0)
def test_geo_dataframe(geotable):
"""Testing for geo dataframe output."""
import geopandas
assert isinstance(geotable.execute(), geopandas.geodataframe.GeoDataFrame)
@pytest.mark.parametrize(
'modifier',
[
{},
{'srid': '4326'},
{'srid': '4326', 'geo_type': 'geometry'},
{'srid': '4326', 'geo_type': 'geography'},
],
)
@pytest.mark.parametrize(
'shape,value,expected',
[
# Geometry primitives (2D)
('point', (30, 10), 'POINT (30 10)'),
(
'linestring',
((30, 10), (10, 30), (40, 40)),
'LINESTRING (30 10, 10 30, 40 40)',
),
(
'polygon',
(
((35, 10), (45, 45), (15, 40), (10, 20), (35, 10)),
((20, 30), (35, 35), (30, 20), (20, 30)),
),
(
'POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), '
+ '(20 30, 35 35, 30 20, 20 30))'
),
),
(
'polygon',
(((30, 10), (40, 40), (20, 40), (10, 20), (30, 10)),),
'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))',
),
# Multipart geometries (2D)
(
'multipoint',
((10, 40), (40, 30), (20, 20), (30, 10)),
'MULTIPOINT ((10 40), (40 30), (20 20), (30 10))',
),
(
'multilinestring',
(
((10, 10), (20, 20), (10, 40)),
((40, 40), (30, 30), (40, 20), (30, 10)),
),
(
'MULTILINESTRING ((10 10, 20 20, 10 40), '
+ '(40 40, 30 30, 40 20, 30 10))'
),
),
(
'multipolygon',
(
(((40, 40), (20, 45), (45, 30), (40, 40)),),
(
(
(20, 35),
(10, 30),
(10, 10),
(30, 5),
(45, 20),
(20, 35),
),
((30, 20), (20, 15), (20, 25), (30, 20)),
),
),
(
'MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)), '
+ '((20 35, 10 30, 10 10, 30 5, 45 20, 20 35), '
+ '(30 20, 20 15, 20 25, 30 20)))'
),
),
],
)
def test_geo_literals_smoke(con, shape, value, modifier, expected):
"""Smoke tests for geo spatial literals"""
expr_type = '{}{}{}'.format(
shape,
';{}'.format(modifier['srid']) if 'srid' in modifier else '',
':{}'.format(modifier['geo_type']) if 'geo_type' in modifier else '',
)
expr = ibis.literal(value, type=expr_type).name('tmp')
result_expected = "SELECT '{}{}'{}".format(
'SRID={};'.format(modifier['srid']) if 'srid' in modifier else '',
expected,
'::{}'.format(modifier['geo_type']) if 'geo_type' in modifier else '',
)
assert str(con.compile(expr)) == result_expected
@pytest.mark.parametrize(
'fn_expr',
[
pytest.param(lambda t: t.geo_point.srid(), id='point_srid'),
pytest.param(
lambda t: t.geo_point.set_srid(4326), id='point_set_srid'
),
pytest.param(lambda t: t.geo_point.x(), id='point_x'),
pytest.param(lambda t: t.geo_point.y(), id='point_y'),
pytest.param(
lambda t: t.geo_linestring.contains(t.geo_point),
id='linestring_contains',
),
pytest.param(
lambda t: t.geo_linestring.end_point(), id='linestring_end_point'
),
pytest.param(
lambda t: t.geo_linestring.length(), id='linestring_length'
),
pytest.param(
lambda t: t.geo_linestring.max_distance(t.geo_point),
id='linestring_max_distance',
marks=pytest.mark.notimpl(["postgres"]),
),
pytest.param(
lambda t: t.geo_linestring.point_n(1),
id='linestring_point_n',
marks=pytest.mark.notimpl(["postgres"]),
),
pytest.param(
lambda t: t.geo_linestring.start_point(),
id='linestring_start_point',
),
pytest.param(
lambda t: t.geo_linestring.x_max(),
id='linestring_x_max',
marks=pytest.mark.notimpl(["postgres"]),
),
pytest.param(
lambda t: t.geo_linestring.x_min(),
id='linestring_x_min',
marks=pytest.mark.notimpl(["postgres"]),
),
pytest.param(
lambda t: t.geo_linestring.y_max(),
id='linestring_y_max',
marks=pytest.mark.notimpl(["postgres"]),
),
pytest.param(
lambda t: t.geo_linestring.y_min(),
id='linestring_y_min',
marks=pytest.mark.notimpl(["postgres"]),
),
pytest.param(lambda t: t.geo_polygon.area(), id='polygon_area'),
pytest.param(
lambda t: t.geo_polygon.perimeter(), id='polygon_perimeter'
),
pytest.param(
lambda t: t.geo_multipolygon.n_points(), id='multipolygon_n_points'
),
pytest.param(
lambda t: t.geo_multipolygon.n_rings(),
id='multipolygon_n_rings',
marks=pytest.mark.notimpl(["postgres"]),
),
# TODO: the mock tests don't support multipoint and multilinestring
# yet, but once they do, add some more tests here.
],
)
def test_geo_ops_smoke(geotable, fn_expr):
"""Smoke tests for geo spatial operations."""
assert fn_expr(geotable).compile() != ''
def test_geo_equals(geotable):
    # Regression test for https://github.com/ibis-project/ibis/pull/2956
expr = geotable.mutate(
[
geotable.geo_point.y().name('Location_Latitude'),
geotable.geo_point.y().name('Latitude'),
]
)
result = str(expr.compile().compile())
assert result == (
'SELECT t0.id, ST_AsEWKB(t0.geo_point) AS geo_point, '
'ST_AsEWKB(t0.geo_linestring) AS geo_linestring, '
'ST_AsEWKB(t0.geo_polygon) AS geo_polygon, '
'ST_AsEWKB(t0.geo_multipolygon) AS geo_multipolygon, '
'ST_Y(t0.geo_point) AS "Location_Latitude", '
'ST_Y(t0.geo_point) AS "Latitude" \n'
'FROM geo AS t0'
)
# simple test using ==
expected = 'SELECT t0.geo_point = t0.geo_point AS tmp \nFROM geo AS t0'
expr = geotable.geo_point == geotable.geo_point
assert str(expr.compile().compile()) == expected
assert expr.execute().all()
# using geo_equals
expected = (
'SELECT ST_Equals(t0.geo_point, t0.geo_point) AS tmp \nFROM geo AS t0'
)
expr = geotable.geo_point.geo_equals(geotable.geo_point)
assert str(expr.compile().compile()) == expected
assert expr.execute().all()
# equals returns a boolean object
assert geotable.geo_point.equals(geotable.geo_point)
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
Empty = []
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Input
Ticker1 = 'UVXY'
Ticker2 = '^VIX'
#Remote Signal
Ticker3 = '^VIX'
#Here we go
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
#Remote Signal
Asset3 = YahooGrabber(Ticker3)
#Match lengths
#Trimmer
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
Asset3 = Asset3[-len(Asset2):]
#Asset2 = Asset2[-600:]
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Prepare the remote controller
Asset3['LogRet'] = np.log(Asset3['Adj Close']/Asset3['Adj Close'].shift(1))
Asset3['LogRet'] = Asset3['LogRet'].fillna(0)
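#A log return is ln(P_t / P_{t-1}); the first row is NaN after the shift, so it
#is filled with 0. Summing log returns and exponentiating later gives the
#cumulative growth multiplier used for the equity curve.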
#Brute Force Optimization
iterations = range(0, 20)
for i in iterations:
Counter = Counter + 1
a = rand.random()
b = 1 - a
c = 0
d = rand.random()
if abs(c) + abs(d) > 1:
continue
e = 1 + rand.random()
f = 1 - rand.random()
g = rand.randint(3,20)
window = int(g)
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['Price/MA'] = Asset3['Adj Close']/Asset3['MA']
Asset3['Signal'] = np.where(Asset3['Price/MA'] >= 1, 1, 0)
Asset3['CumSignal'] = Asset3['Signal'].cumsum()
Asset3['CumSignal'].loc[Asset3['CumSignal'] == 0] = 1
Asset3['CumSignalDiff'] = Asset3['CumSignal'].diff()
Asset3['CumSignalDiff'] = Asset3['CumSignalDiff'].fillna(0)
Asset3['Erase'] = np.where(Asset3['Signal'] == Asset3['Signal'].shift(1),
1, 0)
Asset3['Erase'] = np.where(Asset3['Signal'] == 0,
0, Asset3['Erase'])
Asset3['TriggerSignal'] = Asset3['Signal'] - Asset3['Erase']
Asset3['LongPrice'] = np.where(Asset3['TriggerSignal'] == 1, Asset3['Adj Close'], 0)
Asset3['LongPriceFilled'] = Asset3['LongPrice']
Asset3['LongPriceFilled'] = np.where(Asset3['LongPriceFilled'] == 0,
Asset3['LongPriceFilled'].shift(1), Asset3['LongPriceFilled'])
Asset3['LongPriceFilled'] = Asset3['LongPriceFilled'].fillna(0)
for m in range(0,20):
Asset3['LongPriceFilled'].loc[(Asset3['LongPriceFilled'].cumsum() > 1) &
(Asset3['LongPriceFilled'] == 0) & (Asset3['LongPriceFilled'].shift(-1) == 0
)] = Asset3['LongPriceFilled'].shift(1)
Asset3['LongPriceFilled'].loc[(Asset3['LongPriceFilled'] == 0) &
(Asset3['LongPriceFilled'].cumsum() > 1)] = Asset3['LongPriceFilled'].shift(1)
Asset3['LongPriceFilled'].loc[(Asset3['LongPrice'] != 0) &
(Asset3['LongPriceFilled'].cumsum() > 1)] = 0
Asset3['Regime'] = np.where(Asset3['Signal'].shift(1) == 1, 1, 0)
Asset3['CumRegime'] = Asset3['Regime'].cumsum()
Asset3['CumRegimeDiff'] = Asset3['CumRegime'].diff()
Asset3['Counter'] = range(0,len(Asset3))
Asset3['HighDiff'] = Asset3['High'] / Asset3['LongPriceFilled']
Asset3['LowDiff'] = Asset3['Low'] / Asset3['LongPriceFilled']
Asset3 = Asset3.replace([np.inf, -np.inf], np.nan)
Asset3[['HighDiff', 'LowDiff']] = Asset3[['HighDiff', 'LowDiff']].fillna(1)
Asset3['RegimeHighDiff'] = 1
Asset3['RegimeHighDiff'] = np.where(Asset3['Regime'] == 1, Asset3['HighDiff'], 1)
Asset3['RegimeLowDiff'] = 1
Asset3['RegimeLowDiff'] = np.where(Asset3['Regime'] == 1, Asset3['LowDiff'], 1)
Asset3['StopOut'] = 0
Asset3['StopOut'] = np.where(Asset3['RegimeLowDiff'] < f, (f - 1), 0 )
Asset3['StopOut'] = np.where(Asset3['StopOut'].shift(1) == Asset3['StopOut'],
0, Asset3['StopOut'])
Asset3['GainOut'] = 0
Asset3['GainOut'] = np.where(Asset3['RegimeHighDiff'] > e, (e-1), 0 )
Asset3['GainOut'] = np.where(Asset3['GainOut'].shift(1) == Asset3['GainOut'],
0, Asset3['GainOut'])
Regime = Asset3[['Counter','StopOut','GainOut','CumSignalDiff',
'CumRegimeDiff']].loc[(Asset3['RegimeLowDiff'] != 1)]
Regime['NewCounter'] = range(0, len(Regime))
ToDelete = Regime.loc[(Regime['StopOut'] < 0)]
ToDelete['CounterDiff'] = ToDelete['Counter'].diff(1)
ToDelete['NewCounterDiff'] = ToDelete['NewCounter'].diff(1)
NewDelete = ToDelete.loc[(ToDelete['CounterDiff'] == ToDelete['NewCounterDiff'])]
for y in NewDelete.Counter:
Asset3['StopOut'].loc[Asset3['Counter'] == y] = 0
Asset3['GainOut'].loc[(Asset3['StopOut'] < 0) & (Asset3['GainOut'] < 0)] = 0
ToDelete = Regime.loc[(Regime['GainOut'] > 0)]
ToDelete['CounterDiff'] = ToDelete['Counter'].diff(1)
ToDelete['NewCounterDiff'] = ToDelete['NewCounter'].diff(1)
NewDelete = ToDelete.loc[(ToDelete['CounterDiff'] == ToDelete['NewCounterDiff'])]
for y in NewDelete.Counter:
Asset3['GainOut'].loc[Asset3['Counter'] == y] = 0
Asset3['Stops'] = Asset3['StopOut'] + Asset3['GainOut']
Asset1['Position'] = a
Asset1['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
c,a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = b
Asset2['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
d,b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
Portfolio['LongShort'] = (Portfolio['Asset1Pass'] + Portfolio['Asset2Pass'] +
(Asset3['Stops'] * d))
print(Counter)
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.1):
continue
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .003:
continue
dailyvol = Portfolio['LongShort'].std()
    sharpe = (dailyreturn/dailyvol)
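    #sharpe here is a daily, unannualized Sharpe ratio with a zero risk-free
    #rate; multiplying by sqrt(252) would give a rough annualized figure.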
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(f)
Empty.append(g)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[0] = Emptyseries.values
Dataset[i] = Emptyseries.values
Empty[:] = []
z1 = Dataset.iloc[8]
w1 = np.percentile(z1, 80)
v1 = [] #stores the sharpe/MaxDD values above the 80th percentile (top performers)
DS1W = pd.DataFrame() #stores the full parameter columns for those top performers in this dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[8]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[8]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
print(Dataset[k])
a = Dataset[kfloat][0]
b = Dataset[kfloat][1]
c = Dataset[kfloat][2]
d = Dataset[kfloat][3]
e = Dataset[kfloat][4]
f = Dataset[kfloat][5]
g = Dataset[kfloat][6]
window = int(g)
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['Price/MA'] = Asset3['Adj Close']/Asset3['MA']
Asset3['Signal'] = np.where(Asset3['Price/MA'] >= 1, 1, 0)
Asset3['CumSignal'] = Asset3['Signal'].cumsum()
Asset3['CumSignal'].loc[Asset3['CumSignal'] == 0] = 1
Asset3['CumSignalDiff'] = Asset3['CumSignal'].diff()
Asset3['CumSignalDiff'] = Asset3['CumSignalDiff'].fillna(0)
Asset3['Erase'] = np.where(Asset3['Signal'] == Asset3['Signal'].shift(1),
1, 0)
Asset3['Erase'] = np.where(Asset3['Signal'] == 0,
0, Asset3['Erase'])
Asset3['TriggerSignal'] = Asset3['Signal'] - Asset3['Erase']
Asset3['LongPrice'] = np.where(Asset3['TriggerSignal'] == 1, Asset3['Adj Close'], 0)
Asset3['LongPriceFilled'] = Asset3['LongPrice']
Asset3['LongPriceFilled'] = np.where(Asset3['LongPriceFilled'] == 0,
Asset3['LongPriceFilled'].shift(1), Asset3['LongPriceFilled'])
Asset3['LongPriceFilled'] = Asset3['LongPriceFilled'].fillna(0)
for m in range(0,20):
Asset3['LongPriceFilled'].loc[(Asset3['LongPriceFilled'].cumsum() > 1) &
(Asset3['LongPriceFilled'] == 0) & (Asset3['LongPriceFilled'].shift(-1) == 0
)] = Asset3['LongPriceFilled'].shift(1)
Asset3['LongPriceFilled'].loc[(Asset3['LongPriceFilled'] == 0) &
(Asset3['LongPriceFilled'].cumsum() > 1)] = Asset3['LongPriceFilled'].shift(1)
Asset3['LongPriceFilled'].loc[(Asset3['LongPrice'] != 0) &
(Asset3['LongPriceFilled'].cumsum() > 1)] = 0
Asset3['Regime'] = np.where(Asset3['Signal'].shift(1) == 1, 1, 0)
Asset3['CumRegime'] = Asset3['Regime'].cumsum()
Asset3['CumRegimeDiff'] = Asset3['CumRegime'].diff()
Asset3['Counter'] = range(0,len(Asset3))
Asset3['HighDiff'] = Asset3['High'] / Asset3['LongPriceFilled']
Asset3['LowDiff'] = Asset3['Low'] / Asset3['LongPriceFilled']
Asset3 = Asset3.replace([np.inf, -np.inf], np.nan)
Asset3[['HighDiff', 'LowDiff']] = Asset3[['HighDiff', 'LowDiff']].fillna(1)
Asset3['RegimeHighDiff'] = 1
Asset3['RegimeHighDiff'] = np.where(Asset3['Regime'] == 1, Asset3['HighDiff'], 1)
Asset3['RegimeLowDiff'] = 1
Asset3['RegimeLowDiff'] = np.where(Asset3['Regime'] == 1, Asset3['LowDiff'], 1)
Asset3['StopOut'] = 0
Asset3['StopOut'] = np.where(Asset3['RegimeLowDiff'] < f, (f - 1), 0 )
Asset3['StopOut'] = np.where(Asset3['StopOut'].shift(1) == Asset3['StopOut'],
0, Asset3['StopOut'])
Asset3['GainOut'] = 0
Asset3['GainOut'] = np.where(Asset3['RegimeHighDiff'] > e, (e-1), 0 )
Asset3['GainOut'] = np.where(Asset3['GainOut'].shift(1) == Asset3['GainOut'],
0, Asset3['GainOut'])
Regime = Asset3[['Counter','StopOut','GainOut','CumSignalDiff',
'CumRegimeDiff']].loc[(Asset3['RegimeLowDiff'] != 1)]
Regime['NewCounter'] = range(0, len(Regime))
ToDelete = Regime.loc[(Regime['StopOut'] < 0)]
ToDelete['CounterDiff'] = ToDelete['Counter'].diff(1)
ToDelete['NewCounterDiff'] = ToDelete['NewCounter'].diff(1)
NewDelete = ToDelete.loc[(ToDelete['CounterDiff'] == ToDelete['NewCounterDiff'])]
for y in NewDelete.Counter:
Asset3['StopOut'].loc[Asset3['Counter'] == y] = 0
Asset3['GainOut'].loc[(Asset3['StopOut'] < 0) & (Asset3['GainOut'] < 0)] = 0
ToDelete = Regime.loc[(Regime['GainOut'] > 0)]
ToDelete['CounterDiff'] = ToDelete['Counter'].diff(1)
ToDelete['NewCounterDiff'] = ToDelete['NewCounter'].diff(1)
NewDelete = ToDelete.loc[(ToDelete['CounterDiff'] == ToDelete['NewCounterDiff'])]
for y in NewDelete.Counter:
Asset3['GainOut'].loc[Asset3['Counter'] == y] = 0
Asset3['Stops'] = Asset3['StopOut'] + Asset3['GainOut']
Asset1['Position'] = a
Asset1['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
c,a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = b
Asset2['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
d,b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
Portfolio['LongShort'] = (Portfolio['Asset1Pass'] + Portfolio['Asset2Pass'] +
(Asset3['Stops'] * d))
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe = (dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
Portfolio['Multiplier'].plot()
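#Illustrative helper (not part of the original script): the equity-curve, drawdown
#and Sharpe arithmetic used above, factored into one place. Assumes the script's
#existing pandas/numpy imports (pd, np).
def summary_stats(logret):
    """Return (sharpe, max_drawdown) for a Series of per-period log returns."""
    multiplier = logret.cumsum().apply(np.exp)          #equity curve
    drawdown = 1 - multiplier.div(multiplier.cummax())  #fractional drop from running peak
    max_dd = drawdown.max()
    sharpe = logret.mean() / logret.std()               #per-period Sharpe, no annualization
    return sharpe, max_dd
#Example: sharpe, max_dd = summary_stats(Portfolio['LongShort'])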
|
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import, with_statement
import re
import six
from behave import model, i18n
from behave.textutil import text as _text
DEFAULT_LANGUAGE = "en"
def parse_file(filename, language=None):
with open(filename, "rb") as f:
# file encoding is assumed to be utf8. Oh, yes.
data = f.read().decode("utf8")
return parse_feature(data, language, filename)
def parse_feature(data, language=None, filename=None):
# ALL data operated on by the parser MUST be unicode
assert isinstance(data, six.text_type)
try:
result = Parser(language).parse(data, filename)
except ParserError as e:
e.filename = filename
raise
return result
def parse_steps(text, language=None, filename=None):
"""
    Parse a number of steps from multi-line text, as they would appear in a scenario.
    The scenario line itself (keyword and title) is not part of the text.
:param text: Multi-line text with steps to parse (as unicode).
:param language: i18n language identifier (optional).
:param filename: Filename (optional).
:return: Parsed steps (if successful).
"""
assert isinstance(text, six.text_type)
try:
result = Parser(language, variant="steps").parse_steps(text, filename)
except ParserError as e:
e.filename = filename
raise
return result
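# -- ILLUSTRATIVE SKETCH (not part of behave): how parse_steps() might be used.
# Assumes behave's i18n data is importable; the step text below is made up.
def _example_parse_steps():
    text = u'Given a user named "Alice"\nWhen she logs in\nThen she sees her dashboard'
    steps = parse_steps(text)
    return [(step.step_type, step.name) for step in steps]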
def parse_tags(text):
"""
Parse tags from text (one or more lines, as string).
:param text: Multi-line text with tags to parse (as unicode).
:return: List of tags (if successful).
"""
# assert isinstance(text, unicode)
if not text:
return []
return Parser(variant="tags").parse_tags(text)
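# -- ILLUSTRATIVE SKETCH (not part of behave): parse_tags() splits a tag line
# into Tag objects (string subclasses) and skips a trailing comment.
def _example_parse_tags():
    tags = parse_tags(u"@wip @slow   # trailing comment is skipped")
    return [six.text_type(tag) for tag in tags]   # -> [u"wip", u"slow"]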
class ParserError(Exception):
def __init__(self, message, line, filename=None, line_text=None):
if line:
message += " at line %d" % line
if line_text:
message += ': "%s"' % line_text.strip()
super(ParserError, self).__init__(message)
self.line = line
self.line_text = line_text
self.filename = filename
def __str__(self):
arg0 = _text(self.args[0])
if self.filename:
filename = _text(self.filename)
return u'Failed to parse "%s": %s' % (filename, arg0)
else:
return u"Failed to parse <string>: %s" % arg0
if six.PY2:
__unicode__ = __str__
__str__ = lambda self: self.__unicode__().encode("utf-8")
class Parser(object):
"""Feature file parser for behave."""
# pylint: disable=too-many-instance-attributes
def __init__(self, language=None, variant=None):
if not variant:
variant = "feature"
self.language = language
self.variant = variant
self.state = "init"
self.line = 0
self.last_step = None
self.multiline_start = None
self.multiline_leading = None
self.multiline_terminator = None
self.filename = None
self.feature = None
self.statement = None
self.tags = []
self.lines = []
self.table = None
self.examples = None
self.keywords = None
if self.language:
self.keywords = i18n.languages[self.language]
# NOT-NEEDED: self.reset()
def reset(self):
# This can probably go away.
if self.language:
self.keywords = i18n.languages[self.language]
else:
self.keywords = None
self.state = "init"
self.line = 0
self.last_step = None
self.multiline_start = None
self.multiline_leading = None
self.multiline_terminator = None
self.filename = None
self.feature = None
self.statement = None
self.tags = []
self.lines = []
self.table = None
self.examples = None
def parse(self, data, filename=None):
self.reset()
self.filename = filename
for line in data.split("\n"):
self.line += 1
if not line.strip() and self.state != "multiline":
# -- SKIP EMPTY LINES, except in multiline string args.
continue
self.action(line)
if self.table:
self.action_table("")
feature = self.feature
if feature:
feature.parser = self
self.reset()
return feature
def _build_feature(self, keyword, line):
name = line[len(keyword) + 1:].strip()
language = self.language or DEFAULT_LANGUAGE
self.feature = model.Feature(self.filename, self.line, keyword,
name, tags=self.tags, language=language)
# -- RESET STATE:
self.tags = []
def _build_background_statement(self, keyword, line):
if self.tags:
msg = "Background supports no tags: @%s" % (" @".join(self.tags))
raise ParserError(msg, self.line, self.filename, line)
name = line[len(keyword) + 1:].strip()
statement = model.Background(self.filename, self.line, keyword, name)
self.statement = statement
self.feature.background = self.statement
def _build_scenario_statement(self, keyword, line):
name = line[len(keyword) + 1:].strip()
self.statement = model.Scenario(self.filename, self.line,
keyword, name, tags=self.tags)
self.feature.add_scenario(self.statement)
# -- RESET STATE:
self.tags = []
def _build_scenario_outline_statement(self, keyword, line):
# pylint: disable=C0103
# C0103 Invalid name "build_scenario_outline_statement", too long.
name = line[len(keyword) + 1:].strip()
self.statement = model.ScenarioOutline(self.filename, self.line,
keyword, name, tags=self.tags)
self.feature.add_scenario(self.statement)
# -- RESET STATE:
self.tags = []
def _build_examples(self, keyword, line):
if not isinstance(self.statement, model.ScenarioOutline):
message = "Examples must only appear inside scenario outline"
raise ParserError(message, self.line, self.filename, line)
name = line[len(keyword) + 1:].strip()
self.examples = model.Examples(self.filename, self.line,
keyword, name, tags=self.tags)
# pylint: disable=E1103
# E1103 Instance of "Background" has no "examples" member
# (but some types could not be inferred).
self.statement.examples.append(self.examples)
# -- RESET STATE:
self.tags = []
def diagnose_feature_usage_error(self):
if self.feature:
return "Multiple features in one file are not supported."
else:
return "Feature should not be used here."
def diagnose_background_usage_error(self):
if self.feature and self.feature.scenarios:
return "Background may not occur after Scenario/ScenarioOutline."
elif self.tags:
return "Background does not support tags."
else:
return "Background should not be used here."
def diagnose_scenario_usage_error(self):
if not self.feature:
return "Scenario may not occur before Feature."
else:
return "Scenario should not be used here."
def diagnose_scenario_outline_usage_error(self): # pylint: disable=invalid-name
if not self.feature:
return "ScenarioOutline may not occur before Feature."
else:
return "ScenarioOutline should not be used here."
def ask_parse_failure_oracle(self, line):
"""
Try to find the failure reason when a parse failure occurs:
Oracle, oracle, ... what went wrong?
Zzzz
        :param line: Text line where parse failure occurred (as string).
:return: Reason (as string) if an explanation is found.
Otherwise, empty string or None.
"""
feature_kwd = self.match_keyword("feature", line)
if feature_kwd:
return self.diagnose_feature_usage_error()
background_kwd = self.match_keyword("background", line)
if background_kwd:
return self.diagnose_background_usage_error()
scenario_kwd = self.match_keyword("scenario", line)
if scenario_kwd:
return self.diagnose_scenario_usage_error()
scenario_outline_kwd = self.match_keyword("scenario_outline", line)
if scenario_outline_kwd:
return self.diagnose_scenario_outline_usage_error()
# -- OTHERWISE:
if self.variant == "feature" and not self.feature:
return "No feature found."
        # -- FINALLY: No clue what went wrong.
return None
def action(self, line):
if line.strip().startswith("#") and self.state != "multiline":
if self.state != "init" or self.tags or self.variant != "feature":
return
# -- DETECT: language comment (at begin of feature file; state=init)
line = line.strip()[1:].strip()
if line.lstrip().lower().startswith("language:"):
language = line[9:].strip()
self.language = language
self.keywords = i18n.languages[language]
return
func = getattr(self, "action_" + self.state, None)
if func is None:
line = line.strip()
msg = "Parser in unknown state %s;" % self.state
raise ParserError(msg, self.line, self.filename, line)
if not func(line):
line = line.strip()
msg = u'\nParser failure in state %s, at line %d: "%s"\n' % \
(self.state, self.line, line)
reason = self.ask_parse_failure_oracle(line)
if reason:
msg += u"REASON: %s" % reason
raise ParserError(msg, None, self.filename)
def action_init(self, line):
line = line.strip()
if line.startswith("@"):
self.tags.extend(self.parse_tags(line))
return True
feature_kwd = self.match_keyword("feature", line)
if feature_kwd:
self._build_feature(feature_kwd, line)
self.state = "feature"
return True
return False
# def subaction_detect_next_scenario(self, line):
# if line.startswith("@"):
# self.tags.extend(self.parse_tags(line))
# self.state = "next_scenario"
# return True
#
# scenario_kwd = self.match_keyword("scenario", line)
# if scenario_kwd:
# self._build_scenario_statement(scenario_kwd, line)
# self.state = "scenario"
# return True
#
# scenario_outline_kwd = self.match_keyword("scenario_outline", line)
# if scenario_outline_kwd:
# self._build_scenario_outline_statement(scenario_outline_kwd, line)
# self.state = "scenario"
# return True
#
# # -- OTHERWISE:
# return False
def subaction_detect_taggable_statement(self, line):
"""Subaction is used after first tag line is detected.
Additional lines with tags or taggable_statement follow.
Taggable statements (excluding Feature) are:
* Scenario
* ScenarioOutline
* Examples (within ScenarioOutline)
"""
if line.startswith("@"):
self.tags.extend(self.parse_tags(line))
self.state = "taggable_statement"
return True
scenario_kwd = self.match_keyword("scenario", line)
if scenario_kwd:
self._build_scenario_statement(scenario_kwd, line)
self.state = "scenario"
return True
scenario_outline_kwd = self.match_keyword("scenario_outline", line)
if scenario_outline_kwd:
self._build_scenario_outline_statement(scenario_outline_kwd, line)
self.state = "scenario"
return True
examples_kwd = self.match_keyword("examples", line)
if examples_kwd:
self._build_examples(examples_kwd, line)
self.state = "table"
return True
# -- OTHERWISE:
return False
def action_feature(self, line):
line = line.strip()
# OLD: if self.subaction_detect_next_scenario(line):
if self.subaction_detect_taggable_statement(line):
# -- DETECTED: Next Scenario, ScenarioOutline (or tags)
return True
background_kwd = self.match_keyword("background", line)
if background_kwd:
self._build_background_statement(background_kwd, line)
self.state = "steps"
return True
self.feature.description.append(line)
return True
# def action_next_scenario(self, line):
# """
# Entered after first tag for Scenario/ScenarioOutline is detected.
# """
# line = line.strip()
# if self.subaction_detect_next_scenario(line):
# return True
#
# return False
def action_taggable_statement(self, line):
"""Entered after first tag for Scenario/ScenarioOutline or
Examples is detected (= taggable_statement except Feature).
Taggable statements (excluding Feature) are:
* Scenario
* ScenarioOutline
* Examples (within ScenarioOutline)
"""
line = line.strip()
if self.subaction_detect_taggable_statement(line):
# -- DETECTED: Next Scenario, ScenarioOutline or Examples (or tags)
return True
return False
def action_scenario(self, line):
"""
Entered when Scenario/ScenarioOutline keyword/line is detected.
Hunts/collects scenario description lines.
DETECT:
* first step of Scenario/ScenarioOutline
* next Scenario/ScenarioOutline.
"""
line = line.strip()
step = self.parse_step(line)
if step:
# -- FIRST STEP DETECTED: End collection of scenario descriptions.
self.state = "steps"
self.statement.steps.append(step)
return True
# -- CASE: Detect next Scenario/ScenarioOutline
# * Scenario with scenario description, but without steps.
# * Title-only scenario without scenario description and steps.
# OLD: if self.subaction_detect_next_scenario(line):
if self.subaction_detect_taggable_statement(line):
# -- DETECTED: Next Scenario, ScenarioOutline (or tags)
return True
# -- OTHERWISE: Add scenario description line.
# pylint: disable=E1103
# E1103 Instance of "Background" has no "description" member...
self.statement.description.append(line)
return True
def action_steps(self, line):
"""
Entered when first step is detected (or nested step parsing).
Subcases:
* step
* multi-line text (doc-string), following a step
* table, following a step
* examples for a ScenarioOutline, after ScenarioOutline steps
DETECT:
* next Scenario/ScenarioOutline or Examples (in a ScenarioOutline)
"""
# pylint: disable=R0911
# R0911 Too many return statements (8/6)
stripped = line.lstrip()
if stripped.startswith('"""') or stripped.startswith("'''"):
self.state = "multiline"
self.multiline_start = self.line
self.multiline_terminator = stripped[:3]
self.multiline_leading = line.index(stripped[0])
return True
line = line.strip()
step = self.parse_step(line)
if step:
self.statement.steps.append(step)
return True
if self.subaction_detect_taggable_statement(line):
# -- DETECTED: Next Scenario, ScenarioOutline or Examples (or tags)
return True
if line.startswith("|"):
assert self.statement.steps, "TABLE-START without step detected."
self.state = "table"
return self.action_table(line)
return False
def action_multiline(self, line):
if line.strip().startswith(self.multiline_terminator):
step = self.statement.steps[-1]
step.text = model.Text(u"\n".join(self.lines), u"text/plain",
self.multiline_start)
if step.name.endswith(":"):
step.name = step.name[:-1]
self.lines = []
self.multiline_terminator = None
self.state = "steps"
return True
self.lines.append(line[self.multiline_leading:])
# -- BETTER DIAGNOSTICS: May remove non-whitespace in execute_steps()
removed_line_prefix = line[:self.multiline_leading]
if removed_line_prefix.strip():
message = "BAD-INDENT in multiline text: "
message += "Line '%s' would strip leading '%s'" % \
(line, removed_line_prefix)
raise ParserError(message, self.line, self.filename)
return True
def action_table(self, line):
line = line.strip()
if not line.startswith("|"):
if self.examples:
self.examples.table = self.table
self.examples = None
else:
step = self.statement.steps[-1]
step.table = self.table
if step.name.endswith(":"):
step.name = step.name[:-1]
self.table = None
self.state = "steps"
return self.action_steps(line)
# -- SUPPORT: Escaped-pipe(s) in Gherkin cell values.
        # Search for pipe(s) that are not preceded by an escape char.
cells = [cell.replace("\\|", "|").strip()
for cell in re.split(r"(?<!\\)\|", line[1:-1])]
if self.table is None:
self.table = model.Table(cells, self.line)
else:
if len(cells) != len(self.table.headings):
raise ParserError("Malformed table", self.line)
self.table.add_row(cells, self.line)
return True
def match_keyword(self, keyword, line):
if not self.keywords:
self.language = DEFAULT_LANGUAGE
self.keywords = i18n.languages[DEFAULT_LANGUAGE]
for alias in self.keywords[keyword]:
if line.startswith(alias + ":"):
return alias
return False
def parse_tags(self, line):
"""
Parse a line with one or more tags:
* A tag starts with the AT sign.
* A tag consists of one word without whitespace chars.
* Multiple tags are separated with whitespace chars
* End-of-line comment is stripped.
:param line: Line with one/more tags to process.
        :raise ParserError: If a syntax error is detected.
"""
assert line.startswith("@")
tags = []
for word in line.split():
if word.startswith("@"):
tags.append(model.Tag(word[1:], self.line))
elif word.startswith("#"):
break # -- COMMENT: Skip rest of line.
else:
# -- BAD-TAG: Abort here.
raise ParserError("tag: %s (line: %s)" % (word, line),
self.line, self.filename)
return tags
def parse_step(self, line):
for step_type in ("given", "when", "then", "and", "but"):
for kw in self.keywords[step_type]:
if kw.endswith("<"):
whitespace = ""
kw = kw[:-1]
else:
whitespace = " "
# try to match the keyword; also attempt a purely lowercase
# match if that'll work
if not (line.startswith(kw + whitespace)
or line.lower().startswith(kw.lower() + whitespace)):
continue
name = line[len(kw):].strip()
if step_type in ("and", "but"):
if not self.last_step:
raise ParserError("No previous step", self.line)
step_type = self.last_step
else:
self.last_step = step_type
step = model.Step(self.filename, self.line, kw, step_type,
name)
return step
return None
def parse_steps(self, text, filename=None):
"""
        Parse support for execute_steps() functionality that supports steps with:
* multiline text
* table
:param text: Text that contains 0..* steps
:return: List of parsed steps (as model.Step objects).
"""
assert isinstance(text, six.text_type)
if not self.language:
self.language = DEFAULT_LANGUAGE
self.reset()
self.filename = filename
self.statement = model.Scenario(filename, 0, u"scenario", u"")
self.state = "steps"
for line in text.split("\n"):
self.line += 1
if not line.strip() and self.state != "multiline":
# -- SKIP EMPTY LINES, except in multiline string args.
continue
self.action(line)
# -- FINALLY:
if self.table:
self.action_table("")
steps = self.statement.steps
return steps
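# -- ILLUSTRATIVE SKETCH (not part of behave): parsing a minimal feature from a
# string and inspecting the resulting model objects.
def _example_parse_feature():
    text = u"\n".join([
        u"Feature: Example",
        u"  Scenario: Adding numbers",
        u"    Given the numbers 1 and 2",
        u"    Then the sum is 3",
    ])
    feature = parse_feature(text)
    scenario = feature.scenarios[0]
    return feature.name, scenario.name, [step.name for step in scenario.steps]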
|
|
"""A tasklet decorator.
Tasklets are a way to write concurrently running functions without
threads; tasklets are executed by an event loop and can suspend
themselves blocking for I/O or some other operation using a yield
statement. The notion of a blocking operation is abstracted into the
Future class, but a tasklet may also yield an RPC in order to wait for
that RPC to complete.
The @tasklet decorator wraps a generator function so that when it is
called, a Future is returned while the generator is executed by the
event loop. Within the tasklet, any yield of a Future waits for and
returns the Future's result. For example:
@tasklet
def foo():
a = yield <some Future>
b = yield <another Future>
raise Return(a + b)
def main():
f = foo()
x = f.get_result()
print x
Note that blocking until the Future's result is available using
get_result() is somewhat inefficient (though not vastly -- it is not
busy-waiting). In most cases such code should be rewritten as a tasklet
instead:
@tasklet
def main_tasklet():
f = foo()
x = yield f
print x
Calling a tasklet automatically schedules it with the event loop:
def main():
f = main_tasklet()
eventloop.run() # Run until no tasklets left to do
f.done() # Returns True
As a special feature, if the wrapped function is not a generator
function, its return value is returned via the Future. This makes the
following two equivalent:
@tasklet
def foo():
return 42
@tasklet
def foo():
if False: yield # The presence of 'yield' makes foo a generator
raise Return(42) # Or, after PEP 380, return 42
This feature (inspired by Monocle) is handy in case you are
implementing an interface that expects tasklets but you have no need to
suspend -- there's no need to insert a dummy yield in order to make
the tasklet into a generator.
"""
import collections
import logging
import os
import sys
import types
from .google_imports import apiproxy_stub_map
from .google_imports import apiproxy_rpc
from .google_imports import datastore
from .google_imports import datastore_errors
from .google_imports import datastore_rpc
from .google_imports import namespace_manager
from . import eventloop
from . import utils
__all__ = ['Return', 'tasklet', 'synctasklet', 'toplevel', 'sleep',
'add_flow_exception', 'get_return_value',
'get_context', 'set_context',
'make_default_context', 'make_context',
'Future', 'MultiFuture', 'QueueFuture', 'SerialQueueFuture',
'ReducingFuture',
]
_logging_debug = utils.logging_debug
def _is_generator(obj):
"""Helper to test for a generator object.
NOTE: This tests for the (iterable) object returned by calling a
generator function, not for a generator function.
"""
return isinstance(obj, types.GeneratorType)
class _State(utils.threading_local):
"""Hold thread-local state."""
current_context = None
def __init__(self):
super(_State, self).__init__()
self.all_pending = set()
def add_pending(self, fut):
_logging_debug('all_pending: add %s', fut)
self.all_pending.add(fut)
def remove_pending(self, fut, status='success'):
if fut in self.all_pending:
_logging_debug('all_pending: %s: remove %s', status, fut)
self.all_pending.remove(fut)
else:
_logging_debug('all_pending: %s: not found %s', status, fut)
def clear_all_pending(self):
if self.all_pending:
logging.info('all_pending: clear %s', self.all_pending)
self.all_pending.clear()
else:
_logging_debug('all_pending: clear no-op')
def dump_all_pending(self, verbose=False):
pending = []
for fut in self.all_pending:
if verbose:
line = fut.dump() + ('\n' + '-'*40)
else:
line = fut.dump_stack()
pending.append(line)
return '\n'.join(pending)
_state = _State()
# Tuple of exceptions that should not be logged (except in debug mode).
_flow_exceptions = ()
def add_flow_exception(exc):
"""Add an exception that should not be logged.
The argument must be a subclass of Exception.
"""
global _flow_exceptions
if not isinstance(exc, type) or not issubclass(exc, Exception):
raise TypeError('Expected an Exception subclass, got %r' % (exc,))
as_set = set(_flow_exceptions)
as_set.add(exc)
_flow_exceptions = tuple(as_set)
def _init_flow_exceptions():
"""Internal helper to initialize _flow_exceptions.
This automatically adds webob.exc.HTTPException, if it can be imported.
"""
global _flow_exceptions
_flow_exceptions = ()
add_flow_exception(datastore_errors.Rollback)
try:
from webob import exc
except ImportError:
pass
else:
add_flow_exception(exc.HTTPException)
_init_flow_exceptions()
class Future(object):
"""A Future has 0 or more callbacks.
The callbacks will be called when the result is ready.
NOTE: This is somewhat inspired but not conformant to the Future interface
defined by PEP 3148. It is also inspired (and tries to be somewhat
compatible with) the App Engine specific UserRPC and MultiRpc classes.
"""
# TODO: Trim the API; there are too many ways to do the same thing.
# TODO: Compare to Monocle's much simpler Callback class.
# Constants for state property.
IDLE = apiproxy_rpc.RPC.IDLE # Not yet running (unused)
RUNNING = apiproxy_rpc.RPC.RUNNING # Not yet completed.
FINISHING = apiproxy_rpc.RPC.FINISHING # Completed.
# XXX Add docstrings to all methods. Separate PEP 3148 API from RPC API.
_geninfo = None # Extra info about suspended generator.
def __init__(self, info=None):
# TODO: Make done a method, to match PEP 3148?
__ndb_debug__ = 'SKIP' # Hide this frame from self._where
self._info = info # Info from the caller about this Future's purpose.
self._where = utils.get_stack()
self._context = None
self._reset()
def _reset(self):
self._done = False
self._result = None
self._exception = None
self._traceback = None
self._callbacks = []
self._immediate_callbacks = []
_state.add_pending(self)
self._next = None # Links suspended Futures together in a stack.
# TODO: Add a __del__ that complains if neither get_exception() nor
# check_success() was ever called? What if it's not even done?
def __repr__(self):
if self._done:
if self._exception is not None:
state = 'exception %s: %s' % (self._exception.__class__.__name__,
self._exception)
else:
state = 'result %r' % (self._result,)
else:
state = 'pending'
line = '?'
for line in self._where:
if 'tasklets.py' not in line:
break
if self._info:
line += ' for %s' % self._info
if self._geninfo:
line += ' %s' % self._geninfo
return '<%s %x created by %s; %s>' % (
self.__class__.__name__, id(self), line, state)
def dump(self):
return '%s\nCreated by %s' % (self.dump_stack(),
'\n called by '.join(self._where))
def dump_stack(self):
lines = []
fut = self
while fut is not None:
lines.append(str(fut))
fut = fut._next
return '\n waiting for '.join(lines)
def add_callback(self, callback, *args, **kwds):
if self._done:
eventloop.queue_call(None, callback, *args, **kwds)
else:
self._callbacks.append((callback, args, kwds))
def add_immediate_callback(self, callback, *args, **kwds):
if self._done:
callback(*args, **kwds)
else:
self._immediate_callbacks.append((callback, args, kwds))
def set_result(self, result):
if self._done:
raise RuntimeError('Result cannot be set twice.')
self._result = result
self._done = True
_state.remove_pending(self)
for callback, args, kwds in self._immediate_callbacks:
callback(*args, **kwds)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def set_exception(self, exc, tb=None):
if not isinstance(exc, BaseException):
raise TypeError('exc must be an Exception; received %r' % exc)
if self._done:
raise RuntimeError('Exception cannot be set twice.')
self._exception = exc
self._traceback = tb
self._done = True
_state.remove_pending(self, status='fail')
for callback, args, kwds in self._immediate_callbacks:
callback(*args, **kwds)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def done(self):
return self._done
@property
def state(self):
# This is just for compatibility with UserRPC and MultiRpc.
# A Future is considered running as soon as it is created.
if self._done:
return self.FINISHING
else:
return self.RUNNING
def wait(self):
if self._done:
return
ev = eventloop.get_event_loop()
while not self._done:
if not ev.run1():
logging.info('Deadlock in %s', self)
logging.info('All pending Futures:\n%s', _state.dump_all_pending())
_logging_debug('All pending Futures (verbose):\n%s',
_state.dump_all_pending(verbose=True))
self.set_exception(RuntimeError('Deadlock waiting for %s' % self))
def get_exception(self):
self.wait()
return self._exception
def get_traceback(self):
self.wait()
return self._traceback
def check_success(self):
self.wait()
if self._exception is not None:
raise self._exception.__class__, self._exception, self._traceback
def get_result(self):
self.check_success()
return self._result
# TODO: Have a tasklet that does this
@classmethod
def wait_any(cls, futures):
# TODO: Flatten MultiRpcs.
waiting_on = set(futures)
ev = eventloop.get_event_loop()
while waiting_on:
for f in waiting_on:
if f.state == cls.FINISHING:
return f
ev.run1()
return None
# TODO: Have a tasklet that does this
@classmethod
def wait_all(cls, futures):
# TODO: Flatten MultiRpcs.
waiting_on = set(futures)
ev = eventloop.get_event_loop()
while waiting_on:
waiting_on = set(f for f in waiting_on if f.state == cls.RUNNING)
ev.run1()
def _help_tasklet_along(self, ns, ds_conn, gen, val=None, exc=None, tb=None):
# XXX Docstring
info = utils.gen_info(gen)
__ndb_debug__ = info
try:
save_context = get_context()
save_namespace = namespace_manager.get_namespace()
save_ds_connection = datastore._GetConnection()
try:
set_context(self._context)
if ns != save_namespace:
namespace_manager.set_namespace(ns)
if ds_conn is not save_ds_connection:
datastore._SetConnection(ds_conn)
if exc is not None:
_logging_debug('Throwing %s(%s) into %s',
exc.__class__.__name__, exc, info)
value = gen.throw(exc.__class__, exc, tb)
else:
_logging_debug('Sending %r to %s', val, info)
value = gen.send(val)
self._context = get_context()
finally:
ns = namespace_manager.get_namespace()
ds_conn = datastore._GetConnection()
set_context(save_context)
if save_namespace != ns:
namespace_manager.set_namespace(save_namespace)
if save_ds_connection is not ds_conn:
datastore._SetConnection(save_ds_connection)
except StopIteration, err:
result = get_return_value(err)
_logging_debug('%s returned %r', info, result)
self.set_result(result)
return
except GeneratorExit:
# In Python 2.5, this derives from Exception, but we don't want
# to handle it like other Exception instances. So we catch and
# re-raise it immediately. See issue 127. http://goo.gl/2p5Pn
# TODO: Remove when Python 2.5 is no longer supported.
raise
except Exception, err:
_, _, tb = sys.exc_info()
if isinstance(err, _flow_exceptions):
# Flow exceptions aren't logged except in "heavy debug" mode,
# and then only at DEBUG level, without a traceback.
_logging_debug('%s raised %s(%s)',
info, err.__class__.__name__, err)
elif utils.DEBUG and logging.getLogger().level < logging.DEBUG:
# In "heavy debug" mode, log a warning with traceback.
# (This is the same condition as used in utils.logging_debug().)
logging.warning('%s raised %s(%s)',
info, err.__class__.__name__, err, exc_info=True)
else:
# Otherwise, log a warning without a traceback.
logging.warning('%s raised %s(%s)', info, err.__class__.__name__, err)
self.set_exception(err, tb)
return
else:
_logging_debug('%s yielded %r', info, value)
if isinstance(value, (apiproxy_stub_map.UserRPC,
datastore_rpc.MultiRpc)):
# TODO: Tail recursion if the RPC is already complete.
eventloop.queue_rpc(value, self._on_rpc_completion,
value, ns, ds_conn, gen)
return
if isinstance(value, Future):
# TODO: Tail recursion if the Future is already done.
if self._next:
raise RuntimeError('Future has already completed yet next is %r' %
self._next)
self._next = value
self._geninfo = utils.gen_info(gen)
_logging_debug('%s is now blocked waiting for %s', self, value)
value.add_callback(self._on_future_completion, value, ns, ds_conn, gen)
return
if isinstance(value, (tuple, list)):
# Arrange for yield to return a list of results (not Futures).
info = 'multi-yield from %s' % utils.gen_info(gen)
mfut = MultiFuture(info)
try:
for subfuture in value:
mfut.add_dependent(subfuture)
mfut.complete()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
mfut.set_exception(err, tb)
mfut.add_callback(self._on_future_completion, mfut, ns, ds_conn, gen)
return
if _is_generator(value):
# TODO: emulate PEP 380 here?
raise NotImplementedError('Cannot defer to another generator.')
raise RuntimeError('A tasklet should not yield a plain value: '
'%.200s yielded %.200r' % (info, value))
def _on_rpc_completion(self, rpc, ns, ds_conn, gen):
try:
result = rpc.get_result()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self._help_tasklet_along(ns, ds_conn, gen, exc=err, tb=tb)
else:
self._help_tasklet_along(ns, ds_conn, gen, result)
def _on_future_completion(self, future, ns, ds_conn, gen):
if self._next is future:
self._next = None
self._geninfo = None
_logging_debug('%s is no longer blocked waiting for %s', self, future)
exc = future.get_exception()
if exc is not None:
self._help_tasklet_along(ns, ds_conn, gen,
exc=exc, tb=future.get_traceback())
else:
val = future.get_result() # This won't raise an exception.
self._help_tasklet_along(ns, ds_conn, gen, val)
def sleep(dt):
"""Public function to sleep some time.
Example:
yield tasklets.sleep(0.5) # Sleep for half a sec.
"""
fut = Future('sleep(%.3f)' % dt)
eventloop.queue_call(dt, fut.set_result, None)
return fut
class MultiFuture(Future):
"""A Future that depends on multiple other Futures.
This is used internally by 'v1, v2, ... = yield f1, f2, ...'; the
semantics (e.g. error handling) are constrained by that use case.
The protocol from the caller's POV is:
mf = MultiFuture()
mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
.
. (More mf.add_dependent() and/or mf.putq() calls)
.
mf.complete() # No more dependents will be added.
.
. (Time passes)
.
results = mf.get_result()
Now, results is a list of results from all dependent Futures in
the order in which they were added.
It is legal to add the same dependent multiple times.
Callbacks can be added at any point.
From a dependent Future POV, there's nothing to be done: a callback
is automatically added to each dependent Future which will signal
its completion to the MultiFuture.
Error handling: if any dependent future raises an error, it is
propagated to mf. To force an early error, you can call
mf.set_exception() instead of mf.complete(). After this you can't
call mf.add_dependent() or mf.putq() any more.
"""
def __init__(self, info=None):
__ndb_debug__ = 'SKIP' # Hide this frame from self._where
self._full = False
self._dependents = set()
self._results = []
super(MultiFuture, self).__init__(info=info)
def __repr__(self):
# TODO: This may be invoked before __init__() returns,
# from Future.__init__(). Beware.
line = super(MultiFuture, self).__repr__()
lines = [line]
for fut in self._results:
lines.append(fut.dump_stack().replace('\n', '\n '))
return '\n waiting for '.join(lines)
# TODO: Maybe rename this method, since completion of a Future/RPC
# already means something else. But to what?
def complete(self):
if self._full:
raise RuntimeError('MultiFuture cannot complete twice.')
self._full = True
if not self._dependents:
self._finish()
# TODO: Maybe don't overload set_exception() with this?
def set_exception(self, exc, tb=None):
self._full = True
super(MultiFuture, self).set_exception(exc, tb)
def _finish(self):
if not self._full:
raise RuntimeError('MultiFuture cannot finish until completed.')
if self._dependents:
raise RuntimeError('MultiFuture cannot finish whilst waiting for '
'dependents %r' % self._dependents)
if self._done:
raise RuntimeError('MultiFuture done before finishing.')
try:
result = [r.get_result() for r in self._results]
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
else:
self.set_result(result)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if isinstance(fut, list):
mfut = MultiFuture()
map(mfut.add_dependent, fut)
mfut.complete()
fut = mfut
elif not isinstance(fut, Future):
raise TypeError('Expected Future, received %s: %r' % (type(fut), fut))
if self._full:
raise RuntimeError('MultiFuture cannot add a dependent once complete.')
self._results.append(fut)
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
self._dependents.remove(fut)
if self._full and not self._dependents and not self._done:
self._finish()
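# Illustrative sketch (not part of ndb): gathering results from several Futures
# with MultiFuture, mirroring the 'v1, v2 = yield f1, f2' protocol described above.
def _example_multi_future():
  f1, f2 = Future(), Future()
  mf = MultiFuture('example')
  mf.add_dependent(f1)
  mf.add_dependent(f2)
  mf.complete()            # No more dependents will be added.
  f1.set_result(1)
  f2.set_result(2)
  return mf.get_result()   # Runs the event loop; returns [1, 2] in insertion order.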
class QueueFuture(Future):
"""A Queue following the same protocol as MultiFuture.
However, instead of returning results as a list, it lets you
retrieve results as soon as they are ready, one at a time, using
getq(). The Future itself finishes with a result of None when the
last result is ready (regardless of whether it was retrieved).
The getq() method returns a Future which blocks until the next
result is ready, and then returns that result. Each getq() call
retrieves one unique result. Extra getq() calls after the last
result is already returned return EOFError as their Future's
  exception. (I.e., q.getq() returns a Future as always, but yielding
that Future raises EOFError.)
NOTE: Values can also be pushed directly via .putq(value). However
there is no flow control -- if the producer is faster than the
consumer, the queue will grow unbounded.
"""
# TODO: Refactor to share code with MultiFuture.
def __init__(self, info=None):
self._full = False
self._dependents = set()
self._completed = collections.deque()
self._waiting = collections.deque()
# Invariant: at least one of _completed and _waiting is empty.
# Also: _full and not _dependents <==> _done.
super(QueueFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
if self._full:
      raise RuntimeError('QueueFuture cannot complete twice.')
self._full = True
if not self._dependents:
self.set_result(None)
self._mark_finished()
def set_exception(self, exc, tb=None):
self._full = True
super(QueueFuture, self).set_exception(exc, tb)
if not self._dependents:
self._mark_finished()
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future instance; received %r' % fut)
if self._full:
      raise RuntimeError('QueueFuture cannot add a dependent once complete.')
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
if not fut.done():
      raise RuntimeError('Future not done before signalling dependent done.')
self._dependents.remove(fut)
exc = fut.get_exception()
tb = fut.get_traceback()
val = None
if exc is None:
val = fut.get_result()
if self._waiting:
waiter = self._waiting.popleft()
self._pass_result(waiter, exc, tb, val)
else:
self._completed.append((exc, tb, val))
if self._full and not self._dependents and not self._done:
self.set_result(None)
self._mark_finished()
def _mark_finished(self):
if not self.done():
raise RuntimeError('Future not done before marking as finished.')
while self._waiting:
waiter = self._waiting.popleft()
self._pass_eof(waiter)
def getq(self):
fut = Future()
if self._completed:
exc, tb, val = self._completed.popleft()
self._pass_result(fut, exc, tb, val)
elif self._full and not self._dependents:
self._pass_eof(fut)
else:
self._waiting.append(fut)
return fut
def _pass_eof(self, fut):
if not self._done:
raise RuntimeError('QueueFuture cannot pass EOF until done.')
exc = self.get_exception()
if exc is not None:
tb = self.get_traceback()
else:
exc = EOFError('Queue is empty')
tb = None
self._pass_result(fut, exc, tb, None)
def _pass_result(self, fut, exc, tb, val):
if exc is not None:
fut.set_exception(exc, tb)
else:
fut.set_result(val)
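# Illustrative sketch (not part of ndb): consuming a QueueFuture one result at a
# time with getq(); extra getq() calls signal exhaustion via EOFError.
def _example_queue_future():
  q = QueueFuture('example')
  q.putq('a')
  q.putq('b')
  q.complete()
  results = [q.getq().get_result(), q.getq().get_result()]  # -> ['a', 'b'] here
  try:
    q.getq().get_result()
  except EOFError:
    pass                   # The queue is exhausted.
  return results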
class SerialQueueFuture(Future):
"""Like QueueFuture but maintains the order of insertion.
This class is used by Query operations.
Invariants:
- At least one of _queue and _waiting is empty.
- The Futures in _waiting are always pending.
(The Futures in _queue may be pending or completed.)
In the discussion below, add_dependent() is treated the same way as
putq().
If putq() is ahead of getq(), the situation is like this:
putq()
v
_queue: [f1, f2, ...]; _waiting: []
^
getq()
Here, putq() appends a Future to the right of _queue, and getq()
removes one from the left.
If getq() is ahead of putq(), it's like this:
putq()
v
_queue: []; _waiting: [f1, f2, ...]
^
getq()
Here, putq() removes a Future from the left of _waiting, and getq()
appends one to the right.
When both are empty, putq() appends a Future to the right of _queue,
while getq() appends one to the right of _waiting.
The _full flag means that no more calls to putq() will be made; it
is set by calling either complete() or set_exception().
Calling complete() signals that no more putq() calls will be made.
If getq() is behind, subsequent getq() calls will eat up _queue
until it is empty, and after that will return a Future that passes
EOFError (note that getq() itself never raises EOFError). If getq()
is ahead when complete() is called, the Futures in _waiting are all
passed an EOFError exception (thereby eating up _waiting).
If, instead of complete(), set_exception() is called, the exception
and traceback set there will be used instead of EOFError.
"""
def __init__(self, info=None):
self._full = False
self._queue = collections.deque()
self._waiting = collections.deque()
super(SerialQueueFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
if self._full:
raise RuntimeError('SerialQueueFuture cannot complete twice.')
self._full = True
while self._waiting:
waiter = self._waiting.popleft()
waiter.set_exception(EOFError('Queue is empty'))
if not self._queue:
self.set_result(None)
def set_exception(self, exc, tb=None):
self._full = True
super(SerialQueueFuture, self).set_exception(exc, tb)
while self._waiting:
waiter = self._waiting.popleft()
waiter.set_exception(exc, tb)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
if self._waiting:
waiter = self._waiting.popleft()
waiter.set_result(value)
return
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future instance; received %r' % fut)
if self._full:
raise RuntimeError('SerialQueueFuture cannot add dependent '
'once complete.')
if self._waiting:
waiter = self._waiting.popleft()
fut.add_callback(_transfer_result, fut, waiter)
else:
self._queue.append(fut)
def getq(self):
if self._queue:
fut = self._queue.popleft()
# TODO: Isn't it better to call self.set_result(None) in complete()?
if not self._queue and self._full and not self._done:
self.set_result(None)
else:
fut = Future()
if self._full:
if not self._done:
raise RuntimeError('self._queue should be non-empty.')
err = self.get_exception()
if err is not None:
tb = self.get_traceback()
else:
err = EOFError('Queue is empty')
tb = None
fut.set_exception(err, tb)
else:
self._waiting.append(fut)
return fut
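# Illustrative sketch (not part of ndb): SerialQueueFuture hands results out in
# insertion order even when getq() runs ahead of putq().
def _example_serial_queue_future():
  q = SerialQueueFuture('example')
  first = q.getq()         # getq() ahead of putq(); the Future goes into _waiting.
  q.putq('a')              # Satisfies the earliest waiter directly.
  q.putq('b')
  q.complete()
  return first.get_result(), q.getq().get_result()   # -> ('a', 'b')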
def _transfer_result(fut1, fut2):
"""Helper to transfer result or errors from one Future to another."""
exc = fut1.get_exception()
if exc is not None:
tb = fut1.get_traceback()
fut2.set_exception(exc, tb)
else:
val = fut1.get_result()
fut2.set_result(val)
class ReducingFuture(Future):
"""A Queue following the same protocol as MultiFuture.
However the result, instead of being a list of results of dependent
Futures, is computed by calling a 'reducer' tasklet. The reducer tasklet
takes a list of values and returns a single value. It may be called
multiple times on sublists of values and should behave like
e.g. sum().
NOTE: The reducer input values may be reordered compared to the
order in which they were added to the queue.
"""
# TODO: Refactor to reuse some code with MultiFuture.
def __init__(self, reducer, info=None, batch_size=20):
self._reducer = reducer
self._batch_size = batch_size
self._full = False
self._dependents = set()
self._completed = collections.deque()
self._queue = collections.deque()
super(ReducingFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
if self._full:
raise RuntimeError('ReducingFuture cannot complete twice.')
self._full = True
if not self._dependents:
self._mark_finished()
def set_exception(self, exc, tb=None):
self._full = True
self._queue.clear()
super(ReducingFuture, self).set_exception(exc, tb)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if self._full:
raise RuntimeError('ReducingFuture cannot add dependent once complete.')
self._internal_add_dependent(fut)
def _internal_add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future; received %r' % fut)
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
if not fut.done():
      raise RuntimeError('Future not done before signalling dependent done.')
self._dependents.remove(fut)
if self._done:
return # Already done.
try:
val = fut.get_result()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
self._queue.append(val)
if len(self._queue) >= self._batch_size:
todo = list(self._queue)
self._queue.clear()
try:
nval = self._reducer(todo)
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
if isinstance(nval, Future):
self._internal_add_dependent(nval)
else:
self._queue.append(nval)
if self._full and not self._dependents:
self._mark_finished()
def _mark_finished(self):
if not self._queue:
self.set_result(None)
elif len(self._queue) == 1:
self.set_result(self._queue.pop())
else:
todo = list(self._queue)
self._queue.clear()
try:
nval = self._reducer(todo)
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
if isinstance(nval, Future):
self._internal_add_dependent(nval)
else:
self.set_result(nval)
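# Illustrative sketch (not part of ndb): reducing queued values with a plain
# function; the reducer may be invoked on sub-batches, as with sum() here.
def _example_reducing_future():
  rf = ReducingFuture(sum, 'example', batch_size=2)
  for value in (1, 2, 3, 4):
    rf.putq(value)
  rf.complete()
  return rf.get_result()   # -> 10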
# Alias for StopIteration used to mark return values.
# To use this, raise Return(<your return value>). The semantics
# are exactly the same as raise StopIteration(<your return value>)
# but using Return clarifies that you are intending this to be the
# return value of a tasklet.
# TODO: According to Monocle authors Steve and Greg Hazel, Twisted
# used an exception to signal a return value from a generator early
# on, and they found out it was error-prone. Should I worry?
Return = StopIteration
def get_return_value(err):
# XXX Docstring
if not err.args:
result = None
elif len(err.args) == 1:
result = err.args[0]
else:
result = err.args
return result
def tasklet(func):
# XXX Docstring
@utils.wrapping(func)
def tasklet_wrapper(*args, **kwds):
# XXX Docstring
# TODO: make most of this a public function so you can take a bare
# generator and turn it into a tasklet dynamically. (Monocle has
# this I believe.)
# __ndb_debug__ = utils.func_info(func)
fut = Future('tasklet %s' % utils.func_info(func))
fut._context = get_context()
try:
result = func(*args, **kwds)
except StopIteration, err:
# Just in case the function is not a generator but still uses
# the "raise Return(...)" idiom, we'll extract the return value.
result = get_return_value(err)
if _is_generator(result):
ns = namespace_manager.get_namespace()
ds_conn = datastore._GetConnection()
eventloop.queue_call(None, fut._help_tasklet_along, ns, ds_conn, result)
else:
fut.set_result(result)
return fut
return tasklet_wrapper
def synctasklet(func):
"""Decorator to run a function as a tasklet when called.
Use this to wrap a request handler function that will be called by
some web application framework (e.g. a Django view function or a
webapp.RequestHandler.get method).
"""
@utils.wrapping(func)
def synctasklet_wrapper(*args, **kwds):
__ndb_debug__ = utils.func_info(func)
taskletfunc = tasklet(func)
return taskletfunc(*args, **kwds).get_result()
return synctasklet_wrapper
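# Illustrative sketch (not part of ndb, and assuming a configured App Engine or
# testbed environment so that a default Context can be created): a synchronous
# entry point wrapping a tasklet.
@synctasklet
def _example_sync_add(x, y):
  yield sleep(0)           # Yield any Future; the tasklet resumes when it is done.
  raise Return(x + y)
# _example_sync_add(1, 2) blocks on the event loop and returns 3 directly.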
def toplevel(func):
"""A sync tasklet that sets a fresh default Context.
Use this for toplevel view functions such as
webapp.RequestHandler.get() or Django view functions.
"""
@utils.wrapping(func)
def add_context_wrapper(*args, **kwds):
__ndb_debug__ = utils.func_info(func)
_state.clear_all_pending()
# Create and install a new context.
ctx = make_default_context()
try:
set_context(ctx)
return synctasklet(func)(*args, **kwds)
finally:
set_context(None)
ctx.flush().check_success()
eventloop.run() # Ensure writes are flushed, etc.
return add_context_wrapper
_CONTEXT_KEY = '__CONTEXT__'
def get_context():
# XXX Docstring
ctx = None
if os.getenv(_CONTEXT_KEY):
ctx = _state.current_context
if ctx is None:
ctx = make_default_context()
set_context(ctx)
return ctx
def make_default_context():
# XXX Docstring
return make_context()
@utils.positional(0)
def make_context(conn=None, config=None):
# XXX Docstring
from . import context # Late import to deal with circular imports.
return context.Context(conn=conn, config=config)
def set_context(new_context):
# XXX Docstring
os.environ[_CONTEXT_KEY] = '1'
_state.current_context = new_context
# TODO: Rework the following into documentation.
# A tasklet/coroutine/generator can yield the following things:
# - Another tasklet/coroutine/generator; this is entirely equivalent to
# "for x in g: yield x"; this is handled entirely by the @tasklet wrapper.
#   (Actually, not quite: @tasklet returns a function that when called returns
# a Future. You can use the pep380 module's @gwrap decorator to support
# yielding bare generators though.)
# - An RPC (or MultiRpc); the tasklet will be resumed when this completes.
# This does not use the RPC's callback mechanism.
# - A Future; the tasklet will be resumed when the Future is done.
# This uses the Future's callback mechanism.
# A Future can be used in several ways:
# - Yield it from a tasklet; see above.
#   - Check (poll) its status via f.done().
# - Call its wait() method, perhaps indirectly via check_success()
# or get_result(). This invokes the event loop.
# - Call the Future.wait_any() or Future.wait_all() method.
#     This waits for any or all Futures and RPCs in the argument list.
# XXX HIRO XXX
# - A tasklet is a (generator) function decorated with @tasklet.
# - Calling a tasklet schedules the function for execution and returns a Future.
# - A function implementing a tasklet may:
# = yield a Future; this waits for the Future which returns f.get_result();
# = yield an RPC; this waits for the RPC and then returns rpc.get_result();
# = raise Return(result); this sets the outer Future's result;
# = raise StopIteration or return; this sets the outer Future's result;
# = raise another exception: this sets the outer Future's exception.
# - If a function implementing a tasklet is not a generator it will be
# immediately executed to completion and the tasklet wrapper will
# return a Future that is already done. (XXX Alternative behavior:
# it schedules the call to be run by the event loop.)
# - Code not running in a tasklet can call f.get_result() or f.wait() on
# a future. This is implemented by a simple loop like the following:
# while not self._done:
# eventloop.run1()
# - Here eventloop.run1() runs one "atomic" part of the event loop:
# = either it calls one immediately ready callback;
# = or it waits for the first RPC to complete;
# = or it sleeps until the first callback should be ready;
# = or it raises an exception indicating all queues are empty.
# - It is possible but suboptimal to call rpc.get_result() or
# rpc.wait() directly on an RPC object since this will not allow
# other callbacks to run as they become ready. Wrapping an RPC in a
# Future will take care of this issue.
# - The important insight is that when a generator function
# implementing a tasklet yields, raises or returns, there is always a
# wrapper that catches this event and either turns it into a
# callback sent to the event loop, or sets the result or exception
# for the tasklet's Future.
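# Illustrative sketch (not part of ndb) for the notes above: blocking on bare
# Futures without writing a tasklet, via Future.wait_any().
def _example_wait_any():
  slow, fast = Future('slow'), Future('fast')
  eventloop.queue_call(0.05, slow.set_result, 'slow result')
  eventloop.queue_call(None, fast.set_result, 'fast result')
  winner = Future.wait_any([slow, fast])   # Runs the event loop until one is done.
  return winner.get_result()               # -> 'fast result'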
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from matplotlib.patches import PathPatch
from matplotlib import rcParams
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .ticks import Ticks
from .ticklabels import TickLabels
from .axislabels import AxisLabels
from .grid_paths import get_lon_lat_path, get_gridline_path
__all__ = ['CoordinateHelper']
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {'-': 'solid',
'--': 'dashed',
'-.': 'dashdot',
':': 'dotted',
'none': 'none',
'None': 'none',
' ': 'none',
'': 'none'}
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid='ignore'):
return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap)
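# Worked example of the formula above: the result always lies in the half-open
# interval [coord_wrap - 360, coord_wrap). For instance,
#   wrap_angle_at(np.array([355., 10.]), 180.)  ->  array([ -5.,  10.])
#   wrap_angle_at(np.array([ -5., 370.]), 360.)  ->  array([355.,  10.])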
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(self, parent_axes=None, parent_map=None, transform=None,
coord_index=None, coord_type='scalar', coord_unit=None,
coord_wrap=None, frame=None, format_unit=None):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self.format_unit = format_unit
self.frame = frame
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
self.ticks.display_minor_ticks(rcParams['xtick.minor.visible'])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {'visible': False,
'facecolor': 'none',
'edgecolor': rcParams['grid.color'],
'linestyle': LINES_TO_PATCHES_LINESTYLE[rcParams['grid.linestyle']],
'linewidth': rcParams['grid.linewidth'],
'alpha': rcParams['grid.alpha'],
'transform': self.parent_axes.transData}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the grid by determining the grid lines in
world coordinates and then plotting them in pixel coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended. By default, 'lines' is used if the transform has
an inverse, otherwise 'contours' is used.
"""
if grid_type == 'lines' and not self.transform.has_inverse:
raise ValueError('The specified transform has no inverse, so the '
'grid cannot be drawn using grid_type=\'lines\'')
if grid_type is None:
grid_type = 'lines' if self.transform.has_inverse else 'contours'
if grid_type in ('lines', 'contours'):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs.pop('color')
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs['visible']:
if not draw_grid:
self.grid_lines_kwargs['visible'] = False
else:
self.grid_lines_kwargs['visible'] = True
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == 'longitude' and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != 'longitude' and coord_wrap is not None:
raise NotImplementedError('coord_wrap is not yet supported '
'for non-longitude coordinates')
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == 'scalar':
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ['longitude', 'latitude']:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(unit=self.coord_unit,
format_unit=self.format_unit)
else:
raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'")
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or Formatter
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter "
"instance")
def format_coord(self, value, format='auto'):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or mathtext. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == 'longitude':
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
unit : :class:`~astropy.units.Unit`
The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def set_ticks(self, values=None, spacing=None, number=None, size=None,
width=None, color=None, alpha=None, direction=None,
exclude_overlapping=None):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
width : float, optional
The width of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError("At most one of values, spacing, or number should "
"be specified")
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ('in', 'out'):
self.ticks.set_tick_out(direction == 'out')
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn("exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will cause the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(self, color=None, size=None, pad=None,
exclude_overlapping=None, **kwargs):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
The size of the ticks labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will cause the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
The visibility of the tick labels. Setting as ``False`` will hide
this coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop('fontdict', None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will cause the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group('grid lines')
self._update_ticks()
if self.grid_lines_kwargs['visible']:
if self._grid_type == 'lines':
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == 'lines':
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group('grid lines')
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox, ticks_locs):
renderer.open_group('ticks')
self.ticks.draw(renderer, ticks_locs)
self.ticklabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size)
renderer.close_group('ticks')
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, ticks_locs, visible_ticks):
renderer.open_group('axis labels')
self.axislabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=ticks_locs,
visible_ticks=visible_ticks)
renderer.close_group('axis labels')
def _update_ticks(self):
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(*coord_range[self.coord_index])
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(self._fl_spacing, self.get_minor_frequency(), *coord_range[self.coord_index])
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# http://matplotlib.org/users/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = spine.world[:, self.coord_index]
with np.errstate(invalid='ignore'):
world0 = self.transform.transform(pixel0)[:, self.coord_index]
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
with np.errstate(invalid='ignore'):
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == 'lower' else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
with np.errstate(invalid='ignore'):
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = (world1 - world0)
dy = (world2 - world0)
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.)
dy = wrap_angle_at(dy, 180.)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]])
with np.errstate(invalid='ignore'):
reset = (((normal_angle_full - tick_angle) % 360 > 90.) &
((tick_angle - normal_angle_full) % 360 > 90.))
tick_angle[reset] -= 180.
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid='ignore'):
w1[w2 - w1 > 180.] += 360
w2[w1 - w2 > 180.] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(minor_ticks_w_coordinates, spine, axis, w1,
w2, tick_angle, ticks='minor')
# format tick labels, add to scene
text = self.formatter(self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(self, tick_world_coordinates, spine, axis, w1, w2,
tick_angle, ticks='major'):
if self.coord_type == 'longitude':
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack([tick_world_coordinates_values,
tick_world_coordinates_values + 360])
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid='ignore'):
intersections = np.hstack([np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0]])
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.e-13, atol=1.e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0])
y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1])
x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0])
y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1])
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.:
delta_angle -= 360.
elif delta_angle < -180.:
delta_angle += 360.
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == 'major':
self.ticks.add(axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(dict(axis=axis,
pixel=(x_pix_i, y_pix_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac))
self.lbl_world.append(world)
else:
self.ticks.add_minor(minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the number of minor ticks per major tick.
Parameters
----------
frequency : int
The number of minor ticks per major tick.
"""
self.minor_frequency = frequency
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(coord_range[1][0], coord_range[1][1], n_samples)
else:
xy_world[subset, 0] = np.linspace(coord_range[0][0], coord_range[0][1], n_samples)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(self._get_gridline(xy_world[subset], pixel[subset], xy_world_round[subset]))
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == 'scalar':
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _update_grid_contour(self):
if hasattr(self, '_grid') and self._grid:
for line in self._grid.collections:
line.remove()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res),
np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
# tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == 'longitude':
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (tick_world_coordinates_values[0] + tick_world_coordinates_values[1])
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(tick_world_coordinates_values, mid)
# Replace wraps by NaN
with np.errstate(invalid='ignore'):
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1, :], axis=1)) > 180)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
with np.errstate(invalid='ignore'):
self._grid = self.parent_axes.contour(x, y, field.transpose(), levels=np.sort(tick_world_coordinates_values))
else:
self._grid = None
def tick_params(self, which='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : string, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if 'colors' in kwargs:
if 'color' not in kwargs:
kwargs['color'] = kwargs['colors']
if 'labelcolor' not in kwargs:
kwargs['labelcolor'] = kwargs['colors']
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == 'minor':
if len(set(kwargs) - {'length'}) > 0:
raise ValueError("When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)")
else:
if 'length' in kwargs:
self.ticks.set_minor_ticksize(kwargs['length'])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(size=kwargs.get('length'),
width=kwargs.get('width'),
color=kwargs.get('color'),
direction=kwargs.get('direction'))
# Set the tick position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if arg in kwargs and position is None:
position = ''
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(color=kwargs.get('labelcolor'),
size=kwargs.get('labelsize'),
pad=kwargs.get('pad'))
# Set the tick label position
position = None
for arg in ('bottom', 'left', 'top', 'right'):
if 'label' + arg in kwargs and position is None:
position = ''
if kwargs.get('label' + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if 'grid_color' in kwargs:
self.grid_lines_kwargs['edgecolor'] = kwargs['grid_color']
if 'grid_alpha' in kwargs:
self.grid_lines_kwargs['alpha'] = kwargs['grid_alpha']
if 'grid_linewidth' in kwargs:
self.grid_lines_kwargs['linewidth'] = kwargs['grid_linewidth']
if 'grid_linestyle' in kwargs:
if kwargs['grid_linestyle'] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs['linestyle'] = LINES_TO_PATCHES_LINESTYLE[kwargs['grid_linestyle']]
else:
self.grid_lines_kwargs['linestyle'] = kwargs['grid_linestyle']
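# Hedged usage sketch (not part of the upstream module): CoordinateHelper
# instances are not created directly; they are obtained from a WCSAxes via
# ``ax.coords``. The function below assumes a hypothetical FITS file
# 'image.fits' with a celestial WCS and simply strings together the public
# API defined above (set_ticks, set_ticklabel, set_axislabel, grid,
# tick_params); it is an illustration, not a test, and is never called here.
def _coordinate_helper_usage_example():
    import matplotlib.pyplot as plt
    from astropy.io import fits
    from astropy.wcs import WCS

    hdu = fits.open('image.fits')[0]  # hypothetical input file
    ax = plt.subplot(projection=WCS(hdu.header))
    ax.imshow(hdu.data, origin='lower')

    lon = ax.coords[0]  # a CoordinateHelper
    lat = ax.coords[1]  # a CoordinateHelper
    lon.set_ticks(spacing=30 * u.deg, color='white')
    lon.set_ticklabel(size=8, exclude_overlapping=True)
    lon.set_axislabel('Right Ascension')
    lat.set_axislabel('Declination')
    lon.grid(color='yellow', alpha=0.5, linestyle='solid')
    lat.tick_params(direction='in', length=6, labelsize=8)
    plt.savefig('example.png')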
|
|
# -*- coding: utf-8 -*-
import datetime
import re
from dateutil import parser
from nose.tools import * # flake8: noqa
import mock
from rest_framework import serializers as ser
from tests.base import ApiTestCase
from tests import factories
from api.base.settings.defaults import API_BASE
from api.base.filters import FilterMixin
from api.base.exceptions import (
InvalidFilterError,
InvalidFilterOperator,
InvalidFilterComparisonType,
InvalidFilterMatchType,
InvalidFilterValue,
InvalidFilterFieldError
)
class FakeSerializer(ser.Serializer):
filterable_fields = ('string_field', 'list_field', 'date_field', 'int_field', 'bool_field')
string_field = ser.CharField()
list_field = ser.ListField()
date_field = ser.DateField()
datetime_field = ser.DateTimeField()
int_field = ser.IntegerField()
float_field = ser.FloatField()
bool_field = ser.BooleanField(source='foobar')
class FakeView(FilterMixin):
serializer_class = FakeSerializer
class TestFilterMixin(ApiTestCase):
def setUp(self):
super(TestFilterMixin, self).setUp()
self.view = FakeView()
def test_parse_query_params_default_operators(self):
query_params = {
'filter[string_field]': 'foo',
'filter[list_field]': 'bar',
'filter[int_field]': '42',
'filter[bool_field]': 'false',
}
fields = self.view.parse_query_params(query_params)
assert_in('string_field', fields)
assert_equal(fields['string_field'][0]['op'], 'icontains')
assert_in('list_field', fields)
assert_equal(fields['list_field'][0]['op'], 'contains')
assert_in('int_field', fields)
assert_equal(fields['int_field'][0]['op'], 'eq')
assert_in('foobar', fields)
assert_equal(fields['foobar'][0]['op'], 'eq')
def test_parse_query_params_casts_values(self):
query_params = {
'filter[string_field]': 'foo',
'filter[list_field]': 'bar',
'filter[int_field]': '42',
'filter[bool_field]': 'false',
}
fields = self.view.parse_query_params(query_params)
assert_in('string_field', fields)
assert_equal(fields['string_field'][0]['value'], 'foo')
assert_in('list_field', fields)
assert_equal(fields['list_field'][0]['value'], 'bar')
assert_in('int_field', fields)
assert_equal(fields['int_field'][0]['value'], 42)
assert_in('foobar', fields)
assert_equal(fields['foobar'][0]['value'], False)
def test_parse_query_params_uses_field_source_attribute(self):
query_params = {
'filter[bool_field]': 'false',
}
fields = self.view.parse_query_params(query_params)
assert_in('foobar', fields)
assert_equal(fields['foobar'][0]['value'], False)
assert_equal(fields['foobar'][0]['op'], 'eq')
def test_parse_query_params_generalizes_dates(self):
query_params = {
'filter[date_field]': '2014-12-12'
}
fields = self.view.parse_query_params(query_params)
start = parser.parse('2014-12-12')
stop = start + datetime.timedelta(days=1)
for match in fields['date_field']:
if match['op'] == 'gte':
assert_equal(match['value'], start)
elif match['op'] == 'lt':
assert_equal(match['value'], stop)
else:
self.fail()
def test_parse_query_params_comparable_field(self):
query_params = {
'filter[int_field][gt]': 42,
'filter[int_field][lte]': 9000
}
fields = self.view.parse_query_params(query_params)
for match in fields['int_field']:
if match['op'] == 'gt':
assert_equal(match['value'], 42)
elif match['op'] == 'lte':
assert_equal(match['value'], 9000)
else:
self.fail()
def test_parse_query_params_matchable_field(self):
query_params = {
'filter[string_field][contains]': 'foo',
'filter[string_field][icontains]': 'bar'
}
fields = self.view.parse_query_params(query_params)
for match in fields['string_field']:
if match['op'] == 'contains':
assert_equal(match['value'], 'foo')
elif match['op'] == 'icontains':
assert_equal(match['value'], 'bar')
else:
self.fail()
def test_parse_query_params_raises_InvalidFilterError_bad_field(self):
query_params = {
'filter[fake]': 'foo'
}
with assert_raises(InvalidFilterError):
self.view.parse_query_params(query_params)
def test_parse_query_params_raises_InvalidFilterComparisonType(self):
query_params = {
'filter[string_field][gt]': 'foo'
}
with assert_raises(InvalidFilterComparisonType):
self.view.parse_query_params(query_params)
def test_parse_query_params_raises_InvalidFilterMatchType(self):
query_params = {
'filter[date_field][icontains]': '2015'
}
with assert_raises(InvalidFilterMatchType):
self.view.parse_query_params(query_params)
def test_parse_query_params_raises_InvalidFilterOperator(self):
query_params = {
'filter[int_field][bar]': 42
}
with assert_raises(InvalidFilterOperator):
self.view.parse_query_params(query_params)
def test_InvalidFilterOperator_parameterizes_valid_operators(self):
query_params = {
'filter[int_field][bar]': 42
}
try:
self.view.parse_query_params(query_params)
except InvalidFilterOperator as err:
ops = re.search(r'one of (?P<ops>.+)\.$', err.detail).groupdict()['ops']
assert_equal(ops, "gt, gte, lt, lte, eq")
query_params = {
'filter[string_field][bar]': 'foo'
}
try:
self.view.parse_query_params(query_params)
except InvalidFilterOperator as err:
ops = re.search(r'one of (?P<ops>.+)\.$', err.detail).groupdict()['ops']
assert_equal(ops, "contains, icontains, eq")
def test_parse_query_params_supports_multiple_filters(self):
query_params = {
'filter[string_field]': 'foo',
'filter[string_field]': 'bar',
}
fields = self.view.parse_query_params(query_params)
assert_in('string_field', fields)
for match in fields['string_field']:
assert_in(match['value'], ('foo', 'bar'))
def test_convert_value_bool(self):
value = 'true'
field = FakeSerializer._declared_fields['bool_field']
value = self.view.convert_value(value, field)
assert_true(isinstance(value, bool))
assert_true(value)
def test_convert_value_date(self):
value = '2014-12-12'
field = FakeSerializer._declared_fields['date_field']
value = self.view.convert_value(value, field)
assert_true(isinstance(value, datetime.datetime))
assert_equal(value, parser.parse('2014-12-12'))
def test_convert_value_int(self):
value = '9000'
field = FakeSerializer._declared_fields['int_field']
value = self.view.convert_value(value, field)
assert_equal(value, 9000)
def test_convert_value_float(self):
value = '42'
field = FakeSerializer._declared_fields['float_field']
value = self.view.convert_value(value, field)
assert_true(isinstance(value, float))
assert_equal(value, 42.0)
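# Hedged illustration (not an actual OSF test): the filter syntax exercised
# above maps a query string such as
#     ?filter[int_field][gte]=10&filter[string_field][contains]=foo
# onto a dict keyed by the serializer field *source*, where each entry is a
# list of dicts carrying at least an 'op' and a casted 'value'. The helper
# below only documents that shape and is never invoked by the test runner.
def _example_parsed_filters():
    view = FakeView()
    parsed = view.parse_query_params({
        'filter[int_field][gte]': '10',
        'filter[string_field][contains]': 'foo',
    })
    # Expected shape (extra bookkeeping keys may be present):
    # parsed == {
    #     'int_field': [{'op': 'gte', 'value': 10, ...}],
    #     'string_field': [{'op': 'contains', 'value': 'foo', ...}],
    # }
    return parsed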
|
|
#!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for cinder management.
"""
from __future__ import print_function
import os
import sys
import warnings
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
import oslo_messaging as messaging
from oslo_utils import uuidutils
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder import context
from cinder import db
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder.i18n import _
from cinder.objects import base as objects_base
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import utils
from cinder import version
CONF = cfg.CONF
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
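# Hedged illustration (not part of cinder): the @args decorator above just
# stacks (args, kwargs) tuples onto the wrapped function's __dict__['args'];
# add_command_parsers() below replays them into argparse. Decorators apply
# bottom-up and each one prepends with insert(0, ...), so the stored specs end
# up in top-down declaration order. The example function is never called.
def _example_args_decorator():
    @args('--name', required=True, help='Resource name')
    @args('count', nargs='?', default=1)
    def fake_action(name, count):
        return name, count
    assert fake_action.args == [(('--name',), {'required': True, 'help': 'Resource name'}),
                                (('count',), {'nargs': '?', 'default': 1})]
    return fake_action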
def param2id(object_id):
"""Helper function to convert various id types to internal id.
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
"""
if uuidutils.is_uuid_like(object_id):
return object_id
elif '-' in object_id:
# FIXME(ja): mapping occurs in nova?
pass
else:
try:
return int(object_id)
except ValueError:
return object_id
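# Hedged examples (not part of cinder) of what param2id() above returns for the
# id flavours mentioned in its docstring: UUID-like strings pass through
# untouched, plain integers arrive as strings from argparse and are cast to
# int, and 'vol-xxxxxxxx' style ids currently hit the FIXME branch and yield
# None. Never called at import time.
def _example_param2id():
    assert param2id('3f2504e0-4f89-11d3-9a0c-0305e82c3301') == '3f2504e0-4f89-11d3-9a0c-0305e82c3301'
    assert param2id('10') == 10
    assert param2id('vol-0000000a') is None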
class ShellCommands(object):
def bpython(self):
"""Runs a bpython shell.
Falls back to Ipython/python shell if unavailable
"""
self.run('bpython')
def ipython(self):
"""Runs an Ipython shell.
Falls back to Python shell if unavailable
"""
self.run('ipython')
def python(self):
"""Runs a python shell.
Falls back to Python shell if unavailable
"""
self.run('python')
@args('--shell', dest="shell",
metavar='<bpython|ipython|python>',
help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
shell = 'bpython'
if shell == 'bpython':
try:
import bpython
bpython.embed()
except ImportError:
shell = 'ipython'
if shell == 'ipython':
try:
from IPython import embed
embed()
except ImportError:
try:
# Ipython < 0.11
# Explicitly pass an empty list as arguments, because
# otherwise IPython would use sys.argv from this script.
import IPython
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
# no IPython module
shell = 'python'
if shell == 'python':
import code
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try',
# because we already know 'readline' was imported successfully.
import rlcompleter # noqa
readline.parse_and_bind("tab:complete")
code.interact()
@args('--path', required=True, help='Script path')
def script(self, path):
"""Runs the script from the specified path with flags set properly.
arguments: path
"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
print('%s' % caught_exception)
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'cinder-manage db sync' before running this command."))
exit(1)
class HostCommands(object):
"""List hosts."""
@args('zone', nargs='?', default=None,
help='Availability Zone (default: %(default)s)')
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
args: [zone]
"""
print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'})
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for srv in services:
if not [h for h in hosts if h['host'] == srv['host']]:
hosts.append(srv)
for h in hosts:
print(_("%(host)-25s\t%(availability_zone)-15s")
% {'host': h['host'],
'availability_zone': h['availability_zone']})
class DbCommands(object):
"""Class for managing the database."""
def __init__(self):
pass
@args('version', nargs='?', default=None,
help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return db_migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
db_migration.INIT_VERSION))
@args('age_in_days', type=int,
help='Purge deleted rows older than age in days')
def purge(self, age_in_days):
"""Purge deleted rows older than a given age from cinder tables."""
age_in_days = int(age_in_days)
if age_in_days <= 0:
print(_("Must supply a positive, non-zero value for age"))
exit(1)
ctxt = context.get_admin_context()
db.purge_deleted_rows(ctxt, age_in_days)
class VersionCommands(object):
"""Class for exposing the codebase version."""
def __init__(self):
pass
def list(self):
print(version.version_string())
def __call__(self):
self.list()
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state."""
def __init__(self):
self._client = None
def rpc_client(self):
if self._client is None:
if not rpc.initialized():
rpc.init(CONF)
target = messaging.Target(topic=CONF.volume_topic)
serializer = objects_base.CinderObjectSerializer()
self._client = rpc.get_client(target, serializer=serializer)
return self._client
@args('volume_id',
help='Volume ID to be deleted')
def delete(self, volume_id):
"""Delete a volume, bypassing the check that it
must be available.
"""
ctxt = context.get_admin_context()
volume = db.volume_get(ctxt, param2id(volume_id))
host = volume['host']
if not host:
print(_("Volume not yet assigned to host."))
print(_("Deleting volume from database and skipping rpc."))
db.volume_destroy(ctxt, param2id(volume_id))
return
if volume['status'] == 'in-use':
print(_("Volume is in-use."))
print(_("Detach volume from instance and then try again."))
return
cctxt = self.rpc_client().prepare(server=host)
cctxt.cast(ctxt, "delete_volume", volume_id=volume['id'])
@args('--currenthost', required=True, help='Existing volume host name')
@args('--newhost', required=True, help='New volume host name')
def update_host(self, currenthost, newhost):
"""Modify the host name associated with a volume.
Particularly to recover from cases where one has moved
their Cinder Volume node, or modified their backend_name in a
multi-backend config.
"""
ctxt = context.get_admin_context()
volumes = db.volume_get_all_by_host(ctxt,
currenthost)
for v in volumes:
db.volume_update(ctxt, v['id'],
{'host': newhost})
class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s)."""
def __init__(self):
pass
@args('param', nargs='?', default=None,
help='Configuration parameter to display (default: %(default)s)')
def list(self, param=None):
"""List parameters configured for cinder.
Lists all parameters configured for cinder unless an optional argument
is specified. If the parameter is specified we only print the
requested parameter. If the parameter is not found an appropriate
error is produced by .get*().
"""
param = param and param.strip()
if param:
print('%s = %s' % (param, CONF.get(param)))
else:
for key, value in CONF.iteritems():
print('%s = %s' % (key, value))
class GetLogCommands(object):
"""Get logging information."""
def errors(self):
"""Get all of the errors from the log files."""
error_found = 0
if CONF.log_dir:
logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
for file in logs:
log_file = os.path.join(CONF.log_dir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
for index, line in enumerate(lines):
if line.find(" ERROR ") > 0:
error_found += 1
if print_name == 0:
print(log_file + ":-")
print_name = 1
print(_("Line %(dis)d : %(line)s") %
{'dis': len(lines) - index, 'line': line})
if error_found == 0:
print(_("No errors in logfiles!"))
@args('num_entries', nargs='?', type=int, default=10,
help='Number of entries to list (default: %(default)d)')
def syslog(self, num_entries=10):
"""Get <num_entries> of the cinder syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
if os.path.exists('/var/log/syslog'):
log_file = '/var/log/syslog'
elif os.path.exists('/var/log/messages'):
log_file = '/var/log/messages'
else:
print(_("Unable to find system log file!"))
sys.exit(1)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print(_("Last %s cinder syslog entries:-") % (entries))
for line in lines:
if line.find("cinder") > 0:
count += 1
print(_("%s") % (line))
if count == entries:
break
if count == 0:
print(_("No cinder entries in syslog!"))
class BackupCommands(object):
"""Methods for managing backups."""
def list(self):
"""List all backups (including ones in progress) and the host
on which the backup operation is running.
"""
ctxt = context.get_admin_context()
backups = db.backup_get_all(ctxt)
hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
print(hdr % (_('ID'),
_('User ID'),
_('Project ID'),
_('Host'),
_('Name'),
_('Container'),
_('Status'),
_('Size'),
_('Object Count')))
res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d"
for backup in backups:
object_count = 0
if backup['object_count'] is not None:
object_count = backup['object_count']
print(res % (backup['id'],
backup['user_id'],
backup['project_id'],
backup['host'],
backup['display_name'],
backup['container'],
backup['status'],
backup['size'],
object_count))
class ServiceCommands(object):
"""Methods for managing services."""
def list(self):
"""Show a list of all cinder services."""
ctxt = context.get_admin_context()
services = db.service_get_all(ctxt)
print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print(print_format % (_('Binary'),
_('Host'),
_('Zone'),
_('Status'),
_('State'),
_('Updated At')))
for svc in services:
alive = utils.service_is_up(svc)
art = ":-)" if alive else "XXX"
status = 'enabled'
if svc['disabled']:
status = 'disabled'
print(print_format % (svc['binary'], svc['host'].partition('.')[0],
svc['availability_zone'], status, art,
svc['updated_at']))
CATEGORIES = {
'backup': BackupCommands,
'config': ConfigCommands,
'db': DbCommands,
'host': HostCommands,
'logs': GetLogCommands,
'service': ServiceCommands,
'shell': ShellCommands,
'version': VersionCommands,
'volume': VolumeCommands,
}
def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
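# Hedged illustration (not part of cinder): methods_of() drives the CLI
# dispatch below -- every public callable of a command class becomes a
# subcommand. For the simple VersionCommands class this yields just 'list',
# since dunder methods such as __call__ start with '_' and are skipped.
def _example_methods_of():
    names = [name for name, _ in methods_of(VersionCommands())]
    assert names == ['list']
    return names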
def add_command_parsers(subparsers):
for category in CATEGORIES:
command_object = CATEGORIES[category]()
parser = subparsers.add_parser(category)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
handler=add_command_parsers)
def get_arg_string(args):
arg = None
if args[0] == '-':
# Note(zhiteng): an arg that starts with the parser's prefix_chars
# is an optional arg. The cfg module takes care of the actual
# ArgumentParser, so prefix_chars is always '-'.
if args[1] == '-':
# This is long optional arg
arg = args[2:]
else:
arg = args[3:]
else:
arg = args
return arg
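# Hedged examples (not part of cinder): get_arg_string() above recovers the
# attribute name that oslo.config stores for a CLI argument. Long '--' options
# lose their leading dashes, positional arguments are returned unchanged
# (single-dash short options go through the args[3:] branch and are not
# exercised here). Never called at import time.
def _example_get_arg_string():
    assert get_arg_string('--currenthost') == 'currenthost'
    assert get_arg_string('volume_id') == 'volume_id'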
def fetch_func_args(func):
fn_args = []
for args, kwargs in getattr(func, 'args', []):
arg = get_arg_string(args[0])
fn_args.append(getattr(CONF.category, arg))
return fn_args
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opt(category_opt)
script_name = sys.argv[0]
if len(sys.argv) < 2:
print(_("\nOpenStack Cinder version: %(version)s\n") %
{'version': version.version_string()})
print(script_name + " category action [<args>]")
print(_("Available categories:"))
for category in CATEGORIES:
print(_("\t%s") % category)
sys.exit(2)
try:
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup("cinder")
except cfg.ConfigFilesNotFoundError:
cfgfile = CONF.config_file[-1] if CONF.config_file else None
if cfgfile and not os.access(cfgfile, os.R_OK):
st = os.stat(cfgfile)
print(_("Could not read %s. Re-running with sudo") % cfgfile)
try:
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
except Exception:
print(_('sudo failed, continuing as if nothing happened'))
print(_('Please re-run cinder-manage as root.'))
sys.exit(2)
fn = CONF.category.action_fn
fn_args = fetch_func_args(fn)
fn(*fn_args)
|
|
"""
This script contains functionality for downloading, cleaning up and converting Donald Trump tweets to a numpy data
format suitable for training a character level modelling network.
"""
import html
import json
import os
import random
import urllib.request as req
import numpy as np
from unidecode import unidecode
# Thanks to this guy who did the hard work of collecting all Trump tweets!
URI_FORMAT = 'http://www.trumptwitterarchive.com/data/realdonaldtrump/{}.json'
FIRST_YEAR = 2009
LAST_YEAR = 2017
CACHE_DIR = os.path.dirname(os.path.abspath(__file__)) + '/cache'
MIN_CHAR_OCCURRENCES = 500
def download_yearly_batch(year):
"""
Given a year, download the JSON encoded batch of Trump tweets for that year and return the JSON string
"""
print('Downloading tweets from {}'.format(year))
with req.urlopen(URI_FORMAT.format(year)) as uri:
return uri.read().decode()
def download_yearly_batch_cached(year):
"""
Given a year, fetch the JSON encoded Trump tweets from the cache, or download and cache them first. Returns
the parsed JSON.
"""
path = '{}/{}.json'.format(CACHE_DIR, year)
if not os.path.exists(path):
with open(path, 'w') as file:
file.write(download_yearly_batch(year))
with open(path, 'r') as file:
return json.load(file)
def filter_oc(tweets):
"""
Filter out retweets and replies, because we are only interested in original Trump prose
"""
return [tweet for tweet in tweets if is_oc(tweet)]
def is_oc(tweet):
"""
Check if a tweet is original content and not a retweet or reply
"""
if tweet['is_retweet']:
return False
if tweet['in_reply_to_user_id_str'] is not None:
return False
if '@realDonaldTrump' in tweet['text']:
# Here he's copying other people's tweets and responding to them, but they're not replies or retweets
return False
return True
def extract_tweet_text(tweets):
"""
Just grab 'em by the "text" fields
"""
return [tweet['text'] for tweet in tweets]
def cleanup(tweets):
"""
Convert HTML entities to normal characters and convert to ASCII
"""
return [unidecode(html.unescape(tweet)) for tweet in tweets]
def get_yearly_tweets(year):
"""
Get all original tweets from the given year as plain text, filtered and cleaned up
"""
return cleanup(extract_tweet_text(filter_oc(download_yearly_batch_cached(year))))
def get_all_tweets():
"""
Get all original tweets as plain text, filtered and cleaned up
"""
all_tweets = []
for year in range(FIRST_YEAR, LAST_YEAR + 1):
all_tweets.extend(get_yearly_tweets(year))
return all_tweets
def count_chars(tweets):
"""
Count the occurrence of all characters in the given tweets. Returns a dictionary with characters as keys and
the integer number of occurrences as values.
"""
counts = {}
for tweet in tweets:
for char in tweet:
if char not in counts:
counts[char] = 0
counts[char] += 1
return counts
def get_char_exclude_list(tweets):
"""
Get a list of characters that have too few occurrences and should be excluded from the data set
"""
return [char for char, num in count_chars(tweets).items() if num < MIN_CHAR_OCCURRENCES]
def exclude_tweets_with_rare_chars(tweets):
"""
Exclude tweets that contain characters with too few overall occurrences
"""
excludes = get_char_exclude_list(tweets)
return [tweet for tweet in tweets if not any(char in tweet for char in excludes)]
def get_features(tweet, unique_chars):
"""
Given a tweet and a character list, determine the 0-based integer class for every character in the tweet and return
the list of classes. Will prepend a special class with index len(unique_chars) to the list, which indicates the
start of the tweet. This allows the model to learn to predict the first character from scratch.
"""
return [len(unique_chars)] + [unique_chars.index(char) for char in tweet]
def get_labels(tweet, unique_chars):
"""
Given a tweet and a character list, determine the 0-based integer class for every character in the tweet and return
the list of classes. Will append a special class with index len(unique_chars) to the list, which indicates the
end of the tweet. This allows the model to learn to predict when the tweet is done.
"""
return [unique_chars.index(char) for char in tweet] + [len(unique_chars)]
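# Hedged worked example (not part of the original script): for the tiny
# vocabulary ['a', 'b', 'd'] the sentinel class is 3 (== len(unique_chars)), so
# features and labels are the same character sequence shifted by one step,
# book-ended by the start and end sentinels respectively. At every time step t
# the model sees features[t] and must predict labels[t]. Never called at
# import time.
def _example_features_and_labels():
    unique_chars = ['a', 'b', 'd']
    assert get_features('bad', unique_chars) == [3, 1, 0, 2]
    assert get_labels('bad', unique_chars) == [1, 0, 2, 3]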
def get_unique_chars(tweets):
"""
Returns a list of unique characters occurring in the given tweets, sorted by natural order
"""
return sorted(char for char, _ in count_chars(tweets).items())
def create_training_data():
"""
Create all data required for training. Will collect all tweets and transform it to trainable features and labels.
Returns:
features: 2D numpy array of shape [num_tweets, max_time_steps] where max_time_steps is the number of characters
of the longest tweet in the data set + 1, to accommodate the 'start of tweet' special character followed by
the indices of the characters in every tweet. Zero padded to max_time_steps for shorter tweets.
labels: 2D numpy array with the same shape as `features`. Contains the indices of the characters in every tweet,
followed by a special label with class len(unique_chars) that indicates the end of the tweet. Zero padded to
max_time_steps for shorter tweets.
mask: 2D numpy array of shape [num_tweets, max_time_steps]. Contains 1's for time steps that contain actual
feature/label pairs, and 0's for the zero-padded steps of shorter tweets. Needed to ignore the training error
on padded time steps.
settings: dictionary that contains the unique characters used for the training data, and the maximum number of
time steps. Needed for training and being able to reproduce characters from integer classes for sampling
synthetic tweets.
"""
# Collect all usable tweets and shuffle them deterministically (shuffling is important for training)
all_tweets = exclude_tweets_with_rare_chars(get_all_tweets())
random.seed(12345)
random.shuffle(all_tweets)
print("got all {} tweets, creating features and labels".format(len(all_tweets)))
unique_chars = get_unique_chars(all_tweets)
# The maximum number of time steps is the longest tweet length + 1 for the special 'start tweet' character.
max_steps = max(len(tweet) + 1 for tweet in all_tweets)
# Create the numpy array for all features and labels
features = np.zeros([len(all_tweets), max_steps], dtype=int)
labels = np.zeros_like(features)
mask = np.zeros([len(all_tweets), max_steps], dtype=float)
for i in range(len(all_tweets)):
tweet = all_tweets[i]
num_steps = len(tweet) + 1
features[i, :num_steps] = get_features(tweet, unique_chars)
labels[i, :num_steps] = get_labels(tweet, unique_chars)
mask[i, :num_steps] = 1
return features, labels, mask, {'chars': unique_chars, 'maxSteps': max_steps}
def export_training_data():
"""
Export features, labels, mask and settings to files so that it can be used by the training script
"""
features, labels, mask, settings = create_training_data()
np.save(CACHE_DIR + '/features.npy', features)
np.save(CACHE_DIR + '/mask.npy', mask)
np.save(CACHE_DIR + '/labels.npy', labels)
with open(CACHE_DIR + '/settings.json', 'w') as file:
json.dump(settings, file)
if __name__ == "__main__":
export_training_data()
|
|
"""Support for esphome devices."""
import asyncio
import logging
import math
from typing import Any, Callable, Dict, List, Optional
from aioesphomeapi import (
APIClient,
APIConnectionError,
DeviceInfo,
EntityInfo,
EntityState,
HomeassistantServiceCall,
UserService,
UserServiceArgType,
)
import voluptuous as vol
from homeassistant import const
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, State, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.storage import Store
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
# Import config flow so that it's added to the registry
from .config_flow import EsphomeFlowHandler # noqa: F401
from .entry_data import DATA_KEY, RuntimeEntryData
DOMAIN = "esphome"
_LOGGER = logging.getLogger(__name__)
STORAGE_VERSION = 1
# No config schema - only configuration entry
CONFIG_SCHEMA = vol.Schema({}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Stub to allow setting up this component.
Configuration through YAML is not supported at this time.
"""
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up the esphome component."""
hass.data.setdefault(DATA_KEY, {})
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
password = entry.data[CONF_PASSWORD]
cli = APIClient(
hass.loop,
host,
port,
password,
client_info=f"Home Assistant {const.__version__}",
)
# Store client in per-config-entry hass.data
store = Store(
hass, STORAGE_VERSION, f"esphome.{entry.entry_id}", encoder=JSONEncoder
)
entry_data = hass.data[DATA_KEY][entry.entry_id] = RuntimeEntryData(
client=cli, entry_id=entry.entry_id, store=store
)
async def on_stop(event: Event) -> None:
"""Cleanup the socket client on HA stop."""
await _cleanup_instance(hass, entry)
# Use async_listen instead of async_listen_once so that we don't deregister
# the callback twice when shutting down Home Assistant.
# "Unable to remove unknown listener <function EventBus.async_listen_once.<locals>.onetime_listener>"
entry_data.cleanup_callbacks.append(
hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, on_stop)
)
@callback
def async_on_state(state: EntityState) -> None:
"""Send dispatcher updates when a new state is received."""
entry_data.async_update_state(hass, state)
@callback
def async_on_service_call(service: HomeassistantServiceCall) -> None:
"""Call service when user automation in ESPHome config is triggered."""
domain, service_name = service.service.split(".", 1)
service_data = service.data
if service.data_template:
try:
data_template = {
key: Template(value) for key, value in service.data_template.items()
}
template.attach(hass, data_template)
service_data.update(
template.render_complex(data_template, service.variables)
)
except TemplateError as ex:
_LOGGER.error("Error rendering data template: %s", ex)
return
if service.is_event:
# ESPHome uses servicecall packet for both events and service calls
# Ensure the user can only send events of form 'esphome.xyz'
if domain != "esphome":
_LOGGER.error("Can only generate events under esphome domain!")
return
hass.bus.async_fire(service.service, service_data)
else:
hass.async_create_task(
hass.services.async_call(
domain, service_name, service_data, blocking=True
)
)
async def send_home_assistant_state(
entity_id: str, _, new_state: Optional[State]
) -> None:
"""Forward Home Assistant states to ESPHome."""
if new_state is None:
return
await cli.send_home_assistant_state(entity_id, new_state.state)
@callback
def async_on_state_subscription(entity_id: str) -> None:
"""Subscribe and forward states for requested entities."""
unsub = async_track_state_change(hass, entity_id, send_home_assistant_state)
entry_data.disconnect_callbacks.append(unsub)
# Send initial state
hass.async_create_task(
send_home_assistant_state(entity_id, None, hass.states.get(entity_id))
)
async def on_login() -> None:
"""Subscribe to states and list entities on successful API login."""
try:
entry_data.device_info = await cli.device_info()
entry_data.available = True
await _async_setup_device_registry(hass, entry, entry_data.device_info)
entry_data.async_update_device_state(hass)
entity_infos, services = await cli.list_entities_services()
await entry_data.async_update_static_infos(hass, entry, entity_infos)
await _setup_services(hass, entry_data, services)
await cli.subscribe_states(async_on_state)
await cli.subscribe_service_calls(async_on_service_call)
await cli.subscribe_home_assistant_states(async_on_state_subscription)
hass.async_create_task(entry_data.async_save_to_store())
except APIConnectionError as err:
_LOGGER.warning("Error getting initial data: %s", err)
# Re-connection logic will trigger after this
await cli.disconnect()
try_connect = await _setup_auto_reconnect_logic(hass, cli, entry, host, on_login)
async def complete_setup() -> None:
"""Complete the config entry setup."""
infos, services = await entry_data.async_load_from_store()
await entry_data.async_update_static_infos(hass, entry, infos)
await _setup_services(hass, entry_data, services)
# Create connection attempt outside of HA's tracked task in order
# not to delay startup.
hass.loop.create_task(try_connect(is_disconnect=False))
hass.async_create_task(complete_setup())
return True
async def _setup_auto_reconnect_logic(
hass: HomeAssistantType, cli: APIClient, entry: ConfigEntry, host: str, on_login
):
"""Set up the re-connect logic for the API client."""
async def try_connect(tries: int = 0, is_disconnect: bool = True) -> None:
"""Try connecting to the API client. Will retry if not successful."""
if entry.entry_id not in hass.data[DOMAIN]:
# When removing/disconnecting manually
return
data: RuntimeEntryData = hass.data[DOMAIN][entry.entry_id]
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
data.disconnect_callbacks = []
data.available = False
data.async_update_device_state(hass)
if is_disconnect:
# This can happen often depending on WiFi signal strength.
            # Therefore all of these connection warnings are logged
# as infos. The "unavailable" logic will still trigger so the
# user knows if the device is not connected.
_LOGGER.info("Disconnected from ESPHome API for %s", host)
if tries != 0:
# If not first re-try, wait and print message
# Cap wait time at 1 minute. This is because while working on the
# device (e.g. soldering stuff), users don't want to have to wait
# a long time for their device to show up in HA again (this was
# mentioned a lot in early feedback)
#
# In the future another API will be set up so that the ESP can
            # notify HA of connectivity directly, but for now we'll use a
# really short reconnect interval.
tries = min(tries, 10) # prevent OverflowError
wait_time = int(round(min(1.8 ** tries, 60.0)))
_LOGGER.info("Trying to reconnect in %s seconds", wait_time)
await asyncio.sleep(wait_time)
try:
await cli.connect(on_stop=try_connect, login=True)
except APIConnectionError as error:
_LOGGER.info("Can't connect to ESPHome API for %s: %s", host, error)
# Schedule re-connect in event loop in order not to delay HA
# startup. First connect is scheduled in tracked tasks.
data.reconnect_task = hass.loop.create_task(
try_connect(tries + 1, is_disconnect=False)
)
else:
_LOGGER.info("Successfully connected to %s", host)
hass.async_create_task(on_login())
return try_connect
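# A small illustrative sketch (not used by the integration) of the back-off
# schedule computed in try_connect above: roughly 2, 3, 6, 10, 19 and 34
# seconds, then capped at 60 seconds from the seventh retry onwards.
def _demo_reconnect_backoff(max_tries: int = 10) -> List[int]:
    """Return the wait times the reconnect logic would use for each retry."""
    return [int(round(min(1.8 ** tries, 60.0))) for tries in range(1, max_tries + 1)]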
async def _async_setup_device_registry(
hass: HomeAssistantType, entry: ConfigEntry, device_info: DeviceInfo
):
"""Set up device registry feature for a particular config entry."""
sw_version = device_info.esphome_version
if device_info.compilation_time:
sw_version += f" ({device_info.compilation_time})"
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, device_info.mac_address)},
name=device_info.name,
manufacturer="espressif",
model=device_info.model,
sw_version=sw_version,
)
async def _register_service(
hass: HomeAssistantType, entry_data: RuntimeEntryData, service: UserService
):
service_name = f"{entry_data.device_info.name}_{service.name}"
schema = {}
for arg in service.args:
schema[vol.Required(arg.name)] = {
UserServiceArgType.BOOL: cv.boolean,
UserServiceArgType.INT: vol.Coerce(int),
UserServiceArgType.FLOAT: vol.Coerce(float),
UserServiceArgType.STRING: cv.string,
UserServiceArgType.BOOL_ARRAY: [cv.boolean],
UserServiceArgType.INT_ARRAY: [vol.Coerce(int)],
UserServiceArgType.FLOAT_ARRAY: [vol.Coerce(float)],
UserServiceArgType.STRING_ARRAY: [cv.string],
}[arg.type_]
async def execute_service(call):
await entry_data.client.execute_service(service, call.data)
hass.services.async_register(
DOMAIN, service_name, execute_service, vol.Schema(schema)
)
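# A hedged sketch of the kind of schema _register_service builds for a
# hypothetical ESPHome user service taking a bool and an int argument; the
# argument names here are made up for illustration only.
_EXAMPLE_USER_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Required("enabled"): cv.boolean,
        vol.Required("brightness"): vol.Coerce(int),
    }
)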
async def _setup_services(
hass: HomeAssistantType, entry_data: RuntimeEntryData, services: List[UserService]
):
old_services = entry_data.services.copy()
to_unregister = []
to_register = []
for service in services:
if service.key in old_services:
# Already exists
matching = old_services.pop(service.key)
if matching != service:
# Need to re-register
to_unregister.append(matching)
to_register.append(service)
else:
# New service
to_register.append(service)
for service in old_services.values():
to_unregister.append(service)
entry_data.services = {serv.key: serv for serv in services}
for service in to_unregister:
service_name = f"{entry_data.device_info.name}_{service.name}"
hass.services.async_remove(DOMAIN, service_name)
for service in to_register:
await _register_service(hass, entry_data, service)
async def _cleanup_instance(
hass: HomeAssistantType, entry: ConfigEntry
) -> RuntimeEntryData:
"""Cleanup the esphome client if it exists."""
data: RuntimeEntryData = hass.data[DATA_KEY].pop(entry.entry_id)
if data.reconnect_task is not None:
data.reconnect_task.cancel()
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
for cleanup_callback in data.cleanup_callbacks:
cleanup_callback()
await data.client.disconnect()
return data
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload an esphome config entry."""
entry_data = await _cleanup_instance(hass, entry)
tasks = []
for platform in entry_data.loaded_platforms:
tasks.append(hass.config_entries.async_forward_entry_unload(entry, platform))
if tasks:
await asyncio.wait(tasks)
return True
async def platform_async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities,
*,
component_key: str,
info_type,
entity_type,
state_type,
) -> None:
"""Set up an esphome platform.
This method is in charge of receiving, distributing and storing
info and state updates.
"""
entry_data: RuntimeEntryData = hass.data[DOMAIN][entry.entry_id]
entry_data.info[component_key] = {}
entry_data.old_info[component_key] = {}
entry_data.state[component_key] = {}
@callback
def async_list_entities(infos: List[EntityInfo]):
"""Update entities of this platform when entities are listed."""
old_infos = entry_data.info[component_key]
new_infos = {}
add_entities = []
for info in infos:
if not isinstance(info, info_type):
# Filter out infos that don't belong to this platform.
continue
if info.key in old_infos:
# Update existing entity
old_infos.pop(info.key)
else:
# Create new entity
entity = entity_type(entry.entry_id, component_key, info.key)
add_entities.append(entity)
new_infos[info.key] = info
# Remove old entities
for info in old_infos.values():
entry_data.async_remove_entity(hass, component_key, info.key)
# First copy the now-old info into the backup object
entry_data.old_info[component_key] = entry_data.info[component_key]
# Then update the actual info
entry_data.info[component_key] = new_infos
# Add entities to Home Assistant
async_add_entities(add_entities)
signal = f"esphome_{entry.entry_id}_on_list"
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_list_entities)
)
@callback
def async_entity_state(state: EntityState):
"""Notify the appropriate entity of an updated state."""
if not isinstance(state, state_type):
return
entry_data.state[component_key][state.key] = state
entry_data.async_update_entity(hass, component_key, state.key)
signal = f"esphome_{entry.entry_id}_on_state"
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_entity_state)
)
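# A hedged sketch of how a platform module in this integration might wire
# itself up through platform_async_setup_entry; "example" and the *_cls
# parameters are placeholders for the platform's real aioesphomeapi info,
# entity and state classes.
async def _example_platform_async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities,
    info_cls,
    entity_cls,
    state_cls,
) -> None:
    await platform_async_setup_entry(
        hass,
        entry,
        async_add_entities,
        component_key="example",
        info_type=info_cls,
        entity_type=entity_cls,
        state_type=state_cls,
    )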
def esphome_state_property(func):
"""Wrap a state property of an esphome entity.
This checks if the state object in the entity is set, and
prevents writing NAN values to the Home Assistant state machine.
"""
@property
def _wrapper(self):
# pylint: disable=protected-access
if self._state is None:
return None
val = func(self)
if isinstance(val, float) and math.isnan(val):
# Home Assistant doesn't use NAN values in state machine
# (not JSON serializable)
return None
return val
return _wrapper
class EsphomeEnumMapper:
"""Helper class to convert between hass and esphome enum values."""
def __init__(self, func: Callable[[], Dict[int, str]]):
"""Construct a EsphomeEnumMapper."""
self._func = func
def from_esphome(self, value: int) -> str:
"""Convert from an esphome int representation to a hass string."""
return self._func()[value]
def from_hass(self, value: str) -> int:
"""Convert from a hass string to a esphome int representation."""
inverse = {v: k for k, v in self._func().items()}
return inverse[value]
def esphome_map_enum(func: Callable[[], Dict[int, str]]):
"""Map esphome int enum values to hass string constants.
    This function has to be used as a decorator. This ensures the aioesphomeapi
    import only happens at runtime.
"""
return EsphomeEnumMapper(func)
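# A hedged usage sketch of esphome_map_enum with a made-up mapping; the real
# platforms return mappings of aioesphomeapi enum members (e.g. fan speeds)
# to Home Assistant string constants.
@esphome_map_enum
def _example_enum_mapping() -> Dict[int, str]:
    return {0: "off", 1: "low", 2: "high"}
# _example_enum_mapping.from_esphome(1) returns "low";
# _example_enum_mapping.from_hass("high") returns 2.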
class EsphomeEntity(Entity):
"""Define a generic esphome entity."""
def __init__(self, entry_id: str, component_key: str, key: int):
"""Initialize."""
self._entry_id = entry_id
self._component_key = component_key
self._key = key
self._remove_callbacks: List[Callable[[], None]] = []
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
kwargs = {
"entry_id": self._entry_id,
"component_key": self._component_key,
"key": self._key,
}
self._remove_callbacks.append(
async_dispatcher_connect(
self.hass,
(
f"esphome_{kwargs.get('entry_id')}"
f"_update_{kwargs.get('component_key')}_{kwargs.get('key')}"
),
self._on_state_update,
)
)
self._remove_callbacks.append(
async_dispatcher_connect(
self.hass,
(
f"esphome_{kwargs.get('entry_id')}_remove_"
f"{kwargs.get('component_key')}_{kwargs.get('key')}"
),
self.async_remove,
)
)
self._remove_callbacks.append(
async_dispatcher_connect(
self.hass,
f"esphome_{kwargs.get('entry_id')}_on_device_update",
self._on_device_update,
)
)
async def _on_state_update(self) -> None:
"""Update the entity state when state or static info changed."""
self.async_write_ha_state()
async def _on_device_update(self) -> None:
"""Update the entity state when device info has changed."""
if self._entry_data.available:
# Don't update the HA state yet when the device comes online.
# Only update the HA state when the full state arrives
# through the next entity state packet.
return
self.async_write_ha_state()
async def async_will_remove_from_hass(self) -> None:
"""Unregister callbacks."""
for remove_callback in self._remove_callbacks:
remove_callback()
self._remove_callbacks = []
@property
def _entry_data(self) -> RuntimeEntryData:
return self.hass.data[DATA_KEY][self._entry_id]
@property
def _static_info(self) -> EntityInfo:
# Check if value is in info database. Use a single lookup.
info = self._entry_data.info[self._component_key].get(self._key)
if info is not None:
return info
        # This entity is in the process of being removed and has already been
        # removed from .info, so look it up in old_info instead
return self._entry_data.old_info[self._component_key].get(self._key)
@property
def _device_info(self) -> DeviceInfo:
return self._entry_data.device_info
@property
def _client(self) -> APIClient:
return self._entry_data.client
@property
def _state(self) -> Optional[EntityState]:
try:
return self._entry_data.state[self._component_key][self._key]
except KeyError:
return None
@property
def available(self) -> bool:
"""Return if the entity is available."""
device = self._device_info
if device.has_deep_sleep:
# During deep sleep the ESP will not be connectable (by design)
# For these cases, show it as available
return True
return self._entry_data.available
@property
def unique_id(self) -> Optional[str]:
"""Return a unique id identifying the entity."""
if not self._static_info.unique_id:
return None
return self._static_info.unique_id
@property
def device_info(self) -> Dict[str, Any]:
"""Return device registry information for this entity."""
return {
"connections": {(dr.CONNECTION_NETWORK_MAC, self._device_info.mac_address)}
}
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._static_info.name
@property
def should_poll(self) -> bool:
"""Disable polling."""
return False
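# A hedged sketch of how a concrete platform entity might combine
# EsphomeEntity with the esphome_state_property wrapper; both the class and
# the attribute read from the state object are illustrative only.
class _ExampleEsphomeEntity(EsphomeEntity):
    """Illustrative entity; real platforms define their own subclasses."""
    @esphome_state_property
    def state(self) -> Optional[str]:
        """Return the state reported by the device, or None if unknown."""
        return self._state.state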
|
|
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import numpy as np
def get_albedo_colortable():
"""
colors(*,i)=[0, 0, 050] & boundary[i]=0.000 & i=i+1
colors(*,i)=[0, 0, 200] & boundary[i]=0.020 & i=i+1 ; 0.020
colors(*,i)=[0, 0, 255] & boundary[i]=0.040 & i=i+1 ; 0.040
colors(*,i)=[255, 24, 0] & boundary[i]=0.060 & i=i+1 ; 0.060
colors(*,i)=[220, 40, 4] & boundary[i]=0.080 & i=i+1 ; 0.080
colors(*,i)=[192, 65, 7] & boundary[i]=0.100 & i=i+1 ; 0.100
colors(*,i)=[129, 25, 14] & boundary[i]=0.120 & i=i+1 ; 0.120
colors(*,i)=[ 74, 134, 0] & boundary[i]=0.140 & i=i+1 ; 0.140
colors(*,i)=[152, 186, 0] & boundary[i]=0.160 & i=i+1 ; 0.160
colors(*,i)=[153, 147, 0] & boundary[i]=0.180 & i=i+1 ; 0.180
colors(*,i)=[139, 123, 0] & boundary[i]=0.200 & i=i+1 ; 0.200
colors(*,i)=[125, 99, 0] & boundary[i]=0.220 & i=i+1 ; 0.220
colors(*,i)=[111, 75, 0] & boundary[i]=0.240 & i=i+1 ; 0.240
colors(*,i)=[126, 91, 14] & boundary[i]=0.260 & i=i+1 ; 0.260
colors(*,i)=[141, 108, 28] & boundary[i]=0.280 & i=i+1 ; 0.280
colors(*,i)=[156, 125, 42] & boundary[i]=0.300 & i=i+1 ; 0.300
colors(*,i)=[171, 142, 56] & boundary[i]=0.325 & i=i+1 ; 0.325
colors(*,i)=[186, 159, 71] & boundary[i]=0.350 & i=i+1 ; 0.350
colors(*,i)=[201, 176, 85] & boundary[i]=0.375 & i=i+1 ; 0.375
colors(*,i)=[216, 193, 99] & boundary[i]=0.400 & i=i+1 ; 0.400
colors(*,i)=[231, 210, 113] & boundary[i]=0.450 & i=i+1 ; 0.450
colors(*,i)=[240, 220, 120] & boundary[i]=0.500 & i=i+1 ; 0.500
colors(*,i)=[246, 225, 135] & boundary[i]=0.550 & i=i+1 ; 0.550
colors(*,i)=[246, 235, 155] & boundary[i]=0.600 & i=i+1 ; 0.600
colors(*,i)=[240, 240, 180] & boundary[i]=0.650 & i=i+1 ; 0.650
    colors(*,i)=[250, 250, 210] & boundary[i]=0.700 & i=i+1 ; 0.700
    colors(*,i)=[230, 253, 200] & boundary[i]=0.750 & i=i+1 ; 0.750
which means for instance that the interval 0.18 - 0.20 is coded with the RGB value [139,123,0]. Missing values (255) are coded in white. If you multiply these intervals by 254 you have the equivalent intervals directly in the way the albedo product is coded.
"""
    ct = [[0, 0, 50],  # note: 050 would be an octal literal (=40); the table means decimal 50
[0, 0, 200],
[0, 0, 255],
[255, 24, 0],
[220, 40, 4],
[30, 70, 0],
[50, 100, 0],
[74, 134, 0],
[152, 186, 0],
[153, 147, 0],
[139, 123, 0],
[125, 99, 0],
[111, 75, 0],
[126, 91, 14],
[141, 108, 28],
[156, 125, 42],
[171, 142, 56],
[186, 159, 71],
[201, 176, 85],
[216, 193, 99],
[231, 210, 113],
[240, 220, 120],
[246, 225, 135],
[246, 235, 155],
[240, 240, 180],
[250, 250, 210],
[230, 253, 200]]
ct = np.asarray(ct)
ct = ct / 255.
# define boundaries
lbounds = [0.000,
0.020,
0.040,
0.060,
0.080,
0.100,
0.120,
0.140,
0.160,
0.180,
0.200,
0.220,
0.240,
0.260,
0.280,
0.300,
0.325,
0.350,
0.375,
0.400,
0.450,
0.500,
0.550,
0.600,
0.650,
0.700,
0.750]
return lbounds, ct
def get_albedo_colortable1():
"""
colors(*,i)=[0, 0, 050] & boundary[i]=0.000 & i=i+1
colors(*,i)=[0, 0, 200] & boundary[i]=0.020 & i=i+1 ; 0.020
colors(*,i)=[0, 0, 255] & boundary[i]=0.040 & i=i+1 ; 0.040
colors(*,i)=[255, 24, 0] & boundary[i]=0.060 & i=i+1 ; 0.060
colors(*,i)=[220, 40, 4] & boundary[i]=0.080 & i=i+1 ; 0.080
colors(*,i)=[192, 65, 7] & boundary[i]=0.100 & i=i+1 ; 0.100
colors(*,i)=[129, 25, 14] & boundary[i]=0.120 & i=i+1 ; 0.120
colors(*,i)=[ 74, 134, 0] & boundary[i]=0.140 & i=i+1 ; 0.140
colors(*,i)=[152, 186, 0] & boundary[i]=0.160 & i=i+1 ; 0.160
colors(*,i)=[153, 147, 0] & boundary[i]=0.180 & i=i+1 ; 0.180
colors(*,i)=[139, 123, 0] & boundary[i]=0.200 & i=i+1 ; 0.200
colors(*,i)=[125, 99, 0] & boundary[i]=0.220 & i=i+1 ; 0.220
colors(*,i)=[111, 75, 0] & boundary[i]=0.240 & i=i+1 ; 0.240
colors(*,i)=[126, 91, 14] & boundary[i]=0.260 & i=i+1 ; 0.260
colors(*,i)=[141, 108, 28] & boundary[i]=0.280 & i=i+1 ; 0.280
colors(*,i)=[156, 125, 42] & boundary[i]=0.300 & i=i+1 ; 0.300
colors(*,i)=[171, 142, 56] & boundary[i]=0.325 & i=i+1 ; 0.325
colors(*,i)=[186, 159, 71] & boundary[i]=0.350 & i=i+1 ; 0.350
colors(*,i)=[201, 176, 85] & boundary[i]=0.375 & i=i+1 ; 0.375
colors(*,i)=[216, 193, 99] & boundary[i]=0.400 & i=i+1 ; 0.400
colors(*,i)=[231, 210, 113] & boundary[i]=0.450 & i=i+1 ; 0.450
colors(*,i)=[240, 220, 120] & boundary[i]=0.500 & i=i+1 ; 0.500
colors(*,i)=[246, 225, 135] & boundary[i]=0.550 & i=i+1 ; 0.550
colors(*,i)=[246, 235, 155] & boundary[i]=0.600 & i=i+1 ; 0.600
colors(*,i)=[240, 240, 180] & boundary[i]=0.650 & i=i+1 ; 0.650
    colors(*,i)=[250, 250, 210] & boundary[i]=0.700 & i=i+1 ; 0.700
    colors(*,i)=[230, 253, 200] & boundary[i]=0.750 & i=i+1 ; 0.750
which means for instance that the interval 0.18 - 0.20 is coded with the RGB value [139,123,0]. Missing values (255) are coded in white. If you multiply these intervals by 254 you have the equivalent intervals directly in the way the albedo product is coded.
"""
    ct = [[0, 0, 50],  # note: 050 would be an octal literal (=40); the table means decimal 50
[0, 0, 200],
[0, 0, 255],
[255, 24, 0],
[220, 40, 4],
[192, 65, 7],
[129, 25, 14],
[74, 134, 0],
[152, 186, 0],
[153, 147, 0],
[139, 123, 0],
[125, 99, 0],
[111, 75, 0],
[126, 91, 14],
[141, 108, 28],
[156, 125, 42],
[171, 142, 56],
[186, 159, 71],
[201, 176, 85],
[216, 193, 99],
[231, 210, 113],
[240, 220, 120],
[246, 225, 135],
[246, 235, 155],
[240, 240, 180],
[250, 250, 210],
[230, 253, 200]]
ct = np.asarray(ct)
ct = ct / 255.
# define boundaries
lbounds = [0.000,
0.020,
0.040,
0.060,
0.080,
0.100,
0.120,
0.140,
0.160,
0.180,
0.200,
0.220,
0.240,
0.260,
0.280,
0.300,
0.325,
0.350,
0.375,
0.400,
0.450,
0.500,
0.550,
0.600,
0.650,
0.700,
0.750]
return lbounds, ct
class ColorMapGenerator(object):
"""
Generate colormaps from RGB value lists
"""
def __init__(self):
pass
def albedo(self):
lb, ct = get_albedo_colortable()
return self.rgb_to_cmap(lb, ct, name='albedo')
def rgb_to_cmap(self, lbound, rgb, name='mymap'):
"""
generate a colormap based on a list of lower boundaries
and an RGB list
inspired by http://faculty.washington.edu/rjl/clawpack/trunk/python/pyclaw/plotters/colormaps.py
Parameters
----------
lbound : array
array which specifies the lower boundaries
rgb : array
a list of [n,3] dimension which specifies the RGB values
Example
-------
> import matplotlib.pylab as plt
> x = plt.randn(100,100) + 1.
> lb, ct = get_albedo_colortable()
> C = ColorMapGenerator()
> cma = C.rgb_to_cmap(lb, ct)
> plt.imshow(x, cmap=cma, vmin=0., vmax=1.)
> plt.colorbar()
> plt.show()
"""
from matplotlib.colors import LinearSegmentedColormap, ColorConverter
import numpy as np
if len(lbound) != len(rgb):
raise ValueError(
'Inconsistent geometries for boundaries and RGB table')
lbound = np.asarray(lbound)
# check that boundaries in ascending order
if np.any(np.diff(lbound) < 0.):
raise ValueError('Boundaries are not in ascending order!')
n = len(lbound)
bmin = lbound.min()
bmax = lbound.max()
CC = ColorConverter()
R = []
G = []
B = []
x = []
        for i in xrange(n):
            R.append(rgb[i, 0])
            G.append(rgb[i, 1])
            B.append(rgb[i, 2])
            if bmax > bmin:
                x.append((lbound[i] - bmin) / (bmax - bmin))
            else:
                # avoid a division by zero when all boundaries are identical
                x.append(0.)
x = np.asarray(x)
cmap_dict = {}
red_list = []
green_list = []
blue_list = []
# in case of homogeneous colors, generate a tuple with 0 and 1 (see:
# http://stackoverflow.com/questions/16267143/matplotlib-single-colored-colormap-with-saturation)
if bmax == bmin:
for xx in [0., 1.]:
red_list.append((xx, R[i], R[i]))
green_list.append((xx, G[i], G[i]))
blue_list.append((xx, B[i], B[i]))
else:
for i in xrange(n):
red_list.append((x[i], R[i], R[i]))
green_list.append((x[i], G[i], G[i]))
blue_list.append((x[i], B[i], B[i]))
cmap_dict['red'] = red_list
cmap_dict['green'] = green_list
cmap_dict['blue'] = blue_list
cmap = LinearSegmentedColormap(name, cmap_dict)
return cmap
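# A hedged usage sketch (assuming matplotlib is installed): combine the
# discrete albedo colour table with a BoundaryNorm so that data values are
# binned exactly at the lower boundaries returned by get_albedo_colortable().
def _example_albedo_plot(data):
    import matplotlib.pylab as plt
    from matplotlib.colors import BoundaryNorm
    lbounds, _ct = get_albedo_colortable()
    cmap = ColorMapGenerator().albedo()
    norm = BoundaryNorm(lbounds, cmap.N)
    plt.imshow(data, cmap=cmap, norm=norm)
    plt.colorbar()
    plt.show()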
|
|
import logging
from flask import flash
from flask_login import current_user
from scout.build import build_managed_variant
from scout.constants import CHROMOSOMES, CHROMOSOMES_38
from scout.parse.variant.managed_variant import parse_managed_variant_lines
from scout.server.extensions import store
from scout.server.utils import user_institutes
from .forms import (
CATEGORY_CHOICES,
SUBCATEGORY_CHOICES,
ManagedVariantAddForm,
ManagedVariantModifyForm,
ManagedVariantsFilterForm,
)
LOG = logging.getLogger(__name__)
VARS_PER_PAGE = 50
def set_query_coordinates(query_options, request_form):
"""Set query coordinates based on submitted form
Args:
query_options(dict): managed variants optional params
request_form(ImmutableMultiDict): form submitted by user to filter managed variants
"""
chrom = request_form.get("chromosome")
if chrom is None or chrom == "All":
return
query_options["chromosome"] = chrom
if request_form.get("position"):
query_options["position"] = int(request_form.get("position"))
if request_form.get("end"):
query_options["end"] = int(request_form.get("end"))
def managed_variants(request):
"""Create and return managed variants' data
Args:
request(werkzeug.local.LocalProxy): request containing form data
Returns
data(dict): data to be displayed in template page
"""
page = int(request.form.get("page", 1))
skip_count = VARS_PER_PAGE * max(page - 1, 0)
# Retrieve form data for the 3 types of form present on the managed variants page
filters_form = ManagedVariantsFilterForm(request.form)
add_form = ManagedVariantAddForm()
modify_form = ManagedVariantModifyForm()
# Retrieve form data to compose variants query
categories = request.form.getlist("category") or [cat[0] for cat in CATEGORY_CHOICES]
query_options = {"sub_category": []}
# Set variant sub-category in query_options
for sub_cat in request.form.getlist("sub_category") or [
subcat[0] for subcat in SUBCATEGORY_CHOICES
]:
query_options["sub_category"].append(sub_cat)
if request.form.get("description") is not None and request.form.get("description") != "":
query_options["description"] = request.form["description"]
# Set requested variant coordinates in query options
set_query_coordinates(query_options, request.form)
# Get all variants according to the selected fields in filter form
managed_variants_query = store.managed_variants(
category=categories, query_options=query_options
)
variant_count = store.count_managed_variants(category=categories, query_options=query_options)
more_variants = True if variant_count > (skip_count + VARS_PER_PAGE) else False
managed_variants_res = managed_variants_query.skip(skip_count).limit(VARS_PER_PAGE)
managed_variants = [managed_variant for managed_variant in managed_variants_res]
return {
"page": page,
"filters_form": filters_form,
"add_form": add_form,
"modify_form": modify_form,
"managed_variants": managed_variants,
"more_variants": more_variants,
"cytobands_37": store.cytoband_by_chrom("37"),
"cytobands_38": store.cytoband_by_chrom("38"),
"chromosomes_37": CHROMOSOMES,
"chromosomes_38": CHROMOSOMES_38,
"subcategory_choices": [[choice[1], choice[0]] for choice in SUBCATEGORY_CHOICES],
}
def add_managed_variant(request):
"""Add a managed variant.
Args:
request(werkzeug.local.LocalProxy): request containing form data
"""
add_form = ManagedVariantAddForm(request.form)
institutes = list(user_institutes(store, current_user))
current_user_id = current_user._id
managed_variant_obj = build_managed_variant(
dict(
chromosome=add_form["chromosome"].data,
position=add_form["position"].data,
end=add_form["end"].data,
reference=add_form["reference"].data,
alternative=add_form["alternative"].data,
institutes=institutes,
maintainer=[current_user_id],
category=add_form["category"].data,
sub_category=add_form["sub_category"].data,
description=add_form["description"].data,
)
)
return store.upsert_managed_variant(managed_variant_obj)
def upload_managed_variants(store, lines, institutes, current_user_id):
"""Add managed variants from a CSV file"""
total_variant_lines = 0
new_managed_variants = 0
for managed_variant_info in parse_managed_variant_lines(lines):
total_variant_lines += 1
if not validate_managed_variant(managed_variant_info):
flash(
f"Managed variant info line {total_variant_lines} has errors ({managed_variant_info})",
"warning",
)
continue
managed_variant_info.update({"maintainer": [current_user_id], "institutes": institutes})
managed_variant_obj = build_managed_variant(managed_variant_info)
if store.upsert_managed_variant(managed_variant_obj):
new_managed_variants += 1
return new_managed_variants, total_variant_lines
def validate_managed_variant(managed_variant_info):
"""
Returns true
Args:
managed_variant_info: dict
Returns:
boolean
"""
mandatory_fields = [
"chromosome",
"position",
"reference",
"alternative",
"category",
"sub_category",
]
record_ok = True
for mandatory_field in mandatory_fields:
if not managed_variant_info.get(mandatory_field):
record_ok = False
return record_ok
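# A short illustrative sketch (variant values made up): validate_managed_variant
# only checks that the mandatory keys are present and non-empty.
_EXAMPLE_VARIANT_INFO = {
    "chromosome": "17",
    "position": "48275363",
    "reference": "C",
    "alternative": "A",
    "category": "snv",
    "sub_category": "snv",
}
# validate_managed_variant(_EXAMPLE_VARIANT_INFO) returns True, while a dict
# missing e.g. "reference" returns False.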
def modify_managed_variant(store, managed_variant_id, edit_form):
"""Modify a managed variant."""
managed_variant = store.find_managed_variant(managed_variant_id)
if managed_variant is None:
return
original_obj_id = managed_variant["_id"]
managed_variant.update(
{
"chromosome": edit_form["chromosome"].data,
"position": edit_form["position"].data,
"end": edit_form["end"].data,
"reference": edit_form["reference"].data,
"alternative": edit_form["alternative"].data,
"category": edit_form["category"].data,
"sub_category": edit_form["sub_category"].data,
"description": edit_form["description"].data,
}
)
# new ids must be built upon update
updated_variant = build_managed_variant(managed_variant)
LOG.debug(
"Updated variant has mvid %s and old id is %s.",
updated_variant["managed_variant_id"],
original_obj_id,
)
result = store.upsert_managed_variant(updated_variant, original_obj_id)
return result
def remove_managed_variant(store, managed_variant_id):
"""Remove a managed variant."""
removed_variant = store.delete_managed_variant(managed_variant_id)
return removed_variant
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import urlparse
import mock
from oslo.config import cfg
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api.extensions import PluginAwareExtensionManager
from neutron.api.v2 import attributes
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron.common import config
from neutron.common import exceptions as q_exc
from neutron import context
from neutron.manager import NeutronManager
from neutron.openstack.common.notifier import api as notifer_api
from neutron.openstack.common import policy as common_policy
from neutron.openstack.common import uuidutils
from neutron.tests import base
from neutron.tests.unit import testlib_api
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
EXTDIR = os.path.join(ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def _get_path(resource, id=None, action=None, fmt=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if fmt is not None:
path = path + '.%s' % fmt
return path
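# A small illustrative sketch (not one of the test cases) showing how
# _get_path composes request paths from its optional parts.
def _example_get_path_usage():
    assert _get_path('networks') == '/networks'
    assert _get_path('networks', id='abc', fmt='json') == '/networks/abc.json'
    assert _get_path('networks', id='abc',
                     action='dhcp', fmt='json') == '/networks/abc/dhcp.json'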
class ResourceIndexTestCase(base.BaseTestCase):
def test_index_json(self):
index = webtest.TestApp(router.Index({'foo': 'bar'}))
res = index.get('')
self.assertTrue('resources' in res.json)
self.assertTrue(len(res.json['resources']) == 1)
resource = res.json['resources'][0]
self.assertTrue('collection' in resource)
self.assertTrue(resource['collection'] == 'bar')
self.assertTrue('name' in resource)
self.assertTrue(resource['name'] == 'foo')
self.assertTrue('links' in resource)
self.assertTrue(len(resource['links']) == 1)
link = resource['links'][0]
self.assertTrue('href' in link)
self.assertTrue(link['href'] == 'http://localhost/bar')
self.assertTrue('rel' in link)
self.assertTrue(link['rel'] == 'self')
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure 'stale' patched copies of the plugin are never returned
NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
PluginAwareExtensionManager._instance = None
# Create the default configurations
args = ['--config-file', etcdir('neutron.conf.test')]
config.parse(args=args)
# Update the plugin
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('allow_pagination', True)
cfg.CONF.set_override('allow_sorting', True)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = True
instance._NeutronPluginBaseV2__native_sorting_support = True
self.addCleanup(self._plugin_patcher.stop)
self.addCleanup(cfg.CONF.reset)
api = router.APIRouter()
self.api = webtest.TestApp(api)
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
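# A brief illustrative sketch: _ArgMatcher combined with _list_cmp lets the
# mock assertions below compare list arguments as sets, ignoring ordering.
def _example_arg_matcher_usage():
    matcher = _ArgMatcher(_list_cmp, ['name', 'id'])
    assert matcher == ['id', 'name']
    assert not (matcher == ['id', 'name', 'status'])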
class APIv2TestCase(APIv2TestBase):
def _do_field_list(self, resource, base_fields):
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
policy_attrs = [name for (name, info) in attr_info.items()
if info.get('required_by_policy') or
info.get('primary_key')]
fields = base_fields
fields.extend(policy_attrs)
return fields
def _get_collection_kwargs(self, skipargs=[], **kwargs):
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict((arg, mock.ANY)
for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
return args_dict
def test_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': 'foo'})
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo', 'bar'])
self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_values(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
filters = {'name': ['bar', 'bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar',
'tenant_id': 'bar2'})
filters = {'name': ['bar'], 'tenant_id': ['bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
filters = {'name': ['bar']}
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
filters = {'admin_state_up': [True]}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_list_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'),
{'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '10'})
kwargs = self._get_collection_kwargs(limit=10)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_great_than_max_limit(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '1001'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_zero(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'limit': '0'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_unspecific(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_value(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'limit': -1},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_non_integer(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'limit': 'abc'}, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_infinite_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_override('pagination_max_limit', 'Infinite')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', '-1')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_non_integer_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', 'abc')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_marker(self):
cfg.CONF.set_override('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
marker = _uuid()
self.api.get(_get_path('networks'),
{'marker': marker})
kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse(self):
calls = []
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'True'})
kwargs = self._get_collection_kwargs(page_reverse=True)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'False'})
kwargs = self._get_collection_kwargs(page_reverse=False)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
def test_page_reverse_with_non_bool(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'abc'})
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_unspecific(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_emulated_sort(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_sort_without_sort_field(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc'],
'fields': ['subnets']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
fields=_ArgMatcher(_list_cmp, ['name',
'status',
'id',
'subnets',
'shared',
'tenant_id']))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_native_pagination_without_native_sorting(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_sorting_support = False
self.assertRaises(q_exc.Invalid, router.APIRouter)
def test_native_pagination_without_allow_sorting(self):
cfg.CONF.set_override('allow_sorting', False)
instance = self.plugin.return_value
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def setUp(self):
super(JSONV2TestCase, self).setUp()
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertTrue('networks' in res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
# expect full list returned
self.assertEqual(len(res['networks']), 1)
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in input_dict.iteritems():
self.assertEqual(v, output_dict[k])
else:
# expect no results
self.assertEqual(len(res['networks']), 0)
def test_list_noauth(self):
self._test_list(None, _uuid())
def test_list_keystone(self):
tenant_id = _uuid()
self._test_list(tenant_id, tenant_id)
def test_list_keystone_bad(self):
tenant_id = _uuid()
self._test_list(tenant_id + "bad", tenant_id)
def test_list_pagination(self):
id1 = str(_uuid())
id2 = str(_uuid())
input_dict1 = {'id': id1,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
input_dict2 = {'id': id2,
'name': 'net2',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict1, input_dict2]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'sort_key': ['name'],
'sort_dir': ['asc']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 2)
self.assertEqual(sorted([id1, id2]),
sorted([res['networks'][0]['id'],
res['networks'][1]['id']]))
self.assertIn('networks_links', res)
next_links = []
previous_links = []
for r in res['networks_links']:
if r['rel'] == 'next':
next_links.append(r)
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(next_links), 1)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id2]
self.assertEqual(urlparse.parse_qs(url.query), params)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id1]
params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), params)
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
previous_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(urlparse.parse_qs(url.query),
expected_params)
def test_list_pagination_reverse_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(res['networks'], [])
next_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
del expect_params['page_reverse']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_create(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertTrue('network' in res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
def test_create_use_defaults(self):
net_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True,
'shared': False}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['admin_state_up'], True)
self.assertEqual(net['status'], "ACTIVE")
def test_create_no_keystone_env(self):
data = {'name': 'net1'}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
'shared': False, 'tenant_id': tenant_id}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt,
extra_environ=env)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def test_create_bad_keystone_tenant(self):
tenant_id = _uuid()
data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
env = {'neutron.context': context.Context('', tenant_id + "bad")}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True,
extra_environ=env)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_no_body(self):
data = {'whoa': None}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_no_resource(self):
data = {}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_missing_attr(self):
data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_readonly_attr(self):
data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'status': "ACTIVE"}}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_bulk(self):
data = {'networks': [{'name': 'net1',
'admin_state_up': True,
'tenant_id': _uuid()},
{'name': 'net2',
'admin_state_up': True,
'tenant_id': _uuid()}]}
def side_effect(context, network):
net = network.copy()
net['network'].update({'subnets': []})
return net['network']
instance = self.plugin.return_value
instance.create_network.side_effect = side_effect
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
def test_create_bulk_no_networks(self):
data = {'networks': []}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_create_bulk_missing_attr(self):
data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_bulk_partial_body(self):
data = {'ports': [{'device_id': 'device_1',
'tenant_id': _uuid()},
{'tenant_id': _uuid()}]}
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_create_attr_not_specified(self):
net_id = _uuid()
tenant_id = _uuid()
device_id = _uuid()
initial_input = {'port': {'name': '', 'network_id': net_id,
'tenant_id': tenant_id,
'device_id': device_id,
'admin_state_up': True}}
full_input = {'port': {'admin_state_up': True,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_owner': ''}}
full_input['port'].update(initial_input['port'])
return_value = {'id': _uuid(), 'status': 'ACTIVE',
'admin_state_up': True,
'mac_address': 'ca:fe:de:ad:be:ef',
'device_id': device_id,
'device_owner': ''}
return_value.update(initial_input['port'])
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': unicode(tenant_id)}
instance.get_ports_count.return_value = 1
instance.create_port.return_value = return_value
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_port.assert_called_with(mock.ANY, port=full_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('port', res)
port = res['port']
self.assertEqual(port['network_id'], net_id)
self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')
def test_create_return_extra_attr(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
self.assertFalse('v2attrs:something' in net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
res = self.api.delete(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
def test_delete_noauth(self):
self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
def test_delete_keystone(self):
tenant_id = _uuid()
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
def test_delete_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_delete(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
data = {'tenant_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
res = self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(res.status_int, expected_code)
return res
def test_get_noauth(self):
self._test_get(None, _uuid(), 200)
def test_get_keystone(self):
tenant_id = _uuid()
self._test_get(tenant_id, tenant_id, 200)
def test_get_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_get(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_get_keystone_shared_network(self):
tenant_id = _uuid()
self._test_get(tenant_id + "another", tenant_id, 200)
def test_get_keystone_strip_admin_only_attribute(self):
tenant_id = _uuid()
# Inject rule in policy engine
common_policy._rules['get_network:name'] = common_policy.parse_rule(
"rule:admin_only")
res = self._test_get(tenant_id, tenant_id, 200)
res = self.deserialize(res)
try:
self.assertNotIn('name', res['network'])
finally:
del common_policy._rules['get_network:name']
def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
res = self.api.put(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors)
# Ensure id attribute is included in fields returned by GET call
# in update procedure.
self.assertEqual(1, instance.get_network.call_count)
self.assertIn('id', instance.get_network.call_args[1]['fields'])
self.assertEqual(res.status_int, expected_code)
def test_update_noauth(self):
self._test_update(None, _uuid(), 200)
def test_update_keystone(self):
tenant_id = _uuid()
self._test_update(tenant_id, tenant_id, 200)
def test_update_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
class SubresourceTest(base.BaseTestCase):
def setUp(self):
super(SubresourceTest, self).setUp()
plugin = 'neutron.tests.unit.test_api_v2.TestSubresourcePlugin'
NeutronManager._instance = None
PluginAwareExtensionManager._instance = None
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
args = ['--config-file', etcdir('neutron.conf.test')]
config.parse(args=args)
cfg.CONF.set_override('core_plugin', plugin)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
self.addCleanup(self._plugin_patcher.stop)
self.addCleanup(cfg.CONF.reset)
router.SUB_RESOURCES['dummy'] = {
'collection_name': 'dummies',
'parent': {'collection_name': 'networks',
'member_name': 'network'}
}
attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = {
'foo': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True}
}
api = router.APIRouter()
self.api = webtest.TestApp(api)
def tearDown(self):
router.SUB_RESOURCES = {}
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
super(SubresourceTest, self).tearDown()
def test_index_sub_resource(self):
instance = self.plugin.return_value
self.api.get('/networks/id1/dummies')
instance.get_network_dummies.assert_called_once_with(mock.ANY,
filters=mock.ANY,
fields=mock.ANY,
network_id='id1')
def test_show_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.get_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
fields=mock.ANY)
def test_create_sub_resource(self):
instance = self.plugin.return_value
body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
self.api.post_json('/networks/id1/dummies', body)
instance.create_network_dummy.assert_called_once_with(mock.ANY,
network_id='id1',
dummy=body)
def test_update_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {'dummy': {'foo': 'bar'}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_delete_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.delete_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class XMLV2TestCase(JSONV2TestCase):
fmt = 'xml'
class V2Views(base.BaseTestCase):
def _view(self, keys, collection, resource):
data = dict((key, 'value') for key in keys)
data['fake'] = 'value'
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
controller = v2_base.Controller(None, collection, resource, attr_info)
res = controller._view(context.get_admin_context(), data)
self.assertTrue('fake' not in res)
for key in keys:
self.assertTrue(key in res)
def test_network(self):
keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
'tenant_id')
self._view(keys, 'networks', 'network')
def test_port(self):
keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
'device_id', 'admin_state_up', 'tenant_id', 'status')
self._view(keys, 'ports', 'port')
def test_subnet(self):
keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
'ip_version', 'cidr', 'enable_dhcp')
self._view(keys, 'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
def _resource_op_notifier(self, opname, resource, expected_errors=False,
notification_level='INFO'):
initial_input = {resource: {'name': 'myname'}}
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
with mock.patch.object(notifer_api, 'notify') as mynotifier:
if opname == 'create':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.post_json(
_get_path('networks'),
initial_input, expect_errors=expected_errors)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input, expect_errors=expected_errors)
expected_code = exc.HTTPOk.code
if opname == 'delete':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.delete(
_get_path('networks', id=_uuid()),
expect_errors=expected_errors)
expected_code = exc.HTTPNoContent.code
expected = [mock.call(mock.ANY,
'network.' + cfg.CONF.host,
resource + "." + opname + ".start",
notification_level,
mock.ANY),
mock.call(mock.ANY,
'network.' + cfg.CONF.host,
resource + "." + opname + ".end",
notification_level,
mock.ANY)]
self.assertEqual(expected, mynotifier.call_args_list)
self.assertEqual(res.status_int, expected_code)
def test_network_create_notifer(self):
self._resource_op_notifier('create', 'network')
def test_network_delete_notifer(self):
self._resource_op_notifier('delete', 'network')
def test_network_update_notifer(self):
self._resource_op_notifier('update', 'network')
def test_network_create_notifer_with_log_level(self):
cfg.CONF.set_override('default_notification_level', 'DEBUG')
self._resource_op_notifier('create', 'network',
notification_level='DEBUG')
class QuotaTest(APIv2TestBase):
def test_create_network_quota(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.return_value = 1
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertTrue("Quota exceeded for resources" in
res.json['NeutronError'])
def test_create_network_quota_no_counts(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.side_effect = (
q_exc.NotImplementedError())
instance.get_networks.return_value = ["foo"]
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertTrue("Quota exceeded for resources" in
res.json['NeutronError'])
def test_create_network_quota_without_limit(self):
cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.get_networks_count.return_value = 3
res = self.api.post_json(
_get_path('networks'), initial_input)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
class ExtensionTestCase(base.BaseTestCase):
def setUp(self):
super(ExtensionTestCase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure 'stale' patched copies of the plugin are never returned
NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
PluginAwareExtensionManager._instance = None
# Save the global RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
self.saved_attr_map[resource] = attrs.copy()
# Create the default configurations
args = ['--config-file', etcdir('neutron.conf.test')]
config.parse(args=args)
# Update the plugin and extensions path
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('api_extensions_path', EXTDIR)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
# Instantiate mock plugin and enable the V2attributes extension
NeutronManager.get_plugin().supported_extension_aliases = ["v2attrs"]
api = router.APIRouter()
self.api = webtest.TestApp(api)
def tearDown(self):
super(ExtensionTestCase, self).tearDown()
self._plugin_patcher.stop()
self.api = None
self.plugin = None
cfg.CONF.reset()
# Restore the global RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_extended_create(self):
net_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'v2attrs:something_else': "abc"}}
data = {'network': {'admin_state_up': True, 'shared': False}}
data['network'].update(initial_input['network'])
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id,
'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post_json(_get_path('networks'), initial_input)
instance.create_network.assert_called_with(mock.ANY,
network=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
self.assertTrue('network' in res.json)
net = res.json['network']
self.assertEqual(net['id'], net_id)
self.assertEqual(net['status'], "ACTIVE")
self.assertEqual(net['v2attrs:something'], "123")
self.assertFalse('v2attrs:something_else' in net)
class TestSubresourcePlugin(object):
def get_network_dummies(self, context, network_id,
filters=None, fields=None):
return []
def get_network_dummy(self, context, id, network_id,
fields=None):
return {}
def create_network_dummy(self, context, network_id, dummy):
return {}
def update_network_dummy(self, context, id, network_id, dummy):
return {}
def delete_network_dummy(self, context, id, network_id):
return
class ListArgsTestCase(base.BaseTestCase):
def test_list_args(self):
path = '/?fields=4&foo=3&fields=2&bar=1'
request = webob.Request.blank(path)
expect_val = ['2', '4']
actual_val = api_common.list_args(request, 'fields')
self.assertEqual(sorted(actual_val), expect_val)
def test_list_args_with_empty(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
self.assertEqual([], api_common.list_args(request, 'fields'))
class FiltersTestCase(base.BaseTestCase):
def test_all_skip_args(self):
path = '/?fields=4&fields=3&fields=2&fields=1'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, None,
["fields"]))
def test_blank_values(self):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
def test_no_attr_info(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, {})
self.assertEqual(actual_val, expect_val)
def test_attr_info_without_conversion(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
def test_attr_info_with_convert_list_to(self):
path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
request = webob.Request.blank(path)
attr_info = {
'foo': {
'convert_list_to': attributes.convert_kvp_list_to_dict,
}
}
expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
def test_attr_info_with_convert_to(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(actual_val, expect_val)
class CreateResourceTestCase(base.BaseTestCase):
def test_resource_creation(self):
resource = v2_base.create_resource('fakes', 'fake', None, {})
self.assertIsInstance(resource, webob.dec.wsgify)
|
|
# encoding: utf-8
"""
Base classes and other objects used by enumerations
"""
from __future__ import absolute_import, print_function
import sys
import textwrap
def alias(*aliases):
"""
Decorating a class with @alias('FOO', 'BAR', ..) allows the class to
be referenced by each of the names provided as arguments.
"""
def decorator(cls):
# alias must be set in globals from caller's frame
caller = sys._getframe(1)
globals_dict = caller.f_globals
for alias in aliases:
globals_dict[alias] = cls
return cls
return decorator
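# Usage sketch (hypothetical names, not part of this module): after decoration,
# every alias refers to the same class object in the caller's namespace.
#
#   >>> @alias('FIRST_ALIAS', 'SECOND_ALIAS')
#   ... class _ExampleEnum(object):
#   ...     pass
#   >>> FIRST_ALIAS is _ExampleEnum and SECOND_ALIAS is _ExampleEnum
#   True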
class _DocsPageFormatter(object):
"""
    Formats a RestructuredText documentation page (string) for the enumeration
class parts passed to the constructor. An immutable one-shot service
object.
"""
def __init__(self, clsname, clsdict):
self._clsname = clsname
self._clsdict = clsdict
@property
def page_str(self):
"""
The RestructuredText documentation page for the enumeration. This is
the only API member for the class.
"""
tmpl = ".. _%s:\n\n%s\n\n%s\n\n----\n\n%s"
components = (
self._ms_name,
self._page_title,
self._intro_text,
self._member_defs,
)
return tmpl % components
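    # Illustrative shape of the rendered page (hypothetical enumeration name
    # and member), as produced by the template above:
    #
    #   .. _MsoExampleType:
    #
    #   ``EXAMPLE_TYPE``
    #   ================
    #
    #   Docstring of the enumeration class.
    #
    #   ----
    #
    #   FOO
    #       Description of the FOO member, wrapped to 78 columns.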
@property
def _intro_text(self):
"""
The docstring of the enumeration, formatted for use at the top of the
documentation page
"""
try:
cls_docstring = self._clsdict["__doc__"]
except KeyError:
cls_docstring = ""
if cls_docstring is None:
return ""
return textwrap.dedent(cls_docstring).strip()
def _member_def(self, member):
"""
Return an individual member definition formatted as an RST glossary
entry, wrapped to fit within 78 columns.
"""
member_docstring = textwrap.dedent(member.docstring).strip()
member_docstring = textwrap.fill(
member_docstring,
width=78,
initial_indent=" " * 4,
subsequent_indent=" " * 4,
)
return "%s\n%s\n" % (member.name, member_docstring)
@property
def _member_defs(self):
"""
A single string containing the aggregated member definitions section
of the documentation page
"""
members = self._clsdict["__members__"]
member_defs = [
self._member_def(member) for member in members if member.name is not None
]
return "\n".join(member_defs)
@property
def _ms_name(self):
"""
The Microsoft API name for this enumeration
"""
return self._clsdict["__ms_name__"]
@property
def _page_title(self):
"""
The title for the documentation page, formatted as code (surrounded
        in double-backticks) and underlined with '=' characters
"""
title_underscore = "=" * (len(self._clsname) + 4)
return "``%s``\n%s" % (self._clsname, title_underscore)
class MetaEnumeration(type):
"""
The metaclass for Enumeration and its subclasses. Adds a name for each
named member and compiles state needed by the enumeration class to
respond to other attribute gets
"""
def __new__(meta, clsname, bases, clsdict):
meta._add_enum_members(clsdict)
meta._collect_valid_settings(clsdict)
meta._generate_docs_page(clsname, clsdict)
return type.__new__(meta, clsname, bases, clsdict)
@classmethod
def _add_enum_members(meta, clsdict):
"""
Dispatch ``.add_to_enum()`` call to each member so it can do its
thing to properly add itself to the enumeration class. This
delegation allows member sub-classes to add specialized behaviors.
"""
enum_members = clsdict["__members__"]
for member in enum_members:
member.add_to_enum(clsdict)
@classmethod
def _collect_valid_settings(meta, clsdict):
"""
        Compile the sequence of enumeration values that are valid assignment
        values and store it on the class. Return-only values are excluded.
"""
enum_members = clsdict["__members__"]
valid_settings = []
for member in enum_members:
valid_settings.extend(member.valid_settings)
clsdict["_valid_settings"] = valid_settings
@classmethod
def _generate_docs_page(meta, clsname, clsdict):
"""
        Generate the RST documentation page for the enumeration and store it
        in the class dict as ``__docs_rst__``.
"""
clsdict["__docs_rst__"] = _DocsPageFormatter(clsname, clsdict).page_str
class EnumerationBase(object):
"""
Base class for all enumerations, used directly for enumerations requiring
    only basic behavior. Its __dict__ is used below in the Python 2+3
compatible metaclass definition.
"""
__members__ = ()
__ms_name__ = ""
@classmethod
def validate(cls, value):
"""
Raise |ValueError| if *value* is not an assignable value.
"""
if value not in cls._valid_settings:
raise ValueError(
"%s not a member of %s enumeration" % (value, cls.__name__)
)
Enumeration = MetaEnumeration("Enumeration", (object,), dict(EnumerationBase.__dict__))
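# Note: calling the metaclass directly, as above, is the Python 2+3 compatible
# way to apply it; it sidesteps the incompatible Py2 ``__metaclass__`` attribute
# and Py3 ``metaclass=`` keyword syntaxes while reusing EnumerationBase's
# namespace.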
class XmlEnumeration(Enumeration):
"""
Provides ``to_xml()`` and ``from_xml()`` methods in addition to base
enumeration features
"""
__members__ = ()
__ms_name__ = ""
@classmethod
def from_xml(cls, xml_val):
"""
Return the enumeration member corresponding to the XML value
*xml_val*.
"""
return cls._xml_to_member[xml_val]
@classmethod
def to_xml(cls, enum_val):
"""
Return the XML value of the enumeration value *enum_val*.
"""
cls.validate(enum_val)
return cls._member_to_xml[enum_val]
class EnumMember(object):
"""
Used in the enumeration class definition to define a member value and its
mappings
"""
def __init__(self, name, value, docstring):
self._name = name
if isinstance(value, int):
value = EnumValue(name, value, docstring)
self._value = value
self._docstring = docstring
def add_to_enum(self, clsdict):
"""
Add a name to *clsdict* for this member.
"""
self.register_name(clsdict)
@property
def docstring(self):
"""
The description of this member
"""
return self._docstring
@property
def name(self):
"""
The distinguishing name of this member within the enumeration class,
e.g. 'MIDDLE' for MSO_VERTICAL_ANCHOR.MIDDLE, if this is a named
member. Otherwise the primitive value such as |None|, |True| or
|False|.
"""
return self._name
def register_name(self, clsdict):
"""
Add a member name to the class dict *clsdict* containing the value of
this member object. Where the name of this object is None, do
nothing; this allows out-of-band values to be defined without adding
a name to the class dict.
"""
if self.name is None:
return
clsdict[self.name] = self.value
@property
def valid_settings(self):
"""
A sequence containing the values valid for assignment for this
member. May be zero, one, or more in number.
"""
return (self._value,)
@property
def value(self):
"""
The enumeration value for this member, often an instance of
EnumValue, but may be a primitive value such as |None|.
"""
return self._value
class EnumValue(int):
"""
A named enumeration value, providing __str__ and __doc__ string values
for its symbolic name and description, respectively. Subclasses int, so
behaves as a regular int unless the strings are asked for.
"""
def __new__(cls, member_name, int_value, docstring):
return super(EnumValue, cls).__new__(cls, int_value)
def __init__(self, member_name, int_value, docstring):
super(EnumValue, self).__init__()
self._member_name = member_name
self._docstring = docstring
@property
def __doc__(self):
"""
The description of this enumeration member
"""
return self._docstring.strip()
def __str__(self):
"""
The symbolic name and string value of this member, e.g. 'MIDDLE (3)'
"""
return "{0:s} ({1:d})".format(self._member_name, self)
class ReturnValueOnlyEnumMember(EnumMember):
"""
Used to define a member of an enumeration that is only valid as a query
result and is not valid as a setting, e.g. MSO_VERTICAL_ANCHOR.MIXED (-2)
"""
@property
def valid_settings(self):
"""
No settings are valid for a return-only value.
"""
return ()
class XmlMappedEnumMember(EnumMember):
"""
Used to define a member whose value maps to an XML attribute value.
"""
def __init__(self, name, value, xml_value, docstring):
super(XmlMappedEnumMember, self).__init__(name, value, docstring)
self._xml_value = xml_value
def add_to_enum(self, clsdict):
"""
Compile XML mappings in addition to base add behavior.
"""
super(XmlMappedEnumMember, self).add_to_enum(clsdict)
self.register_xml_mapping(clsdict)
def register_xml_mapping(self, clsdict):
"""
Add XML mappings to the enumeration class state for this member.
"""
member_to_xml = self._get_or_add_member_to_xml(clsdict)
member_to_xml[self.value] = self.xml_value
xml_to_member = self._get_or_add_xml_to_member(clsdict)
xml_to_member[self.xml_value] = self.value
@property
def xml_value(self):
"""
The XML attribute value that corresponds to this enumeration value
"""
return self._xml_value
@staticmethod
def _get_or_add_member_to_xml(clsdict):
"""
Add the enum -> xml value mapping to the enumeration class state
"""
if "_member_to_xml" not in clsdict:
clsdict["_member_to_xml"] = dict()
return clsdict["_member_to_xml"]
@staticmethod
def _get_or_add_xml_to_member(clsdict):
"""
Add the xml -> enum value mapping to the enumeration class state
"""
if "_xml_to_member" not in clsdict:
clsdict["_xml_to_member"] = dict()
return clsdict["_xml_to_member"]
|
|
"""
Manage ELBs
.. versionadded:: 2014.7.0
Create and destroy ELBs. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit elb credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify the credentials either in a pillar file or
in the minion's config file:
.. code-block:: yaml
elb.keyid: GKTADJGHEIQSXMKKRBJ08H
elb.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure myelb ELB exists:
boto_elb.present:
- name: myelb
- region: us-east-1
- availability_zones:
- us-east-1a
- us-east-1c
- us-east-1d
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
- listeners:
- elb_port: 443
instance_port: 80
elb_protocol: HTTPS
instance_protocol: HTTP
certificate: 'arn:aws:iam::1111111:server-certificate/mycert'
policies:
- my-ssl-policy
- cookie-policy
- elb_port: 8210
instance_port: 8210
elb_protocol: TCP
- backends:
- instance_port: 80
policies:
- enable-proxy-protocol
- health_check:
target: 'HTTP:80/'
- attributes:
cross_zone_load_balancing:
enabled: true
access_log:
enabled: true
s3_bucket_name: 'mybucket'
s3_bucket_prefix: 'my-logs'
emit_interval: 5
connecting_settings:
idle_timeout: 60
- cnames:
- name: mycname.example.com.
zone: example.com.
ttl: 60
- name: myothercname.example.com.
zone: example.com.
- security_groups:
- my-security-group
- policies:
- policy_name: my-ssl-policy
policy_type: SSLNegotiationPolicyType
policy:
Protocol-TLSv1.2: true
Protocol-SSLv3: false
Server-Defined-Cipher-Order: true
ECDHE-ECDSA-AES128-GCM-SHA256: true
- policy_name: cookie-policy
policy_type: LBCookieStickinessPolicyType
policy: {} # no policy means this is a session cookie
- policy_name: enable-proxy-protocol
policy_type: ProxyProtocolPolicyType
policy:
ProxyProtocol: true
# Using a profile from pillars
Ensure myelb ELB exists:
boto_elb.present:
- name: myelb
- region: us-east-1
- profile: myelbprofile
# Passing in a profile
Ensure myelb ELB exists:
boto_elb.present:
- name: myelb
- region: us-east-1
- profile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's possible to specify attributes from pillars by specifying a pillar key. You
can override the values defined in the pillar by setting the attributes on the
resource. The module will use the default pillar key 'boto_elb_attributes',
which allows you to set default attributes for all ELB resources.
Setting the attributes pillar:
.. code-block:: yaml
my_elb_attributes:
cross_zone_load_balancing:
enabled: true
connection_draining:
enabled: true
timeout: 20
access_log:
enabled: true
s3_bucket_name: 'mybucket'
s3_bucket_prefix: 'my-logs'
emit_interval: 5
Overriding the attribute values on the resource:
.. code-block:: yaml
Ensure myelb ELB exists:
boto_elb.present:
- name: myelb
- region: us-east-1
- attributes_from_pillar: my_elb_attributes
# override cross_zone_load_balancing:enabled
- attributes:
cross_zone_load_balancing:
enabled: false
- profile: myelbprofile
It's possible to specify cloudwatch alarms that will be setup along with the
ELB. Note the alarm name will be defined by the name attribute provided, plus
the ELB resource name.
.. code-block:: yaml
Ensure myelb ELB exists:
boto_elb.present:
- name: myelb
- region: us-east-1
- profile: myelbprofile
- alarms:
UnHealthyHostCount:
name: 'ELB UnHealthyHostCount **MANAGED BY SALT**'
attributes:
metric: UnHealthyHostCount
namespace: AWS/ELB
statistic: Average
comparison: '>='
threshold: 1.0
period: 600
evaluation_periods: 6
unit: null
description: ELB UnHealthyHostCount
alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm']
insufficient_data_actions: []
ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm']
You can also use alarms from pillars, and override values from the pillar
alarms by setting overrides on the resource. Note that 'boto_elb_alarms'
will be used as a default value for all resources, if defined, and can be
used to ensure alarms are always set for a resource.
Setting the alarms in a pillar:
.. code-block:: yaml
my_elb_alarm:
UnHealthyHostCount:
name: 'ELB UnHealthyHostCount **MANAGED BY SALT**'
attributes:
metric: UnHealthyHostCount
namespace: AWS/ELB
statistic: Average
comparison: '>='
threshold: 1.0
period: 600
evaluation_periods: 6
unit: null
description: ELB UnHealthyHostCount
alarm_actions: ['arn:aws:sns:us-east-1:12345:myalarm']
insufficient_data_actions: []
ok_actions: ['arn:aws:sns:us-east-1:12345:myalarm']
Overriding the alarm values on the resource:
.. code-block:: yaml
Ensure myelb ELB exists:
boto_elb.present:
- name: myelb
- region: us-east-1
- profile: myelbprofile
- alarms_from_pillar: my_elb_alarm
# override UnHealthyHostCount:attributes:threshold
- alarms:
UnHealthyHostCount:
attributes:
threshold: 2.0
Tags can also be set:
.. versionadded:: 2016.3.0
.. code-block:: yaml
Ensure myelb ELB exists:
boto_elb.present:
- name: myelb
- region: us-east-1
- profile: myelbprofile
- tags:
MyTag: 'My Tag Value'
OtherTag: 'My Other Value'
"""
import hashlib
import logging
import re
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if boto is available.
"""
if "boto_elb.exists" in __salt__:
return "boto_elb"
return (False, "boto_elb module could not be loaded")
def present(
name,
listeners,
availability_zones=None,
subnets=None,
subnet_names=None,
security_groups=None,
scheme="internet-facing",
health_check=None,
attributes=None,
attributes_from_pillar="boto_elb_attributes",
cnames=None,
alarms=None,
alarms_from_pillar="boto_elb_alarms",
policies=None,
policies_from_pillar="boto_elb_policies",
backends=None,
region=None,
key=None,
keyid=None,
profile=None,
wait_for_sync=True,
tags=None,
instance_ids=None,
instance_names=None,
):
"""
Ensure the ELB exists.
name
Name of the ELB.
availability_zones
A list of availability zones for this ELB.
listeners
A list of listener lists; example::
[
['443', 'HTTPS', 'arn:aws:iam::1111111:server-certificate/mycert'],
['8443', '80', 'HTTPS', 'HTTP', 'arn:aws:iam::1111111:server-certificate/mycert']
]
subnets
A list of subnet IDs in your VPC to attach to your LoadBalancer.
subnet_names
A list of subnet names in your VPC to attach to your LoadBalancer.
security_groups
The security groups assigned to your LoadBalancer within your VPC. Must
be passed either as a list or a comma-separated string.
For example, a list:
.. code-block:: yaml
- security_groups:
- secgroup-one
- secgroup-two
Or as a comma-separated string:
.. code-block:: yaml
- security_groups: secgroup-one,secgroup-two
scheme
The type of a LoadBalancer, ``internet-facing`` or ``internal``. Once
set, can not be modified.
health_check
A dict defining the health check for this ELB.
attributes
A dict defining the attributes to set on this ELB.
Unknown keys will be silently ignored.
See the :mod:`salt.modules.boto_elb.set_attributes` function for
recognized attributes.
attributes_from_pillar
name of pillar dict that contains attributes. Attributes defined for this specific
state will override those from pillar.
cnames
A list of cname dicts with attributes needed for the DNS add_record state.
        By default the boto_route53.add_record state will be used, which
        requires: name, zone, ttl, and identifier.
        Other DNS modules can be called by specifying the provider keyword;
        the cnames dict will be passed to that state as kwargs.
        See the :mod:`salt.states.boto_route53` state for information about
        these attributes.
alarms:
a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ELB.
All attributes should be specified except for dimension which will be
automatically set to this ELB.
See the :mod:`salt.states.boto_cloudwatch_alarm` state for information
about these attributes.
alarms_from_pillar:
name of pillar dict that contains alarm settings. Alarms defined for this specific
state will override those from pillar.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53.
tags
dict of tags
instance_ids
list of instance ids. The state will ensure that these, and ONLY these, instances
are registered with the ELB. This is additive with instance_names.
instance_names
list of instance names. The state will ensure that these, and ONLY these, instances
are registered with the ELB. This is additive with instance_ids.
"""
# load data from attributes_from_pillar and merge with attributes
tmp = __salt__["config.option"](attributes_from_pillar, {})
attributes = salt.utils.dictupdate.update(tmp, attributes) if attributes else tmp
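    # Merge sketch (hypothetical values): state-level attributes win over the
    # pillar defaults because they are merged in last, e.g.
    #   pillar default: {'cross_zone_load_balancing': {'enabled': True},
    #                    'connection_draining': {'enabled': True}}
    #   state:          {'cross_zone_load_balancing': {'enabled': False}}
    #   merged result:  {'cross_zone_load_balancing': {'enabled': False},
    #                    'connection_draining': {'enabled': True}}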
ret = {"name": name, "result": True, "comment": "", "changes": {}}
if not isinstance(security_groups, (str, list, type(None))):
msg = (
"The 'security_group' parameter must be either a list or a "
"comma-separated string."
)
log.error(msg)
ret.update({"comment": msg, "result": False})
return ret
if isinstance(security_groups, str):
security_groups = security_groups.split(",")
_ret = _elb_present(
name,
availability_zones,
listeners,
subnets,
subnet_names,
security_groups,
scheme,
region,
key,
keyid,
profile,
)
ret.update(
{
"changes": _ret["changes"],
"comment": " ".join([ret["comment"], _ret["comment"]]),
}
)
ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
if ret["result"] is False:
return ret
exists = __salt__["boto_elb.exists"](name, region, key, keyid, profile)
if not exists and __opts__["test"]:
return ret
if attributes:
_ret = _attributes_present(name, attributes, region, key, keyid, profile)
ret.update(
{
"changes": salt.utils.dictupdate.update(
ret["changes"], _ret["changes"]
),
"comment": " ".join([ret["comment"], _ret["comment"]]),
}
)
ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
if ret["result"] is False:
return ret
_ret = _health_check_present(name, health_check, region, key, keyid, profile)
ret.update(
{
"changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
"comment": " ".join([ret["comment"], _ret["comment"]]),
}
)
ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
if ret["result"] is False:
return ret
if cnames:
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
if lb:
for cname in cnames:
_ret = None
dns_provider = "boto_route53"
cname.update({"record_type": "CNAME", "value": lb["dns_name"]})
if "provider" in cname:
dns_provider = cname.pop("provider")
if dns_provider == "boto_route53":
for p in ("profile", "key", "keyid", "region", "wait_for_sync"):
cname[p] = locals().get(p) if p not in cname else cname[p]
_ret = __states__["boto_route53.present"](**cname)
ret.update(
{
"changes": salt.utils.dictupdate.update(
ret["changes"], _ret["changes"]
),
"comment": " ".join([ret["comment"], _ret["comment"]]),
}
)
ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
if ret["result"] is False:
return ret
_ret = _alarms_present(
name, alarms, alarms_from_pillar, region, key, keyid, profile
)
ret.update(
{
"changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
"comment": " ".join([ret["comment"], _ret["comment"]]),
}
)
ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
if ret["result"] is False:
return ret
_ret = _policies_present(
name,
policies,
policies_from_pillar,
listeners,
backends,
region,
key,
keyid,
profile,
)
ret.update(
{
"changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
"comment": " ".join([ret["comment"], _ret["comment"]]),
}
)
ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
if ret["result"] is False:
return ret
_ret = _tags_present(name, tags, region, key, keyid, profile)
ret.update(
{
"changes": salt.utils.dictupdate.update(ret["changes"], _ret["changes"]),
"comment": " ".join([ret["comment"], _ret["comment"]]),
}
)
ret["result"] = ret["result"] if _ret["result"] else _ret["result"]
if ret["result"] is False:
return ret
if not instance_ids:
instance_ids = []
if instance_names:
# AWS borks on adding instances in "non-running" states, so filter 'em out.
running_states = ("pending", "rebooting", "running", "stopping", "stopped")
for n in instance_names:
instance_ids += __salt__["boto_ec2.find_instances"](
name=n,
region=region,
key=key,
keyid=keyid,
profile=profile,
in_states=running_states,
)
# Backwards compat: Only touch attached instances if requested (e.g. if some are defined).
if instance_ids:
if __opts__["test"]:
if __salt__["boto_elb.set_instances"](
name, instance_ids, True, region, key, keyid, profile
):
ret["comment"] += " ELB {} instances would be updated.".format(name)
ret["result"] = None
else:
success = __salt__["boto_elb.set_instances"](
name, instance_ids, False, region, key, keyid, profile
)
if not success:
ret["comment"] += "Failed to set requested instances."
ret["result"] = False
return ret
def register_instances(
name, instances, region=None, key=None, keyid=None, profile=None
):
"""
Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from
the ``instances`` list does not remove it from the ELB.
name
The name of the Elastic Load Balancer to add EC2 instances to.
instances
A list of EC2 instance IDs that this Elastic Load Balancer should
distribute traffic to. This state will only ever append new instances
to the ELB. EC2 instances already associated with this ELB will not be
removed if they are not in the ``instances`` list.
.. versionadded:: 2015.8.0
.. code-block:: yaml
add-instances:
boto_elb.register_instances:
- name: myloadbalancer
- instances:
- instance-id1
- instance-id2
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
lb = __salt__["boto_elb.exists"](name, region, key, keyid, profile)
if not lb:
msg = "Could not find lb {}".format(name)
log.error(msg)
ret.update({"comment": msg, "result": False})
return ret
health = __salt__["boto_elb.get_instance_health"](name, region, key, keyid, profile)
nodes = [
value["instance_id"]
for value in health
if value["description"] != "Instance deregistration currently in progress."
]
new = [value for value in instances if value not in nodes]
if not new:
msg = "Instance/s {} already exist.".format(str(instances).strip("[]"))
log.debug(msg)
ret.update({"comment": msg})
return ret
if __opts__["test"]:
ret["comment"] = "ELB {} is set to register : {}.".format(name, new)
ret["result"] = None
return ret
state = __salt__["boto_elb.register_instances"](
name, instances, region, key, keyid, profile
)
if state:
msg = "Load Balancer {} has been changed".format(name)
log.info(msg)
new = set().union(nodes, instances)
ret.update(
{
"comment": msg,
"changes": {"old": "\n".join(nodes), "new": "\n".join(list(new))},
}
)
else:
msg = "Load balancer {} failed to add instances".format(name)
log.error(msg)
ret.update({"comment": msg, "result": False})
return ret
DEFAULT_PILLAR_LISTENER_POLICY_KEY = "boto_elb_listener_policies"
def _elb_present(
name,
availability_zones,
listeners,
subnets,
subnet_names,
security_groups,
scheme,
region,
key,
keyid,
profile,
):
ret = {"result": True, "comment": "", "changes": {}}
if not salt.utils.data.exactly_one((availability_zones, subnets, subnet_names)):
raise SaltInvocationError(
"Exactly one of availability_zones, subnets, "
"subnet_names must be provided as arguments."
)
if not listeners:
listeners = []
for listener in listeners:
if len(listener) < 3:
raise SaltInvocationError(
"Listeners must have at minimum port,"
" instance_port and protocol values in"
" the provided list."
)
if "elb_port" not in listener:
raise SaltInvocationError("elb_port is a required value for listeners.")
if "instance_port" not in listener:
raise SaltInvocationError(
"instance_port is a required value for listeners."
)
if "elb_protocol" not in listener:
raise SaltInvocationError("elb_protocol is a required value for listeners.")
listener["elb_protocol"] = listener["elb_protocol"].upper()
if listener["elb_protocol"] == "HTTPS" and "certificate" not in listener:
raise SaltInvocationError(
"certificate is a required value for"
" listeners if HTTPS is set for"
" elb_protocol."
)
# best attempt at principle of least surprise here:
# only use the default pillar in cases where we don't explicitly
# define policies OR policies_from_pillar on a listener
policies = listener.setdefault("policies", [])
policies_pillar = listener.get("policies_from_pillar", None)
if not policies and policies_pillar is None:
policies_pillar = DEFAULT_PILLAR_LISTENER_POLICY_KEY
if policies_pillar:
policies += __salt__["pillar.get"](policies_pillar, {}).get(
listener["elb_protocol"], []
)
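        # Expected pillar shape for the default key (hypothetical values): the
        # pillar dict is keyed by ELB protocol and yields a list of policy
        # names, e.g.
        #   boto_elb_listener_policies:
        #     HTTPS:
        #       - my-ssl-policy
        #     TCP:
        #       - enable-proxy-protocol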
# Look up subnet ids from names if provided
if subnet_names:
subnets = []
for i in subnet_names:
r = __salt__["boto_vpc.get_resource_id"](
"subnet", name=i, region=region, key=key, keyid=keyid, profile=profile
)
if "error" in r:
ret["comment"] = "Error looking up subnet ids: {}".format(r["error"])
ret["result"] = False
return ret
if "id" not in r:
ret["comment"] = "Subnet {} does not exist.".format(i)
ret["result"] = False
return ret
subnets.append(r["id"])
_security_groups = None
if subnets:
vpc_id = __salt__["boto_vpc.get_subnet_association"](
subnets, region, key, keyid, profile
)
vpc_id = vpc_id.get("vpc_id")
if not vpc_id:
ret["comment"] = "Subnets {} do not map to a valid vpc id.".format(subnets)
ret["result"] = False
return ret
_security_groups = __salt__["boto_secgroup.convert_to_group_ids"](
security_groups,
vpc_id=vpc_id,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not _security_groups:
ret[
"comment"
] = "Security groups {} do not map to valid security group ids.".format(
security_groups
)
ret["result"] = False
return ret
exists = __salt__["boto_elb.exists"](name, region, key, keyid, profile)
if not exists:
if __opts__["test"]:
ret["comment"] = "ELB {} is set to be created.".format(name)
ret["result"] = None
return ret
created = __salt__["boto_elb.create"](
name=name,
availability_zones=availability_zones,
listeners=listeners,
subnets=subnets,
security_groups=_security_groups,
scheme=scheme,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if created:
ret["changes"]["old"] = {"elb": None}
ret["changes"]["new"] = {"elb": name}
ret["comment"] = "ELB {} created.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to create {} ELB.".format(name)
else:
ret["comment"] = "ELB {} present.".format(name)
_ret = _security_groups_present(
name, _security_groups, region, key, keyid, profile
)
ret["changes"] = salt.utils.dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
_ret = _listeners_present(name, listeners, region, key, keyid, profile)
ret["changes"] = salt.utils.dictupdate.update(ret["changes"], _ret["changes"])
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
if availability_zones:
_ret = _zones_present(name, availability_zones, region, key, keyid, profile)
ret["changes"] = salt.utils.dictupdate.update(
ret["changes"], _ret["changes"]
)
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
if ret["result"] is False:
return ret
elif subnets:
_ret = _subnets_present(name, subnets, region, key, keyid, profile)
ret["changes"] = salt.utils.dictupdate.update(
ret["changes"], _ret["changes"]
)
ret["comment"] = " ".join([ret["comment"], _ret["comment"]])
if not _ret["result"]:
ret["result"] = _ret["result"]
return ret
def _listeners_present(name, listeners, region, key, keyid, profile):
ret = {"result": True, "comment": "", "changes": {}}
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
if not lb:
ret["comment"] = "{} ELB configuration could not be retrieved.".format(name)
ret["result"] = False
return ret
if not listeners:
listeners = []
expected_listeners_by_tuple = {}
for l in listeners:
l_key = __salt__["boto_elb.listener_dict_to_tuple"](l)
expected_listeners_by_tuple[l_key] = l
actual_listeners_by_tuple = {}
for l in lb["listeners"]:
l_key = __salt__["boto_elb.listener_dict_to_tuple"](l)
actual_listeners_by_tuple[l_key] = l
to_delete = []
to_create = []
for t, l in expected_listeners_by_tuple.items():
if t not in actual_listeners_by_tuple:
to_create.append(l)
for t, l in actual_listeners_by_tuple.items():
if t not in expected_listeners_by_tuple:
to_delete.append(l)
if __opts__["test"]:
msg = []
if to_create or to_delete:
msg.append("ELB {} set to have listeners modified:".format(name))
for listener in to_create:
msg.append(
"Listener {} added.".format(
__salt__["boto_elb.listener_dict_to_tuple"](listener)
)
)
for listener in to_delete:
msg.append(
"Listener {} deleted.".format(
__salt__["boto_elb.listener_dict_to_tuple"](listener)
)
)
ret["result"] = None
else:
msg.append("Listeners already set on ELB {}.".format(name))
ret["comment"] = " ".join(msg)
return ret
if to_delete:
ports = [l["elb_port"] for l in to_delete]
deleted = __salt__["boto_elb.delete_listeners"](
name, ports, region, key, keyid, profile
)
if deleted:
ret["comment"] = "Deleted listeners on {} ELB.".format(name)
else:
ret["comment"] = "Failed to delete listeners on {} ELB.".format(name)
ret["result"] = False
if to_create:
created = __salt__["boto_elb.create_listeners"](
name, to_create, region, key, keyid, profile
)
if created:
msg = "Created listeners on {0} ELB."
ret["comment"] = " ".join([ret["comment"], msg.format(name)])
else:
msg = "Failed to create listeners on {0} ELB."
ret["comment"] = " ".join([ret["comment"], msg.format(name)])
ret["result"] = False
if to_create or to_delete:
ret["changes"]["listeners"] = {}
ret["changes"]["listeners"]["old"] = lb["listeners"]
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
ret["changes"]["listeners"]["new"] = lb["listeners"]
else:
ret["comment"] = "Listeners already set on ELB {}.".format(name)
return ret
def _security_groups_present(name, security_groups, region, key, keyid, profile):
ret = {"result": True, "comment": "", "changes": {}}
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
if not lb:
ret["comment"] = "{} ELB configuration could not be retrieved.".format(name)
ret["result"] = False
return ret
if not security_groups:
security_groups = []
change_needed = False
if set(security_groups) != set(lb["security_groups"]):
change_needed = True
if change_needed:
if __opts__["test"]:
ret["comment"] = "ELB {} set to have security groups modified.".format(name)
ret["result"] = None
return ret
changed = __salt__["boto_elb.apply_security_groups"](
name, security_groups, region, key, keyid, profile
)
if changed:
ret["comment"] = "Modified security_groups on {} ELB.".format(name)
else:
ret["comment"] = "Failed to modify security_groups on {} ELB.".format(name)
ret["result"] = False
ret["changes"]["old"] = {"security_groups": lb["security_groups"]}
ret["changes"]["new"] = {"security_groups": security_groups}
else:
ret["comment"] = "security_groups already set on ELB {}.".format(name)
return ret
def _attributes_present(name, attributes, region, key, keyid, profile):
ret = {"result": True, "comment": "", "changes": {}}
_attributes = __salt__["boto_elb.get_attributes"](name, region, key, keyid, profile)
if not _attributes:
ret["result"] = False
ret["comment"] = "Failed to retrieve attributes for ELB {}.".format(name)
return ret
attrs_to_set = []
if "cross_zone_load_balancing" in attributes:
czlb = attributes["cross_zone_load_balancing"]
_czlb = _attributes["cross_zone_load_balancing"]
if czlb["enabled"] != _czlb["enabled"]:
attrs_to_set.append("cross_zone_load_balancing")
if "connection_draining" in attributes:
cd = attributes["connection_draining"]
_cd = _attributes["connection_draining"]
if cd["enabled"] != _cd["enabled"] or cd.get("timeout", 300) != _cd.get(
"timeout"
):
attrs_to_set.append("connection_draining")
if "connecting_settings" in attributes:
cs = attributes["connecting_settings"]
_cs = _attributes["connecting_settings"]
if cs["idle_timeout"] != _cs["idle_timeout"]:
attrs_to_set.append("connecting_settings")
if "access_log" in attributes:
for attr, val in attributes["access_log"].items():
if str(_attributes["access_log"][attr]) != str(val):
attrs_to_set.append("access_log")
if "s3_bucket_prefix" in attributes["access_log"]:
sbp = attributes["access_log"]["s3_bucket_prefix"]
if sbp.startswith("/") or sbp.endswith("/"):
raise SaltInvocationError(
"s3_bucket_prefix can not start or end with /."
)
if attrs_to_set:
if __opts__["test"]:
ret["comment"] = "ELB {} set to have attributes set.".format(name)
ret["result"] = None
return ret
was_set = __salt__["boto_elb.set_attributes"](
name, attributes, region, key, keyid, profile
)
if was_set:
ret["changes"]["old"] = {"attributes": _attributes}
ret["changes"]["new"] = {"attributes": attributes}
ret["comment"] = "Set attributes on ELB {}.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to set attributes on ELB {}.".format(name)
else:
ret["comment"] = "Attributes already set on ELB {}.".format(name)
return ret
def _health_check_present(name, health_check, region, key, keyid, profile):
ret = {"result": True, "comment": "", "changes": {}}
if not health_check:
health_check = {}
_health_check = __salt__["boto_elb.get_health_check"](
name, region, key, keyid, profile
)
if not _health_check:
ret["result"] = False
ret["comment"] = "Failed to retrieve health_check for ELB {}.".format(name)
return ret
need_to_set = False
for attr, val in health_check.items():
if str(_health_check[attr]) != str(val):
need_to_set = True
if need_to_set:
if __opts__["test"]:
ret["comment"] = "ELB {} set to have health check set.".format(name)
ret["result"] = None
return ret
was_set = __salt__["boto_elb.set_health_check"](
name, health_check, region, key, keyid, profile
)
if was_set:
ret["changes"]["old"] = {"health_check": _health_check}
_health_check = __salt__["boto_elb.get_health_check"](
name, region, key, keyid, profile
)
ret["changes"]["new"] = {"health_check": _health_check}
ret["comment"] = "Set health check on ELB {}.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to set health check on ELB {}.".format(name)
else:
ret["comment"] = "Health check already set on ELB {}.".format(name)
return ret
def _zones_present(name, availability_zones, region, key, keyid, profile):
ret = {"result": True, "comment": "", "changes": {}}
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
if not lb:
ret["result"] = False
ret["comment"] = "Failed to retrieve ELB {}.".format(name)
return ret
to_enable = []
to_disable = []
_zones = lb["availability_zones"]
for zone in availability_zones:
if zone not in _zones:
to_enable.append(zone)
for zone in _zones:
if zone not in availability_zones:
to_disable.append(zone)
if to_enable or to_disable:
if __opts__["test"]:
ret["comment"] = "ELB {} to have availability zones set.".format(name)
ret["result"] = None
return ret
if to_enable:
enabled = __salt__["boto_elb.enable_availability_zones"](
name, to_enable, region, key, keyid, profile
)
if enabled:
ret["comment"] = "Enabled availability zones on {} ELB.".format(name)
else:
ret[
"comment"
] = "Failed to enable availability zones on {} ELB.".format(name)
ret["result"] = False
if to_disable:
disabled = __salt__["boto_elb.disable_availability_zones"](
name, to_disable, region, key, keyid, profile
)
if disabled:
msg = "Disabled availability zones on {0} ELB."
ret["comment"] = " ".join([ret["comment"], msg.format(name)])
else:
msg = "Failed to disable availability zones on {0} ELB."
ret["comment"] = " ".join([ret["comment"], msg.format(name)])
ret["result"] = False
ret["changes"]["old"] = {"availability_zones": lb["availability_zones"]}
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
ret["changes"]["new"] = {"availability_zones": lb["availability_zones"]}
else:
ret["comment"] = "Availability zones already set on ELB {}.".format(name)
return ret
def _subnets_present(name, subnets, region, key, keyid, profile):
ret = {"result": True, "comment": "", "changes": {}}
if not subnets:
subnets = []
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
if not lb:
ret["result"] = False
ret["comment"] = "Failed to retrieve ELB {}.".format(name)
return ret
to_enable = []
to_disable = []
_subnets = lb["subnets"]
for subnet in subnets:
if subnet not in _subnets:
to_enable.append(subnet)
for subnet in _subnets:
if subnet not in subnets:
to_disable.append(subnet)
if to_enable or to_disable:
if __opts__["test"]:
ret["comment"] = "ELB {} to have subnets set.".format(name)
ret["result"] = None
return ret
if to_enable:
attached = __salt__["boto_elb.attach_subnets"](
name, to_enable, region, key, keyid, profile
)
if attached:
ret["comment"] = "Attached subnets on {} ELB.".format(name)
else:
ret["comment"] = "Failed to attach subnets on {} ELB.".format(name)
ret["result"] = False
if to_disable:
detached = __salt__["boto_elb.detach_subnets"](
name, to_disable, region, key, keyid, profile
)
if detached:
ret["comment"] = " ".join(
[ret["comment"], "Detached subnets on {} ELB.".format(name)]
)
else:
ret["comment"] = " ".join(
[
ret["comment"],
"Failed to detach subnets on {} ELB.".format(name),
]
)
ret["result"] = False
ret["changes"]["old"] = {"subnets": lb["subnets"]}
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
ret["changes"]["new"] = {"subnets": lb["subnets"]}
else:
ret["comment"] = "Subnets already set on ELB {}.".format(name)
return ret
def _alarms_present(name, alarms, alarms_from_pillar, region, key, keyid, profile):
"""helper method for present. ensure that cloudwatch_alarms are set"""
current = __salt__["config.option"](alarms_from_pillar, {})
if alarms:
current = salt.utils.dictupdate.update(current, alarms)
ret = {"name": name, "result": True, "comment": "", "changes": {}}
for _, info in current.items():
info["name"] = name + " " + info["name"]
info["attributes"]["description"] = (
name + " " + info["attributes"]["description"]
)
info["attributes"]["dimensions"] = {"LoadBalancerName": [name]}
kwargs = {
"name": info["name"],
"attributes": info["attributes"],
"region": region,
"key": key,
"keyid": keyid,
"profile": profile,
}
# No test=False clause needed since the state handles that itself...
results = __states__["boto_cloudwatch_alarm.present"](**kwargs)
if not results.get("result"):
ret["result"] = results["result"]
if results.get("changes", {}) != {}:
ret["changes"][info["name"]] = results["changes"]
if "comment" in results:
ret["comment"] += results["comment"]
return ret
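# Illustrative sketch of how alarms passed inline are deep-merged over alarms
# pulled from pillar before boto_cloudwatch_alarm.present is invoked per alarm.
# The alarm name and attribute values below are hypothetical examples.
def _example_alarm_merge():
    import salt.utils.dictupdate
    pillar_alarms = {
        "UnHealthyHostCount": {
            "name": "unhealthy hosts",
            "attributes": {"description": "unhealthy host alarm", "threshold": 1},
        }
    }
    inline_alarms = {"UnHealthyHostCount": {"attributes": {"threshold": 5}}}
    # Inline values win; keys only present in pillar are preserved.
    return salt.utils.dictupdate.update(pillar_alarms, inline_alarms)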
def _policies_present(
name,
policies,
policies_from_pillar,
listeners,
backends,
region,
key,
keyid,
profile,
):
"""helper method for present. ensure that ELB policies are set"""
if policies is None:
policies = []
pillar_policies = __salt__["config.option"](policies_from_pillar, [])
policies = policies + pillar_policies
if backends is None:
backends = []
# check for policy name uniqueness and correct type
policy_names = set()
for p in policies:
if "policy_name" not in p:
raise SaltInvocationError("policy_name is a required value for policies.")
if "policy_type" not in p:
raise SaltInvocationError("policy_type is a required value for policies.")
if "policy" not in p:
raise SaltInvocationError("policy is a required value for listeners.")
# check for unique policy names
if p["policy_name"] in policy_names:
raise SaltInvocationError(
"Policy names must be unique: policy {} is declared twice.".format(
p["policy_name"]
)
)
policy_names.add(p["policy_name"])
# check that listeners refer to valid policy names
for l in listeners:
for p in l.get("policies", []):
if p not in policy_names:
raise SaltInvocationError(
"Listener {} on ELB {} refers to undefined policy {}.".format(
l["elb_port"], name, p
)
)
# check that backends refer to valid policy names
for b in backends:
for p in b.get("policies", []):
if p not in policy_names:
raise SaltInvocationError(
"Backend {} on ELB {} refers to undefined policy {}.".format(
b["instance_port"], name, p
)
)
ret = {"result": True, "comment": "", "changes": {}}
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
if not lb:
ret["comment"] = "{} ELB configuration could not be retrieved.".format(name)
ret["result"] = False
return ret
# Policies have two names:
# - a short name ('name') that's only the policy name (e.g. testpolicy)
# - a canonical name ('cname') that contains the policy type and hash
# (e.g. SSLNegotiationPolicy-testpolicy-14b32f668639cc8ea1391e062af98524)
policies_by_cname = {}
cnames_by_name = {}
for p in policies:
cname = _policy_cname(p)
policies_by_cname[cname] = p
cnames_by_name[p["policy_name"]] = cname
expected_policy_names = policies_by_cname.keys()
actual_policy_names = lb["policies"]
# This is sadly a huge hack to get around the fact that AWS assigns a
# default SSLNegotiationPolicyType policy (with the naming scheme
# ELBSecurityPolicy-YYYY-MM) to all ELBs terminating SSL without an
# explicit policy set. If we don't keep track of the default policies and
# explicitly exclude them from deletion, orchestration will fail because we
# attempt to delete the default policy that's being used by listeners that
# were created with no explicit policy.
default_aws_policies = set()
expected_policies_by_listener = {}
for l in listeners:
expected_policies_by_listener[l["elb_port"]] = {
cnames_by_name[p] for p in l.get("policies", [])
}
actual_policies_by_listener = {}
for l in lb["listeners"]:
listener_policies = set(l.get("policies", []))
actual_policies_by_listener[l["elb_port"]] = listener_policies
# Determine if any actual listener policies look like default policies,
# so we can exclude them from deletion below (see note about this hack
# above).
for p in listener_policies:
if re.match(r"^ELBSecurityPolicy-\d{4}-\d{2}$", p):
default_aws_policies.add(p)
expected_policies_by_backend = {}
for b in backends:
expected_policies_by_backend[b["instance_port"]] = {
cnames_by_name[p] for p in b.get("policies", [])
}
actual_policies_by_backend = {}
for b in lb["backends"]:
backend_policies = set(b.get("policies", []))
actual_policies_by_backend[b["instance_port"]] = backend_policies
to_delete = []
to_create = []
for policy_name in expected_policy_names:
if policy_name not in actual_policy_names:
to_create.append(policy_name)
for policy_name in actual_policy_names:
if policy_name not in expected_policy_names:
if policy_name not in default_aws_policies:
to_delete.append(policy_name)
listeners_to_update = set()
for port, policies in expected_policies_by_listener.items():
if policies != actual_policies_by_listener.get(port, set()):
listeners_to_update.add(port)
for port, policies in actual_policies_by_listener.items():
if policies != expected_policies_by_listener.get(port, set()):
listeners_to_update.add(port)
backends_to_update = set()
for port, policies in expected_policies_by_backend.items():
if policies != actual_policies_by_backend.get(port, set()):
backends_to_update.add(port)
for port, policies in actual_policies_by_backend.items():
if policies != expected_policies_by_backend.get(port, set()):
backends_to_update.add(port)
if __opts__["test"]:
msg = []
if to_create or to_delete:
msg.append("ELB {} set to have policies modified:".format(name))
for policy in to_create:
msg.append("Policy {} added.".format(policy))
for policy in to_delete:
msg.append("Policy {} deleted.".format(policy))
ret["result"] = None
else:
msg.append("Policies already set on ELB {}.".format(name))
for listener in listeners_to_update:
msg.append("Listener {} policies updated.".format(listener))
for backend in backends_to_update:
msg.append("Backend {} policies updated.".format(backend))
ret["comment"] = " ".join(msg)
return ret
if to_create:
for policy_name in to_create:
created = __salt__["boto_elb.create_policy"](
name=name,
policy_name=policy_name,
policy_type=policies_by_cname[policy_name]["policy_type"],
policy=policies_by_cname[policy_name]["policy"],
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if created:
ret["changes"].setdefault(policy_name, {})["new"] = policy_name
comment = "Policy {} was created on ELB {}".format(policy_name, name)
ret["comment"] = " ".join([ret["comment"], comment])
ret["result"] = True
else:
ret["result"] = False
return ret
for port in listeners_to_update:
policy_set = __salt__["boto_elb.set_listener_policy"](
name=name,
port=port,
policies=list(expected_policies_by_listener.get(port, [])),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if policy_set:
policy_key = "listener_{}_policy".format(port)
ret["changes"][policy_key] = {
"old": list(actual_policies_by_listener.get(port, [])),
"new": list(expected_policies_by_listener.get(port, [])),
}
comment = "Policy {} was created on ELB {} listener {}".format(
expected_policies_by_listener[port], name, port
)
ret["comment"] = " ".join([ret["comment"], comment])
ret["result"] = True
else:
ret["result"] = False
return ret
for port in backends_to_update:
policy_set = __salt__["boto_elb.set_backend_policy"](
name=name,
port=port,
policies=list(expected_policies_by_backend.get(port, [])),
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if policy_set:
policy_key = "backend_{}_policy".format(port)
ret["changes"][policy_key] = {
"old": list(actual_policies_by_backend.get(port, [])),
"new": list(expected_policies_by_backend.get(port, [])),
}
comment = "Policy {} was created on ELB {} backend {}".format(
expected_policies_by_backend[port], name, port
)
ret["comment"] = " ".join([ret["comment"], comment])
ret["result"] = True
else:
ret["result"] = False
return ret
if to_delete:
for policy_name in to_delete:
deleted = __salt__["boto_elb.delete_policy"](
name=name,
policy_name=policy_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if deleted:
ret["changes"].setdefault(policy_name, {})["old"] = policy_name
comment = "Policy {} was deleted from ELB {}".format(policy_name, name)
ret["comment"] = " ".join([ret["comment"], comment])
ret["result"] = True
else:
ret["result"] = False
return ret
return ret
def _policy_cname(policy_dict):
policy_name = policy_dict["policy_name"]
policy_type = policy_dict["policy_type"]
policy = policy_dict["policy"]
canonical_policy_repr = str(sorted(list(policy.items()), key=lambda x: str(x[0])))
policy_hash = hashlib.md5(
salt.utils.stringutils.to_bytes(str(canonical_policy_repr))
).hexdigest()
if policy_type.endswith("Type"):
policy_type = policy_type[:-4]
return "{}-{}-{}".format(policy_type, policy_name, policy_hash)
def absent(name, region=None, key=None, keyid=None, profile=None):
"""
Ensure an ELB does not exist
name
name of the ELB
"""
ret = {"name": name, "result": True, "comment": "", "changes": {}}
exists = __salt__["boto_elb.exists"](name, region, key, keyid, profile)
if exists:
if __opts__["test"]:
ret["comment"] = "ELB {} is set to be removed.".format(name)
ret["result"] = None
return ret
deleted = __salt__["boto_elb.delete"](name, region, key, keyid, profile)
if deleted:
ret["changes"]["old"] = {"elb": name}
ret["changes"]["new"] = {"elb": None}
ret["comment"] = "ELB {} deleted.".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to delete {} ELB.".format(name)
else:
ret["comment"] = "{} ELB does not exist.".format(name)
return ret
def _tags_present(name, tags, region, key, keyid, profile):
"""
helper function to validate tags on elb
"""
ret = {"result": True, "comment": "", "changes": {}}
if tags:
lb = __salt__["boto_elb.get_elb_config"](name, region, key, keyid, profile)
tags_to_add = tags
tags_to_update = {}
tags_to_remove = []
if lb.get("tags"):
for _tag in lb["tags"]:
if _tag not in tags.keys():
if _tag not in tags_to_remove:
tags_to_remove.append(_tag)
else:
if tags[_tag] != lb["tags"][_tag]:
tags_to_update[_tag] = tags[_tag]
tags_to_add.pop(_tag)
if tags_to_remove:
if __opts__["test"]:
msg = "The following tag{} set to be removed: {}.".format(
("s are" if len(tags_to_remove) > 1 else " is"),
", ".join(tags_to_remove),
)
ret["comment"] = " ".join([ret["comment"], msg])
ret["result"] = None
else:
_ret = __salt__["boto_elb.delete_tags"](
name, tags_to_remove, region, key, keyid, profile
)
if not _ret:
ret["result"] = False
msg = "Error attempting to delete tag {}.".format(tags_to_remove)
ret["comment"] = " ".join([ret["comment"], msg])
return ret
if "old" not in ret["changes"]:
ret["changes"] = salt.utils.dictupdate.update(
ret["changes"], {"old": {"tags": {}}}
)
for _tag in tags_to_remove:
ret["changes"]["old"]["tags"][_tag] = lb["tags"][_tag]
if tags_to_add or tags_to_update:
if __opts__["test"]:
if tags_to_add:
msg = "The following tag{} set to be added: {}.".format(
("s are" if len(tags_to_add.keys()) > 1 else " is"),
", ".join(tags_to_add.keys()),
)
ret["comment"] = " ".join([ret["comment"], msg])
ret["result"] = None
if tags_to_update:
msg = "The following tag {} set to be updated: {}.".format(
(
"values are"
if len(tags_to_update.keys()) > 1
else "value is"
),
", ".join(tags_to_update.keys()),
)
ret["comment"] = " ".join([ret["comment"], msg])
else:
all_tag_changes = salt.utils.dictupdate.update(
tags_to_add, tags_to_update
)
_ret = __salt__["boto_elb.set_tags"](
name, all_tag_changes, region, key, keyid, profile
)
if not _ret:
ret["result"] = False
msg = "Error attempting to set tags."
ret["comment"] = " ".join([ret["comment"], msg])
return ret
if "old" not in ret["changes"]:
ret["changes"] = salt.utils.dictupdate.update(
ret["changes"], {"old": {"tags": {}}}
)
if "new" not in ret["changes"]:
ret["changes"] = salt.utils.dictupdate.update(
ret["changes"], {"new": {"tags": {}}}
)
for tag in all_tag_changes:
ret["changes"]["new"]["tags"][tag] = tags[tag]
if "tags" in lb:
if lb["tags"]:
if tag in lb["tags"]:
ret["changes"]["old"]["tags"][tag] = lb["tags"][tag]
if not tags_to_update and not tags_to_remove and not tags_to_add:
msg = "Tags are already set."
ret["comment"] = " ".join([ret["comment"], msg])
return ret
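# Illustrative sketch of the add/update/remove buckets _tags_present derives
# from the desired tags and the tags currently on the ELB (mirroring the loop
# above, but on hypothetical data and without mutating the caller's dict).
def _example_tag_diff():
    desired = {"env": "prod", "team": "web"}
    current = {"env": "staging", "owner": "ops"}
    tags_to_add = dict(desired)
    tags_to_update = {}
    tags_to_remove = []
    for tag in current:
        if tag not in desired:
            tags_to_remove.append(tag)          # ['owner']
        elif current[tag] != desired[tag]:
            tags_to_update[tag] = desired[tag]  # {'env': 'prod'}
            tags_to_add.pop(tag)                # leaves {'team': 'web'}
    return tags_to_add, tags_to_update, tags_to_remove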
|
|
"""Common Shell Utilities."""
import os
import sys
from subprocess import Popen, PIPE
from multiprocessing import Process
from threading import Thread
from ..core.meta import MetaMixin
from ..core.exc import FrameworkError
def exec_cmd(cmd_args, *args, **kw):
"""
Execute a shell call using Subprocess. All additional `*args` and
`**kwargs` are passed directly to subprocess.Popen. See `Subprocess
<http://docs.python.org/library/subprocess.html>`_ for more information
on the features of `Popen()`.
:param cmd_args: List of command line arguments.
:type cmd_args: list.
:param args: Additional arguments are passed to Popen().
:param kwargs: Additional keyword arguments are passed to Popen().
:returns: The (stdout, stderror, return_code) of the command.
:rtype: tuple
Usage:
.. code-block:: python
from cement.utils import shell
stdout, stderr, exitcode = shell.exec_cmd(['echo', 'helloworld'])
"""
if 'stdout' not in kw.keys():
kw['stdout'] = PIPE
if 'stderr' not in kw.keys():
kw['stderr'] = PIPE
proc = Popen(cmd_args, *args, **kw)
(stdout, stderr) = proc.communicate()
proc.wait()
return (stdout, stderr, proc.returncode)
def exec_cmd2(cmd_args, *args, **kw):
"""
Similar to exec_cmd, but does not capture stdout or stderr (therefore
allowing output to print to the console). All additional `*args` and
`**kwargs` are passed directly to subprocess.Popen. See `Subprocess
<http://docs.python.org/library/subprocess.html>`_ for more information
on the features of `Popen()`.
:param cmd_args: List of command line arguments.
:type cmd_args: list.
:param args: Additional arguments are passed to Popen().
:param kwargs: Additional keyword arguments are passed to Popen().
:returns: The integer return code of the command.
:rtype: int
Usage:
.. code-block:: python
from cement.utils import shell
exitcode = shell.exec_cmd2(['echo', 'helloworld'])
"""
proc = Popen(cmd_args, *args, **kw)
proc.wait()
return proc.returncode
def spawn_process(target, start=True, join=False, *args, **kwargs):
"""
A quick wrapper around multiprocessing.Process(). By default the start()
function will be called before the spawned process object is returned.
See `MultiProcessing
<https://docs.python.org/2/library/multiprocessing.html>`_ for more
information on the features of `Process()`.
:param target: The target function to execute in the sub-process.
:param start: Call start() on the process before returning the process
object.
:param join: Call join() on the process before returning the process
object. Only called if start=True.
:param args: Additional arguments are passed to Process().
:param kwargs: Additional keyword arguments are passed to Process().
:returns: The process object returned by Process().
Usage:
.. code-block:: python
from cement.utils import shell
def add(a, b):
print(a + b)
p = shell.spawn_process(add, args=(12, 27))
p.join()
"""
proc = Process(target=target, *args, **kwargs)
if start and not join:
proc.start()
elif start and join:
proc.start()
proc.join()
return proc
def spawn_thread(target, start=True, join=False, *args, **kwargs):
"""
A quick wrapper around threading.Thread(). By default the start()
function will be called before the spawned thread object is returned.
See `Threading
<https://docs.python.org/2/library/threading.html>`_ for more
information on the features of `Thread()`.
:param target: The target function to execute in the thread.
:param start: Call start() on the thread before returning the thread
object.
:param join: Call join() on the thread before returning the thread
object. Only called if start=True.
:param args: Additional arguments are passed to Thread().
:param kwargs: Additional keyword arguments are passed to Thread().
:returns: The thread object returned by Thread().
Usage:
.. code-block:: python
from cement.utils import shell
def add(a, b):
print(a + b)
t = shell.spawn_thread(add, args=(12, 27))
t.join()
"""
thr = Thread(target=target, *args, **kwargs)
if start and not join:
thr.start()
elif start and join:
thr.start()
thr.join()
return thr
class Prompt(MetaMixin):
"""
A wrapper around `raw_input` or `input` (py3) whose purpose is to limit
the redundant tasks of gathering user input. Can be used in several ways
depending on the use case (simple input, options, and numbered
selection).
:param text: The text displayed at the input prompt.
Usage:
Simple prompt to halt operations and wait for user to hit enter:
.. code-block:: python
p = shell.Prompt("Press Enter To Continue", default='ENTER')
.. code-block:: text
$ python myapp.py
Press Enter To Continue
$
Provide a numbered list for longer selections:
.. code-block:: python
p = Prompt("Where do you live?",
options=[
'San Antonio, TX',
'Austin, TX',
'Dallas, TX',
'Houston, TX',
],
numbered = True,
)
.. code-block:: text
Where do you live?
1: San Antonio, TX
2: Austin, TX
3: Dallas, TX
4: Houston, TX
Enter the number for your selection:
Create a more complex prompt, and process the input from the user:
.. code-block:: python
class MyPrompt(Prompt):
class Meta:
text = "Do you agree to the terms?"
options = ['Yes', 'no', 'maybe-so']
options_separator = '|'
default = 'no'
clear = True
max_attempts = 99
def process_input(self):
if self.input.lower() == 'yes':
# do something crazy
pass
else:
# don't do anything... maybe exit?
print("User doesn't agree! I'm outa here")
sys.exit(1)
MyPrompt()
.. code-block:: text
$ python myapp.py
[TERMINAL CLEAR]
Do you agree to the terms? [Yes|no|maybe-so] no
User doesn't agree! I'm outa here
$ echo $?
$ 1
"""
class Meta:
"""
Optional meta-data (can also be passed as keyword arguments to the
parent class).
"""
#: The text that is displayed to prompt the user
text = "Tell me something interesting:"
#: A default value to use if the user doesn't provide any input
default = None
#: Options to provide to the user. If set, the input must match one
#: of the items in the options selection.
options = None
#: Separator to use within the option selection (non-numbered)
options_separator = ','
#: Display options in a numbered list, where the user can enter a
#: number. Useful for long selections.
numbered = False
#: The text to display along with the numbered selection for user
#: input.
selection_text = "Enter the number for your selection:"
#: Whether or not to automatically prompt() the user once the class
#: is instantiated.
auto = True
#: Whether to treat user input as case insensitive (only used to
#: compare user input with available options).
case_insensitive = True
#: Whether or not to clear the terminal when prompting the user.
clear = False
#: Command to issue when clearing the terminal.
clear_command = 'clear'
#: Max attempts to get proper input from the user before giving up.
max_attempts = 10
#: Raise an exception when max_attempts is hit? If not, Prompt
#: passes the input through as `None`.
max_attempts_exception = True
def __init__(self, text=None, *args, **kw):
if text is not None:
kw['text'] = text
super(Prompt, self).__init__(*args, **kw)
self.input = None
if self._meta.auto:
self.prompt()
def _prompt(self):
if self._meta.clear:
os.system(self._meta.clear_command)
text = ""
if self._meta.options is not None:
if self._meta.numbered is True:
text = text + self._meta.text + "\n\n"
count = 1
for option in self._meta.options:
text = text + "%s: %s\n" % (count, option)
count += 1
text = text + "\n"
text = text + self._meta.selection_text
else:
sep = self._meta.options_separator
text = "%s [%s]" % (self._meta.text,
sep.join(self._meta.options))
else:
text = self._meta.text
if sys.version_info[0] < 3: # pragma: nocover # noqa
self.input = raw_input("%s " % text) # pragma: nocover # noqa
else: # pragma: nocover # noqa
self.input = input("%s " % text) # pragma: nocover # noqa
if self.input == '' and self._meta.default is not None:
self.input = self._meta.default
elif self.input == '':
self.input = None
def prompt(self):
"""
Prompt the user, and store their input as `self.input`.
"""
attempt = 0
while self.input is None:
if attempt >= int(self._meta.max_attempts):
if self._meta.max_attempts_exception is True:
raise FrameworkError("Maximum attempts exceeded getting "
"valid user input")
else:
return self.input
attempt += 1
self._prompt()
if self.input is None:
continue
elif self._meta.options is not None:
if self._meta.numbered:
try:
self.input = self._meta.options[int(self.input) - 1]
except (IndexError, ValueError) as e:
self.input = None
continue
else:
if self._meta.case_insensitive is True:
lower_options = [x.lower()
for x in self._meta.options]
if not self.input.lower() in lower_options:
self.input = None
continue
else:
if self.input not in self._meta.options:
self.input = None
continue
self.process_input()
return self.input
def process_input(self):
"""
Does nothing by default. Intended to be overridden in a sub-class to
handle user input after the user has been prompted.
"""
pass
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import os
import unittest
from iptest import IronPythonTestCase, is_cli, is_netcoreapp, run_test, skipUnlessIronPython
@unittest.skipIf(is_netcoreapp, 'no clr.CompileModules')
@skipUnlessIronPython()
class CompilerTest(IronPythonTestCase):
def compileCode(self, name, *codeArr):
import clr
inputFiles = []
counter = 0
for code in codeArr:
inputFile = os.path.join(self.temporary_dir, name + ("" if counter == 0 else str(counter)) + ".py")
self.write_to_file(inputFile, code)
inputFiles.append(inputFile)
counter+=1
dllFile = os.path.join(self.temporary_dir, name + ".dll")
clr.CompileModules(dllFile, mainModule=inputFiles[0], *inputFiles)
self.delete_files(*inputFiles)
clr.AddReferenceToFileAndPath(dllFile)
def compilePackage(self, packageName, codeDict):
import clr
packagePath = os.path.join(self.temporary_dir, packageName)
self.ensure_directory_present(packagePath)
fileList = []
for fileName, code in codeDict.items():
filePath = os.path.join(packagePath, fileName)
self.ensure_directory_present(os.path.dirname(filePath))
self.write_to_file(filePath, code)
fileList.append(filePath)
dllFile = os.path.join(self.temporary_dir, packageName + ".dll")
clr.CompileModules(dllFile, mainModule=fileList[0], *fileList)
self.delete_files(*fileList)
clr.AddReferenceToFileAndPath(dllFile)
############################ Tests ###################################################
def test_simple(self):
self.compileCode("simpleTest", "def f(): return 42")
import simpleTest
self.assertEqual(simpleTest.f(), 42)
def test_simple_dynsite(self):
#containing a dynamic site.
self.compileCode("simpleDynSiteTest", "def f(a , b): return a + b")
import simpleDynSiteTest
self.assertEqual(simpleDynSiteTest.f(2,3), 5)
def test_syntax_error(self):
self.assertRaises(SyntaxError, self.compileCode, "syntaxerrTest", "def f() pass")
def test_runtime_error(self):
self.compileCode("runtimeError", "def f(): print(a)")
from runtimeError import f
self.assertRaises(NameError, f)
def test_multiple_files(self):
self.compileCode("multiFiles", "def f(): return 42", "def g(): return 33")
import multiFiles, multiFiles1
self.assertEqual(multiFiles.f(), 42)
self.assertEqual(multiFiles1.g(), 33)
def test_multifile_import(self):
self.compileCode("multiFileImport", "import multiFileImport1\ndef f(): return multiFileImport1.f()", "def f(): return 42")
import multiFileImport
self.assertEqual(multiFileImport.f(), 42)
def test_multifile_import_external(self):
self.compileCode("multiFileImportExternal", "import external\ndef f(): return external.f()")
self.write_to_file(os.path.join(self.temporary_dir, "external.py"), "def f(): return 'hello'")
import multiFileImportExternal
self.assertEqual(multiFileImportExternal.f(), 'hello')
def test_load_order_builtins(self):
self.compileCode("sys", "def f(): return 'hello'")
import sys
self.assertRaises(AttributeError, lambda: sys.f)
def test_load_order_modfile(self):
import clr
fileName = os.path.join(self.temporary_dir,"loadOrderMod.py")
dllName = os.path.join(self.temporary_dir,"loadOrderMod.dll")
self.write_to_file(fileName, "def f(): return 'hello'")
clr.CompileModules(dllName, fileName)
self.write_to_file(fileName, "def f(): return 'bonjour'")
clr.AddReferenceToFileAndPath(dllName)
import loadOrderMod
self.assertEqual(loadOrderMod.f(), 'hello')
def test_exceptions(self):
self.compileCode("exceptionsTest", "def f(): raise SystemError")
import exceptionsTest
self.assertRaises(SystemError, exceptionsTest.f)
def test_package_init(self):
self.compilePackage("initPackage", { "__init__.py" : "def f(): return 42" });
import initPackage
self.assertEqual(initPackage.f(), 42)
def test_package_simple(self):
self.compilePackage("simplePackage", { "__init__.py" : "from . import a\nfrom . import b\ndef f(): return a.f() + b.f()",
"a.py" : "def f() : return 10",
"b.py" : "def f() : return 20"})
import simplePackage
self.assertEqual(simplePackage.f(), 30)
self.assertEqual(simplePackage.a.f(), 10)
self.assertEqual(simplePackage.b.f(), 20)
def test_package_subpackage(self):
self.compilePackage("subPackage", { "__init__.py" : "from . import a\nfrom .b import c\ndef f(): return a.f() + c.f()",
"a.py" : "def f(): return 10",
"b/__init__.py" : "def f(): return 'kthxbye'",
"b/c.py" : "def f(): return 20"})
import subPackage
self.assertEqual(subPackage.f(), 30)
self.assertEqual(subPackage.b.f(), 'kthxbye')
self.assertEqual(subPackage.b.c.f(), 20)
def test_package_subpackage_relative_imports(self):
self.compilePackage("subPackage_relative", { "__init__.py" : "from .foo import bar",
"foo/__init__.py" : "from .foo import bar",
"foo/foo.py" : "bar = 'BAR'"})
import subPackage_relative
self.assertEqual(subPackage_relative.bar, 'BAR')
#TODO add some more tests for main after this bug is fixed.
def test_main(self):
self.compileCode("mainTest", "def f(): return __name__")
#this probably won't work. Need to verify once bug is fixed.
import mainTest
self.assertEqual(mainTest.f(), "mainTest")
def test_empty_file(self):
self.compileCode("emptyFile", "")
import emptyFile
def test_negative(self):
import clr
self.assertRaises(TypeError, clr.CompileModules, None, None)
self.assertRaises(IOError, clr.CompileModules, "foo.dll", "ffoo.py")
def test_overwrite(self):
import clr
self.write_to_file(os.path.join(self.temporary_dir, "overwrite.py"), "def foo(): return 'bar'")
dllFile = os.path.join(self.temporary_dir, "overwrite.dll")
clr.CompileModules(dllFile, os.path.join(self.temporary_dir, "overwrite.py"))
self.write_to_file(os.path.join(self.temporary_dir, "overwrite1.py"), "def foo(): return 'boo'")
clr.CompileModules(dllFile, os.path.join(self.temporary_dir, "overwrite1.py"))
clr.AddReferenceToFileAndPath(dllFile)
import overwrite1
self.assertEqual(overwrite1.foo(), 'boo')
def test_cyclic_modules(self):
self.compileCode("cyclic_modules", "import cyclic_modules1\nA = 0", "import cyclic_modules\nA=1")
import cyclic_modules
self.assertEqual(cyclic_modules.A, 0)
self.assertEqual(cyclic_modules.cyclic_modules1.A, 1)
import cyclic_modules1
self.assertEqual(cyclic_modules1.A, 1)
self.assertEqual(cyclic_modules1.cyclic_modules.A, 0)
def test_cyclic_pkg(self):
self.compilePackage("cyclic_package", { "__init__.py" : "from . import cyclic_submodules0\nfrom . import cyclic_submodules1",
"cyclic_submodules0.py" : "import cyclic_package.cyclic_submodules1\nA = 2",
"cyclic_submodules1.py" : "import cyclic_package.cyclic_submodules0\nA = 3"})
import cyclic_package
self.assertEqual(cyclic_package.cyclic_submodules0.A, 2)
self.assertEqual(cyclic_package.cyclic_submodules0.cyclic_package.cyclic_submodules1.A, 3)
self.assertEqual(cyclic_package.cyclic_submodules1.A, 3)
self.assertEqual(cyclic_package.cyclic_submodules1.cyclic_package.cyclic_submodules0.A, 2)
def test_system_core_cp20623(self):
self.compileCode("cp20623", "import System\nA=System.DateTime(350000000).Second\nprint(A)")
import cp20623
self.assertEqual(cp20623.A, 35)
#TODO: need to also generate a standalone exe from cp20623 and try running it
def test_cp30178(self):
self.compileCode("cp30178", 'mydict = { "a": ("Fail", "tuple") }')
import cp30178
self.assertEqual(cp30178.mydict, {'a' : ('Fail', 'tuple')})
run_test(__name__)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import copy
import datetime
import errno
import functools
import hashlib
import inspect
import logging as std_logging
import os
import pyclbr
import random
import re
import shutil
import socket
import struct
import sys
import tempfile
import textwrap
import time
from xml.sax import saxutils
import eventlet
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_context import context as common_context
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import prettytable
import six
from six.moves import range
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
import nova.network
from nova import safe_utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# used in limits
TIME_UNITS = {
'SECOND': 1,
'MINUTE': 60,
'HOUR': 3600,
'DAY': 86400
}
_IS_NEUTRON = None
synchronized = lockutils.synchronized_with_prefix('nova-')
SM_IMAGE_PROP_PREFIX = "image_"
SM_INHERITABLE_KEYS = (
'min_ram', 'min_disk', 'disk_format', 'container_format',
)
# Keys which hold large structured data that won't fit in the
# size constraints of the system_metadata table, so we avoid
# storing and/or loading them.
SM_SKIP_KEYS = (
# Legacy names
'mappings', 'block_device_mapping',
# Modern names
'img_mappings', 'img_block_device_mapping',
)
# Image attributes which Cinder stores in volume image metadata
# as regular properties
VIM_IMAGE_ATTRIBUTES = (
'image_id', 'image_name', 'size', 'checksum',
'container_format', 'disk_format', 'min_ram', 'min_disk',
)
_FILE_CACHE = {}
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns Boolean indicating whether the vpn_server is listening.
Basic packet structure is below.
Client packet (14 bytes)::
0 1 8 9 13
+-+--------+-----+
|x| cli_id |?????|
+-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
0 1 8 9 13 14 21 22 25
+-+--------+-----+--------+----+
|x| srv_id |?????| cli_id |????|
+-+--------+-----+--------+----+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
# NOTE(tonyb) session_id isn't used for a real VPN connection so using a
# cryptographically weak value is fine.
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack('!BQxxxxx', 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
fmt = '!BQxxxxxQxxxx'
if len(received) != struct.calcsize(fmt):
LOG.warning(_LW('Expected to receive %(exp)s bytes, '
'but actually %(act)s'),
dict(exp=struct.calcsize(fmt), act=len(received)))
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
return (identifier == 0x40 and client_sess == session_id)
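# Illustrative sketch of the packet sizes implied by the struct formats used in
# vpn_ping; purely local arithmetic, no sockets involved.
def _example_vpn_ping_packet_sizes():
    # '!BQxxxxx': 1-byte identifier (0x38) + 8-byte session id + 5 pad bytes
    client_len = struct.calcsize('!BQxxxxx')        # 14
    # '!BQxxxxxQxxxx': identifier + server session + pad + client session + pad
    server_len = struct.calcsize('!BQxxxxxQxxxx')   # 26
    return client_len, server_len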
def get_root_helper():
if CONF.workarounds.disable_rootwrap:
cmd = 'sudo'
else:
cmd = 'sudo nova-rootwrap %s' % CONF.rootwrap_config
return cmd
def _get_rootwrap_helper():
if CONF.use_rootwrap_daemon:
return RootwrapDaemonHelper(CONF.rootwrap_config)
else:
return RootwrapProcessHelper()
class RootwrapProcessHelper(object):
def trycmd(self, *cmd, **kwargs):
kwargs['root_helper'] = get_root_helper()
return processutils.trycmd(*cmd, **kwargs)
def execute(self, *cmd, **kwargs):
kwargs['root_helper'] = get_root_helper()
return processutils.execute(*cmd, **kwargs)
class RootwrapDaemonHelper(RootwrapProcessHelper):
_clients = {}
@synchronized('daemon-client-lock')
def _get_client(cls, rootwrap_config):
try:
return cls._clients[rootwrap_config]
except KeyError:
from oslo_rootwrap import client
new_client = client.Client([
"sudo", "nova-rootwrap-daemon", rootwrap_config])
cls._clients[rootwrap_config] = new_client
return new_client
def __init__(self, rootwrap_config):
self.client = self._get_client(rootwrap_config)
def trycmd(self, *args, **kwargs):
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = self.execute(*args, **kwargs)
failed = False
except processutils.ProcessExecutionError as exn:
out, err = '', six.text_type(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def execute(self, *cmd, **kwargs):
# NOTE(dims): This method provides compatibility with the
# processutils.execute interface, so that calls through the daemon or
# direct rootwrap honor the same set of flags in kwargs and we don't
# regress any current behavior.
cmd = [str(c) for c in cmd]
loglevel = kwargs.pop('loglevel', std_logging.DEBUG)
log_errors = kwargs.pop('log_errors', None)
process_input = kwargs.pop('process_input', None)
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
sanitized_cmd = strutils.mask_password(' '.join(cmd))
LOG.info(_LI('Executing RootwrapDaemonHelper.execute '
'cmd=[%(cmd)r] kwargs=[%(kwargs)r]'),
{'cmd': sanitized_cmd, 'kwargs': kwargs})
while attempts > 0:
attempts -= 1
try:
start_time = time.time()
LOG.log(loglevel, _('Running cmd (subprocess): %s'),
sanitized_cmd)
(returncode, out, err) = self.client.execute(
cmd, process_input)
end_time = time.time() - start_time
LOG.log(loglevel,
'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
'in %(end_time)0.3fs',
{'sanitized_cmd': sanitized_cmd,
'return_code': returncode,
'end_time': end_time})
if not ignore_exit_code and returncode not in check_exit_code:
out = strutils.mask_password(out)
err = strutils.mask_password(err)
raise processutils.ProcessExecutionError(
exit_code=returncode,
stdout=out,
stderr=err,
cmd=sanitized_cmd)
return (out, err)
except processutils.ProcessExecutionError as err:
# if we want to always log the errors or if this is
# the final attempt that failed and we want to log that.
if log_errors == processutils.LOG_ALL_ERRORS or (
log_errors == processutils.LOG_FINAL_ERROR and
not attempts):
format = _('%(desc)r\ncommand: %(cmd)r\n'
'exit code: %(code)r\nstdout: %(stdout)r\n'
'stderr: %(stderr)r')
LOG.log(loglevel, format, {"desc": err.description,
"cmd": err.cmd,
"code": err.exit_code,
"stdout": err.stdout,
"stderr": err.stderr})
if not attempts:
LOG.log(loglevel, _('%r failed. Not Retrying.'),
sanitized_cmd)
raise
else:
LOG.log(loglevel, _('%r failed. Retrying.'),
sanitized_cmd)
if delay_on_retry:
time.sleep(random.randint(20, 200) / 100.0)
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and kwargs.get('run_as_root'):
if CONF.use_rootwrap_daemon:
return RootwrapDaemonHelper(CONF.rootwrap_config).execute(
*cmd, **kwargs)
else:
return RootwrapProcessHelper().execute(*cmd, **kwargs)
return processutils.execute(*cmd, **kwargs)
def ssh_execute(dest, *cmd, **kwargs):
"""Convenience wrapper to execute ssh command."""
ssh_cmd = ['ssh', '-o', 'BatchMode=yes']
ssh_cmd.append(dest)
ssh_cmd.extend(cmd)
return execute(*ssh_cmd, **kwargs)
def trycmd(*args, **kwargs):
"""Convenience wrapper around oslo's trycmd() method."""
if kwargs.get('run_as_root', False):
if CONF.use_rootwrap_daemon:
return RootwrapDaemonHelper(CONF.rootwrap_config).trycmd(
*args, **kwargs)
else:
return RootwrapProcessHelper().trycmd(*args, **kwargs)
return processutils.trycmd(*args, **kwargs)
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for _x in range(size)]
return '%s-%s' % (topic, ''.join(choices))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def last_completed_audit_period(unit=None, before=None):
"""This method gives you the most recently *completed* audit period.
arguments:
unit: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
before: Give the audit period most recently completed before
<timestamp>. Defaults to now.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.instance_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
if before is not None:
rightnow = before
else:
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
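# Illustrative sketch of a 'day@18' audit period evaluated against a fixed
# "before" timestamp; the timestamp is an arbitrary example value.
def _example_audit_period():
    before = datetime.datetime(2016, 3, 10, 12, 0, 0)
    # Most recent *completed* day-period anchored at 18:00 UTC:
    # begin=2016-03-08 18:00:00, end=2016-03-09 18:00:00
    return last_completed_audit_period(unit='day@18', before=before)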
def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
if length is None:
length = CONF.password_length
r = random.SystemRandom()
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [r.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
r.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([r.choice(symbols) for _i in range(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
r.shuffle(password)
return ''.join(password)
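# Illustrative sketch showing that, for length >= len(symbolgroups), every
# symbol group contributes at least one character to the generated password.
def _example_generate_password():
    pw = generate_password(length=12, symbolgroups=DEFAULT_PASSWORD_SYMBOLS)
    assert len(pw) == 12
    assert any(c in DEFAULT_PASSWORD_SYMBOLS[0] for c in pw)  # a digit
    assert any(c in DEFAULT_PASSWORD_SYMBOLS[1] for c in pw)  # an upper-case letter
    assert any(c in DEFAULT_PASSWORD_SYMBOLS[2] for c in pw)  # a lower-case letter
    return pw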
def get_my_linklocal(interface):
try:
if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
msg = _('Link Local address is not found.:%s') % if_str
raise exception.NovaException(msg)
except Exception as ex:
msg = _("Couldn't get Link Local IP of %(interface)s"
" :%(ex)s") % {'interface': interface, 'ex': ex}
raise exception.NovaException(msg)
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
def utf8(value):
"""Try to turn a string into utf-8 if possible.
The original code was copied from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
"""
if value is None or isinstance(value, six.binary_type):
return value
if not isinstance(value, six.text_type):
value = six.text_type(value)
return value.encode('utf-8')
def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls):
return obj
raise Exception(_('Expected object of type: %s') % (str(cls)))
def parse_server_string(server_str):
"""Parses the given server_string and returns a tuple of host and port.
If it's not a combination of host part and port, the port element
is an empty string. If the input is invalid expression, return a tuple of
two empty strings.
"""
try:
# First of all, exclude pure IPv6 address (w/o port).
if netaddr.valid_ipv6(server_str):
return (server_str, '')
# Next, check if this is IPv6 address with a port number combination.
if server_str.find("]:") != -1:
(address, port) = server_str.replace('[', '', 1).split(']:')
return (address, port)
# Third, check if this is a combination of an address and a port
if server_str.find(':') == -1:
return (server_str, '')
# This must be a combination of an address and a port
(address, port) = server_str.split(':')
return (address, port)
except (ValueError, netaddr.AddrFormatError):
LOG.error(_LE('Invalid server_string: %s'), server_str)
return ('', '')
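# Illustrative sketch of the (host, port) tuples parse_server_string yields for
# a few representative (hypothetical) inputs.
def _example_parse_server_string():
    return [
        parse_server_string('192.168.1.10:8080'),   # ('192.168.1.10', '8080')
        parse_server_string('[2001:db8::1]:6080'),  # ('2001:db8::1', '6080')
        parse_server_string('2001:db8::1'),         # ('2001:db8::1', '')
        parse_server_string('myhost'),              # ('myhost', '')
    ]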
def is_valid_ipv6_cidr(address):
try:
netaddr.IPNetwork(address, version=6).cidr
return True
except (TypeError, netaddr.AddrFormatError):
return False
def get_shortened_ipv6(address):
addr = netaddr.IPAddress(address, version=6)
return str(addr.ipv6())
def get_shortened_ipv6_cidr(address):
net = netaddr.IPNetwork(address, version=6)
return str(net.cidr)
def is_valid_cidr(address):
"""Check if address is valid
The provided address can be a IPv6 or a IPv4
CIDR address.
"""
try:
# Validate the correct CIDR Address
netaddr.IPNetwork(address)
except netaddr.AddrFormatError:
return False
# Prior validation only partially verifies the /xx part
# Verify it here
ip_segment = address.split('/')
if (len(ip_segment) <= 1 or
ip_segment[1] == ''):
return False
return True
def get_ip_version(network):
"""Returns the IP version of a network (IPv4 or IPv6).
Raises AddrFormatError if invalid network.
"""
if netaddr.IPNetwork(network).version == 6:
return "IPv6"
elif netaddr.IPNetwork(network).version == 4:
return "IPv4"
def safe_ip_format(ip):
"""Transform ip string to "safe" format.
Will return ipv4 addresses unchanged, but will nest ipv6 addresses
inside square brackets.
"""
try:
if netaddr.IPAddress(ip).version == 6:
return '[%s]' % ip
except (TypeError, netaddr.AddrFormatError): # hostname
pass
# it's IPv4 or hostname
return ip
def monkey_patch():
"""If the CONF.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example:
'nova.api.ec2.cloud:nova.notifications.notify_decorator'
Parameters of the decorator are as follows.
(See nova.notifications.notify_decorator)
name - name of the function
function - object of the function
"""
# If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
if six.PY2:
is_method = inspect.ismethod
else:
def is_method(obj):
# Unbound methods became regular functions on Python 3
return inspect.ismethod(obj) or inspect.isfunction(obj)
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key, value in module_data.items():
# set the decorator for the class methods
if isinstance(value, pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, is_method):
setattr(clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(value, pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def sanitize_hostname(hostname, default_name=None):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs except
the length of hostname.
Window, Linux, and Dnsmasq has different limitation:
Windows: 255 (net_bios limits to 15, but window will truncate it)
Linux: 64
Dnsmasq: 63
Due to nova-network will leverage dnsmasq to set hostname, so we chose
63.
"""
def truncate_hostname(name):
if len(name) > 63:
LOG.warning(_LW("Hostname %(hostname)s is longer than 63, "
"truncate it to %(truncated_name)s"),
{'hostname': name, 'truncated_name': name[:63]})
return name[:63]
if isinstance(hostname, six.text_type):
# Remove characters outside the Unicode range U+0000-U+00FF
hostname = hostname.encode('latin-1', 'ignore')
if six.PY3:
hostname = hostname.decode('latin-1')
hostname = truncate_hostname(hostname)
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
# NOTE(eliqiao): set hostname to default_display_name to avoid
# empty hostname
if hostname == "" and default_name is not None:
return truncate_hostname(default_name)
return hostname
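# Illustrative sketch of sanitize_hostname on a made-up display name: spaces and
# underscores become dashes, disallowed characters are dropped, the result is
# lower-cased and stripped of leading/trailing '.' and '-'.
def _example_sanitize_hostname():
    # -> 'my-test-server-1'
    return sanitize_hostname(u'  My_Test Server #1.  ')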
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
"""Temporarily set the attr on a particular object to a given value then
revert when finished.
One use of this is to temporarily set the read_deleted flag on a context
object:
with temporary_mutation(context, read_deleted="yes"):
do_something_that_needed_deleted_objects()
"""
def is_dict_like(thing):
return hasattr(thing, 'has_key')
def get(thing, attr, default):
if is_dict_like(thing):
return thing.get(attr, default)
else:
return getattr(thing, attr, default)
def set_value(thing, attr, val):
if is_dict_like(thing):
thing[attr] = val
else:
setattr(thing, attr, val)
def delete(thing, attr):
if is_dict_like(thing):
del thing[attr]
else:
delattr(thing, attr)
NOT_PRESENT = object()
old_values = {}
for attr, new_value in kwargs.items():
old_values[attr] = get(obj, attr, NOT_PRESENT)
set_value(obj, attr, new_value)
try:
yield
finally:
for attr, old_value in old_values.items():
if old_value is NOT_PRESENT:
delete(obj, attr)
else:
set_value(obj, attr, old_value)
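# Illustrative sketch of temporary_mutation on a plain object attribute; the
# original value is restored once the block exits.
def _example_temporary_mutation():
    class _Thing(object):
        read_deleted = "no"
    thing = _Thing()
    with temporary_mutation(thing, read_deleted="yes"):
        assert thing.read_deleted == "yes"
    assert thing.read_deleted == "no"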
def generate_mac_address():
"""Generate an Ethernet MAC address."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
mac = [0xfa, 0x16, 0x3e,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:param owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
if 'dir' not in argdict:
argdict['dir'] = CONF.tempdir
tmpdir = tempfile.mkdtemp(**argdict)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.error(_LE('Could not remove tmpdir: %s'), e)
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
class UndoManager(object):
"""Provides a mechanism to facilitate rolling back a series of actions
when an exception is raised.
"""
def __init__(self):
self.undo_stack = []
def undo_with(self, undo_func):
self.undo_stack.append(undo_func)
def _rollback(self):
for undo_func in reversed(self.undo_stack):
undo_func()
def rollback_and_reraise(self, msg=None, **kwargs):
"""Rollback a series of actions then re-raise the exception.
.. note:: (sirp) This should only be called within an
exception handler.
"""
with excutils.save_and_reraise_exception():
if msg:
LOG.exception(msg, **kwargs)
self._rollback()
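# Illustrative sketch of registering undo callbacks and rolling them back, in
# reverse order, when a later step fails.
def _example_undo_manager():
    undo = UndoManager()
    log = []
    undo.undo_with(lambda: log.append('undo step 1'))
    undo.undo_with(lambda: log.append('undo step 2'))
    try:
        raise RuntimeError('step 3 failed')
    except RuntimeError:
        try:
            undo.rollback_and_reraise(msg='rolling back')
        except RuntimeError:
            pass
    return log  # ['undo step 2', 'undo step 1']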
def mkfs(fs, path, label=None, run_as_root=False):
"""Format a file or block device
:param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
'btrfs', etc.)
:param path: Path to file or block device to format
:param label: Volume label to use
"""
if fs == 'swap':
args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
# add -F to force no interactive execute on non-block device.
if fs in ('ext3', 'ext4', 'ntfs'):
args.extend(['-F'])
if label:
if fs in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
args.extend([label_opt, label])
args.append(path)
execute(*args, run_as_root=run_as_root)
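# Illustrative sketch of the argument vectors mkfs builds for a couple of
# filesystem types (shown without executing anything):
#   mkfs('ext4', '/dev/vdb', label='data')  ->  mkfs -t ext4 -F -L data /dev/vdb
#   mkfs('swap', '/dev/vdc')                ->  mkswap /dev/vdc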
def last_bytes(file_like_object, num):
"""Return num bytes from the end of the file, and remaining byte count.
:param file_like_object: The file to read
:param num: The number of bytes to return
:returns (data, remaining)
"""
try:
file_like_object.seek(-num, os.SEEK_END)
except IOError as e:
# seek() fails with EINVAL when trying to go before the start of the
# file. It means that num is larger than the file size, so just
# go to the start.
if e.errno == errno.EINVAL:
file_like_object.seek(0, os.SEEK_SET)
else:
raise
remaining = file_like_object.tell()
return (file_like_object.read(), remaining)
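# Illustrative sketch of last_bytes on a small in-memory file; ``remaining`` is
# the number of bytes that precede the returned chunk.
def _example_last_bytes():
    import io
    buf = io.BytesIO(b'0123456789')
    data, remaining = last_bytes(buf, 4)
    # data == b'6789', remaining == 6
    return data, remaining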
def metadata_to_dict(metadata, include_deleted=False):
result = {}
for item in metadata:
if not include_deleted and item.get('deleted'):
continue
result[item['key']] = item['value']
return result
def dict_to_metadata(metadata):
result = []
for key, value in six.iteritems(metadata):
result.append(dict(key=key, value=value))
return result
def instance_meta(instance):
if isinstance(instance['metadata'], dict):
return instance['metadata']
else:
return metadata_to_dict(instance['metadata'])
def instance_sys_meta(instance):
if not instance.get('system_metadata'):
return {}
if isinstance(instance['system_metadata'], dict):
return instance['system_metadata']
else:
return metadata_to_dict(instance['system_metadata'],
include_deleted=True)
def expects_func_args(*args):
def _decorator_checker(dec):
@functools.wraps(dec)
def _decorator(f):
base_f = safe_utils.get_wrapped_function(f)
arg_names, a, kw, _default = inspect.getargspec(base_f)
if a or kw or set(args) <= set(arg_names):
# NOTE (ndipanov): We can't really tell if correct stuff will
# be passed if it's a function with *args or **kwargs so
# we still carry on and hope for the best
return dec(f)
else:
raise TypeError("Decorated function %(f_name)s does not "
"have the arguments expected by the "
"decorator %(d_name)s" %
{'f_name': base_f.__name__,
'd_name': dec.__name__})
return _decorator
return _decorator_checker
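# Illustrative sketch: expects_func_args guards a decorator so it can only be
# applied to functions that accept the named argument. All names below are
# hypothetical.
def _example_expects_func_args():
    @expects_func_args('instance')
    def _logs_instance(func):
        # trivial pass-through decorator for the sake of the example
        return func
    def _good(self, context, instance):
        return instance
    def _bad(self, context):
        return None
    _logs_instance(_good)       # accepted: 'instance' is in the signature
    try:
        _logs_instance(_bad)    # rejected: raises TypeError
    except TypeError:
        pass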
class ExceptionHelper(object):
"""Class to wrap another and translate the ClientExceptions raised by its
function calls to the actual ones.
"""
def __init__(self, target):
self._target = target
def __getattr__(self, name):
func = getattr(self._target, name)
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except messaging.ExpectedException as e:
six.reraise(*e.exc_info)
return wrapper
def check_string_length(value, name=None, min_length=0, max_length=None):
"""Check the length of specified string
:param value: the value of the string
:param name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
if name is None:
msg = _("The input is not a string or unicode")
else:
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
if name is None:
name = value
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
raise exception.InvalidInput(message=msg)
if max_length and len(value) > max_length:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_length}
raise exception.InvalidInput(message=msg)
def validate_integer(value, name, min_value=None, max_value=None):
"""Make sure that value is a valid integer, potentially within range."""
try:
value = int(str(value))
except (ValueError, UnicodeEncodeError):
msg = _('%(value_name)s must be an integer')
raise exception.InvalidInput(reason=(
msg % {'value_name': name}))
if min_value is not None:
if value < min_value:
msg = _('%(value_name)s must be >= %(min_value)d')
raise exception.InvalidInput(
reason=(msg % {'value_name': name,
'min_value': min_value}))
if max_value is not None:
if value > max_value:
msg = _('%(value_name)s must be <= %(max_value)d')
raise exception.InvalidInput(
reason=(
msg % {'value_name': name,
'max_value': max_value})
)
return value
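# Example (illustrative, not part of the original module): values are coerced
# through int(str(value)), so numeric strings are accepted and the coerced
# integer is returned when it lies inside the requested range.
def _example_validate_integer():
    assert validate_integer('42', 'vcpus', min_value=1, max_value=128) == 42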
def spawn(func, *args, **kwargs):
"""Passthrough method for eventlet.spawn.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
It will also grab the context from the threadlocal store and add it to
the store on the new thread. This allows for continuity in logging the
context when using this method to spawn a new thread.
"""
_context = common_context.get_current()
@functools.wraps(func)
def context_wrapper(*args, **kwargs):
# NOTE: If update_store is not called after spawn it won't be
# available for the logger to pull from threadlocal storage.
if _context is not None:
_context.update_store()
return func(*args, **kwargs)
return eventlet.spawn(context_wrapper, *args, **kwargs)
def spawn_n(func, *args, **kwargs):
"""Passthrough method for eventlet.spawn_n.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
It will also grab the context from the threadlocal store and add it to
the store on the new thread. This allows for continuity in logging the
context when using this method to spawn a new thread.
"""
_context = common_context.get_current()
@functools.wraps(func)
def context_wrapper(*args, **kwargs):
# NOTE: If update_store is not called after spawn_n it won't be
# available for the logger to pull from threadlocal storage.
if _context is not None:
_context.update_store()
func(*args, **kwargs)
eventlet.spawn_n(context_wrapper, *args, **kwargs)
def is_none_string(val):
"""Check if a string represents a None value.
"""
if not isinstance(val, six.string_types):
return False
return val.lower() == 'none'
def is_neutron():
global _IS_NEUTRON
if _IS_NEUTRON is not None:
return _IS_NEUTRON
# TODO(sdague): As long as network_api_class is importable
# is_neutron can return None to mean we have no idea what their
# class is.
_IS_NEUTRON = (nova.network.is_neutron() is True)
return _IS_NEUTRON
def is_auto_disk_config_disabled(auto_disk_config_raw):
auto_disk_config_disabled = False
if auto_disk_config_raw is not None:
adc_lowered = auto_disk_config_raw.strip().lower()
if adc_lowered == "disabled":
auto_disk_config_disabled = True
return auto_disk_config_disabled
def get_auto_disk_config_from_instance(instance=None, sys_meta=None):
if sys_meta is None:
sys_meta = instance_sys_meta(instance)
return sys_meta.get("image_auto_disk_config")
def get_auto_disk_config_from_image_props(image_properties):
return image_properties.get("auto_disk_config")
def get_system_metadata_from_image(image_meta, flavor=None):
system_meta = {}
prefix_format = SM_IMAGE_PROP_PREFIX + '%s'
for key, value in six.iteritems(image_meta.get('properties', {})):
if key in SM_SKIP_KEYS:
continue
new_value = safe_truncate(six.text_type(value), 255)
system_meta[prefix_format % key] = new_value
for key in SM_INHERITABLE_KEYS:
value = image_meta.get(key)
if key == 'min_disk' and flavor:
if image_meta.get('disk_format') == 'vhd':
value = flavor['root_gb']
else:
value = max(value or 0, flavor['root_gb'])
if value is None:
continue
system_meta[prefix_format % key] = value
return system_meta
def get_image_from_system_metadata(system_meta):
image_meta = {}
properties = {}
if not isinstance(system_meta, dict):
system_meta = metadata_to_dict(system_meta, include_deleted=True)
for key, value in six.iteritems(system_meta):
if value is None:
continue
# NOTE(xqueralt): Not sure this has to inherit all the properties or
# just the ones we need. Leaving it for now to keep the old behaviour.
if key.startswith(SM_IMAGE_PROP_PREFIX):
key = key[len(SM_IMAGE_PROP_PREFIX):]
if key in SM_SKIP_KEYS:
continue
if key in SM_INHERITABLE_KEYS:
image_meta[key] = value
else:
properties[key] = value
image_meta['properties'] = properties
return image_meta
def get_image_metadata_from_volume(volume):
properties = copy.copy(volume.get('volume_image_metadata', {}))
image_meta = {'properties': properties}
# Volume size is no longer related to the original image size,
# so we take it from the volume directly. Cinder creates
# volumes in Gb increments, and stores size in Gb, whereas
# glance reports size in bytes. As we're returning glance
# metadata here, we need to convert it.
image_meta['size'] = volume.get('size', 0) * units.Gi
# NOTE(yjiang5): restore the basic attributes
# NOTE(mdbooth): These values come from volume_glance_metadata
# in cinder. This is a simple key/value table, and all values
# are strings. We need to convert them to ints to avoid
# unexpected type errors.
for attr in VIM_IMAGE_ATTRIBUTES:
val = properties.pop(attr, None)
if attr in ('min_ram', 'min_disk'):
image_meta[attr] = int(val or 0)
# NOTE(yjiang5): Always set the image status as 'active'
# and depends on followed volume_api.check_attach() to
# verify it. This hack should be harmless with that check.
image_meta['status'] = 'active'
return image_meta
def get_hash_str(base_str):
"""Returns string that represents MD5 hash of base_str (in hex format).
If base_str is a Unicode string, encode it to UTF-8.
"""
if isinstance(base_str, six.text_type):
base_str = base_str.encode('utf-8')
return hashlib.md5(base_str).hexdigest()
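# Example (illustrative, not part of the original module): unicode input is
# encoded to UTF-8 first, so text and its encoded bytes hash to the same
# value.
def _example_get_hash_str():
    assert get_hash_str(u'node-1') == get_hash_str(b'node-1')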
def filter_and_format_resource_metadata(resource_type, resource_list,
search_filts, metadata_type=None):
"""Get all metadata for a list of resources after filtering.
    search_filts is a list of dictionaries, where the values in the dictionary
    can be a string or regex string, or a list of strings/regex strings.
    Let's call a dict a 'filter block' and an item in the dict
    a 'filter'. A tag is returned if it matches ALL the filters in
    a filter block. If more than one value is specified for a
    filter, a tag is returned if it matches AT LEAST ONE value of the filter.
    If more than one filter block is specified, the tag must match ALL the
    filter blocks.
For example:
search_filts = [{'key': ['key1', 'key2'], 'value': 'val1'},
{'value': 'val2'}]
The filter translates to 'match any tag for which':
((key=key1 AND value=val1) OR (key=key2 AND value=val1)) AND
(value=val2)
This example filter will never match a tag.
:param resource_type: The resource type as a string, e.g. 'instance'
:param resource_list: List of resource objects
    :param search_filts: Filters to filter metadata to be returned. Can be a
        dict (e.g. {'key': 'env', 'value': 'prod'}), or a list of dicts
        (e.g. [{'key': 'env'}, {'value': 'beta'}]). Note that the values
        of the dict can be regular expressions.
:param metadata_type: Provided to search for a specific metadata type
(e.g. 'system_metadata')
:returns: List of dicts where each dict is of the form {'key':
'somekey', 'value': 'somevalue', 'instance_id':
'some-instance-uuid-aaa'} if resource_type is 'instance'.
"""
if isinstance(search_filts, dict):
search_filts = [search_filts]
def _get_id(resource):
if resource_type == 'instance':
return resource.get('uuid')
def _match_any(pattern_list, string):
if isinstance(pattern_list, str):
pattern_list = [pattern_list]
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(resource, search_filt, input_metadata):
ids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if ids and _get_id(resource) not in ids:
return {}
for k, v in six.iteritems(input_metadata):
# Both keys and value defined -- AND
if (keys_filter and values_filter and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
for res in resource_list:
if resource_type == 'instance':
# NOTE(rushiagr): metadata_type should be 'metadata' or
# 'system_metadata' if resource_type is instance. Defaulting to
# 'metadata' if not specified.
if metadata_type is None:
metadata_type = 'metadata'
metadata = res.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(res, filt, metadata)
for (k, v) in metadata.items():
formatted_metadata_list.append({'key': k, 'value': v,
'%s_id' % resource_type: _get_id(res)})
return formatted_metadata_list
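# Example (illustrative, not part of the original module): two hypothetical
# instances filtered down to the metadata rows whose key matches 'env'.
def _example_filter_and_format_resource_metadata():
    instances = [
        {'uuid': 'uuid-1', 'metadata': {'env': 'prod', 'tier': 'web'}},
        {'uuid': 'uuid-2', 'metadata': {'env': 'dev'}},
    ]
    result = filter_and_format_resource_metadata(
        'instance', instances, [{'key': 'env'}])
    assert result == [
        {'key': 'env', 'value': 'prod', 'instance_id': 'uuid-1'},
        {'key': 'env', 'value': 'dev', 'instance_id': 'uuid-2'},
    ]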
def safe_truncate(value, length):
"""Safely truncates unicode strings such that their encoded length is
no greater than the length provided.
"""
b_value = encodeutils.safe_encode(value)[:length]
# NOTE(chaochin) UTF-8 character byte size varies from 1 to 6. If
# truncating a long byte string to 255, the last character may be
# cut in the middle, so that UnicodeDecodeError will occur when
# converting it back to unicode.
decode_ok = False
while not decode_ok:
try:
u_value = encodeutils.safe_decode(b_value)
decode_ok = True
except UnicodeDecodeError:
b_value = b_value[:-1]
return u_value
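# Example (illustrative, not part of the original module): naive byte slicing
# can split a multi-byte UTF-8 character, while safe_truncate() backs up
# until the truncated bytes decode cleanly.
def _example_safe_truncate():
    value = u'caf\xe9'  # encodes to 5 bytes: b'caf\xc3\xa9'
    assert safe_truncate(value, 4) == u'caf'  # the dangling '\xc3' is dropped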
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
    :returns: A tuple of (reloaded, data), where reloaded is a boolean
              specifying whether the file was re-read from disk.
"""
global _FILE_CACHE
if force_reload:
delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug("Reloading cached file %s", filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_cached_file(filename):
"""Delete cached file if present.
:param filename: filename to delete
"""
global _FILE_CACHE
if filename in _FILE_CACHE:
del _FILE_CACHE[filename]
def isotime(at=None):
"""Current time as ISO string,
as timeutils.isotime() is deprecated
:returns: Current time in ISO format
"""
if not at:
at = timeutils.utcnow()
date_string = at.strftime("%Y-%m-%dT%H:%M:%S")
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
date_string += ('Z' if tz == 'UTC' else tz)
return date_string
def strtime(at):
return at.strftime("%Y-%m-%dT%H:%M:%S.%f")
def print_dict(dct, dict_property="Property", wrap=0, dict_value='Value'):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
:param dict_value: header label for the value (second) column
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
for k, v in sorted(dct.items()):
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
if wrap > 0:
v = textwrap.fill(six.text_type(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
if six.PY2:
print(encodeutils.safe_encode(pt.get_string()))
else:
print(encodeutils.safe_encode(pt.get_string()).decode())
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
MissingArgs: Missing argument(s): a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
MissingArgs: Missing argument(s): b, d
:param fn: the function to check
:param arg: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
    # Plain functions have no __self__ attribute, so check with getattr
    # instead of six.get_method_self(), which would raise AttributeError.
    if getattr(fn, '__self__', None) is not None:
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
return missing
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from oslo_serialization import jsonutils
import webob
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests import uuidsentinel as uuids
FAKE_UUID = fakes.FAKE_UUID
FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')]
DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')]
INVALID_NETWORKS = [('invalid', 'invalid-ip-address')]
def return_security_group_non_existing(context, project_id, group_name):
raise exception.SecurityGroupNotFoundForProject(project_id=project_id,
security_group_id=group_name)
def return_security_group_get_by_name(context, project_id, group_name):
return {'id': 1, 'name': group_name}
def return_security_group_get(context, security_group_id, session):
return {'id': security_group_id}
def return_instance_add_security_group(context, instance_id,
security_group_id):
pass
class CreateserverextTest(test.TestCase):
def setUp(self):
super(CreateserverextTest, self).setUp()
self.security_group = None
self.injected_files = None
self.networks = None
self.user_data = None
def create(*args, **kwargs):
if 'security_group' in kwargs:
self.security_group = kwargs['security_group']
else:
self.security_group = None
if 'injected_files' in kwargs:
self.injected_files = kwargs['injected_files']
else:
self.injected_files = None
if 'requested_networks' in kwargs:
self.networks = kwargs['requested_networks']
else:
self.networks = None
if 'user_data' in kwargs:
self.user_data = kwargs['user_data']
resv_id = None
return ([{'id': '1234', 'display_name': 'fakeinstance',
'uuid': FAKE_UUID,
'user_id': 'fake',
'project_id': 'fake',
'created_at': "",
'updated_at': "",
'fixed_ips': [],
'progress': 0}], resv_id)
self.stubs.Set(compute_api.API, 'create', create)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Createserverext', 'User_data',
'Security_groups', 'Os_networks'])
def _create_security_group_request_dict(self, security_groups):
server = {}
server['name'] = 'new-server-test'
server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
server['flavorRef'] = 1
if security_groups is not None:
sg_list = []
for name in security_groups:
sg_list.append({'name': name})
server['security_groups'] = sg_list
return {'server': server}
def _create_networks_request_dict(self, networks):
server = {}
server['name'] = 'new-server-test'
server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
server['flavorRef'] = 1
if networks is not None:
network_list = []
for uuid, fixed_ip in networks:
network_list.append({'uuid': uuid, 'fixed_ip': fixed_ip})
server['networks'] = network_list
return {'server': server}
def _create_user_data_request_dict(self, user_data):
server = {}
server['name'] = 'new-server-test'
server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
server['flavorRef'] = 1
server['user_data'] = user_data
return {'server': server}
def _get_create_request_json(self, body_dict):
req = webob.Request.blank('/v2/fake/os-create-server-ext')
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body_dict)
return req
def _create_instance_with_networks_json(self, networks):
body_dict = self._create_networks_request_dict(networks)
request = self._get_create_request_json(body_dict)
response = request.get_response(fakes.wsgi_app(
init_only=('servers', 'os-create-server-ext')))
return request, response, self.networks
def _create_instance_with_user_data_json(self, networks):
body_dict = self._create_user_data_request_dict(networks)
request = self._get_create_request_json(body_dict)
response = request.get_response(fakes.wsgi_app(
init_only=('servers', 'os-create-server-ext')))
return request, response, self.user_data
def test_create_instance_with_no_networks(self):
_create_inst = self._create_instance_with_networks_json
request, response, networks = _create_inst(networks=None)
self.assertEqual(response.status_int, 202)
self.assertIsNone(networks)
def test_create_instance_with_one_network(self):
_create_inst = self._create_instance_with_networks_json
request, response, networks = _create_inst([FAKE_NETWORKS[0]])
self.assertEqual(response.status_int, 202)
self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples())
def test_create_instance_with_two_networks(self):
_create_inst = self._create_instance_with_networks_json
request, response, networks = _create_inst(FAKE_NETWORKS)
self.assertEqual(response.status_int, 202)
self.assertEqual(FAKE_NETWORKS, networks.as_tuples())
def test_create_instance_with_duplicate_networks(self):
_create_inst = self._create_instance_with_networks_json
request, response, networks = _create_inst(DUPLICATE_NETWORKS)
self.assertEqual(response.status_int, 400)
self.assertIsNone(networks)
def test_create_instance_with_network_no_id(self):
body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
del body_dict['server']['networks'][0]['uuid']
request = self._get_create_request_json(body_dict)
response = request.get_response(fakes.wsgi_app(
init_only=('servers', 'os-create-server-ext')))
self.assertEqual(response.status_int, 400)
self.assertIsNone(self.networks)
def test_create_instance_with_network_invalid_id(self):
_create_inst = self._create_instance_with_networks_json
request, response, networks = _create_inst(INVALID_NETWORKS)
self.assertEqual(response.status_int, 400)
self.assertIsNone(networks)
def test_create_instance_with_network_empty_fixed_ip(self):
networks = [('1', '')]
_create_inst = self._create_instance_with_networks_json
request, response, networks = _create_inst(networks)
self.assertEqual(response.status_int, 400)
self.assertIsNone(networks)
def test_create_instance_with_network_non_string_fixed_ip(self):
networks = [('1', 12345)]
_create_inst = self._create_instance_with_networks_json
request, response, networks = _create_inst(networks)
self.assertEqual(response.status_int, 400)
self.assertIsNone(networks)
def test_create_instance_with_network_no_fixed_ip(self):
body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
del body_dict['server']['networks'][0]['fixed_ip']
request = self._get_create_request_json(body_dict)
response = request.get_response(fakes.wsgi_app(
init_only=('servers', 'os-create-server-ext')))
self.assertEqual(response.status_int, 202)
self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)],
self.networks.as_tuples())
def test_create_instance_with_userdata(self):
user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
user_data_contents = base64.b64encode(user_data_contents)
_create_inst = self._create_instance_with_user_data_json
request, response, user_data = _create_inst(user_data_contents)
self.assertEqual(response.status_int, 202)
self.assertEqual(user_data, user_data_contents)
def test_create_instance_with_userdata_none(self):
user_data_contents = None
_create_inst = self._create_instance_with_user_data_json
request, response, user_data = _create_inst(user_data_contents)
self.assertEqual(response.status_int, 202)
self.assertEqual(user_data, user_data_contents)
def test_create_instance_with_userdata_with_non_b64_content(self):
user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
_create_inst = self._create_instance_with_user_data_json
request, response, user_data = _create_inst(user_data_contents)
self.assertEqual(response.status_int, 400)
self.assertIsNone(user_data)
def test_create_instance_with_security_group_json(self):
security_groups = ['test', 'test1']
self.stub_out('nova.db.security_group_get_by_name',
return_security_group_get_by_name)
self.stub_out('nova.db.instance_add_security_group',
return_instance_add_security_group)
body_dict = self._create_security_group_request_dict(security_groups)
request = self._get_create_request_json(body_dict)
response = request.get_response(fakes.wsgi_app(
init_only=('servers', 'os-create-server-ext')))
self.assertEqual(response.status_int, 202)
self.assertJsonEqual(self.security_group, security_groups)
def test_get_server_by_id_verify_security_groups_json(self):
self.stub_out('nova.db.instance_get', fakes.fake_instance_get())
self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get())
req = webob.Request.blank('/v2/fake/os-create-server-ext/' +
uuids.server)
req.headers['Content-Type'] = 'application/json'
response = req.get_response(fakes.wsgi_app(
init_only=('os-create-server-ext', 'servers')))
self.assertEqual(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
expected_security_group = [{"name": "test"}]
self.assertEqual(res_dict['server'].get('security_groups'),
expected_security_group)
|
|
#!/usr/bin/python
# AUTHOR
# Daniel Pulido <[email protected]>
# COPYRIGHT
# Copyright (c) 2015 Daniel Pulido <[email protected]>
# LICENSE
# MIT License (http://opensource.org/licenses/MIT)
"""
Various patterns to scan pixels on a grid. Rectangular patterns are scanned
first along the x-coordinate then the y-coordinate. Radial patterns are
scanned clockwise. Transformation filters are available to apply
standard transformations (e.g., rotation, scale, translation) on the
coordinates.
"""
import math
import random
import sys
from math import frexp, copysign
from sys import float_info
# ======================================================================
# Distance metrics
# ----------------------------------------------------------------------
def chebyshev(point1, point2):
"""Computes distance between 2D points using chebyshev metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float
"""
return max(abs(point1[0] - point2[0]), abs(point1[1] - point2[1]))
def manhattan(point1, point2):
"""Computes distance between 2D points using manhattan metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float
"""
return abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])
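# Example (illustrative, not part of the original module): the two metrics
# disagree on diagonal displacements, which is what makes chebyshev rings
# square and manhattan rings diamond shaped in ringscan() below.
def _example_metrics():
    assert chebyshev([0, 0], [2, 3]) == 3
    assert manhattan([0, 0], [2, 3]) == 5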
def hilbertrot(n, x, y, rx, ry):
"""Rotates and flips a quadrant appropriately for the Hilbert scan
generator. See https://en.wikipedia.org/wiki/Hilbert_curve.
"""
if ry == 0:
if rx == 1:
x = n - 1 - x
y = n - 1 - y
return y, x
return x, y
# ======================================================================
# Scan transformations
# ----------------------------------------------------------------------
class clip(object):
"""Clip coordinates that exceed boundary
"""
def __init__(self,
scan,
minx=-sys.maxsize,
maxx=sys.maxsize,
miny=-sys.maxsize,
maxy=sys.maxsize,
predicate=None,
abort=False):
"""
:param scan: Pixel scan generator
:type scan: function
:param minx: Minimum x-coordinate (default = -sys.maxsize)
:type minx: int
:param maxx: Maximum x-coordinate (default = sys.maxsize)
:type maxx: int
:param miny: Minimum y-coordinate (default = -sys.maxsize)
:type miny: int
:param maxy: Maximum y-coordinate (default = sys.maxsize)
:type maxy: int
:param predicate: Optional function that takes 2 arguments (x and y)
and returns true if coordinate should be kept
otherwise false (default = None)
:type predicate: function
:param abort: Abort iteration if boundary is crossed
:type abort: bool
"""
self.scan = scan
self.minx = minx
self.maxx = maxx
self.miny = miny
self.maxy = maxy
self.predicate = predicate
self.abort = abort
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
while True:
x, y = next(self.scan)
if self.predicate is not None and not self.predicate(x, y):
if self.abort:
raise StopIteration("Boundary crossed!")
elif (x < self.minx or
x > self.maxx or
y < self.miny or
y > self.maxy):
if self.abort:
raise StopIteration("Boundary crossed!")
else:
return x, y
class reflection(object):
"""Reflect coordinates about x and y axes
"""
def __init__(self, scan, rx=False, ry=False):
"""
:param scan: Pixel scan generator
:type scan: function
:param rx: True if x-coordinate should be reflected (default=False)
:type rx: bool
:param ry: True if y-coordinate should be reflected (default=False)
:type ry: bool
"""
self.scan = scan
self.rx = rx
self.ry = ry
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
x, y = next(self.scan)
xr = -x if self.rx else x
yr = -y if self.ry else y
return xr, yr
class reservoir(object):
def __init__(self, scan, npoints):
"""Randomly sample points using the reservoir sampling method. This is
only useful if you need exactly 'npoints' sampled. Otherwise use the
'sample' transformation to randomly sample at a given rate. This method
        requires storing 'npoints' points in memory and precomputing the random
        selection, so it may be slower than 'sample'.
:param scan: Pixel scan generator
:type scan: function
:param npoints: Sample size
:type npoints: int
"""
# Validate inputs
if npoints <= 0:
raise ValueError("Sample size must be positive")
self.reservoir = []
self.count = 0
# Populate reservoir
for index, point in enumerate(scan):
if index < npoints:
self.reservoir.append(point)
else:
j = random.randint(0, index)
if j < npoints:
self.reservoir[j] = point
# Shuffle the reservoir in case population was small and the
# points were not sufficiently randomized
random.shuffle(self.reservoir)
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
if self.count < len(self.reservoir):
self.count += 1
return self.reservoir[self.count-1]
raise StopIteration("Reservoir exhausted")
class rotation(object):
"""Rotate coordinates by given angle. If the final transformation axes do
not align with the x and y axes then it may yield duplicate coordinates
during scanning.
"""
def __init__(self, scan, angle=0):
"""
:param scan: Pixel scan generator
:type scan: function
:param angle: Counter-clockwise angle in degrees (default=0)
:type angle: float
"""
self.scan = scan
self.angle = angle * (math.pi / 180.0)
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
x, y = next(self.scan)
ca, sa = math.cos(self.angle), math.sin(self.angle)
xr = ca * x - sa * y
yr = sa * x + ca * y
return xr, yr
class sample(object):
"""Randomly sample points at the given probability.
"""
def __init__(self, scan, probability=1):
"""
:param scan: Pixel scan generator
:type scan: function
:param probability: Sampling probability in interval [0,1] (default=1)
:type probability: float
"""
if probability < 0 or probability > 1:
raise ValueError("Sampling probability must be in range [0,1]")
self.scan = scan
self.probability = probability
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
if self.probability == 1:
x, y = next(self.scan)
else:
while True:
x, y = next(self.scan)
if random.random() <= self.probability:
break
return x, y
class scale(object):
"""Scale coordinates by given factor
"""
def __init__(self, scan, sx=1, sy=1):
"""
:param scan: Pixel scan generator
:type scan: function
:param sx: x-coordinate scale factor (default=1)
:type sx: float
:param sy: y-coordinate scale factor (default=1)
:type sy: float
"""
if sx <= 0:
raise ValueError("X-scale must be positive")
if sy <= 0:
raise ValueError("Y-scale must be positive")
self.scan = scan
self.sx = sx
self.sy = sy
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
x, y = next(self.scan)
xr = self.sx * x
yr = self.sy * y
return xr, yr
class skip(object):
"""Skip points at the given step size
"""
def __init__(self, scan, start=0, stop=sys.maxsize, step=1):
"""
:param scan: Pixel scan generator
:type scan: function
:param start: Iteration starting 0-based index (default = 0)
:type start: int
:param stop: Iteration stopping 0-based index (default = sys.maxsize)
:type stop: int
:param step: Iteration step size (default = 1)
:type step: int
"""
if start < 0:
raise ValueError("Start must be non-negative")
if stop < 0:
raise ValueError("Stop must be non-negative")
if stop < start:
raise ValueError("Stop must be greater than start")
if step <= 0:
raise ValueError("Step must be positive")
self.scan = scan
self.start = start
self.stop = stop
self.step = step
self.index = -1
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
while True:
x, y = next(self.scan)
self.index += 1
if (self.index < self.start):
continue
if (self.index > self.stop):
raise StopIteration("skip stopping")
if ((self.index-self.start) % self.step != 0):
continue
return x, y
class snap(object):
"""Snap x and y coordinates to a grid point
"""
def __init__(self, scan):
"""
:param scan: Pixel scan generator
:type scan: function
"""
self.scan = scan
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
x, y = next(self.scan)
xs = int(round(x))
ys = int(round(y))
return xs, ys
class swap(object):
"""Swap x and y coordinates
"""
def __init__(self, scan):
"""
:param scan: Pixel scan generator
:type scan: function
"""
self.scan = scan
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
x, y = next(self.scan)
return y, x
class translation(object):
"""Translate coordinates by given offset
"""
def __init__(self, scan, tx=0, ty=0):
"""
:param scan: Pixel scan generator
:type scan: function
        :param tx: x-coordinate translation offset (default = 0)
        :type tx: float
        :param ty: y-coordinate translation offset (default = 0)
        :type ty: float
"""
self.scan = scan
self.tx = tx
self.ty = ty
def __iter__(self):
return self
def __next__(self):
"""Next point in iteration
"""
x, y = next(self.scan)
xr = x + self.tx
yr = y + self.ty
return xr, yr
# ======================================================================
# Scan patterns
# ----------------------------------------------------------------------
def circlescan(x0, y0, r1, r2):
"""Scan pixels in a circle pattern around a center point
:param x0: Center x-coordinate
:type x0: float
:param y0: Center y-coordinate
:type y0: float
    :param r1: Initial radius
    :type r1: int
    :param r2: Final radius
    :type r2: int
:returns: Coordinate generator
:rtype: function
"""
# Validate inputs
if r1 < 0:
raise ValueError("Initial radius must be non-negative")
if r2 < 0:
raise ValueError("Final radius must be non-negative")
# List of pixels visited in previous diameter
previous = []
# Scan distances outward (1) or inward (-1)
rstep = 1 if r2 >= r1 else -1
for distance in range(r1, r2 + rstep, rstep):
if distance == 0:
yield x0, y0
else:
            # Compute points for the first octant and then rotate by multiples
            # of 45 degrees to compute the other octants
a = 0.707107
rotations = {0: [[1, 0], [0, 1]],
1: [[a, a], [-a, a]],
2: [[0, 1], [-1, 0]],
3: [[-a, a], [-a, -a]],
4: [[-1, 0], [0, -1]],
5: [[-a, -a], [a, -a]],
6: [[0, -1], [1, 0]],
7: [[a, -a], [a, a]]}
nangles = len(rotations)
# List of pixels visited in current diameter
current = []
for angle in range(nangles):
x = 0
y = distance
d = 1 - distance
while x < y:
xr = rotations[angle][0][0]*x + rotations[angle][0][1]*y
yr = rotations[angle][1][0]*x + rotations[angle][1][1]*y
xr = x0 + xr
yr = y0 + yr
# First check if point was in previous diameter
# since our scan pattern can lead to duplicates in
# neighboring diameters
point = (int(round(xr)), int(round(yr)))
if point not in previous:
yield xr, yr
current.append(point)
# Move pixel according to circle constraint
if (d < 0):
d += 3 + 2 * x
else:
d += 5 - 2 * (y-x)
y -= 1
x += 1
previous = current
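# Example (illustrative, not part of the original module): circlescan()
# yields floating point coordinates for the rotated octants, so snap() is
# typically chained on top to land back on integer grid points.
def _example_circlescan_with_snap():
    points = list(snap(circlescan(0, 0, 1, 1)))
    assert (0, 1) in points and (1, 0) in points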
def gridscan(xi, yi, xf, yf, stepx=1, stepy=1):
"""Scan pixels in a grid pattern along the x-coordinate then y-coordinate
:param xi: Initial x-coordinate
:type xi: int
:param yi: Initial y-coordinate
:type yi: int
:param xf: Final x-coordinate
:type xf: int
:param yf: Final y-coordinate
:type yf: int
:param stepx: Step size in x-coordinate
:type stepx: int
:param stepy: Step size in y-coordinate
:type stepy: int
:returns: Coordinate generator
:rtype: function
"""
if stepx <= 0:
raise ValueError("X-step must be positive")
if stepy <= 0:
raise ValueError("Y-step must be positive")
# Determine direction to move
dx = stepx if xf >= xi else -stepx
dy = stepy if yf >= yi else -stepy
for y in range(yi, yf + dy, dy):
for x in range(xi, xf + dx, dx):
yield x, y
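# Example (illustrative, not part of the original module): a 3x3 grid scan
# clipped to x >= 1; points outside the boundary are silently dropped because
# 'abort' defaults to False.
def _example_gridscan_with_clip():
    points = list(clip(gridscan(0, 0, 2, 2), minx=1))
    assert points == [(1, 0), (2, 0), (1, 1), (2, 1), (1, 2), (2, 2)]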
def hilbertscan(size, distance):
"""Scan pixels in a Hilbert curve pattern in the first quadrant. Modified
algorithm from https://en.wikipedia.org/wiki/Hilbert_curve.
:param size: Size of enclosing square
:type size: int
    :param distance: Distance along curve (must not exceed size**2 - 1)
:type distance: int
:returns: Coordinate generator
:rtype: function
"""
size = 2 * (1 << (size-1).bit_length())
    if (distance > size**2 - 1):
        # A StopIteration raised inside a generator becomes a RuntimeError
        # under PEP 479, so signal a bad distance with ValueError instead.
        raise ValueError("Invalid distance!")
for d in range(distance):
t = d
x = 0
y = 0
s = 1
while (s < size):
rx = float_and(1, t / 2)
ry = float_and(1, float_xor(t, rx))
x, y = hilbertrot(s, x, y, rx, ry)
x += s * rx
y += s * ry
t /= 4
s *= 2
yield x, y
def ringscan(x0, y0, r1, r2, metric=chebyshev):
"""Scan pixels in a ring pattern around a center point clockwise
:param x0: Center x-coordinate
:type x0: int
:param y0: Center y-coordinate
:type y0: int
:param r1: Initial radius
:type r1: int
:param r2: Final radius
:type r2: int
:param metric: Distance metric
:type metric: function
:returns: Coordinate generator
:rtype: function
"""
# Validate inputs
if r1 < 0:
raise ValueError("Initial radius must be non-negative")
if r2 < 0:
raise ValueError("Final radius must be non-negative")
if not hasattr(metric, "__call__"):
raise TypeError("Metric not callable")
# Define clockwise step directions
direction = 0
steps = {0: [1, 0],
1: [1, -1],
2: [0, -1],
3: [-1, -1],
4: [-1, 0],
5: [-1, 1],
6: [0, 1],
7: [1, 1]}
nsteps = len(steps)
center = [x0, y0]
# Scan distances outward (1) or inward (-1)
rstep = 1 if r2 >= r1 else -1
for distance in range(r1, r2 + rstep, rstep):
initial = [x0, y0 + distance]
current = initial
        # Number of tries to find a valid neighbor
ntrys = 0
while True:
# Short-circuit special case
if distance == 0:
yield current[0], current[1]
break
# Try and take a step and check if still within distance
nextpoint = [current[i] + steps[direction][i] for i in range(2)]
if metric(center, nextpoint) != distance:
# Check if we tried all step directions and failed
ntrys += 1
if ntrys == nsteps:
break
# Try the next direction
direction = (direction + 1) % nsteps
continue
ntrys = 0
yield current[0], current[1]
# Check if we have come all the way around
current = nextpoint
if current == initial:
break
# Check if we tried all step directions and failed
if ntrys == nsteps:
break
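# Example (illustrative, not part of the original module): with the default
# chebyshev metric a radius-1 ring is exactly the 8 immediate neighbours of
# the center point, visited clockwise starting from (x0, y0 + 1).
def _example_ringscan():
    points = list(ringscan(0, 0, 1, 1))
    assert len(points) == 8
    assert points[0] == (0, 1)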
def snakescan(xi, yi, xf, yf):
"""Scan pixels in a snake pattern along the x-coordinate then y-coordinate
:param xi: Initial x-coordinate
:type xi: int
:param yi: Initial y-coordinate
:type yi: int
:param xf: Final x-coordinate
:type xf: int
:param yf: Final y-coordinate
:type yf: int
:returns: Coordinate generator
:rtype: function
"""
# Determine direction to move
dx = 1 if xf >= xi else -1
dy = 1 if yf >= yi else -1
# Scan pixels first along x-coordinate then y-coordinate and flip
# x-direction when the end of the line is reached
x, xa, xb = xi, xi, xf
for y in range(yi, yf + dy, dy):
for x in range(xa, xb + dx, dx):
yield x, y
# Swap x-direction
if x == xa or x == xb:
dx *= -1
xa, xb = xb, xa
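# Example (illustrative, not part of the original module): a 2x2 snake scan
# reverses the x-direction on every row instead of restarting at xi.
def _example_snakescan():
    assert list(snakescan(0, 0, 1, 1)) == [(0, 0), (1, 0), (1, 1), (0, 1)]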
def walkscan(x0, y0, xn=0.25, xp=0.25, yn=0.25, yp=0.25):
"""Scan pixels in a random walk pattern with given step probabilities. The
random walk will continue indefinitely unless a skip transformation is used
with the 'stop' parameter set or a clip transformation is used with the
'abort' parameter set to True. The probabilities are normalized to one.
:param x0: Initial x-coordinate
:type x0: int
:param y0: Initial y-coordinate
:type y0: int
:param xn: Probability of moving in the negative x direction
:type xn: float
:param xp: Probability of moving in the positive x direction
:type xp: float
:param yn: Probability of moving in the negative y direction
:type yn: float
:param yp: Probability of moving in the positive y direction
:type yp: float
"""
# Validate inputs
if xn < 0:
raise ValueError("Negative x probabilty must be non-negative")
if xp < 0:
raise ValueError("Positive x probabilty must be non-negative")
if yn < 0:
raise ValueError("Negative y probabilty must be non-negative")
if yp < 0:
raise ValueError("Positive y probabilty must be non-negative")
# Compute normalized probability
total = xp + xn + yp + yn
xn /= total
xp /= total
yn /= total
yp /= total
# Compute cumulative probability
cxn = xn
cxp = cxn + xp
cyn = cxp + yn
# Initialize position
x, y = x0, y0
while True:
yield x, y
# Take random step
probability = random.random()
if probability <= cxn:
x -= 1
elif probability <= cxp:
x += 1
elif probability <= cyn:
y -= 1
else:
y += 1
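# Example (illustrative, not part of the original module): walkscan() is
# unbounded, so the skip transformation's 'stop' parameter is used to take
# only the first 8 points of a walk forced in the +x direction. Seeding the
# module-level random generator is an assumption made purely to keep the
# example repeatable.
def _example_walkscan_with_skip():
    random.seed(0)
    walk = walkscan(0, 0, xn=0.0, xp=1.0, yn=0.0, yp=0.0)
    assert list(skip(walk, stop=7)) == [(x, 0) for x in range(8)]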
# Following imported to support floating point bitwise operations in Python 3
# https://code.activestate.com/recipes/577967-floating-point-bitwise-operations
"""
This module defines bitwise operations on floating point numbers by pretending
that they consist of an infinite string of bits extending to the left as well as
to the right. More precisely the infinite string of bits
b = [...,b[-2],b[-1],b[0],b[1],b[2],...] represents the number
x = sum( b[i]*2**i for i in range(-inf,inf) ). Negative numbers are represented
in one's complement. The identity 0.111... == 1.0 creates an ambiguity in the
representation. To avoid it positive numbers are defined to be padded with
zeros in both directions while negative numbers are padded with ones in both
directions. This choice leads to the useful identity ~a == -a and allows
+0 == ...000.000... to be the |-identity and -0 == ...111.111... to be the
&-identity. Unfortunately the choice breaks compatibility with integer bitwise
operations involving negative numbers."""
__author__ = "Pyry Pakkanen"
__copyright__ = "Copyright 2011"
__credits__ = ["Pyry Pakkanen"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Pyry Pakkanen"
__email__ = "[email protected]"
__status__ = "initial release"
(fmax, max_exp, max_10_exp, fmin, min_exp, min_10_exp, dig, mant_dig, epsilon,
radix, rounds) = float_info
def ifrexp(x):
"""Get the mantissa and exponent of a floating point number as integers."""
m, e = frexp(x)
return int(m*2**mant_dig), e
def float_not(a):
"""~a"""
return -a
def float_and(a, b):
"""a & b"""
if a == 0.0:
if copysign(1.0, a) == 1.0:
return 0.0
else:
return b
if b == 0.0:
return float_and(b, a)
if a < 0 and b < 0:
return -float_or(-a, -b)
if abs(a) >= abs(b):
return float_and_(a, b)
else:
return float_and_(b, a)
def float_or(a, b):
"""a | b"""
if a == 0.0:
if copysign(1.0, a) == 1.0:
return b
else:
return -0.0
if b == 0.0:
return float_or(b, a)
if a < 0 and b < 0:
return -float_and(-a, -b)
if abs(a) >= abs(b):
return float_or_(a, b)
else:
return float_or_(b, a)
def float_xor(a, b):
"""a ^ b"""
if a == 0.0:
if copysign(1.0, a) == 1.0:
return b
else:
return -b
if b == 0.0:
return float_xor(b, a)
if a < 0:
if b < 0:
return float_xor(-a, -b)
else:
return -float_xor(-a, b)
if b < 0:
return -float_xor(a, -b)
if abs(a) >= abs(b):
return float_xor_(a, b)
else:
return float_xor_(b, a)
# The helper functions assume that exponent(a) >= exponent(b).
# The operation lambda x: ~(-x) converts between two's complement and one's
# complement representation of a negative number. One's complement is more
# natural for floating point numbers because the zero is signed.
def float_and_(a, b):
ma, ea = ifrexp(a)
mb, eb = ifrexp(b)
mb = mb >> (ea-eb)
if ma < 0:
return (mb & ~(-ma))*2**(ea-mant_dig)
if mb < 0:
return (~(-mb) & ma)*2**(ea-mant_dig)
return (mb & ma)*2**(ea-mant_dig)
def float_or_(a, b):
ma, ea = ifrexp(a)
mb, eb = ifrexp(b)
mb = mb >> (ea-eb)
if ma < 0:
return (-(~(mb | ~(-ma))))*2**(ea-mant_dig)
if mb < 0:
return (-(~(~(-mb) | ma)))*2**(ea-mant_dig)
return (mb | ma)*2**(ea-mant_dig)
def float_xor_(a, b):
ma, ea = ifrexp(a)
mb, eb = ifrexp(b)
mb = mb >> (ea-eb)
return (mb ^ ma)*2**(ea-mant_dig)
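# Example (illustrative, not part of the original module): for floats holding
# integer values these operations agree with the usual integer bitwise
# operators, which is all that hilbertscan() above relies on.
def _example_float_bitwise():
    assert float_and(6.0, 3.0) == 2.0   # 110 & 011 == 010
    assert float_or(6.0, 3.0) == 7.0    # 110 | 011 == 111
    assert float_xor(6.0, 3.0) == 5.0   # 110 ^ 011 == 101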
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import six
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope
from test_imperative_resnet import ResNet
from paddle.fluid.framework import _test_eager_guard
batch_size = 8
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": batch_size,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
},
"batch_size": batch_size,
"lr": 0.1,
"total_images": 1281164,
}
def optimizer_setting(params, parameter_list=None):
ls = params["learning_strategy"]
if ls["name"] == "piecewise_decay":
if "total_images" not in params:
total_images = 1281167
else:
total_images = params["total_images"]
batch_size = ls["batch_size"]
step = int(total_images / batch_size + 1)
bd = [step * e for e in ls["epochs"]]
base_lr = params["lr"]
lr = []
lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
if fluid.in_dygraph_mode():
optimizer = fluid.optimizer.SGD(learning_rate=0.01,
parameter_list=parameter_list)
else:
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
# TODO(minqiyang): Add learning rate scheduler support to dygraph mode
# optimizer = fluid.optimizer.Momentum(
# learning_rate=params["lr"],
# learning_rate=fluid.layers.piecewise_decay(
# boundaries=bd, values=lr),
# momentum=0.9,
# regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
class TestDygraphResnetSortGradient(unittest.TestCase):
def func_test_resnet_sort_gradient_float32(self):
seed = 90
batch_size = train_parameters["batch_size"]
batch_num = 10
with fluid.dygraph.guard():
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
resnet = ResNet()
optimizer = optimizer_setting(
train_parameters, parameter_list=resnet.parameters())
np.random.seed(seed)
import random
            random.seed(seed)
train_reader = paddle.batch(
paddle.dataset.flowers.train(use_xmap=False),
batch_size=batch_size)
dy_param_init_value = {}
for param in resnet.parameters():
dy_param_init_value[param.name] = param.numpy()
for batch_id, data in enumerate(train_reader()):
if batch_id >= batch_num:
break
dy_x_data = np.array(
[x[0].reshape(3, 224, 224) for x in data]).astype('float32')
y_data = np.array([x[1] for x in data]).astype('int64').reshape(
batch_size, 1)
img = to_variable(dy_x_data)
label = to_variable(y_data)
label.stop_gradient = True
out = resnet(img)
loss = fluid.layers.cross_entropy(input=out, label=label)
avg_loss = fluid.layers.mean(x=loss)
dy_out = avg_loss.numpy()
if batch_id == 0:
for param in resnet.parameters():
if param.name not in dy_param_init_value:
dy_param_init_value[param.name] = param.numpy()
avg_loss.backward()
dy_grad_value = {}
for param in resnet.parameters():
if param.trainable:
np_array = np.array(param._grad_ivar().value()
.get_tensor())
dy_grad_value[param.name + core.grad_var_suffix(
)] = np_array
optimizer.minimize(avg_loss)
resnet.clear_gradients()
dy_param_value = {}
for param in resnet.parameters():
dy_param_value[param.name] = param.numpy()
with new_program_scope():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
resnet = ResNet()
optimizer = optimizer_setting(train_parameters)
np.random.seed(seed)
import random
            random.seed(seed)
train_reader = paddle.batch(
paddle.dataset.flowers.train(use_xmap=False),
batch_size=batch_size)
img = fluid.layers.data(
name='pixel', shape=[3, 224, 224], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
out = resnet(img)
loss = fluid.layers.cross_entropy(input=out, label=label)
avg_loss = fluid.layers.mean(x=loss)
optimizer.minimize(avg_loss)
# initialize params and fetch them
static_param_init_value = {}
static_param_name_list = []
static_grad_name_list = []
for param in resnet.parameters():
static_param_name_list.append(param.name)
for param in resnet.parameters():
if param.trainable:
static_grad_name_list.append(param.name +
core.grad_var_suffix())
out = exe.run(fluid.default_startup_program(),
fetch_list=static_param_name_list)
for i in range(len(static_param_name_list)):
static_param_init_value[static_param_name_list[i]] = out[i]
for batch_id, data in enumerate(train_reader()):
if batch_id >= batch_num:
break
static_x_data = np.array(
[x[0].reshape(3, 224, 224) for x in data]).astype('float32')
y_data = np.array([x[1] for x in data]).astype('int64').reshape(
[batch_size, 1])
fetch_list = [avg_loss.name]
fetch_list.extend(static_param_name_list)
fetch_list.extend(static_grad_name_list)
out = exe.run(fluid.default_main_program(),
feed={"pixel": static_x_data,
"label": y_data},
fetch_list=fetch_list)
static_param_value = {}
static_grad_value = {}
static_out = out[0]
param_start_pos = 1
grad_start_pos = len(static_param_name_list) + param_start_pos
for i in range(param_start_pos,
len(static_param_name_list) + param_start_pos):
static_param_value[static_param_name_list[
i - param_start_pos]] = out[i]
for i in range(grad_start_pos,
len(static_grad_name_list) + grad_start_pos):
static_grad_value[static_grad_name_list[
i - grad_start_pos]] = out[i]
self.assertTrue(np.allclose(static_out, dy_out))
self.assertEqual(len(dy_param_init_value), len(static_param_init_value))
for key, value in six.iteritems(static_param_init_value):
self.assertTrue(np.allclose(value, dy_param_init_value[key]))
self.assertTrue(np.isfinite(value.all()))
self.assertFalse(np.isnan(value.any()))
self.assertEqual(len(dy_grad_value), len(static_grad_value))
for key, value in six.iteritems(static_grad_value):
self.assertTrue(np.allclose(value, dy_grad_value[key]))
self.assertTrue(np.isfinite(value.all()))
self.assertFalse(np.isnan(value.any()))
self.assertEqual(len(dy_param_value), len(static_param_value))
for key, value in six.iteritems(static_param_value):
self.assertTrue(np.allclose(value, dy_param_value[key]))
self.assertTrue(np.isfinite(value.all()))
self.assertFalse(np.isnan(value.any()))
def test_resnet_sort_gradient_float32(self):
with _test_eager_guard():
self.func_test_resnet_sort_gradient_float32()
self.func_test_resnet_sort_gradient_float32()
if __name__ == '__main__':
unittest.main()
|
|
# Unix SMB/CIFS implementation.
# Copyright (C) Amitay Isaacs <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.dcerpc.dnsserver"""
from samba.dcerpc import dnsp, dnsserver
from samba.tests import RpcInterfaceTestCase, env_get_var_value
from samba.netcmd.dns import ARecord
class DnsserverTests(RpcInterfaceTestCase):
def setUp(self):
super(DnsserverTests, self).setUp()
self.server = env_get_var_value("SERVER_IP")
self.zone = env_get_var_value("REALM").lower()
self.conn = dnsserver.dnsserver("ncacn_ip_tcp:%s" % (self.server),
self.get_loadparm(),
self.get_credentials())
def test_operation2(self):
pass
def test_query2(self):
typeid, result = self.conn.DnssrvQuery2(dnsserver.DNS_CLIENT_VERSION_W2K,
0,
self.server,
None,
'ServerInfo')
self.assertEquals(dnsserver.DNSSRV_TYPEID_SERVER_INFO_W2K, typeid)
typeid, result = self.conn.DnssrvQuery2(dnsserver.DNS_CLIENT_VERSION_DOTNET,
0,
self.server,
None,
'ServerInfo')
self.assertEquals(dnsserver.DNSSRV_TYPEID_SERVER_INFO_DOTNET, typeid)
typeid, result = self.conn.DnssrvQuery2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0,
self.server,
None,
'ServerInfo')
self.assertEquals(dnsserver.DNSSRV_TYPEID_SERVER_INFO, typeid)
def test_operation2(self):
client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
rev_zone = '1.168.192.in-addr.arpa'
zone_create = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
zone_create.pszZoneName = rev_zone
zone_create.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
zone_create.fAllowUpdate = dnsp.DNS_ZONE_UPDATE_SECURE
zone_create.fAging = 0
zone_create.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
# Create zone
self.conn.DnssrvOperation2(client_version,
0,
self.server,
None,
0,
'ZoneCreate',
dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
zone_create)
request_filter = (dnsserver.DNS_ZONE_REQUEST_REVERSE |
dnsserver.DNS_ZONE_REQUEST_PRIMARY)
typeid, zones = self.conn.DnssrvComplexOperation2(client_version,
0,
self.server,
None,
'EnumZones',
dnsserver.DNSSRV_TYPEID_DWORD,
request_filter)
self.assertEquals(1, zones.dwZoneCount)
# Delete zone
self.conn.DnssrvOperation2(client_version,
0,
self.server,
rev_zone,
0,
'DeleteZoneFromDs',
dnsserver.DNSSRV_TYPEID_NULL,
None)
typeid, zones = self.conn.DnssrvComplexOperation2(client_version,
0,
self.server,
None,
'EnumZones',
dnsserver.DNSSRV_TYPEID_DWORD,
request_filter)
self.assertEquals(0, zones.dwZoneCount)
def test_complexoperation2(self):
client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
request_filter = (dnsserver.DNS_ZONE_REQUEST_FORWARD |
dnsserver.DNS_ZONE_REQUEST_PRIMARY)
typeid, zones = self.conn.DnssrvComplexOperation2(client_version,
0,
self.server,
None,
'EnumZones',
dnsserver.DNSSRV_TYPEID_DWORD,
request_filter)
self.assertEquals(dnsserver.DNSSRV_TYPEID_ZONE_LIST, typeid)
self.assertEquals(2, zones.dwZoneCount)
request_filter = (dnsserver.DNS_ZONE_REQUEST_REVERSE |
dnsserver.DNS_ZONE_REQUEST_PRIMARY)
typeid, zones = self.conn.DnssrvComplexOperation2(client_version,
0,
self.server,
None,
'EnumZones',
dnsserver.DNSSRV_TYPEID_DWORD,
request_filter)
self.assertEquals(dnsserver.DNSSRV_TYPEID_ZONE_LIST, typeid)
self.assertEquals(0, zones.dwZoneCount)
def test_enumrecords2(self):
client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
record_type = dnsp.DNS_TYPE_NS
select_flags = (dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA |
dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA)
buflen, roothints = self.conn.DnssrvEnumRecords2(client_version,
0,
self.server,
'..RootHints',
'.',
None,
record_type,
select_flags,
None,
None)
self.assertEquals(14, roothints.count) # 1 NS + 13 A records (a-m)
def test_updaterecords2(self):
client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
record_type = dnsp.DNS_TYPE_A
select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
name = 'dummy'
rec = ARecord('1.2.3.4')
rec2 = ARecord('5.6.7.8')
# Add record
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
self.conn.DnssrvUpdateRecord2(client_version,
0,
self.server,
self.zone,
name,
add_rec_buf,
None)
buflen, result = self.conn.DnssrvEnumRecords2(client_version,
0,
self.server,
self.zone,
name,
None,
record_type,
select_flags,
None,
None)
self.assertEquals(1, result.count)
self.assertEquals(1, result.rec[0].wRecordCount)
self.assertEquals(dnsp.DNS_TYPE_A, result.rec[0].records[0].wType)
self.assertEquals('1.2.3.4', result.rec[0].records[0].data)
# Update record
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec2
del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
del_rec_buf.rec = rec
self.conn.DnssrvUpdateRecord2(client_version,
0,
self.server,
self.zone,
name,
add_rec_buf,
del_rec_buf)
buflen, result = self.conn.DnssrvEnumRecords2(client_version,
0,
self.server,
self.zone,
name,
None,
record_type,
select_flags,
None,
None)
self.assertEquals(1, result.count)
self.assertEquals(1, result.rec[0].wRecordCount)
self.assertEquals(dnsp.DNS_TYPE_A, result.rec[0].records[0].wType)
self.assertEquals('5.6.7.8', result.rec[0].records[0].data)
# Delete record
del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
del_rec_buf.rec = rec2
self.conn.DnssrvUpdateRecord2(client_version,
0,
self.server,
self.zone,
name,
None,
del_rec_buf)
self.assertRaises(RuntimeError, self.conn.DnssrvEnumRecords2,
client_version,
0,
self.server,
self.zone,
name,
None,
record_type,
select_flags,
None,
None)
|
|
# Copyright (C) 2016 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from future.utils import native
from base64 import b64decode, b64encode
from hamcrest import assert_that, empty, equal_to, is_in
from tempfile import NamedTemporaryFile
import functools
import json
import os
import psutil
import re
import requests
import subprocess
import sys
import time
import urllib.parse
from ycmd.hmac_utils import CreateHmac, CreateRequestHmac, SecureBytesEqual
from ycmd.tests import PathToTestFile
from ycmd.tests.test_utils import BuildRequest
from ycmd.user_options_store import DefaultOptions
from ycmd.utils import ( GetUnusedLocalhostPort, PathToCreatedTempDir, ReadFile,
RemoveIfExists, SafePopen, SetEnviron, ToBytes,
ToUnicode )
HEADERS = { 'content-type': 'application/json' }
HMAC_HEADER = 'x-ycm-hmac'
HMAC_SECRET_LENGTH = 16
DIR_OF_THIS_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )
PATH_TO_YCMD = os.path.join( DIR_OF_THIS_SCRIPT, '..' )
class Client_test( object ):
def __init__( self ):
self._location = None
self._port = None
self._hmac_secret = None
self._servers = []
self._logfiles = []
self._options_dict = DefaultOptions()
def setUp( self ):
self._hmac_secret = os.urandom( HMAC_SECRET_LENGTH )
self._options_dict[ 'hmac_secret' ] = ToUnicode(
b64encode( self._hmac_secret ) )
def tearDown( self ):
for server in self._servers:
if server.is_running():
server.terminate()
for logfile in self._logfiles:
RemoveIfExists( logfile )
def Start( self, idle_suicide_seconds = 60,
check_interval_seconds = 60 * 10 ):
# The temp options file is deleted by ycmd during startup
with NamedTemporaryFile( mode = 'w+', delete = False ) as options_file:
json.dump( self._options_dict, options_file )
options_file.flush()
self._port = GetUnusedLocalhostPort()
self._location = 'http://127.0.0.1:' + str( self._port )
# Define environment variable to enable subprocesses coverage. See:
# http://coverage.readthedocs.org/en/coverage-4.0.3/subprocess.html
env = os.environ.copy()
SetEnviron( env, 'COVERAGE_PROCESS_START', '.coveragerc' )
ycmd_args = [
sys.executable,
PATH_TO_YCMD,
'--port={0}'.format( self._port ),
'--options_file={0}'.format( options_file.name ),
'--log=debug',
'--idle_suicide_seconds={0}'.format( idle_suicide_seconds ),
'--check_interval_seconds={0}'.format( check_interval_seconds ),
]
filename_format = os.path.join( PathToCreatedTempDir(),
'server_{port}_{std}.log' )
stdout = filename_format.format( port = self._port, std = 'stdout' )
stderr = filename_format.format( port = self._port, std = 'stderr' )
self._logfiles.extend( [ stdout, stderr ] )
ycmd_args.append( '--stdout={0}'.format( stdout ) )
ycmd_args.append( '--stderr={0}'.format( stderr ) )
_popen_handle = SafePopen( ycmd_args,
stdin_windows = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env )
self._servers.append( psutil.Process( _popen_handle.pid ) )
self._WaitUntilReady()
extra_conf = PathToTestFile( 'client', '.ycm_extra_conf.py' )
self.PostRequest( 'load_extra_conf_file', { 'filepath': extra_conf } )
def _IsReady( self, filetype = None ):
params = { 'subserver': filetype } if filetype else None
response = self.GetRequest( 'ready', params )
response.raise_for_status()
return response.json()
def _WaitUntilReady( self, filetype = None, timeout = 5 ):
expiration = time.time() + timeout
while True:
try:
if time.time() > expiration:
server = ( 'the {0} subserver'.format( filetype ) if filetype else
'ycmd' )
raise RuntimeError( 'Waited for {0} to be ready for {1} seconds, '
'aborting.'.format( server, timeout ) )
if self._IsReady( filetype ):
return
except requests.exceptions.ConnectionError:
pass
finally:
time.sleep( 0.1 )
def StartSubserverForFiletype( self, filetype ):
filepath = PathToTestFile( 'client', 'some_file' )
# Calling the BufferVisit event before the FileReadyToParse one is needed
# for the TypeScript completer.
self.PostRequest( 'event_notification',
BuildRequest( filepath = filepath,
filetype = filetype,
event_name = 'BufferVisit' ) )
self.PostRequest( 'event_notification',
BuildRequest( filepath = filepath,
filetype = filetype,
event_name = 'FileReadyToParse' ) )
self._WaitUntilReady( filetype )
response = self.PostRequest(
'debug_info',
BuildRequest( filepath = filepath,
filetype = filetype )
).json()
pid_match = re.search( r'process ID: (\d+)', response )
if not pid_match:
raise RuntimeError( 'Cannot find PID in debug information for {0} '
'filetype.'.format( filetype ) )
subserver_pid = int( pid_match.group( 1 ) )
self._servers.append( psutil.Process( subserver_pid ) )
logfiles = re.findall( r'(\S+\.log)', response )
if not logfiles:
raise RuntimeError( 'Cannot find logfiles in debug information for {0} '
'filetype.'.format( filetype ) )
self._logfiles.extend( logfiles )
def AssertServersAreRunning( self ):
for server in self._servers:
assert_that( server.is_running(), equal_to( True ) )
def AssertServersShutDown( self, timeout = 5 ):
_, alive_procs = psutil.wait_procs( self._servers, timeout = timeout )
assert_that( alive_procs, empty() )
def AssertLogfilesAreRemoved( self ):
existing_logfiles = []
for logfile in self._logfiles:
if os.path.isfile( logfile ):
existing_logfiles.append( logfile )
assert_that( existing_logfiles, empty() )
def GetRequest( self, handler, params = None ):
return self._Request( 'GET', handler, params = params )
def PostRequest( self, handler, data = None ):
return self._Request( 'POST', handler, data = data )
def _ToUtf8Json( self, data ):
return ToBytes( json.dumps( data ) if data else None )
def _Request( self, method, handler, data = None, params = None ):
request_uri = self._BuildUri( handler )
data = self._ToUtf8Json( data )
headers = self._ExtraHeaders( method,
request_uri,
data )
response = requests.request( method,
request_uri,
headers = headers,
data = data,
params = params )
return response
def _BuildUri( self, handler ):
return native( ToBytes( urllib.parse.urljoin( self._location, handler ) ) )
def _ExtraHeaders( self, method, request_uri, request_body = None ):
if not request_body:
request_body = bytes( b'' )
headers = dict( HEADERS )
headers[ HMAC_HEADER ] = b64encode(
CreateRequestHmac( ToBytes( method ),
ToBytes( urllib.parse.urlparse( request_uri ).path ),
request_body,
self._hmac_secret ) )
return headers
def AssertResponse( self, response ):
assert_that( response.status_code, equal_to( requests.codes.ok ) )
assert_that( HMAC_HEADER, is_in( response.headers ) )
assert_that(
self._ContentHmacValid( response ),
equal_to( True )
)
def _ContentHmacValid( self, response ):
our_hmac = CreateHmac( response.content, self._hmac_secret )
their_hmac = ToBytes( b64decode( response.headers[ HMAC_HEADER ] ) )
return SecureBytesEqual( our_hmac, their_hmac )
@staticmethod
def CaptureLogfiles( test ):
@functools.wraps( test )
def Wrapper( self, *args ):
try:
test( self, *args )
finally:
for logfile in self._logfiles:
if os.path.isfile( logfile ):
sys.stdout.write( 'Logfile {0}:\n\n'.format( logfile ) )
sys.stdout.write( ReadFile( logfile ) )
sys.stdout.write( '\n' )
return Wrapper
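# Illustrative sketch (not part of the original suite): roughly how the helper
# class above is driven by the concrete client tests. Every call below uses a
# method defined in Client_test; the wrapper function itself is hypothetical
# and only documents the intended call order.
def _ExampleClientUsage():
  client = Client_test()
  client.setUp()
  try:
    client.Start()
    response = client.GetRequest( 'ready' )
    client.AssertResponse( response )
    client.AssertServersAreRunning()
  finally:
    client.tearDown()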
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationpolicy(base_resource) :
""" Configuration for Authentication Policy resource. """
def __init__(self) :
self._name = ""
self._rule = ""
self._action = ""
self._undefaction = ""
self._comment = ""
self._logaction = ""
self._newname = ""
self._hits = 0
self._boundto = ""
self._activepolicy = 0
self._priority = 0
self._nextfactor = ""
self._gotopriorityexpression = ""
self._description = ""
self._policysubtype = ""
self.___count = 0
@property
def name(self) :
"""Name for the advance AUTHENTICATION policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after AUTHENTICATION policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the advance AUTHENTICATION policy.
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after AUTHENTICATION policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
"""Name of the NetScaler named rule, or a default syntax expression, that the policy uses to determine whether to attempt to authenticate the user with the AUTHENTICATION server.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
"""Name of the NetScaler named rule, or a default syntax expression, that the policy uses to determine whether to attempt to authenticate the user with the AUTHENTICATION server.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def action(self) :
"""Name of the authentication action to be performed if the policy matches.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
"""Name of the authentication action to be performed if the policy matches.
"""
try :
self._action = action
except Exception as e:
raise e
@property
def undefaction(self) :
"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition. Only the above built-in actions can be used.
"""
try :
return self._undefaction
except Exception as e:
raise e
@undefaction.setter
def undefaction(self, undefaction) :
"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition. Only the above built-in actions can be used.
"""
try :
self._undefaction = undefaction
except Exception as e:
raise e
@property
def comment(self) :
"""Any comments to preserve information about this policy.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
"""Any comments to preserve information about this policy.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def logaction(self) :
"""Name of messagelog action to use when a request matches this policy.
"""
try :
return self._logaction
except Exception as e:
raise e
@logaction.setter
def logaction(self, logaction) :
"""Name of messagelog action to use when a request matches this policy.
"""
try :
self._logaction = logaction
except Exception as e:
raise e
@property
def newname(self) :
"""New name for the authentication policy. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) hash (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1.
"""
try :
return self._newname
except Exception as e:
raise e
@newname.setter
def newname(self, newname) :
"""New name for the authentication policy. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) hash (#), space ( ), at (@), equals (=), colon (:), and underscore characters.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication policy" or 'my authentication policy').<br/>Minimum length = 1
"""
try :
self._newname = newname
except Exception as e:
raise e
@property
def hits(self) :
"""Number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def boundto(self) :
"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def nextfactor(self) :
"""On success invoke label.
"""
try :
return self._nextfactor
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def description(self) :
"""Description of the policy.
"""
try :
return self._description
except Exception as e:
raise e
@property
def policysubtype(self) :
""".<br/>Possible values = LOCAL, RADIUS, LDAP, TACACS, CERT, NEGOTIATE, SAML.
"""
try :
return self._policysubtype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationpolicy
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add authenticationpolicy.
"""
try :
if type(resource) is not list :
addresource = authenticationpolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.action = resource.action
addresource.undefaction = resource.undefaction
addresource.comment = resource.comment
addresource.logaction = resource.logaction
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ authenticationpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].action = resource[i].action
addresources[i].undefaction = resource[i].undefaction
addresources[i].comment = resource[i].comment
addresources[i].logaction = resource[i].logaction
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete authenticationpolicy.
"""
try :
if type(resource) is not list :
deleteresource = authenticationpolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update authenticationpolicy.
"""
try :
if type(resource) is not list :
updateresource = authenticationpolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.action = resource.action
updateresource.undefaction = resource.undefaction
updateresource.comment = resource.comment
updateresource.logaction = resource.logaction
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ authenticationpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].action = resource[i].action
updateresources[i].undefaction = resource[i].undefaction
updateresources[i].comment = resource[i].comment
updateresources[i].logaction = resource[i].logaction
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of authenticationpolicy resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = authenticationpolicy()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def rename(cls, client, resource, new_name) :
""" Use this API to rename a authenticationpolicy resource.
"""
try :
renameresource = authenticationpolicy()
if type(resource) == cls :
renameresource.name = resource.name
else :
renameresource.name = resource
return renameresource.rename_resource(client,new_name)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the authenticationpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = authenticationpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = authenticationpolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [authenticationpolicy() for _ in range(len(name))]
obj = [authenticationpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = authenticationpolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of authenticationpolicy resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the authenticationpolicy resources configured on NetScaler.
"""
try :
obj = authenticationpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of authenticationpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Policysubtype:
LOCAL = "LOCAL"
RADIUS = "RADIUS"
LDAP = "LDAP"
TACACS = "TACACS"
CERT = "CERT"
NEGOTIATE = "NEGOTIATE"
SAML = "SAML"
class authenticationpolicy_response(base_response) :
def __init__(self, length=1) :
self.authenticationpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationpolicy = [authenticationpolicy() for _ in range(length)]
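# Illustrative sketch (not part of the generated SDK file): how the classmethods
# defined above are typically called. The `client` argument is assumed to be an
# already authenticated nitro_service session obtained elsewhere; everything
# else used here (add/get/count and the attributes) is defined in this module,
# and the policy name/rule/action values are hypothetical.
def _example_authenticationpolicy_usage(client) :
    pol = authenticationpolicy()
    pol.name = "example_auth_pol"
    pol.rule = "true"
    pol.action = "example_auth_action"
    authenticationpolicy.add(client, pol)
    fetched = authenticationpolicy.get(client, "example_auth_pol")
    total = authenticationpolicy.count(client)
    return fetched, total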
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
from neutron import context as n_ctx
from oslo_config import cfg
from oslo_serialization import jsonutils
from gbpservice.neutron.services.servicechain.plugins.msc import context
from gbpservice.neutron.tests.unit.db.grouppolicy import (
test_servicechain_db as test_servicechain_db)
cfg.CONF.import_opt(
'servicechain_drivers',
'gbpservice.neutron.services.servicechain.plugins.msc.config',
group='servicechain')
SC_PLUGIN_KLASS = (
"gbpservice.neutron.services.servicechain.plugins.msc.plugin."
"ServiceChainPlugin")
class ServiceChainPluginTestCase(test_servicechain_db.ServiceChainDbTestCase):
def setUp(self, core_plugin=None, sc_plugin=None, gp_plugin=None):
if not sc_plugin:
sc_plugin = SC_PLUGIN_KLASS
super(ServiceChainPluginTestCase, self).setUp(core_plugin=core_plugin,
sc_plugin=sc_plugin,
gp_plugin=gp_plugin)
class TestGroupPolicyPluginGroupResources(
ServiceChainPluginTestCase,
test_servicechain_db.TestServiceChainResources):
def test_spec_shared(self):
# A shared spec can only point to shared nodes
node = self._create_profiled_servicechain_node(
'LOADBALANCER', shared=True, shared_profile=True,
profile_tenant_id='admin', tenant_id='admin')['servicechain_node']
self.create_servicechain_spec(nodes=[node['id']], shared=True,
expected_res_status=201)
self.create_servicechain_spec(nodes=[node['id']], shared=False,
tenant_id='admin',
expected_res_status=201)
node = self._create_profiled_servicechain_node(
'LOADBALANCER', shared=False, profile_tenant_id='nonadmin',
tenant_id='nonadmin')['servicechain_node']
self.create_servicechain_spec(nodes=[node['id']], shared=True,
expected_res_status=404)
self.create_servicechain_spec(nodes=[node['id']], shared=True,
tenant_id='nonadmin',
expected_res_status=400)
self.create_servicechain_spec(nodes=[node['id']], shared=False,
tenant_id='nonadmin',
expected_res_status=201)
def test_node_shared(self):
# A shared node can only point to a shared profile
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
tenant_id='admin')['service_profile']
to_update = self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
expected_res_status=201)['servicechain_node']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=False, tenant_id='admin',
expected_res_status=201)
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=False,
tenant_id='admin')['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
expected_res_status=404)
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
tenant_id='admin', expected_res_status=400)
self.create_servicechain_node(
service_profile_id=prof['id'], shared=False,
tenant_id='admin', expected_res_status=201)
self.create_servicechain_spec(nodes=[to_update['id']], shared=True,
tenant_id='nonadmin',
expected_res_status=201)
data = {'servicechain_node': {'shared': False}}
req = self.new_update_request('servicechain_nodes', data,
to_update['id'])
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertEqual('InvalidSharedAttributeUpdate',
res['NeutronError']['type'])
def test_profile_shared(self):
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
tenant_id='admin')['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
expected_res_status=201)
data = {'service_profile': {'shared': False}}
req = self.new_update_request('service_profiles', data,
prof['id'])
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertEqual('InvalidSharedAttributeUpdate',
res['NeutronError']['type'])
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=False)['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=False,
expected_res_status=201)
data = {'service_profile': {'shared': True}}
req = self.new_update_request('service_profiles', data,
prof['id'])
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertTrue(res['service_profile']['shared'])
def test_node_context_profile(self):
# Current node with profile
plugin_context = n_ctx.get_admin_context()
plugin_context.is_admin = plugin_context.is_advsvc = False
plugin_context.tenant_id = 'test-tenant'
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
current = self.create_servicechain_node(
service_profile_id=prof['id'],
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current)
self.assertIsNone(ctx.original)
self.assertIsNone(ctx.original_profile)
self.assertEqual(ctx.current['id'], current['id'])
self.assertEqual(ctx.current_profile['id'], prof['id'])
# Original node with profile
prof2 = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
original = self.create_servicechain_node(
service_profile_id=prof2['id'],
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current, original)
self.assertEqual(ctx.original['id'], original['id'])
self.assertEqual(ctx.original_profile['id'], prof2['id'])
self.assertEqual(ctx.current['id'], current['id'])
self.assertEqual(ctx.current_profile['id'], prof['id'])
def test_node_context_no_profile(self):
plugin_context = n_ctx.get_admin_context()
plugin_context.is_admin = plugin_context.is_advsvc = False
plugin_context.tenant_id = 'test_tenant'
current = self.create_servicechain_node(
service_type='TEST',
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current)
self.assertIsNone(ctx.original)
self.assertIsNone(ctx.original_profile)
self.assertEqual(ctx.current['id'], current['id'])
self.assertIsNone(ctx.current_profile)
original = self.create_servicechain_node(
service_type='TEST',
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current, original)
self.assertEqual(ctx.original['id'], original['id'])
self.assertIsNone(ctx.original_profile)
self.assertEqual(ctx.current['id'], current['id'])
self.assertIsNone(ctx.current_profile)
def test_spec_parameters(self):
params_node_1 = ['p1', 'p2', 'p3']
params_node_2 = ['p4', 'p5', 'p6']
params_node_3 = ['p7', 'p8', 'p9']
def params_dict(params):
return jsonutils.dumps({'Parameters':
dict((x, {}) for x in params)})
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
tenant_id='admin')['service_profile']
# Create 2 nodes with different parameters
node1 = self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
config=params_dict(params_node_1),
expected_res_status=201)['servicechain_node']
node2 = self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
config=params_dict(params_node_2),
expected_res_status=201)['servicechain_node']
# Create SC spec with the nodes assigned
spec = self.create_servicechain_spec(
nodes=[node1['id'], node2['id']], shared=True,
expected_res_status=201)['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_1 + params_node_2),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
# Update the spec removing one node
self.update_servicechain_spec(spec['id'], nodes=[node1['id']],
expected_res_status=200)
spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_1),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
# Update the spec without modifying the node list
self.update_servicechain_spec(spec['id'],
name='new_name',
expected_res_status=200)
spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_1),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
# Update a node with new config params
self.update_servicechain_node(node1['id'],
config=params_dict(params_node_3),
expected_res_status=200)
spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_3),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
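# Illustrative note (not part of the original tests): the node 'config' payload
# built by params_dict() above is a JSON document whose top-level 'Parameters'
# keys are aggregated into the spec's 'config_param_names'. A minimal sketch of
# that shape, with hypothetical parameter names:
#
#   config = jsonutils.dumps({'Parameters': {'p1': {}, 'p2': {}}})
#
# After creating a spec over nodes with this config, spec['config_param_names']
# comes back as the string form of a list (e.g. "['p1', 'p2']"), which the
# assertions above parse with ast.literal_eval().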
|
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import exceptions
from py_trace_event import trace_event
DEFAULT_WEB_CONTENTS_TIMEOUT = 90
# TODO(achuith, dtu, nduca): Add unit tests specifically for WebContents,
# independent of Tab.
class WebContents(object):
__metaclass__ = trace_event.TracedMetaClass
"""Represents web contents in the browser"""
def __init__(self, inspector_backend):
self._inspector_backend = inspector_backend
with open(os.path.join(
os.path.dirname(__file__),
'network_quiescence.js')) as f:
self._quiescence_js = f.read()
with open(os.path.join(
os.path.dirname(__file__),
'wait_for_frame.js')) as f:
self._wait_for_frame_js = f.read()
# An incrementing ID used to query frame timing javascript. Using a new id
# with each request ensures that previously timed-out wait for frame
# requests don't impact new requests.
self._wait_for_frame_id = 0
@property
def id(self):
"""Return the unique id string for this tab object."""
return self._inspector_backend.id
def GetUrl(self):
"""Returns the URL to which the WebContents is connected.
Raises:
exceptions.Error: If there is an error in inspector backend connection.
"""
return self._inspector_backend.url
def GetWebviewContexts(self):
"""Returns a list of webview contexts within the current inspector backend.
Returns:
A list of WebContents objects representing the webview contexts.
Raises:
exceptions.Error: If there is an error in inspector backend connection.
"""
webviews = []
inspector_backends = self._inspector_backend.GetWebviewInspectorBackends()
for inspector_backend in inspector_backends:
webviews.append(WebContents(inspector_backend))
return webviews
def WaitForDocumentReadyStateToBeComplete(
self, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Waits for the document to finish loading.
Raises:
exceptions.Error: See WaitForJavaScriptCondition() for a detailed list
of possible exceptions.
"""
self.WaitForJavaScriptCondition(
'document.readyState == "complete"', timeout=timeout)
def WaitForDocumentReadyStateToBeInteractiveOrBetter(
self, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Waits for the document to be interactive.
Raises:
exceptions.Error: See WaitForJavaScriptCondition() for a detailed list
of possible exceptions.
"""
self.WaitForJavaScriptCondition(
'document.readyState == "interactive" || '
'document.readyState == "complete"', timeout=timeout)
def WaitForFrameToBeDisplayed(self, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Waits for a frame to be displayed before returning.
Raises:
exceptions.Error: See WaitForJavaScriptCondition() for a detailed list
of possible exceptions.
"""
# Generate a new id for each call of this function to ensure that we track
# each request to wait separately.
self._wait_for_frame_id += 1
self.WaitForJavaScriptCondition(
'{{ @script }}; window.__telemetry_testHasFramePassed({{ frame_id }})',
script=self._wait_for_frame_js,
frame_id=str(self._wait_for_frame_id), # Place id as a str.
timeout=timeout)
def HasReachedQuiescence(self):
"""Determine whether the page has reached quiescence after loading.
Returns:
True if 2 seconds have passed since last resource received, false
otherwise.
Raises:
exceptions.Error: See EvaluateJavaScript() for a detailed list of
possible exceptions.
"""
# Inclusion of the script that provides
# window.__telemetry_testHasReachedNetworkQuiescence()
# is idempotent, it's run on every call because WebContents doesn't track
# page loads and we need to execute anew for every newly loaded page.
return self.EvaluateJavaScript(
'{{ @script }}; window.__telemetry_testHasReachedNetworkQuiescence()',
script=self._quiescence_js)
def ExecuteJavaScript(self, *args, **kwargs):
"""Executes a given JavaScript statement. Does not return the result.
Example: runner.ExecuteJavaScript('var foo = {{ value }};', value='hi');
Args:
statement: The statement to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the statement to execute.
context_id: The id of an iframe in which to execute the code; the main page
has context_id=1, the first iframe context_id=2, etc.
Additional keyword arguments provide values to be interpolated within
the statement. See telemetry.util.js_template for details.
Raises:
py_utils.TimeoutException
exceptions.EvaluationException
exceptions.WebSocketException
exceptions.DevtoolsTargetCrashException
"""
return self._inspector_backend.ExecuteJavaScript(*args, **kwargs)
def EvaluateJavaScript(self, *args, **kwargs):
"""Returns the result of evaluating a given JavaScript expression.
Example: runner.EvaluateJavaScript('document.location.href');
Args:
expression: The expression to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the expression to evaluate.
context_id: The id of an iframe in which to execute the code; the main page
has context_id=1, the first iframe context_id=2, etc.
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
Raises:
py_utils.TimeoutException
exceptions.EvaluationException
exceptions.WebSocketException
exceptions.DevtoolsTargetCrashException
"""
return self._inspector_backend.EvaluateJavaScript(*args, **kwargs)
def WaitForJavaScriptCondition(self, *args, **kwargs):
"""Wait for a JavaScript condition to become true.
Example: runner.WaitForJavaScriptCondition('window.foo == 10');
Args:
condition: The JavaScript condition (provided as string).
Optional keyword args:
timeout: The number of seconds to wait for the condition to become
True (defaults to 60).
context_id: The id of an iframe in which to execute the code; the main page
has context_id=1, the first iframe context_id=2, etc.
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
Raises:
py_utils.TimeoutException
exceptions.EvaluationException
exceptions.WebSocketException
exceptions.DevtoolsTargetCrashException
"""
return self._inspector_backend.WaitForJavaScriptCondition(*args, **kwargs)
def EnableAllContexts(self):
"""Enable all contexts in a page. Returns the number of available contexts.
Raises:
exceptions.WebSocketDisconnected
py_utils.TimeoutException
exceptions.DevtoolsTargetCrashException
"""
return self._inspector_backend.EnableAllContexts()
def WaitForNavigate(self, timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Waits for the navigation to complete.
The current page is expected to be in a navigation.
This function returns when the navigation is complete or when
the timeout has been exceeded.
Raises:
py_utils.TimeoutException
exceptions.DevtoolsTargetCrashException
"""
self._inspector_backend.WaitForNavigate(timeout)
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout=DEFAULT_WEB_CONTENTS_TIMEOUT):
"""Navigates to url.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
Raises:
py_utils.TimeoutException
exceptions.DevtoolsTargetCrashException
"""
if not script_to_evaluate_on_commit:
script_to_evaluate_on_commit = ''
script_to_evaluate_on_commit = (
self._quiescence_js + ';' + script_to_evaluate_on_commit)
self._inspector_backend.Navigate(url, script_to_evaluate_on_commit, timeout)
def IsAlive(self):
"""Whether the WebContents is still operating normally.
Since WebContents function asynchronously, this method does not guarantee
that the WebContents will still be alive at any point in the future.
Returns:
A boolean indicating whether the WebContents is operating normally.
"""
return self._inspector_backend.IsInspectable()
def CloseConnections(self):
"""Closes all TCP sockets held open by the browser.
Raises:
exceptions.DevtoolsTargetCrashException if the tab is not alive.
"""
if not self.IsAlive():
raise exceptions.DevtoolsTargetCrashException
self.ExecuteJavaScript('window.chrome && chrome.benchmarking &&'
'chrome.benchmarking.closeConnections()')
def SynthesizeScrollGesture(self, x=100, y=800, xDistance=0, yDistance=-500,
xOverscroll=None, yOverscroll=None,
preventFling=None, speed=None,
gestureSourceType=None, repeatCount=None,
repeatDelayMs=None, interactionMarkerName=None,
timeout=60):
"""Runs an inspector command that causes a repeatable browser driven scroll.
Args:
x: X coordinate of the start of the gesture in CSS pixels.
y: Y coordinate of the start of the gesture in CSS pixels.
xDistance: Distance to scroll along the X axis (positive to scroll left).
yDistance: Distance to scroll along the Y axis (positive to scroll up).
xOverscroll: Number of additional pixels to scroll back along the X axis.
yOverscroll: Number of additional pixels to scroll back along the Y axis.
preventFling: Prevents a fling gesture.
speed: Swipe speed in pixels per second.
gestureSourceType: Which type of input events to be generated.
repeatCount: Number of additional repeats beyond the first scroll.
repeatDelayMs: Number of milliseconds delay between each repeat.
interactionMarkerName: The name of the interaction markers to generate.
Raises:
py_utils.TimeoutException
exceptions.DevtoolsTargetCrashException
"""
return self._inspector_backend.SynthesizeScrollGesture(
x=x, y=y, xDistance=xDistance, yDistance=yDistance,
xOverscroll=xOverscroll, yOverscroll=yOverscroll,
preventFling=preventFling, speed=speed,
gestureSourceType=gestureSourceType, repeatCount=repeatCount,
repeatDelayMs=repeatDelayMs,
interactionMarkerName=interactionMarkerName,
timeout=timeout)
def DispatchKeyEvent(self, keyEventType='char', modifiers=None,
timestamp=None, text=None, unmodifiedText=None,
keyIdentifier=None, domCode=None, domKey=None,
windowsVirtualKeyCode=None, nativeVirtualKeyCode=None,
autoRepeat=None, isKeypad=None, isSystemKey=None,
timeout=60):
"""Dispatches a key event to the page.
Args:
type: Type of the key event. Allowed values: 'keyDown', 'keyUp',
'rawKeyDown', 'char'.
modifiers: Bit field representing pressed modifier keys. Alt=1, Ctrl=2,
Meta/Command=4, Shift=8 (default: 0).
timestamp: Time at which the event occurred. Measured in UTC time in
seconds since January 1, 1970 (default: current time).
text: Text as generated by processing a virtual key code with a keyboard
layout. Not needed for keyUp and rawKeyDown events (default: '').
unmodifiedText: Text that would have been generated by the keyboard if no
modifiers were pressed (except for shift). Useful for shortcut
(accelerator) key handling (default: "").
keyIdentifier: Unique key identifier (e.g., 'U+0041') (default: '').
windowsVirtualKeyCode: Windows virtual key code (default: 0).
nativeVirtualKeyCode: Native virtual key code (default: 0).
autoRepeat: Whether the event was generated from auto repeat (default:
False).
isKeypad: Whether the event was generated from the keypad (default:
False).
isSystemKey: Whether the event was a system key event (default: False).
Raises:
py_utils.TimeoutException
exceptions.DevtoolsTargetCrashException
"""
return self._inspector_backend.DispatchKeyEvent(
keyEventType=keyEventType, modifiers=modifiers, timestamp=timestamp,
text=text, unmodifiedText=unmodifiedText, keyIdentifier=keyIdentifier,
domCode=domCode, domKey=domKey,
windowsVirtualKeyCode=windowsVirtualKeyCode,
nativeVirtualKeyCode=nativeVirtualKeyCode, autoRepeat=autoRepeat,
isKeypad=isKeypad, isSystemKey=isSystemKey, timeout=timeout)
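# Illustrative sketch (not part of telemetry): typical use of the WebContents
# API defined above. Obtaining `tab` (a WebContents instance) from a running
# browser is assumed to happen elsewhere; only methods of this class and the
# {{ placeholder }} template syntax shown in the docstrings are used here.
def _ExampleWebContentsUsage(tab):
  tab.Navigate('http://example.com')
  tab.WaitForDocumentReadyStateToBeComplete()
  title = tab.EvaluateJavaScript('document.title')
  tab.ExecuteJavaScript('console.log({{ msg }});', msg='loaded ' + title)
  tab.WaitForJavaScriptCondition('document.readyState == "complete"', timeout=30)
  return title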
|
|
# -*- coding: utf-8 -*-
#MIT License
#Copyright (c) 2017 Marton Kelemen
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from ....application.utils import plotgen
from ....application.utils import geno_qc
from ....application.logic.knet import knet_main
from ....io import knet_IO
import gc
import numpy as np
from numpy.linalg import norm
from scipy import stats
from pathlib import Path
import random
import os
lastLayerSize_MAX = int(1000) # int(4096 /2)
# delta = (Ve/Vg)
# delta = (1-h2) / h2
#args, args.epochs, args.learnRate, args.momentum, args.evalFreq, args.savFreq, args.predictPheno, args.loadWeights, args.saveWeights, args.randomSeed, args.hidCount, args.hidl2, args.hidAct
# V(G) 0.168545 0.004763
#V(e) 0.006826 0.002168
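# Worked example (comment only, added for clarity): with the variance components
# quoted above, V(G) = 0.168545 and V(e) = 0.006826, so
#   h2    = V(G) / (V(G) + V(e)) = 0.168545 / 0.175371 ~= 0.961
#   delta = V(e) / V(G)          = 0.006826 / 0.168545 ~= 0.0405
# which matches delta = (1 - h2) / h2 ~= 0.039 / 0.961 ~= 0.0405, i.e. the two
# formulas for delta written above agree.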
def addActivation(myNet, hidAct):
if hidAct == 1 : H_Act = knet_main.knSigmoid( myNet)
elif hidAct == 2 : H_Act = knet_main.knRELU( myNet)
elif hidAct == 3 : print("no activation")
elif hidAct == 5 : H_Act = knet_main.knLeakyRELU( myNet)
else : H_Act = knet_main.knSoftplus( myNet)
def getNetworkStructure(myNet) :
layernum = 0
for layerIndex in range(0,len(myNet.layers)) :
layer = myNet.layers[layerIndex]
if type(layer) == knet_main.knnLayer: # non-input layer types have a weight matrix
if layer.Weights_W is not None :
layernum += 1
print("layer " + str(layernum) + " has weight matrix shaped: " + str(layer.Weights_W.shape))
def runKnet(args) :
hLayerCount = args.hidCount
hiddenShrinkage = args.hidl2
# default QC settings used for all non AMBLUP versions
_minObserved = 0.95
_minMAF = 0.01
_minVariance = 0.02
# load plink binary / phenotypes; we want to load them here so that we can release the memory once the raw data is no longer needed
cc = True
if args.cc == 0 : cc = False
recodecc = True
if args.recodecc == 0 : recodecc = False
genotypeData = knet_IO.loadPLINK(args.knet, loadPhenos = False)
M = genotypeData["M"]
irsIds = genotypeData["rsid"]
IDs = genotypeData["IDs"]
indicesKept = np.asarray( range(M.shape[1]) )
del genotypeData ; gc.collect() # don't need this anymore
y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc)
y = stats.zscore(y) # zscore it so that Beta -> h2 computations work
# if we have a validation set
M_validation = None
y_validation = None
if args.validSet :
genotypeData = knet_IO.loadPLINK(args.validSet, loadPhenos = False, replaceMissing = True) # want to replace -1 with 0s, as we otherwise would have -1s; later we just delete indices that failed QC for the training set, but we don't handle individually missing data
M_validation = genotypeData["M"]
IDs_validation = genotypeData["IDs"]
print("Loaded number of people for validatin: ", len(M_validation), flush=True )
del genotypeData ; gc.collect() # dont need this
if args.validPhen :
y_validation = knet_IO.loadPLINKPheno(args.validPhen, caseControl = cc, recodeCaseControl = recodecc)
y_validation = stats.zscore(y_validation) # zscore it so that Beta -> h2 computations work
if args.inference == 0 :
# 1. standardise data
if args.qc == 1 :
qc_data = geno_qc.genoQC_all(M, rsIds = irsIds, minObserved = _minObserved, minMAF = _minMAF, minVariance = _minVariance) # we MUST perform QC with the EXACT SAME settings as the 'region scanner' otherwise the region coordinates will be mismatched
#M = qc_data["X"]
rsIds_qc = qc_data["rsIds"] # save away the surviving SNP list that we have used
indicesToRemove = qc_data["indicesToRemove"]
indicesKept = qc_data["indicesKept"]
irsIds = rsIds_qc.tolist()
del qc_data; gc.collect() # overwrite
qc_data = geno_qc.removeList(M, indicesToRemove)
M = qc_data["X"]
del qc_data; gc.collect() # overwrite
else : print("Skipping internal QC", flush=True)
M, mns, sstd = geno_qc.standardise_Genotypes(M) ; gc.collect()
print("After standardising, training data in MBs is: ",geno_qc.getSizeInMBs(M) )
else :
print("Inference data QC", flush=True)
if args.snpIndices is not None :
indicesToKeep = knet_IO.loadIndices(args.snpIndices)
M = M[:,indicesToKeep]
mns = knet_IO.loadVectorFromDisk( args.mns , 'float32') # these are always float32 even in 64 runs
sstd = knet_IO.loadVectorFromDisk( args.sstd , 'float32')
snpIDs = knet_IO.loadsnpIDs(args.snpIDs)
M = M.astype('float32')
M -= mns
M /= sstd
# load final list of RSids
# load mean /SDs
#M = geno_qc.standardise_Genotypes(M) ; gc.collect()
#print("After standardising, training data in MBs is: ",geno_qc.getSizeInMBs(M) )
# get Zscores: have to standardise ONLY over the training, and not the training+ validation together: https://blog.slavv.com/37-reasons-why-your-neural-network-is-not-working-4020854bd607
# will have to implement this for genetic data
if M_validation is not None :
if args.qc == 1 :
# depending on if we are in inference mode, make sure we have the same set of SNPs
if args.inference == 0 : M_validation = np.delete(M_validation, indicesToRemove, axis=1)
else : M_validation = M_validation[:,indicesToKeep]
#qc_data = geno_qc.removeList(M_validation, indicesToRemove)
M_validation = M_validation.astype('float32')
M_validation -= mns
M_validation /= sstd
indices_validation = np.asarray( range(len(M_validation)) ) # is used for sorting
print("After standardising, validation data in MBs is: ",geno_qc.getSizeInMBs(M_validation) )
# Pre-process data:
evalTrainResults = True
BNEnabled = int(args.bnorm) == 1
decay_Enabled = int(args.lr_decay) == 1
# Shuffle data before producing the minibatches to avoid having all-case or all-control minibatches
np.random.seed(args.randomSeed)
random.seed(args.randomSeed)
indices = np.asarray( range(len(M)) ) # is used for sorting
random.shuffle(indices)
M = M[indices]
y = y[indices]
IDs[0] = np.array(IDs[0])
IDs[1] = np.array(IDs[1])
IDs[0] = IDs[0][indices]
IDs[1] = IDs[1][indices]
# reshape data to be the right dimensions for Convolutions
if args.convLayers > 0 :
M = M.reshape(M.shape[0], 1 , 1, M.shape[1])
if M_validation is not None :
M_validation = M_validation.reshape(M_validation.shape[0], 1 , 1, M_validation.shape[1])
# 2. create minibatch list
numIndividuals = M.shape[0]
numSNPs = M.shape[1] # numSNPs = bed.get_nb_markers(), as we may have removed SNPs, we want to know how many are left
len_M = len(M)
len_M_validation = 0
train_GWAS = list()
train_y = list()
minibatch_size = args.batch_size #M.shape[0] # 64
if args.batch_size == 0 : minibatch_size = len(M)
num_batches = len(M) // minibatch_size
# scale the delta by minibatch_size, if we dont have minibatches
ratio = float(minibatch_size) / numIndividuals # this is 1 if there are no minibatches
print("orig L2 Regularizer : " + str(hiddenShrinkage) + " minibatches scaled to " + str(hiddenShrinkage * ratio) )
hiddenShrinkage *= ratio
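# Worked example (comment only): with minibatch_size = 64 and numIndividuals = 6400,
# ratio = 64 / 6400 = 0.01, so the per-batch L2 shrinkage becomes 1% of the original
# value; with a single full batch (args.batch_size == 0) ratio is 1.0 and the
# shrinkage is left unchanged.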
start = 0
end = minibatch_size
# for i in range(num_batches) :
# train_GWAS.append(M[start:end] )
# train_y.append(y[start:end])
# print("adding batch " + str(i) + " , start/end: " + str(start) + "/" + str(end) )
# start = end
# end += minibatch_size
y_batched = y.copy()
# do this in a more RAM-efficient way: keep deleting the processed slices from the original matrix to free up space as we go along; otherwise this step would temporarily double the RAM requirements
for i in range(num_batches) :
train_GWAS.append(M[0:minibatch_size] )
M = M[minibatch_size:len(M)]
train_y.append(y_batched[0:minibatch_size])
y_batched = y_batched[minibatch_size:len(y_batched)]
print("adding batch " + str(i) + ", minibatch size: " + str(minibatch_size) + " / num left in pool: " + str(len(M)) )
gc.collect()
print("train_GWAS[0].shape: " + str( train_GWAS[0].shape) + " // train_y.shape: " + str( train_y[0].shape) )
del M; gc.collect() # free up memory
if M_validation is not None :
len_M_validation = len(M_validation)
if args.batch_size == 0 : minibatch_size = len(M_validation)
test_GWAS = list()
test_y = list()
evalResults = True
num_batches = len(M_validation) // minibatch_size
print("len_M_validation is: " + str(len_M_validation) + ", minibatch size: " + str(minibatch_size) + " args.batch_size: " + str(args.batch_size) + " num_batches is: " + str(num_batches))
start = 0
end = minibatch_size
for i in range(num_batches) :
test_GWAS.append(M_validation[start:end] )
test_y.append(y_validation[start:end])
print("adding batch " + str(i) + " , start/end: " + str(start) + "/" + str(end) )
start = end
end += minibatch_size
# del M_validation; gc.collect() # free up memory, cant do this as we need this for the PRS calculation....
else :
test_GWAS = None
test_y = None
evalResults = False
# 3. initialise network params
floatPrecision = "float" +str(args.float)
print("floatPrecision is: " + floatPrecision)
knet_main.setDataType(floatPrecision)
myNet = knet_main.knn(optimizer = args.optimizer)
if args.gpu == 1 :
print("attempting to init GPU", flush=True)
knet_main.initGPU()
print("GPU successfully set", flush=True)
knet_main.set_seed(args.randomSeed)
if args.orig == 1 :
print("setting KNeT optimizer 0 to original version", flush=True)
knet_main.KNET_ORIG = True
Input = knet_main.knnLayer( myNet,np.array([-1]), knet_main.LAYER_SUBTYPE_INPUT)
# if conv was enabled we then do NOT regularize stuff at the first FC layer as we only want to regularize by h2 once
hiddenREGULARIRIZER = "REGULARIZER_RIDGE"
shrinkage = hiddenShrinkage
if args.convLayers > 0 :
lastOutput = train_GWAS[0].shape[-1] # the input to the first layer is the last element of the shape array, eg: 33380
print("Adding "+str(args.convLayers)+" conv layers, with initial input dimension: " + str(lastOutput), flush=True)
# first conv layer has special logic, we must make it so that size=stride, to avoid the massive space expansion
# first, find the smallest size/stride that will result in a whole number output size:
# for i in range(4,21) : # filter sizes of 4 to 20 are considered
# trialOutput = lastOutput
# currentStride = filter_size = i
# trialOutput = (trialOutput - filter_size +2) / currentStride + 1
# print("trialOutput : " + str(trialOutput) + " / filter_size: " + str(filter_size) + " / currentStride: " + str(currentStride) )
# if trialOutput % 1 == 0 :
# print("first Conv layer filter/stride will be: " + str(filter_size), flush=True)
# break
currentNumFilters= args.convFilters
currentStride = 3
filter_size = 5 # as it turns out it is not actually a problem if the conv outputs something that isn't an integer, so we just need to downsample it
Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = hiddenREGULARIRIZER, shrinkageParam = hiddenShrinkage, p_dropout = args.dropout, n_filters = currentNumFilters, h_filter=1, w_filter=filter_size, padding=1, stride=currentStride, oneD = True)
if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN)
addActivation(myNet,args.hidAct)
lastOutput = (lastOutput - filter_size +2) / currentStride + 1
lastOutput = int(lastOutput) # as these can only be integers
hiddenREGULARIRIZER = knet_main.REGULARIZER_NONE # only add regularizer for first layer, subsequent layers will always have none
shrinkage = 0.0
currentStride = 1
pool_size = 2
for i in range(1, args.convLayers +1) :
# decide on filter size, depending on input: Conv layers must always produce even outputs so that maxpool can halve them
filter_size = 3
if lastOutput % 2 != 0 : filter_size = 4 # if the current output is not even, then we have to use a filter size of 4, otherwise we get fractions after the maxpool operation
## currentNumFilters = (i+1) * args.convFilters
currentNumFilters = currentNumFilters // 2
Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = hiddenREGULARIRIZER, shrinkageParam = shrinkage, p_dropout = args.dropout, n_filters = currentNumFilters, h_filter=1, w_filter=filter_size, padding=1, stride=currentStride, oneD = True)
if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN)
addActivation(myNet,args.hidAct)
lastOutput = (lastOutput - filter_size +2) / currentStride + 1
lastOutput = int(lastOutput) # as these can only be integers
MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True)
lastOutput = (lastOutput - pool_size) / pool_size + 1 # compute what dimensions the conv+maxpool operations are going to leave for the next layer
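# Worked example (comment only): if lastOutput is 250, filter_size stays 3 and the
# stride is 1, so the conv keeps (250 - 3 + 2) / 1 + 1 = 250 columns and the maxpool
# halves that to (250 - 2) / 2 + 1 = 125; on the next iteration 125 is odd, so
# filter_size becomes 4, the conv yields (125 - 4 + 2) / 1 + 1 = 124 and the maxpool
# gives (124 - 2) / 2 + 1 = 62 -- always an integer, as required.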
# Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_RIDGE", shrinkageParam = hiddenShrinkage, p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=8, padding=1, stride=4, oneD = True)
# if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN)
# addActivation(myNet,args.hidAct)
# MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True)
#
# Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_NONE", shrinkageParam = 0., p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=5, padding=1, stride=2, oneD = True) # will have to be 6 for next one ( 8 for last one)
# if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN)
# addActivation(myNet,args.hidAct)
# MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True)
# Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_NONE", shrinkageParam = 0., p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=5, padding=1, stride=1, oneD = True)
# if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN)
# addActivation(myNet,args.hidAct)
# MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True)
#
# Conv_Layer = knet_main.knnConvLayer( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = "REGULARIZER_NONE", shrinkageParam = 0., p_dropout = args.dropout, n_filters = 128, h_filter=1, w_filter=5, padding=1, stride=1, oneD = True)
# if BNEnabled : Spatial_Bnorm = knet_main.knnSpatialBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN)
# addActivation(myNet,args.hidAct)
# MaxPool_Layer = knet_main.knnMaxPool(myNet, oneD = True)
#
Flatten_Layer = knet_main.knnFlatten(myNet)
lastLayerSize = args.firstLayerSize #lastLayerSize_MAX
for i in range(1,hLayerCount+1) : # iterate 1 based, otherwise we will get a reduction after the first layer, no matter the widthReductionRate, as 0 is divisible by anything
if i > 1 or args.convLayers > 0 : # only add regularizer for first layer, subsequent layers will always have none
hiddenREGULARIRIZER = knet_main.REGULARIZER_NONE
shrinkage = 0.0
#if i == (hLayerCount-1) : lastWidth = 2 # enforce so that the last widht is always 2, ie 1 neuron makes it MORE like the other LESS likely
H_Layer = knet_main.knnLayer(myNet, [lastLayerSize], knet_main.LAYER_SUBTYPE_HIDDEN, regularizer = hiddenREGULARIRIZER, shrinkageParam = shrinkage, p_dropout = args.dropout)
if BNEnabled : Bnorm = knet_main.knnBatchNorm( myNet, [-1],knet_main.LAYER_SUBTYPE_HIDDEN)
addActivation(myNet,args.hidAct)
print("added layer at depth: " + str(i) + " with width: " + str(lastLayerSize) + " / shrinkage: " + str(shrinkage))
# control the 'fatness' of the network: we reduce the width at a given rate: if this is 1, then at every subsequent layer; if it's 2, then at every 2nd layer, etc.
if i % args.widthReductionRate == 0 : lastLayerSize = lastLayerSize // 2
if lastLayerSize < 2 : break # stop adding hidden layers once the width would drop below 2
Output = knet_main.knnLayer( myNet,np.array([ y.reshape(y.shape[0],-1).shape[1] ]), knet_main.LAYER_SUBTYPE_OUTPUT, regularizer = "REGULARIZER_NONE", shrinkageParam = 0.0)
if len( y.shape) > 1 : Out_Act = knet_main.knSoftmax( myNet)
#knet_main.checkConvOutput(myNet, [1,5194 ])
if args.convLayers > 0 : knet_main.checkConvOutput(myNet, [*train_GWAS[0][0][0].shape])
knet_main.getNetworkMemUsage(myNet,train_GWAS[0].shape) # of RAM
if args.inference == 0 :
print("Analysis Run", flush = True)
results = myNet.learn(train_GWAS, train_y, test_GWAS, test_y, eval_test=evalResults,eval_train=evalTrainResults, num_epochs=args.epochs, eta=args.learnRate, eval_freq = args.evalFreq, friction = args.momentum, decayEnabled = decay_Enabled)
getNetworkStructure(myNet)
#writeKNetParamsToDisk(myNet, "C:/0Datasets/NNs/knet_genetic_fc/knet")
if args.saveWeights is not None : writeKNetParamsToDisk(myNet, args.saveWeights, knet_main.NETWORK_DATATYPE)
# write epoch results out
results_its = results["results"]#["results"]
os.makedirs(os.path.dirname(args.out), exist_ok=True)
#write training data means / stds to disk so that we could use those for inference runs later
print("writing means/stds to disk with datatype: " + str(sstd.dtype))
print("sstd shape is: " + str(sstd.shape) + " / mns shape: " + str(mns.shape))
knet_IO.writeVectorToDisk( args.out + "data_mns" , mns, mns.dtype)
knet_IO.writeVectorToDisk( args.out + "data_sstd" , sstd, sstd.dtype)
fileName = args.out + "nn_results.txt"
with open(fileName, "w") as file:
line = "epochs"
if "train_accuracy" in results_its: line = line + "\t" + "train_accuracy"
if "test_accuracy" in results_its: line = line + "\t" + "test_accuracy"
file.write(line + "\n")
for i in range( len(results_its["epochs"]) ):
line = str(results_its["epochs"][i])
if "train_accuracy" in results_its: line = line + "\t" + str(results_its["train_accuracy"][i])
if "test_accuracy" in results_its: line = line + "\t" + str(results_its["test_accuracy"][i])
file.write(line + "\n")
# generate plot of the results
if len(results_its["epochs"]) > 0 :
plotgen.exportNNPlot(results_its, args.out + "nnplot")
# write out the SNPs that were used for the analysis
fileName = args.out + "nn_SNPs.txt"
with open(fileName, "w") as file:
for i in range( len(irsIds) ):
file.write(irsIds[i] + "\n")
# write out the indices of the original dataset's coordinates for convenience
if indicesKept is not None: # in case we skipped QC
fileName = args.out + "nn_SNPs_indices.txt"
with open(fileName, "w") as file:
for i in range( len(indicesKept) ):
file.write( str(indicesKept[i]) + "\n")
if len_M_validation > 0 :
producePRS(myNet,M_validation, test_GWAS, IDs_validation, len_M_validation , args.out + "yhat.txt", args.out + "FIDs.txt", y_validation, args.out + "KNET_PRS")
# # write final predictions out
# yhats = list()
# totalSofar= 0
# for i in range(len(test_GWAS)) : # loop through all minbatches
# totalSofar += len(test_GWAS[i])
# yhats.append( myNet.forward_propagate(test_GWAS[i],False, forceCast_toCPU = True) )
#
#
# if totalSofar < len_M_validation :
# print("minibatches did not cover all training samples, so we create last batch out of the remainders")
# lastBatch_X = M_validation[totalSofar:len_M_validation]
# yhats.append( myNet.forward_propagate(lastBatch_X,False, forceCast_toCPU = True) )
#
#
# #yhats = list()
# #yhats.append( np.array([ [0],[1],[2] ]))
# #yhats.append( np.array([ [3],[4],[5] ]))
# #yhats.append( np.array([ [6],[7],[8] ]))
# yhat_all = np.concatenate(yhats)
# print("after merging, we have yhat predictions for : " + str(len(yhat_all)) + " samples", flush=True)
#
# print("yhat_all.shape: " + str(yhat_all.shape) + " // indices_validation.shape: " + str(indices_validation.shape) + " // indices.shape: " + str(indices.shape) )
#
#
# fileName = args.out + "yhat.txt"
# with open(fileName, "w") as file:
# file.write("Profile" + "\n")
#
#
# for i in range(yhat_all.shape[0]) :
# line = str(yhat_all[i][0] )
# for j in range(1, len(yhat_all[i]) ):
# line = line + "\t" + str(yhat_all[i][j] )
#
# file.write( line + "\n") # file.write( ( str(yhat[i])[2:-1] ).replace(" ", " ").replace(" ", "\t") + "\n")
#
# # also write out the FID / IIDs in the same order, just as a sanity check (compare this against the .fam files)
#
# fileName = args.out + "FIDs.txt"
# with open(fileName, "w") as file:
# file.write("FID" + "\t" + "IID" + "\n")
#
# for i in range( len(IDs_validation[0]) ) :
# line = IDs_validation[0][i] + "\t" + IDs_validation[1][i]
#
# file.write( line + "\n")
else :
print("Inference Run", flush = True)
loadKNetParams(myNet, args.loadWeights, knet_main.NETWORK_DATATYPE)
if args.garson == 1:
print("producing importance scores via the garson algorithm")
NNinference = myNet.dream_Garson()
else :
print("producing importance scores via deep dreaming")
os.makedirs(os.path.dirname(args.out), exist_ok=True)
# forward propagate with the 1st sample of the training set
yhat = myNet.forward_propagate(train_GWAS[0], train = False, saveInput = False, forceCast_toCPU = True)
suppressPrint_orig = myNet.suppressPrint
myNet.suppressPrint = True
StartImage = None
#StartImage = np.random.normal( size=(1,X_test.shape[1]))
print("producing inference with number of iterations: " + str(args.dreamit), flush=True)
dream = myNet.dream(0, 100,StartImage,args.dreamit , mFilterSize = 0, blur = 0.0, l2decay = 0.0, small_norm_percentile = 0,lr = 1.5,normalize = False, small_val_percentile = 0)
NNinference = dream[0].ravel()
NNinference[np.isnan(NNinference)]=0.0
myNet.suppressPrint = suppressPrint_orig
# Here this would need to be more constrained:
# both LD and MAF need to be taken into account
knet_IO.writeSNPeffects(args.out + "dream",snpIDs, NNinference)
# the validation here will refer to the TEST set
if len_M_validation > 0 :
producePRS(myNet,M_validation, test_GWAS, IDs_validation, len_M_validation , args.out + "yhat.txt", args.out + "FIDs.txt", y_validation, args.out + "KNET_PRS")
def producePRS(myNet,origData, miniBatches, IndiIDs, len_total , outLoc_yhat, outLoc_FIDs, ytrue, outLoc_PRS) :
# write final predictions out
yhats = list()
totalSofar= 0
for i in range(len(miniBatches)) : # loop through all minibatches
totalSofar += len(miniBatches[i])
yhats.append( myNet.forward_propagate(miniBatches[i],False, forceCast_toCPU = True) )
if totalSofar < len_total :
print("minibatches did not cover all training samples, so we create last batch out of the remainders")
lastBatch_X = origData[totalSofar:len_total]
yhats.append( myNet.forward_propagate(lastBatch_X,False, forceCast_toCPU = True) )
yhat_all = np.concatenate(yhats)
print("after merging, we have yhat predictions for : " + str(len(yhat_all)) + " samples", flush=True)
fileName = outLoc_yhat
with open(fileName, "w") as file:
file.write("Profile" + "\n")
for i in range(yhat_all.shape[0]) :
line = str(yhat_all[i][0] )
for j in range(1, len(yhat_all[i]) ):
line = line + "\t" + str(yhat_all[i][j] )
file.write( line + "\n") # file.write( ( str(yhat[i])[2:-1] ).replace(" ", " ").replace(" ", "\t") + "\n")
# also write out the FID / IIDs in the same order, just as a sanity check (compare this against the .fam files)
fileName = outLoc_FIDs
with open(fileName, "w") as file:
file.write("FID" + "\t" + "IID" + "\n")
for i in range( len(IndiIDs[0]) ) :
line = IndiIDs[0][i] + "\t" + IndiIDs[1][i]
file.write( line + "\n")
# write out the final r^2
yhat_all += knet_main.EPSILON # for numerical stability
rSQ = np.corrcoef( ytrue, yhat_all, rowvar=0)[1,0]**2
with open(outLoc_PRS, "w") as file:
file.write(str(rSQ) )
def writeKNetParamsToDisk(myNet, targetDir, datatype = 'float32') :
os.makedirs(os.path.dirname(targetDir), exist_ok=True)
for i in range( len(myNet.layers) ) :
if myNet.layers[i] :
if isinstance(myNet.layers[i],knet_main.knnLayer) or isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if its a layer with trainable params
if myNet.layers[i].subtype != knet_main.LAYER_SUBTYPE_INPUT : # if its not an input layer
# it has at least 6 trainable params: Weights, Momentum, Past_Grads, (2x for bias too)
print("writing params for layer " + type(myNet.layers[i]).__name__ )
# MUST cast them to CPU before attempting to write out, otherwise GPU will hang there
knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_w" , knet_main.castOutputToCPU(myNet.layers[i].Weights_W), datatype)
knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_wb" , knet_main.castOutputToCPU(myNet.layers[i].Weights_bias), datatype)
knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_m" , knet_main.castOutputToCPU(myNet.layers[i].Momentum), datatype)
knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_mb" , knet_main.castOutputToCPU(myNet.layers[i].Bias_Momentum), datatype)
knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_p" , knet_main.castOutputToCPU(myNet.layers[i].Past_Grads), datatype)
knet_IO.writeMatrixToDisk( targetDir + "_" + str(i)+ "_pb" , knet_main.castOutputToCPU(myNet.layers[i].Past_Grads_bias), datatype)
if isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if it is a batchnorm type, then it will have another 2 trainable params
knet_IO.writeVectorToDisk( targetDir + "_" + str(i)+ "_rv" , knet_main.castOutputToCPU(myNet.layers[i].running_var), datatype)
knet_IO.writeVectorToDisk( targetDir + "_" + str(i)+ "_rm" , knet_main.castOutputToCPU(myNet.layers[i].running_mean),datatype)
def loadKNetParams(myNet, targetDir, datatype = 'float32') :
for i in range( len(myNet.layers) ) :
if myNet.layers[i] :
if isinstance(myNet.layers[i],knet_main.knnLayer) or isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if its a layer with trainable params
if myNet.layers[i].subtype != knet_main.LAYER_SUBTYPE_INPUT : # if its not an input layer
# it has at least 6 trainable params: Weights, Momentum, Past_Grads, (2x for bias too)
print("loading params for layer " + type(myNet.layers[i]).__name__ )
myNet.layers[i].Weights_W = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_w" ,datatype)
myNet.layers[i].Weights_bias = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_wb",datatype )
myNet.layers[i].Momentum = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_m",datatype )
myNet.layers[i].Bias_Momentum = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_mb",datatype )
myNet.layers[i].Past_Grads = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_p" ,datatype)
myNet.layers[i].Past_Grads_bias = knet_IO.loadMatrixFromDisk( targetDir + "_" + str(i)+ "_pb" ,datatype)
if isinstance(myNet.layers[i],knet_main.knnSpatialBatchNorm) or isinstance(myNet.layers[i],knet_main.knnBatchNorm) : # if it is a batchnorm type, then it will have another 2 trainable params
myNet.layers[i].running_var = knet_IO.loadVectorFromDisk( targetDir + "_" + str(i)+ "_rv" ,datatype)
myNet.layers[i].running_mean = knet_IO.loadVectorFromDisk( targetDir + "_" + str(i)+ "_rm",datatype)
myNet.connectLayers()
# inputData = train_GWAS[0]
# outPutdata = train_y[0]
def performGradientCheck(myNet, inputData, outPutdata) : # the net, Standardised SNPs, and y
# Gradient Test
grad_current = myNet.getCurrentWeightGradients(inputData, outPutdata)
numgrad = myNet.gradientCheck(inputData, outPutdata)
myNorm = norm(grad_current-numgrad)/norm(grad_current+numgrad)
return(myNorm )
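# Illustrative usage (assuming the same myNet / train_GWAS / train_y as above):
#   relErr = performGradientCheck(myNet, train_GWAS[0], train_y[0])
#   print("gradient check relative error: " + str(relErr))
# As a common rule of thumb for this relative-norm check, values around 1e-7 or
# smaller suggest the analytic and numerical gradients agree, while values
# approaching 1e-2 usually point to a bug in the backpropagation code.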
|
|
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An interactive, stateful AJAX shell that runs Python code on the server.
Part of http://code.google.com/p/google-app-engine-samples/.
May be run as a standalone app or in an existing app as an admin-only handler.
Can be used for system administration tasks, as an interactive way to try out
APIs, or as a debugging aid during development.
The logging, os, sys, db, and users modules are imported automatically.
Interpreter state is stored in the datastore so that variables, function
definitions, and other values in the global and local namespaces can be used
across commands.
To use the shell in your app, copy shell.py, static/*, and templates/* into
your app's source directory. Then, copy the URL handlers from app.yaml into
your app.yaml.
TODO: unit tests!
"""
import logging
import new
import os
import pickle
import sys
import traceback
import types
import wsgiref.handlers
from django.template import loader
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
import django.template
import gae_django
# Set to True if stack traces should be shown in the browser, etc.
_DEBUG = True
# The entity kind for shell sessions. Feel free to rename to suit your app.
_SESSION_KIND = '_Shell_Session'
# Types that can't be pickled.
UNPICKLABLE_TYPES = (
types.ModuleType,
types.TypeType,
types.ClassType,
types.FunctionType,
)
# Unpicklable statements to seed new sessions with.
INITIAL_UNPICKLABLES = [
'import logging',
'import os',
'import sys',
'from google.appengine.ext import db',
'from google.appengine.api import users',
]
class ShellSession(db.Model):
"""A shell session. Stores the session's globals.
Each of the session's globals is stored in one of two places:
If the global is picklable, it's stored in the parallel globals and
global_names list properties. (They're parallel lists to work around the
unfortunate fact that the datastore can't store dictionaries natively.)
If the global is not picklable (e.g. modules, classes, and functions), or if
it was created by the same statement that created an unpicklable global,
it's not stored directly. Instead, the statement is stored in the
unpicklables list property. On each request, before executing the current
statement, the unpicklable statements are evaluated to recreate the
unpicklable globals.
The unpicklable_names property stores all of the names of globals that were
added by unpicklable statements. When we pickle and store the globals after
executing a statement, we skip the ones in unpicklable_names.
Using Text instead of string is an optimization. We don't query on any of
these properties, so they don't need to be indexed.
"""
global_names = db.ListProperty(db.Text)
globals = db.ListProperty(db.Blob)
unpicklable_names = db.ListProperty(db.Text)
unpicklables = db.ListProperty(db.Text)
def set_global(self, name, value):
"""Adds a global, or updates it if it already exists.
Also removes the global from the list of unpicklable names.
Args:
name: the name of the global to add or update
value: any picklable value
"""
blob = db.Blob(pickle.dumps(value))
if name in self.global_names:
index = self.global_names.index(name)
self.globals[index] = blob
else:
self.global_names.append(db.Text(name))
self.globals.append(blob)
self.remove_unpicklable_name(name)
def remove_global(self, name):
"""Removes a global, if it exists.
Args:
name: string, the name of the global to remove
"""
if name in self.global_names:
index = self.global_names.index(name)
del self.global_names[index]
del self.globals[index]
def globals_dict(self):
"""Returns a dictionary view of the globals.
"""
return dict((name, pickle.loads(val))
for name, val in zip(self.global_names, self.globals))
def add_unpicklable(self, statement, names):
"""Adds a statement and list of names to the unpicklables.
Also removes the names from the globals.
Args:
statement: string, the statement that created new unpicklable global(s).
names: list of strings; the names of the globals created by the statement.
"""
self.unpicklables.append(db.Text(statement))
for name in names:
self.remove_global(name)
if name not in self.unpicklable_names:
self.unpicklable_names.append(db.Text(name))
def remove_unpicklable_name(self, name):
"""Removes a name from the list of unpicklable names, if it exists.
Args:
name: string, the name of the unpicklable global to remove
"""
if name in self.unpicklable_names:
self.unpicklable_names.remove(name)
class FrontPageHandler(webapp.RequestHandler):
"""Creates a new session and renders the shell.html template.
"""
def get(self):
# set up the session. TODO: garbage collect old shell sessions
session_key = self.request.get('session')
if session_key:
session = ShellSession.get(session_key)
else:
# create a new session
session = ShellSession()
session.unpicklables = [db.Text(line) for line in INITIAL_UNPICKLABLES]
session_key = session.put()
template_file = os.path.join(os.path.dirname(__file__), 'templates',
'shell.html')
session_url = '/?session=%s' % session_key
vars = { 'server_software': os.environ['SERVER_SOFTWARE'],
'python_version': sys.version,
'session': str(session_key),
'user': users.get_current_user(),
'login_url': users.create_login_url(session_url),
'logout_url': users.create_logout_url(session_url),
}
rendered = loader.render_to_string('shell.html', dictionary=vars)
# rendered = webapp.template.render(template_file, vars, debug=_DEBUG)
self.response.out.write(rendered)
class StatementHandler(webapp.RequestHandler):
"""Evaluates a python statement in a given session and returns the result.
"""
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
# extract the statement to be run
statement = self.request.get('statement')
if not statement:
return
# the python compiler doesn't like network line endings
statement = statement.replace('\r\n', '\n')
# add a couple newlines at the end of the statement. this makes
# single-line expressions such as 'class Foo: pass' evaluate happily.
statement += '\n\n'
# log and compile the statement up front
try:
logging.info('Compiling and evaluating:\n%s' % statement)
compiled = compile(statement, '<string>', 'single')
except:
self.response.out.write(traceback.format_exc())
return
# create a dedicated module to be used as this statement's __main__
statement_module = new.module('__main__')
# use this request's __builtin__, since it changes on each request.
# this is needed for import statements, among other things.
import __builtin__
statement_module.__builtins__ = __builtin__
# load the session from the datastore
session = ShellSession.get(self.request.get('session'))
# swap in our custom module for __main__. then unpickle the session
# globals, run the statement, and re-pickle the session globals, all
# inside it.
old_main = sys.modules.get('__main__')
try:
sys.modules['__main__'] = statement_module
statement_module.__name__ = '__main__'
# re-evaluate the unpicklables
for code in session.unpicklables:
exec code in statement_module.__dict__
# re-initialize the globals
for name, val in session.globals_dict().items():
try:
statement_module.__dict__[name] = val
except:
msg = 'Dropping %s since it could not be unpickled.\n' % name
self.response.out.write(msg)
logging.warning(msg + traceback.format_exc())
session.remove_global(name)
# run!
old_globals = dict(statement_module.__dict__)
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = self.response.out
sys.stderr = self.response.out
exec compiled in statement_module.__dict__
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except:
self.response.out.write(traceback.format_exc())
return
# extract the new globals that this statement added
new_globals = {}
for name, val in statement_module.__dict__.items():
if name not in old_globals or val != old_globals[name]:
new_globals[name] = val
if True in [isinstance(val, UNPICKLABLE_TYPES)
for val in new_globals.values()]:
# this statement added an unpicklable global. store the statement and
# the names of all of the globals it added in the unpicklables.
session.add_unpicklable(statement, new_globals.keys())
logging.debug('Storing this statement as an unpicklable.')
else:
# this statement didn't add any unpicklables. pickle and store the
# new globals back into the datastore.
for name, val in new_globals.items():
if not name.startswith('__'):
session.set_global(name, val)
finally:
sys.modules['__main__'] = old_main
session.put()
def main():
"""Main program.
"""
application = webapp.WSGIApplication(
[('/admin/shell', FrontPageHandler),
('/admin/shell/shell.do', StatementHandler)], debug=_DEBUG)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
|
|
#! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: [email protected]
#
"""
Semi-streaming error correction.
Output sequences will be placed in 'infile.corr'.
% python scripts/correct-reads.py [ <data1> [ <data2> [ ... ] ] ]
Use -h for parameter help.
TODO: add to sandbox/README.
"""
import sys
import screed
import os
import khmer
import tempfile
import shutil
import textwrap
import argparse
from khmer.khmer_args import (build_counting_args, info, add_loadgraph_args,
report_on_config)
from khmer.utils import write_record, write_record_pair, broken_paired_reader
from khmer.kfile import (check_space, check_space_for_graph,
check_valid_file_exists)
DEFAULT_NORMALIZE_LIMIT = 20
DEFAULT_CUTOFF = 2
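# Overview of the semi-streaming scheme implemented in main() below: on the first
# pass, reads whose median k-mer abundance is still below NORMALIZE_LIMIT are
# consumed into the countgraph and set aside in a temporary ".pass2" file, while
# reads from already-saturated regions are aligned and corrected immediately.
# The second pass then corrects the set-aside reads, by which point the
# countgraph has seen all of the input data.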
def correct_sequence(aligner, sequence):
# align to graph.
score, graph_alignment, read_alignment, truncated = \
aligner.align(sequence)
# next, decide whether or not to keep it.
output_corrected = False
if not truncated:
graph_seq = graph_alignment.replace("-", "")
return True, graph_seq
return False, sequence
def fix_quality(record):
if len(record.sequence) < len(record.quality):
record.quality = record.quality[:len(record.sequence)]
while len(record.sequence) > len(record.quality):
record.quality += 'I' # @CTB hack
def get_parser():
epilog = """
The output is one file for each input file, <input file>.corr, placed
in the current directory. This output contains the input sequences,
corrected at low-abundance k-mers.
Note that the output reads will not necessarily be in the same
order as the reads in the input files. However, read pairs will be
kept together, in "broken-paired" format; you can use
``extract-paired-reads.py`` to extract read pairs and orphans.
Example::
correct-reads.py -x 5e7 -k 20 -C 2 data/100k-filtered.fa
"""
parser = build_counting_args(
descr='Correct reads using a semi-streaming algorithm.',
epilog=textwrap.dedent(epilog))
parser.add_argument('input_filenames', nargs='+')
parser.add_argument('--cutoff', '-C', type=int,
help='k-mers below this abundance are not trusted',
default=DEFAULT_CUTOFF)
parser.add_argument('--normalize-to', '-Z', type=int,
help='base cutoff on this median k-mer abundance',
default=DEFAULT_NORMALIZE_LIMIT)
parser.add_argument('-o', '--out', metavar="filename",
type=argparse.FileType('w'),
default=None, help='only output a single file with '
'the specified filename; use a single dash "-" to '
'specify that output should go to STDOUT (the '
'terminal)')
parser.add_argument('--variable-coverage', '-V', action='store_true',
default=False,
help='Only correct sequences that have high coverage.')
add_loadgraph_args(parser)
parser.add_argument('-s', '--savegraph', metavar="filename", default='',
help='save the k-mer countgraph to disk after all '
'reads are loaded.')
# expert options
parser.add_argument('--force', default=False, action='store_true')
parser.add_argument('--ignore-pairs', default=False, action='store_true')
parser.add_argument('--tempdir', '-T', type=str, default='./')
parser.add_argument("--theta", dest="bits_theta", type=float, default=1.0)
return parser
def main():
info('correct-reads.py', ['streaming'])
parser = get_parser()
args = parser.parse_args()
###
if len(set(args.input_filenames)) != len(args.input_filenames):
print >>sys.stderr, \
"Error: Cannot input the same filename multiple times."
sys.exit(1)
###
report_on_config(args)
check_valid_file_exists(args.input_filenames)
check_space(args.input_filenames, args.force)
if args.savegraph:
check_space_for_graph(
args.n_tables * args.min_tablesize, args.force)
K = args.ksize
CUTOFF = args.cutoff
NORMALIZE_LIMIT = args.normalize_to
if args.loadgraph:
print >>sys.stderr, 'loading k-mer countgraph from', args.loadgraph
ct = khmer.load_countgraph(args.loadgraph)
else:
print >>sys.stderr, 'making k-mer countgraph'
ct = khmer.new_countgraph(K, args.min_tablesize, args.n_tables)
tempdir = tempfile.mkdtemp('khmer', 'tmp', args.tempdir)
print >>sys.stderr, 'created temporary directory %s; ' \
'use -T to change location' % tempdir
aligner = khmer.ReadAligner(ct, args.cutoff, args.bits_theta)
# ### FIRST PASS ###
save_pass2_total = 0
n_bp = 0
n_reads = 0
written_bp = 0
written_reads = 0
corrected_reads = 0
pass2list = []
for filename in args.input_filenames:
pass2filename = os.path.basename(filename) + '.pass2'
pass2filename = os.path.join(tempdir, pass2filename)
if args.out is None:
corrfp = open(os.path.basename(filename) + '.corr', 'w')
else:
corrfp = args.out
pass2list.append((filename, pass2filename, corrfp))
screed_iter = screed.open(filename, parse_description=False)
pass2fp = open(pass2filename, 'w')
save_pass2 = 0
n = 0
paired_iter = broken_paired_reader(screed_iter, min_length=K,
force_single=args.ignore_pairs)
for n, is_pair, read1, read2 in paired_iter:
if n % 10000 == 0:
print >>sys.stderr, '...', n, filename, save_pass2, \
n_reads, n_bp, written_reads, written_bp
# we want to track paired reads here, to make sure that pairs
# are not split between first pass and second pass.
if is_pair:
n_reads += 2
n_bp += len(read1.sequence) + len(read2.sequence)
seq1 = read1.sequence.replace('N', 'A')
seq2 = read2.sequence.replace('N', 'A')
med1, _, _ = ct.get_median_count(seq1)
med2, _, _ = ct.get_median_count(seq2)
if med1 < NORMALIZE_LIMIT or med2 < NORMALIZE_LIMIT:
ct.consume(seq1)
ct.consume(seq2)
write_record_pair(read1, read2, pass2fp)
save_pass2 += 2
else:
is_aligned, new_seq1 = correct_sequence(aligner, seq1)
if is_aligned:
if new_seq1 != read1.sequence:
corrected_reads += 1
read1.sequence = new_seq1
if hasattr(read1, 'quality'):
fix_quality(read1)
is_aligned, new_seq2 = correct_sequence(aligner, seq2)
if is_aligned:
if new_seq2 != read2.sequence:
corrected_reads += 1
read2.sequence = new_seq2
if hasattr(read2, 'quality'):
fix_quality(read2)
write_record_pair(read1, read2, corrfp)
written_reads += 2
written_bp += len(read1)
written_bp += len(read2)
else:
n_reads += 1
n_bp += len(read1.sequence)
seq = read1.sequence.replace('N', 'A')
med, _, _ = ct.get_median_count(seq)
# has this portion of the graph saturated? if not,
# consume & save => pass2.
if med < NORMALIZE_LIMIT:
ct.consume(seq)
write_record(read1, pass2fp)
save_pass2 += 1
else: # correct!
is_aligned, new_seq = correct_sequence(aligner, seq)
if is_aligned:
if new_seq != read1.sequence:
corrected_reads += 1
read1.sequence = new_seq
if hasattr(read1, 'quality'):
fix_quality(read1)
write_record(read1, corrfp)
written_reads += 1
written_bp += len(new_seq)
pass2fp.close()
print >>sys.stderr, '%s: kept aside %d of %d from first pass, in %s' \
% (filename, save_pass2, n, filename)
save_pass2_total += save_pass2
# ### SECOND PASS. ###
skipped_n = 0
skipped_bp = 0
for _, pass2filename, corrfp in pass2list:
print >>sys.stderr, ('second pass: looking at sequences kept aside '
'in %s') % pass2filename
# note that for this second pass, we don't care about paired
# reads - they will be output in the same order they're read in,
# so pairs will stay together if not orphaned. This is in contrast
# to the first loop.
for n, read in enumerate(screed.open(pass2filename,
parse_description=False)):
if n % 10000 == 0:
print >>sys.stderr, '... x 2', n, pass2filename, \
written_reads, written_bp
seq = read.sequence.replace('N', 'A')
med, _, _ = ct.get_median_count(seq)
# do we retain low-abundance components unchanged?
if med < NORMALIZE_LIMIT and args.variable_coverage:
write_record(read, corrfp)
written_reads += 1
written_bp += len(read.sequence)
skipped_n += 1
skipped_bp += len(read.sequence)
# otherwise, examine/correct.
else: # med >= NORMALIZE_LIMIT or not args.variable_coverage
is_aligned, new_seq = correct_sequence(aligner, seq)
if is_aligned:
if new_seq != read.sequence:
corrected_reads += 1
read.sequence = new_seq
if hasattr(read, 'quality'):
fix_quality(read)
write_record(read, corrfp)
written_reads += 1
written_bp += len(new_seq)
print >>sys.stderr, 'removing %s' % pass2filename
os.unlink(pass2filename)
print >>sys.stderr, 'removing temp directory & contents (%s)' % tempdir
shutil.rmtree(tempdir)
n_passes = 1.0 + (float(save_pass2_total) / n_reads)
percent_reads_corrected = float(corrected_reads +
(n_reads - written_reads)) /\
n_reads * 100.0
print >>sys.stderr, 'read %d reads, %d bp' % (n_reads, n_bp,)
print >>sys.stderr, 'wrote %d reads, %d bp' % (written_reads, written_bp,)
print >>sys.stderr, 'looked at %d reads twice (%.2f passes)' % \
(save_pass2_total, n_passes)
print >>sys.stderr, 'removed %d reads and corrected %d reads (%.2f%%)' % \
(n_reads - written_reads, corrected_reads, percent_reads_corrected)
print >>sys.stderr, 'removed %.2f%% of bases (%d total)' % \
((1 - (written_bp / float(n_bp))) * 100.0, n_bp - written_bp)
if args.variable_coverage:
percent_reads_hicov = 100.0 * float(n_reads - skipped_n) / n_reads
print >>sys.stderr, '%d reads were high coverage (%.2f%%);' % \
(n_reads - skipped_n, percent_reads_hicov)
print >>sys.stderr, ('skipped %d reads/%d bases because of low '
'coverage') % (skipped_n, skipped_bp)
fp_rate = \
khmer.calc_expected_collisions(ct, args.force, max_false_pos=.8)
# for max_false_pos see Zhang et al., http://arxiv.org/abs/1309.2975
print >>sys.stderr, \
'fp rate estimated to be {fpr:1.3f}'.format(fpr=fp_rate)
print >>sys.stderr, 'output in *.corr'
if args.savegraph:
print >>sys.stderr, "Saving k-mer countgraph to", args.savegraph
ct.save(args.savegraph)
if __name__ == '__main__':
main()
|
|
from IPython.display import HTML
from jinja2 import Template
from datetime import datetime, timezone, timedelta
import copy
from typing import List, Tuple
import biokbase.narrative.clients as clients
from .job import (
Job,
EXCLUDED_JOB_STATE_FIELDS,
JOB_INIT_EXCLUDED_JOB_STATE_FIELDS,
)
from biokbase.narrative.common import kblogging
from biokbase.narrative.app_util import system_variable
from biokbase.narrative.exception_util import (
transform_job_exception,
JobIDException,
)
"""
KBase Job Manager
The main class here defines a manager for running jobs (as Job objects).
This class knows how to fetch job status, kill jobs, etc.
It also communicates with the front end over the KBaseJobs channel.
This is a singleton - instantiating a new JobManager will return the existing
instance in its current state.
"""
__author__ = "Bill Riehl <[email protected]>"
__version__ = "0.0.1"
JOB_NOT_REG_ERR = "Job ID is not registered"
JOB_NOT_BATCH_ERR = "Job ID is not for a batch job"
JOBS_TYPE_ERR = "List expected for job_id_list"
JOBS_MISSING_FALSY_ERR = "Job IDs are missing or all falsy"
def get_error_output_state(job_id, error="does_not_exist"):
if error not in ["does_not_exist", "ee2_error"]:
raise ValueError(f"Unknown error type: {error}")
return {"state": {"job_id": job_id, "status": error}}
class JobManager(object):
"""
The KBase Job Manager class. This handles all jobs and makes their status available.
On status lookups, it feeds the results to the KBaseJobs channel that the front end
listens to.
"""
__instance = None
# keys = job_id, values = { refresh = 1/0, job = Job object }
_running_jobs = dict()
_log = kblogging.get_logger(__name__)
def __new__(cls):
if JobManager.__instance is None:
JobManager.__instance = object.__new__(cls)
return JobManager.__instance
@staticmethod
def _reorder_parents_children(states: dict) -> dict:
ordering = []
for job_id, state in states.items():
if state.get("batch_job"):
ordering.append(job_id)
else:
ordering.insert(0, job_id)
states = {job_id: states[job_id] for job_id in ordering}
return states
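# Illustrative behaviour of _reorder_parents_children (made-up IDs): given states
# ordered {"child_a": {...}, "batch_1": {"batch_job": True, ...}, "child_b": {...}},
# non-batch jobs are pushed to the front and batch containers to the back, so the
# returned dict is ordered child_b, child_a, batch_1. This guarantees that child
# jobs are registered before their parent batch job in initialize_jobs().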
def initialize_jobs(self, cell_ids: List[str] = None) -> None:
"""
Initializes this JobManager.
This is expected to be run by a running Narrative, and naturally linked to a workspace.
So it does the following steps.
1. app_util.system_variable('workspace_id')
2. get list of jobs with that ws id from ee2 (also gets tag, cell_id, run_id)
3. initialize the Job objects and add them to the running jobs list
4. start the status lookup loop.
"""
ws_id = system_variable("workspace_id")
job_states = dict()
kblogging.log_event(self._log, "JobManager.initialize_jobs", {"ws_id": ws_id})
try:
job_states = clients.get("execution_engine2").check_workspace_jobs(
{
"workspace_id": ws_id,
"return_list": 0, # do not remove
"exclude_fields": JOB_INIT_EXCLUDED_JOB_STATE_FIELDS,
}
)
except Exception as e:
kblogging.log_event(self._log, "init_error", {"err": str(e)})
new_e = transform_job_exception(e, "Unable to initialize jobs")
raise new_e
self._running_jobs = dict()
job_states = self._reorder_parents_children(job_states)
for job_state in job_states.values():
child_jobs = None
if job_state.get("batch_job"):
child_jobs = [
self.get_job(child_id)
for child_id in job_state.get("child_jobs", [])
]
job = Job(job_state, children=child_jobs)
# Set to refresh when job is not in terminal state
# and when job is present in cells (if given)
refresh = not job.was_terminal()
if cell_ids is not None:
refresh = refresh and job.in_cells(cell_ids)
self.register_new_job(job, refresh)
def _create_jobs(self, job_ids) -> dict:
"""
TODO: error handling
Given a list of job IDs, creates job objects for them and populates the _running_jobs dictionary
"""
job_ids = [job_id for job_id in job_ids if job_id not in self._running_jobs]
if not len(job_ids):
return {}
job_states = clients.get("execution_engine2").check_jobs(
{
"job_ids": job_ids,
"exclude_fields": JOB_INIT_EXCLUDED_JOB_STATE_FIELDS,
"return_list": 0,
}
)
for job_state in job_states.values():
# do not set new jobs to be automatically refreshed - if the front end wants them
# refreshed, it'll make a request.
self.register_new_job(job=Job(job_state), refresh=False)
return job_states
def _check_job(self, input_id: str) -> None:
if input_id not in self._running_jobs:
raise JobIDException(JOB_NOT_REG_ERR, input_id)
def _check_job_list(self, input_ids: List[str]) -> Tuple[List[str], List[str]]:
"""
Deduplicates the input job list, maintaining insertion order
Any jobs not present in self._running_jobs are added to an error list
:param input_ids: a list of putative job IDs
:return results: tuple with items "job_ids", containing valid IDs;
and "error_ids", for jobs that the narrative backend does not know about
"""
if not isinstance(input_ids, list):
raise TypeError(f"{JOBS_TYPE_ERR}: {input_ids}")
job_ids = []
error_ids = []
for input_id in input_ids:
if input_id and input_id not in job_ids + error_ids:
if input_id in self._running_jobs:
job_ids.append(input_id)
else:
error_ids.append(input_id)
if not len(job_ids) + len(error_ids):
raise JobIDException(JOBS_MISSING_FALSY_ERR, input_ids)
return job_ids, error_ids
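# Illustrative behaviour of _check_job_list (made-up IDs): if "job_a" is
# registered but "job_x" is not, then
#   self._check_job_list(["job_a", "job_a", None, "job_x"])
# returns (["job_a"], ["job_x"]): duplicates and falsy entries are dropped, and
# only an input with no truthy IDs at all raises JobIDException.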
def list_jobs(self):
"""
List all job ids, their info, and status in a quick HTML format.
"""
try:
all_states = self.lookup_all_job_states(ignore_refresh_flag=True)
state_list = [copy.deepcopy(s["state"]) for s in all_states.values()]
if not len(state_list):
return "No running jobs!"
state_list = sorted(state_list, key=lambda s: s.get("created", 0))
for state in state_list:
job = self.get_job(state["job_id"])
state["created"] = datetime.fromtimestamp(
state["created"] / 1000.0
).strftime("%Y-%m-%d %H:%M:%S")
state["run_time"] = "Not started"
state["owner"] = job.user
state["app_id"] = job.app_id
exec_start = state.get("running", None)
if state.get("finished"):
finished_time = datetime.fromtimestamp(
state.get("finished") / 1000.0
)
state["finish_time"] = finished_time.strftime("%Y-%m-%d %H:%M:%S")
if exec_start:
exec_start_time = datetime.fromtimestamp(exec_start / 1000.0)
delta = finished_time - exec_start_time
delta = delta - timedelta(microseconds=delta.microseconds)
state["run_time"] = str(delta)
elif exec_start:
exec_start_time = datetime.fromtimestamp(
exec_start / 1000.0
).replace(tzinfo=timezone.utc)
delta = datetime.now(timezone.utc) - exec_start_time
delta = delta - timedelta(microseconds=delta.microseconds)
state["run_time"] = str(delta)
tmpl = """
<table class="table table-bordered table-striped table-condensed">
<tr>
<th>Id</th>
<th>Name</th>
<th>Submitted</th>
<th>Submitted By</th>
<th>Status</th>
<th>Run Time</th>
<th>Complete Time</th>
</tr>
{% for j in jobs %}
<tr>
<td>{{ j.job_id|e }}</td>
<td>{{ j.app_id|e }}</td>
<td>{{ j.created|e }}</td>
<td>{{ j.user|e }}</td>
<td>{{ j.status|e }}</td>
<td>{{ j.run_time|e }}</td>
<td>{% if j.finish_time %}{{ j.finish_time|e }}{% else %}Incomplete{% endif %}</td>
</tr>
{% endfor %}
</table>
"""
return HTML(Template(tmpl).render(jobs=state_list))
except Exception as e:
kblogging.log_event(self._log, "list_jobs.error", {"err": str(e)})
raise
def _construct_job_output_state_set(
self, job_ids: list, states: dict = None
) -> dict:
"""
Builds a set of job states for the list of job ids.
:param states: dict, where each value is a job state from EE2
"""
# if cached, use 'em.
# otherwise, lookup.
# do transform
# cache terminal ones.
# return all.
if not isinstance(job_ids, list):
raise ValueError("job_ids must be a list")
if not len(job_ids):
return {}
output_states = dict()
jobs_to_lookup = list()
# Fetch from cache of terminated jobs, where available.
# These are already post-processed and ready to return.
for job_id in job_ids:
job = self.get_job(job_id)
if job.was_terminal():
output_states[job_id] = job.output_state()
elif states and job_id in states:
state = states[job_id]
output_states[job_id] = job.output_state(state)
else:
jobs_to_lookup.append(job_id)
fetched_states = dict()
# Get the rest of states direct from EE2.
if len(jobs_to_lookup):
try:
fetched_states = clients.get("execution_engine2").check_jobs(
{
"job_ids": jobs_to_lookup,
"exclude_fields": EXCLUDED_JOB_STATE_FIELDS,
"return_list": 0,
}
)
except Exception as e:
kblogging.log_event(
self._log, "_construct_job_output_state_set", {"err": str(e)}
)
for job_id in jobs_to_lookup:
output_states[job_id] = get_error_output_state(job_id, "ee2_error")
for job_id, state in fetched_states.items():
output_states[job_id] = self.get_job(job_id).output_state(state)
return output_states
def lookup_job_info(self, job_ids: List[str]) -> dict:
"""
Sends the info over the comm channel as these packets:
{
app_id: module/name,
app_name: random string,
job_id: string,
job_params: dictionary,
batch_id: string,
}
Will set packet to "does_not_exist" if job_id doesn't exist.
"""
job_ids, error_ids = self._check_job_list(job_ids)
infos = dict()
for job_id in job_ids:
job = self.get_job(job_id)
infos[job_id] = {
"app_id": job.app_id,
"app_name": job.app_name,
"job_id": job_id,
"job_params": job.params,
"batch_id": job.batch_id,
}
for error_id in error_ids:
infos[error_id] = "does_not_exist"
return infos
def lookup_all_job_states(self, ignore_refresh_flag=False):
"""
Fetches states for all running jobs.
If ignore_refresh_flag is True, then returns states for all jobs this
JobManager knows about (i.e. all jobs associated with the workspace).
This returns them all as a dictionary, keyed on the job id.
:param ignore_refresh_flag: boolean - if True, ignore the usual refresh state of the job.
Even if the job is stopped, or completed, fetch and return its state from the service.
"""
jobs_to_lookup = list()
# grab the list of running job ids, so we don't run into update-while-iterating problems.
for job_id in self._running_jobs.keys():
if self._running_jobs[job_id]["refresh"] or ignore_refresh_flag:
jobs_to_lookup.append(job_id)
if len(jobs_to_lookup) > 0:
return self._construct_job_output_state_set(jobs_to_lookup)
return dict()
def register_new_job(self, job: Job, refresh: bool = None) -> None:
"""
Registers a new Job with the manager and stores the job locally.
This should only be invoked when a new Job gets started.
Parameters:
-----------
job : biokbase.narrative.jobs.job.Job object
The new Job that was started.
"""
kblogging.log_event(self._log, "register_new_job", {"job_id": job.job_id})
if refresh is None:
refresh = not job.was_terminal()
self._running_jobs[job.job_id] = {"job": job, "refresh": refresh}
def get_job(self, job_id):
"""
Returns a Job with the given job_id.
Raises a JobIDException if not found.
"""
self._check_job(job_id)
return self._running_jobs[job_id]["job"]
def get_job_logs(
self,
job_id: str,
first_line: int = 0,
num_lines: int = None,
latest_only: bool = False,
) -> dict:
"""
Raises a JobIDException if the job_id is not registered with this JobManager.
:param job_id: str - the job id from the execution engine
:param first_line: int - the first line to be requested by the log. 0-indexed. If < 0,
this will be set to 0
:param num_lines: int - the maximum number of lines to return.
if < 0, will be reset to 0.
if None, then will not be considered, and just return all the lines.
:param latest_only: bool - if True, will only return the most recent max_lines
of logs. This overrides the first_line parameter if set to True. So if the call made
is get_job_logs(id, first_line=0, num_lines=5, latest_only=True), and there are 100
log lines available, then lines 96-100 will be returned.
:returns: dict with keys:
job_id: string
batch_id: string | None
first: int - the first line returned
max_lines: int - the number of logs lines currently available for that job
lines: list - the lines themselves, fresh from the server. These are all tiny dicts with keys
"line" - the log line string
"is_error" - either 0 or 1
"""
job = self.get_job(job_id)
if first_line < 0:
first_line = 0
if num_lines is not None and num_lines < 0:
num_lines = 0
try:
if latest_only:
(max_lines, logs) = job.log()
if num_lines is None or max_lines <= num_lines:
first_line = 0
else:
first_line = max_lines - num_lines
logs = logs[first_line:]
else:
(max_lines, logs) = job.log(first_line=first_line, num_lines=num_lines)
return {
"job_id": job.job_id,
"batch_id": job.batch_id,
"first": first_line,
"max_lines": max_lines,
"lines": logs,
}
except Exception as e:
raise transform_job_exception(e, "Unable to retrieve job logs")
def cancel_jobs(self, job_id_list: List[str]) -> dict:
"""
Cancel a list of running jobs, placing them in a canceled state
Does NOT delete the jobs.
If the job_ids are not present or are not found in the Narrative,
a ValueError is raised.
Results are returned as a dict of job status objects keyed by job id
:param job_id_list: list of strs
:return job_states: dict with keys job IDs and values job state objects
"""
job_ids, error_ids = self._check_job_list(job_id_list)
for job_id in job_ids:
if not self.get_job(job_id).was_terminal():
self._cancel_job(job_id)
job_states = self._construct_job_output_state_set(job_ids)
for job_id in error_ids:
job_states[job_id] = {
"state": {"job_id": job_id, "status": "does_not_exist"}
}
return job_states
def _cancel_job(self, job_id: str) -> None:
# Stop updating the job status while we try to cancel.
# Set the job to a special state of 'canceling' while we're doing the cancel
is_refreshing = self._running_jobs[job_id].get("refresh", False)
self._running_jobs[job_id]["refresh"] = False
self._running_jobs[job_id]["canceling"] = True
try:
clients.get("execution_engine2").cancel_job({"job_id": job_id})
except Exception as e:
raise transform_job_exception(e, "Unable to cancel job")
finally:
self._running_jobs[job_id]["refresh"] = is_refreshing
del self._running_jobs[job_id]["canceling"]
def retry_jobs(self, job_id_list: List[str]) -> List[dict]:
"""
Returns
[
{
"job": {"state": {"job_id": job_id, "status": status, ...} ...},
"retry": {"state": {"job_id": job_id, "status": status, ...} ...}
},
{
"job": {"state": {"job_id": job_id, "status": status, ...} ...},
"error": "..."
}
...
{
"job": {"state": {"job_id": job_id, "status": "does_not_exist"}},
"error": "does_not_exist"
}
]
where the innermost dictionaries are job states from ee2 and are within the
job states from job.output_state()
"""
job_ids, error_ids = self._check_job_list(job_id_list)
try:
retry_results = clients.get("execution_engine2").retry_jobs(
{"job_ids": job_ids}
)
except Exception as e:
raise transform_job_exception(e, "Unable to retry job(s)")
# for each retry result, refresh the state of the retried and new jobs
orig_ids = [result["job_id"] for result in retry_results]
retry_ids = [
result["retry_id"] for result in retry_results if "retry_id" in result
]
orig_states = self._construct_job_output_state_set(orig_ids)
retry_states = self._construct_job_output_state_set(
retry_ids, self._create_jobs(retry_ids) # add to self._running_jobs index
)
job_states = {**orig_states, **retry_states}
# fill in the job state details
for result in retry_results:
result["job"] = job_states[result["job_id"]]
del result["job_id"]
if "retry_id" in result:
result["retry"] = job_states[result["retry_id"]]
del result["retry_id"]
for job_id in error_ids:
retry_results.append(
{
"job": {"state": {"job_id": job_id, "status": "does_not_exist"}},
"error": "does_not_exist",
}
)
return retry_results
def get_job_states(self, job_ids: List[str]) -> dict:
job_ids, error_ids = self._check_job_list(job_ids)
output_states = self._construct_job_output_state_set(job_ids)
for error_id in error_ids:
output_states[error_id] = get_error_output_state(error_id)
return output_states
def modify_job_refresh(self, job_ids: List[str], update_refresh: bool) -> None:
"""
Sets the "refresh" flag for each of the given jobs, i.e. whether
lookup_all_job_states should keep polling their state.
Input validation is delegated to _check_job_list: unregistered job IDs are
ignored, and an empty list or one containing only falsy IDs raises a JobIDException.
"""
job_ids, _ = self._check_job_list(job_ids)
for job_id in job_ids:
self._running_jobs[job_id]["refresh"] = update_refresh
def update_batch_job(self, batch_id: str) -> List[str]:
"""
Update a batch job and create child jobs if necessary
"""
batch_job = self.get_job(batch_id)
if not batch_job.batch_job:
raise JobIDException(JOB_NOT_BATCH_ERR, batch_id)
child_ids = batch_job.child_jobs
reg_child_jobs = []
unreg_child_ids = []
for job_id in child_ids:
if job_id in self._running_jobs:
reg_child_jobs.append(self.get_job(job_id))
else:
unreg_child_ids.append(job_id)
unreg_child_jobs = []
if unreg_child_ids:
unreg_child_jobs = Job.from_job_ids(unreg_child_ids)
for job in unreg_child_jobs:
self.register_new_job(
job=job,
refresh=not job.was_terminal(),
)
batch_job.update_children(reg_child_jobs + unreg_child_jobs)
return [batch_id] + child_ids
|
|
import cwriting.core as core
import cwriting.node as node
import cwriting.curve as curve
import math
def makeRiseTween(d, obj, duration, real, diff=node.Placement(start=(0, -0.2, 0))):
tl = core.Timeline(d.next())
d.registerTimeline(tl)
obj.setPlacement(real.moved(diff))
obj.setVisibility(False)
obj.keyPlacement(tl)
obj.keyVisibility(tl)
objTweenIn = core.CurveTweener()
objTweenIn.setObject(obj, 'Placement', real)
objTweenIn.setCurve(curve.quadOut3)
objTweenIn.tween(duration)
d.registerTimeline(objTweenIn.getTimeline())
tl.changeTimeline(objTweenIn.getTimeline())
tl.advance(duration)
obj.setVisibility(True)
obj.keyVisibility(tl)
return tl
def makeFallTween(d, obj, duration, real, diff=node.Placement(start=(0, -0.2, 0))):
tl = core.Timeline(d.next())
d.registerTimeline(tl)
obj.setPlacement(real)
obj.setVisibility(True)
obj.keyPlacement(tl)
obj.keyVisibility(tl)
objTweenIn = core.CurveTweener()
objTweenIn.setObject(obj, 'Placement', real.moved(diff))
objTweenIn.setCurve(curve.quadIn3)
objTweenIn.tween(duration)
d.registerTimeline(objTweenIn.getTimeline())
tl.changeTimeline(objTweenIn.getTimeline())
tl.advance(duration)
obj.setVisibility(False)
obj.keyVisibility(tl)
return tl
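# Illustrative usage, mirroring how makeFallTween is driven in genSceneRain below
# (someText stands in for any registered core.Text object):
#   tl0.changeTimeline(makeRiseTween(d, someText, 1, node.Placement(start=(0, 1, -2))))
#   tl0.advance(1)
# makeRiseTween fades an object in while easing it up from `diff` below its real
# placement; makeFallTween is the mirror image, easing it down and hiding it.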
def genSceneList(d):
tl0 = core.Timeline(d.next())
d.registerTimeline(tl0)
enable = core.Timeline('_linksEnable')
d.registerTimeline(enable)
disable = core.Timeline('_linksDisable')
d.registerTimeline(disable)
scenes = d.getScenes()
y = 0
for s in scenes:
t = core.Text(d.next(), s['longname'])
enable.changeLink(t, True)
disable.changeLink(t, False)
p = node.Placement(start=(0, 0.3*(len(scenes) / 2.0 - y), 0))
p.relativeTo = 'RightWall'
t.setPlacement(p)
tl = core.Timeline(d.next())
d.registerTimeline(tl)
tl.changeTimeline(disable)
tl.startScene(s['name'])
tl.advance(s['timeline'].current())
tl.changeTimeline(enable)
t.link = node.Link()
t.link.addAction(node.TimerChange(tl, 'start'))
d.registerObject(t)
t.keyVisibility(tl0)
s['text'] = t
y += 1
tl0.advance(1)
for s in scenes:
s['text'].setVisibility(True)
s['text'].keyVisibility(tl0)
return {
'name': '_list',
'timeline': tl0
}
def genScene0(d):
tl0 = core.Timeline(d.next())
d.registerTimeline(tl0)
tlBegin = core.Timeline('begin')
d.registerTimeline(tlBegin)
exnihilo = core.Text(d.next(), 'Ex Nihilo')
exnihilo.setScale(3)
exnihilo.setVisibility(False)
exnihilo.setPlacement(node.Placement(start=(0, -2, -2)))
d.registerObject(exnihilo)
begin = core.Text(d.next(), 'begin')
begin.setVisibility(False)
begin.setPlacement(node.Placement(start=(0, -2, -3)))
begin.link = node.Link()
begin.link.addAction(node.TimerChange(tlBegin, 'start'))
d.registerObject(begin)
exnihilo.keyVisibility(tl0)
exnihiloTweenIn = core.CurveTweener()
exnihiloTweenIn.setObject(exnihilo, 'Placement', node.Placement(start=(0, 1, -2)))
exnihiloTweenIn.setCurve(curve.quadOut3)
exnihiloTweenIn.tween(3)
d.registerTimeline(exnihiloTweenIn.getTimeline())
tl0.changeTimeline(exnihiloTweenIn.getTimeline())
tl0.advance(0.5)
begin.keyVisibility(tl0)
beginTweenIn = core.CurveTweener()
beginTweenIn.setObject(begin, 'Placement', node.Placement(start=(0, 0, -2)))
beginTweenIn.setCurve(curve.quadOut3)
beginTweenIn.tween(2.5)
d.registerTimeline(beginTweenIn.getTimeline())
tl0.changeTimeline(beginTweenIn.getTimeline())
tl0.advance(2.5)
exnihilo.setVisibility(True)
exnihilo.keyVisibility(tl0)
exnihilo.keyVisibility(tlBegin)
tl0.advance(0.5)
begin.setVisibility(True)
begin.keyVisibility(tl0)
beginSfx = node.Sound(d.next(), './res/00_s00.mp3')
d.registerSound(beginSfx)
tlBegin.playSound(beginSfx)
exnihilo.keyVisibility(tlBegin)
exnihiloTweenOut = core.CurveTweener()
exnihiloTweenOut.setObject(exnihilo, 'Placement', node.Placement(start=(0, 4, -2)))
exnihiloTweenOut.setCurve(curve.quadIn3)
exnihiloTweenOut.tween(2)
d.registerTimeline(exnihiloTweenOut.getTimeline())
tlBegin.changeTimeline(exnihiloTweenOut.getTimeline())
tlBegin.advance(1)
begin.keyVisibility(tlBegin)
tlBegin.advance(1)
exnihilo.setVisibility(False)
exnihilo.keyVisibility(tlBegin)
tlBegin.advance(1)
begin.setVisibility(False)
begin.keyVisibility(tlBegin)
tlBegin.startScenes(d)
return {
'name': 'begin',
'timeline': tl0
}
def genSceneEnd(d):
tl0 = core.Timeline(d.next())
d.registerTimeline(tl0)
tl = core.Timeline(d.next())
d.registerTimeline(tl)
t = core.Text(d.next(), 'restart')
t.setPlacement(node.Placement(start=(0, 0, -2)))
t.link = node.Link()
t.link.addAction(node.TimerChange(tl, 'start'))
d.registerObject(t)
tl0.advance(1)
t.keyVisibility(tl0)
tl0.advance(1)
t.setVisibility(True)
t.keyVisibility(tl0)
t.keyVisibility(tl)
tTweenOut = core.CurveTweener()
tTweenOut.setObject(t, 'Placement', node.Placement(start=(0, 0, 4)))
tTweenOut.setCurve(curve.quadIn3)
tTweenOut.tween(2)
d.registerTimeline(tTweenOut.getTimeline())
tl.changeTimeline(tTweenOut.getTimeline())
tl.advance(2)
t.setVisibility(False)
t.keyVisibility(tl)
tl.advance(1)
tl.restart()
return {
'name': 'end',
'timeline': tl0
}
def genSceneInTheBeginning(d):
tl0 = core.Timeline('scene:inTheBeginning')
d.registerTimeline(tl0)
tl0.advance(1)
inTheBeginning = core.Text(d.next(), 'In the beginning')
inTheBeginning.setPlacement(node.Placement(start=(0, 1, -2)))
d.registerObject(inTheBeginning)
inTheBeginningSound = node.Sound(d.next(), './res/01_l00.mp3')
d.registerSound(inTheBeginningSound)
tl0.playSound(inTheBeginningSound)
inTheBeginning.keyVisibility(tl0)
tl0.advance(0.05)
inTheBeginning.setVisibility(True)
inTheBeginning.keyVisibility(tl0)
tl0.advance(1)
thereWasNothing = core.Text(d.next(), 'there was nothing.')
thereWasNothing.setPlacement(node.Placement(start=(0, 0.8, -2)))
d.registerObject(thereWasNothing)
thereWasNothing.keyVisibility(tl0)
inTheBeginningTweenUp = core.CurveTweener()
inTheBeginningTweenUp.setObject(inTheBeginning, 'Placement', node.Placement(start=(0, 1.2, -2)))
inTheBeginningTweenUp.setCurve(curve.quadOut3)
inTheBeginningTweenUp.tween(1)
d.registerTimeline(inTheBeginningTweenUp.getTimeline())
tl0.changeTimeline(inTheBeginningTweenUp.getTimeline())
tl0.advance(1)
thereWasNothing.setVisibility(True)
thereWasNothing.keyVisibility(tl0)
tl0.advance(2)
inTheBeginning.setVisibility(True)
inTheBeginning.keyVisibility(tl0)
thereWasNothing.setVisibility(True)
thereWasNothing.keyVisibility(tl0)
inTheBeginningTweenOut = core.CurveTweener()
inTheBeginningTweenOut.setObject(inTheBeginning, 'Placement', node.Placement(start=(0, 1.2, -6)))
inTheBeginningTweenOut.setCurve(curve.quadIn3)
inTheBeginningTweenOut.tween(2)
d.registerTimeline(inTheBeginningTweenOut.getTimeline())
tl0.changeTimeline(inTheBeginningTweenOut.getTimeline())
thereWasNothingTweenOut = core.CurveTweener()
thereWasNothingTweenOut.setObject(thereWasNothing, 'Placement', node.Placement(start=(0, 0.8, -6)))
thereWasNothingTweenOut.setCurve(curve.quadIn3)
thereWasNothingTweenOut.tween(2)
d.registerTimeline(thereWasNothingTweenOut.getTimeline())
tl0.changeTimeline(thereWasNothingTweenOut.getTimeline())
tl0.advance(2)
inTheBeginning.setVisibility(False)
inTheBeginning.keyVisibility(tl0)
thereWasNothing.setVisibility(False)
thereWasNothing.keyVisibility(tl0)
fromNothing = core.Text(d.next(), "From nothing,\nthe world began")
fromNothing.setPlacement(node.Placement(start=(0, 1.2, -2)))
fromNothing.breakApart()
d.registerObject(fromNothing)
fromNothingSound = node.Sound(d.next(), './res/01_l01.mp3')
d.registerSound(fromNothingSound)
fromNothingTweensIn = fromNothing.createTweenSet('Visibility', (lambda t: node.Boolean('Visibility', True)), (lambda t: curve.floor1))
fromNothingTweensIn.register(d)
fromNothingTweensIn.setLazyKeying(True)
tlFromNothingIn = fromNothingTweensIn.tweenAcross(0.1, 1.5)
d.registerTimeline(tlFromNothingIn)
tl0.changeTimeline(tlFromNothingIn)
tl0.playSound(fromNothingSound)
tl0.advance(5)
fromNothingTweensOut1 = fromNothing.createTweenSetBackwards('Visibility', (lambda t: node.Boolean('Visibility', False)), (lambda t: curve.floor1))
fromNothingTweensOut1.register(d)
fromNothingTweensOut1.setLazyKeying(True)
tlFromNothingOut1 = fromNothingTweensOut1.tweenAcross(0.1, 1)
d.registerTimeline(tlFromNothingOut1)
tl0.changeTimeline(tlFromNothingOut1)
fromNothingTweensOut2 = fromNothing.createTweenSetBackwards('Placement', (lambda t: t.getPlacement().moved(node.Placement(start=(0, -2, 0)))), (lambda t: curve.quadIn3))
fromNothingTweensOut2.register(d)
tlFromNothingOut2 = fromNothingTweensOut2.tweenAcross(0.1, 2)
d.registerTimeline(tlFromNothingOut2)
tl0.changeTimeline(tlFromNothingOut2)
tl0.advance(4)
return {'name': 'inTheBeginning',
'longname': 'In The Beginning',
'timeline': tl0
}
def genSceneRain(d):
def waves(n):
return (lambda s, t: 0.2*math.sin(n)*(math.sin(2.5*s+n) + math.sin(2.5*t+n)) + (t-1)*(t-1)*0.1 + n/3.5 - 3.0)
tl0 = core.Timeline('scene:rain')
d.registerTimeline(tl0)
sceneText = core.Text(d.next(), 'First, the rains rained and the\n'
'seas filled. An almost endless\n'
'deluge filled the yawning void\n'
'that had preceded the oceans.')
sceneText2 = core.Text(d.next(), 'Slowly, the rains ceased and\n'
'the water stopped rising.\n'
'\n'
'The rain was done.')
sceneText.setPlacement(node.Placement(start=(0, 2.0, -2)))
d.registerObject(sceneText)
sceneText.keyPlacement(tl0)
sceneText.keyVisibility(tl0)
sceneText2.setPlacement(node.Placement(start=(0, 0.5, -2)))
d.registerObject(sceneText2)
sceneText2.keyPlacement(tl0)
sceneText2.keyVisibility(tl0)
sceneSound = node.Sound(d.next(), './res/02_l00.mp3')
sceneSound2 = node.Sound(d.next(), './res/02_l01.mp3')
d.registerSound(sceneSound)
d.registerSound(sceneSound2)
rainSfx = node.Sound(d.next(), './res/02_s00.mp3')
rainSfx.freq = 0.6
rainSfx.volume = 0.3
d.registerSound(rainSfx)
tlRain = core.Timeline(d.next())
d.registerTimeline(tlRain)
#tl0.changeTimeline(tlRain)
s = []
text = 'the rains fell, the seas filled, '
for y in range(12):
for x in range(20):
s.append(text[(x + 4*y) % len(text)])
s.append('\n')
s = ''.join(s)
theRains = core.Text(d.next(), s)
theRains.setScale(2.1)
# XXX HACK ATTACK
theRains.breakApart(lambda s, t: node.Placement(start=(s*.4, 0, t*.7 - 5)))
theRainsG = theRains.getGroup()
theRainsG.applyMap(curve.HeightMap(None, 2, waves(0)))
d.registerObject(theRains)
theRainsP = core.Text(d.next(), 'the rains')
theRainsPGroup = core.Group('rains')
theRainsPGroup.addObject(theRainsP)
theRainsPSystem = core.ParticleSystem(d.next())
theRainsPSystem.speed = 0.3
theRainsPActions = node.ParticleActionList(d.next())
theRainsPSystem.actions = theRainsPActions
theRainsPActions.setRate(2)
theRainsPGravity = node.Gravity()
theRainsPGravity.setDirection((-0.2, -0.5, 0))
theRainsPActions.addAction(theRainsPGravity)
theRainsPSystem.particles = theRainsPGroup
theRainsPSource = node.Disc()
theRainsPSource.setCenter((0, 8, 0))
theRainsPSource.setNormal((0.2, -1.0, 0))
theRainsPSource.setRadius(8)
theRainsPVel = node.Point()
theRainsPVel.setPoint((-1, -3, -2))
theRainsPActions.setSource(theRainsPSource)
theRainsPActions.setVel(theRainsPVel)
theRainsPActions.setRemoveCondition(node.Age(6))
d.registerObject(theRainsP)
d.registerObject(theRainsPSystem)
d.registerGroup(theRainsPGroup)
d.registerParticleActions(theRainsPSystem.actions)
tlWaves = core.Timeline('waves')
d.registerTimeline(tlWaves)
for t in range(72):
if t:
tlWaves.advance(1/4.0)
theRainsG.applyMap(curve.HeightMap(None, 2, waves(t*2.0/15.0*math.pi)))
theRainsG.key('Placement', tlWaves)
if t == 64:
theRainsG.set('Visibility', True)
theRainsG.key('Visibility', tlWaves)
theRainsG.set('Visibility', False)
theRainsG.key('Visibility', tlWaves)
tlWaves.advance(0.01)
#tlWaves.freeze(True)
tl0.changeTimeline(tlWaves)
theRainsG.key('Visibility', tl0)
theRainsPSystem.key('Visibility', tl0)
sceneTextTweenIn = core.CurveTweener()
sceneTextTweenIn.setObject(sceneText, 'Placement', node.Placement(start=(0, 2.2, -2)))
sceneTextTweenIn.setCurve(curve.quadOut3)
sceneTextTweenIn.tween(1)
d.registerTimeline(sceneTextTweenIn.getTimeline())
tl0.changeTimeline(sceneTextTweenIn.getTimeline())
tl0.playSound(sceneSound)
tl0.playSound(rainSfx)
tl0.advance(1)
theRainsG.set('Visibility', True)
theRainsG.key('Visibility', tl0)
theRainsPSystem.set('Visibility', True)
theRainsPSystem.key('Visibility', tl0)
sceneText.setVisibility(True)
sceneText.keyVisibility(tl0)
tl0.advance(15)
tl0.playSound(sceneSound2)
sceneText2.keyPlacement(tl0)
sceneText2.keyVisibility(tl0)
sceneText2TweenIn = core.CurveTweener()
sceneText2TweenIn.setObject(sceneText2, 'Placement', node.Placement(start=(0, 0.7, -2)))
sceneText2TweenIn.setCurve(curve.quadOut3)
sceneText2TweenIn.tween(1)
d.registerTimeline(sceneText2TweenIn.getTimeline())
tl0.changeTimeline(sceneText2TweenIn.getTimeline())
theRainsPSystem.key('Visibility', tl0)
tl0.advance(1)
sceneText2.setVisibility(True)
sceneText2.keyVisibility(tl0)
#for i in xrange(int(tl0.current()/0.5)):
# tlRain.playSound(rainSfx)
# tlRain.advance(0.5)
tl0.advance(2)
theRainsPSystem.set('Visibility', False)
theRainsPSystem.key('Visibility', tl0)
tl0.advance(5)
tl0.changeTimeline(makeFallTween(d, sceneText, 1, node.Placement(start=(0, 2.2, -2))))
tl0.changeTimeline(makeFallTween(d, sceneText2, 1, node.Placement(start=(0, 0.7, -2))))
tl0.advance(1)
return {
'name': 'rain',
'longname': 'The Rains',
'timeline': tl0
}
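# Illustrative only: the height field keyed frame-by-frame inside genSceneRain
# above is a pure function of grid position (s, t) and phase n. Sampling the
# same formula directly (no scene machinery involved) shows the ripple plus the
# slow rise; the helper below is a sketch and is not called by this script.
def _sampleWaveHeights(n, size=3):
    import math  # math is already used by genSceneRain; repeated here so the sketch is self-contained
    height = lambda s, t: (0.2*math.sin(n)*(math.sin(2.5*s+n) + math.sin(2.5*t+n))
                           + (t-1)*(t-1)*0.1 + n/3.5 - 3.0)
    return [[round(height(s, t), 3) for s in range(size)] for t in range(size)]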
def genSceneLands(d):
tl0 = core.Timeline('scene:lands')
d.registerTimeline(tl0)
sceneText = core.Text(d.next(), 'But the oceans were unending.')
sceneText2 = core.Text(d.next(), 'From beneath the infinite oceans\n'
'rose the lands. Solid stone jutted\n'
'out. The mountains towered, and\n'
'slowly the land emerged from the\n'
'ocean. There was a terminus\n'
'setting apart the seas.')
sceneText.setPlacement(node.Placement(start=(0, 2.4, -2)))
sceneText2.setPlacement(node.Placement(start=(0, 1.1, -2)))
d.registerObject(sceneText)
d.registerObject(sceneText2)
sceneSound = node.Sound(d.next(), './res/03_l00.mp3')
sceneSound2 = node.Sound(d.next(), './res/03_l01.mp3')
d.registerSound(sceneSound)
d.registerSound(sceneSound2)
sceneSfx = node.Sound(d.next(), './res/03_s00.mp3')
sceneSfx.volume = 0.7
d.registerSound(sceneSfx)
waterText = core.Text(d.next(), 'Into the horizon, into the eternity, the\n'
'waters spread forever. North, south, east,\n'
'and west: in all directions, the waters\n'
'were all that could be seen, for the waters\n'
'were all there was. Above the waters, the\n'
'skies looked down upon her brother and saw\n'
'the loneliness of his expanse. She knew\n'
'something was missing, and she knew that\n'
'there was more to come. There must be no\n'
'infinite; there must be bounds, they must\n'
'just be found. After the rains, the waters\n'
'were calm. But this too must not last, for\n'
'the bleak emptiness of the unending filled\n'
'the waters. This pure boredom of the waters,\n'
'with no waves and no motion, must be ended\n'
'somehow. The end has yet to be seen, but\n'
'there must be an end. There must be an end.\n'
'Where is the end? The end must be found. The\n'
'skies and the seas must make this end. There\n'
'will be a terminus. There must be an end.')
waterText.setScale(2)
waterText.valign = 'bottom'
waterText.setPlacement(node.Placement(start=(0, -3, 0), rotation=node.AxisRotation(axis=(1, 0, 0), rotation=-90)))
d.registerObject(waterText)
waterText.keyVisibility(tl0)
tl0.changeTimeline(makeRiseTween(d, sceneText, 1, node.Placement(start=(0, 2.6, -2))))
tl0.playSound(sceneSound)
tl0.playSound(sceneSfx)
tl0.advance(1)
waterText.setVisibility(True)
waterText.keyVisibility(tl0)
tl0.advance(2)
mountain01 = core.Text(d.next(), 'The mountains rose from beneath the seas, filling the space around them. And here did the oceans part.'.replace(' ', '\n'))
mountain01.halign = 'right'
mountain01.setScale(2)
mountain01.setPlacement(node.Placement(start=(-5, -4, -5), rotation=node.LookAt(target=(-2, -2.8, -4.5), up=(0, 0, -1))))
d.registerObject(mountain01)
mountain01.keyVisibility(tl0)
mountain01.keyPlacement(tl0)
mountain02 = core.Text(d.next(), 'No longer whole, the seas were sundered by cliffs and beaches. And beyond these shores lay more still.'.replace(' ', '\n'))
mountain02.halign = 'left'
mountain02.setScale(2)
mountain02.setPlacement(node.Placement(start=(5, -4, -5), rotation=node.LookAt(target=(2, -2.8, -4.5), up=(0, 0, -1))))
d.registerObject(mountain02)
mountain02.keyVisibility(tl0)
mountain02.keyPlacement(tl0)
mountain01TweenIn = core.CurveTweener()
mountain01TweenIn.setObject(mountain01, 'Placement', mountain01.getPlacement().moved(node.Placement(start=(-0.2, 1, 0), rotation=node.LookAt(up=(0, 0, -1)))))
mountain01TweenIn.setCurve(curve.quadOut3)
mountain01TweenIn.tween(2)
d.registerTimeline(mountain01TweenIn.getTimeline())
tl0.changeTimeline(mountain01TweenIn.getTimeline())
mountain02TweenIn = core.CurveTweener()
mountain02TweenIn.setObject(mountain02, 'Placement', mountain02.getPlacement().moved(node.Placement(start=(0.2, 1, 0), rotation=node.LookAt(up=(0, 0, -1)))))
mountain02TweenIn.setCurve(curve.quadOut3)
mountain02TweenIn.tween(2)
d.registerTimeline(mountain02TweenIn.getTimeline())
tl0.changeTimeline(mountain02TweenIn.getTimeline())
tl0.advance(1)
mountain01.setVisibility(True)
mountain01.keyVisibility(tl0)
mountain02.setVisibility(True)
mountain02.keyVisibility(tl0)
tl0.playSound(sceneSound2)
tl0.changeTimeline(makeRiseTween(d, sceneText2, 1, node.Placement(start=(0, 1.3, -2))))
tl0.advance(15)
tl0.changeTimeline(makeFallTween(d, sceneText, 1, node.Placement(start=(0, 2.6, -2))))
tl0.changeTimeline(makeFallTween(d, sceneText2, 1, node.Placement(start=(0, 1.3, -2))))
tl0.changeTimeline(makeFallTween(d, waterText, 1, node.Placement(start=(0, -3, 0), rotation=node.AxisRotation(axis=(1, 0, 0), rotation=-90)), diff=node.Placement(start=(0, 0, 2))))
mountain01.keyVisibility(tl0)
mountain01TweenOut = core.CurveTweener()
mountain01TweenOut.setObject(mountain01, 'Placement', mountain01.getPlacement().moved(node.Placement(start=(-0.5, 1, 0), rotation=node.LookAt(target=(-0.2, 0, 0), up=(0, 0, -1)))))
mountain01TweenOut.setCurve(curve.quadIn3)
mountain01TweenOut.tween(1)
d.registerTimeline(mountain01TweenOut.getTimeline())
tl0.changeTimeline(mountain01TweenOut.getTimeline())
mountain02.keyVisibility(tl0)
mountain02TweenOut = core.CurveTweener()
mountain02TweenOut.setObject(mountain02, 'Placement', mountain02.getPlacement().moved(node.Placement(start=(0.5, 1, 0), rotation=node.LookAt(target=(0.2, 0, 0), up=(0, 0, -1)))))
mountain02TweenOut.setCurve(curve.quadIn3)
mountain02TweenOut.tween(1)
d.registerTimeline(mountain02TweenOut.getTimeline())
tl0.changeTimeline(mountain02TweenOut.getTimeline())
tl0.advance(1)
mountain01.setVisibility(False)
mountain01.keyVisibility(tl0)
mountain02.setVisibility(False)
mountain02.keyVisibility(tl0)
tl0.advance(0.2)
return {
'name': 'lands',
'longname': 'The Lands',
'timeline': tl0
}
def genSceneStillYoung(d):
tl0 = core.Timeline('scene:stillyoung')
d.registerTimeline(tl0)
sceneText = core.Text(d.next(), 'The world was still young,\n'
'and restless. For eons, the\n'
'skies battled against their\n'
'younger siblings. The skies\n'
'thundered bolts of raw power,\n'
'the lands erupted molten lava\n'
'of pure heat, and the oceans\n'
'boiled clouds of solid steam.')
sceneText.setPlacement(node.Placement(start=(0, 1.1, -2)))
d.registerObject(sceneText)
sceneSound = node.Sound(d.next(), './res/04_l00.mp3')
d.registerSound(sceneSound)
sceneSfx = node.Sound(d.next(), './res/04_s00.mp3')
sceneSfx.volume = 0.3
d.registerSound(sceneSfx)
tl0.playSound(sceneSfx)
tl0.advance(0.5)
tl0.playSound(sceneSound)
tl0.changeTimeline(makeRiseTween(d, sceneText, 1, node.Placement(start=(0, 1.3, -2))))
power = core.Text(d.next(), ' er pow \n'
' powe power \n'
'power power power power power power were power\n'
' wer er po \n'
' power po \n')
powerGroup = core.Group('power')
powerGroup.addObject(power)
powerSystem = core.ParticleSystem(d.next())
powerActions = node.ParticleActionList(d.next())
powerSystem.actions = powerActions
powerActions.setRate(0.02)
powerGravity = node.Gravity()
powerGravity.setDirection((0, 0.05, 0))
powerActions.addAction(powerGravity)
powerSystem.particles = powerGroup
powerSource = node.Box()
powerSource.setP1((-2, 6, -2))
powerSource.setP2((2, 8, 2))
powerVel = node.Box()
powerVel.setP1((-0.5, -2, -0.5))
powerVel.setP2((0.5, -2, 0))
powerActions.setSource(powerSource)
powerActions.setVel(powerVel)
powerRemoveCondition = node.Plane()
powerRemoveCondition.setPoint((0, -10, 0))
powerRemoveCondition.setNormal((0, 1, 0))
powerActions.setRemoveCondition(node.Position(powerRemoveCondition))
d.registerObject(power)
d.registerObject(powerSystem)
d.registerGroup(powerGroup)
d.registerParticleActions(powerActions)
powerSystem.key('Visibility', tl0)
lava = core.Text(d.next(), 'lav')
lava.breakApart()
lavaGroup = lava.getGroup()
lavaActions = node.ParticleActionList(d.next())
lavaActions.setRate(10)
lavaGravity = node.Gravity()
lavaGravity.setDirection((0, -1, 0))
lavaActions.addAction(lavaGravity)
lavaSource = node.Plane()
lavaSource.setNormal((0, 1, 0))
lavaSource.setPoint((0, 0, 0))
lavaVel = node.Cone()
lavaVel.setApex((0, 0, 0))
lavaVel.setBaseCenter((0, 3, 0))
lavaVel.setRadius(1)
lavaActions.setSource(lavaSource)
lavaActions.setVel(lavaVel)
lavaRemoveCondition = node.Plane()
lavaRemoveCondition.setPoint((0, -0.1, 0))
lavaRemoveCondition.setNormal((0, 1, 0))
lavaActions.setRemoveCondition(node.Position(lavaRemoveCondition))
d.registerObject(lava)
d.registerGroup(lavaGroup)
d.registerParticleActions(lavaActions)
lavaSystem = core.ParticleSystem(d.next())
lavaSystem.sequential = False
lavaSystem.speed = 0.4
lavaSystem.actions = lavaActions
lavaSystem.particles = lavaGroup
lavaSystem.setPlacement(node.Placement(start=(-3.2, -4, -2.1)))
d.registerObject(lavaSystem)
lavaSystem2 = core.ParticleSystem(d.next())
lavaSystem2.sequential = False
lavaSystem2.speed = 0.4
lavaSystem2.actions = lavaActions
lavaSystem2.particles = lavaGroup
lavaSystem2.setPlacement(node.Placement(start=(-0.2, -4, -2.8)))
d.registerObject(lavaSystem2)
lavaSystem.key('Visibility', tl0)
lavaSystem2.key('Visibility', tl0)
steam = core.Text(d.next(), 'steam')
steamGroup = core.Group('steam')
steamGroup.addObject(steam)
steamActions = node.ParticleActionList(d.next())
steamActions.setRate(1)
steamGravity = node.Gravity()
steamGravity.setDirection((0, 0.1, 0))
steamActions.addAction(steamGravity)
steamSource = node.Disc()
steamSource.setCenter((0, 0, 0))
steamSource.setNormal((0, -1, 0))
steamSource.setRadius(1)
steamVel = node.Disc()
steamVel.setCenter((0, 0.1, 0))
steamVel.setNormal((0, -1, 0))
steamVel.setRadius(0.2)
steamActions.setSource(steamSource)
steamActions.setVel(steamVel)
steamActions.setRemoveCondition(node.Age(14))
d.registerObject(steam)
d.registerGroup(steamGroup)
d.registerParticleActions(steamActions)
steamSystem = core.ParticleSystem(d.next())
steamSystem.sequential = False
steamSystem.lookAtCamera = True
steamSystem.speed = 0.1
steamSystem.actions = steamActions
steamSystem.particles = steamGroup
steamSystem.setPlacement(node.Placement(start=(1.4, -4, -3.4)))
d.registerObject(steamSystem)
steamSystem2 = core.ParticleSystem(d.next())
steamSystem2.sequential = False
steamSystem2.lookAtCamera = True
steamSystem2.speed = 0.1
steamSystem2.actions = steamActions
steamSystem2.particles = steamGroup
steamSystem2.setPlacement(node.Placement(start=(3.4, -4, -2.3)))
d.registerObject(steamSystem2)
steamSystem.key('Visibility', tl0)
steamSystem2.key('Visibility', tl0)
tl0.advance(1)
powerSystem.set('Visibility', True)
powerSystem.key('Visibility', tl0)
lavaSystem.set('Visibility', True)
lavaSystem.key('Visibility', tl0)
lavaSystem2.set('Visibility', True)
lavaSystem2.key('Visibility', tl0)
steamSystem.set('Visibility', True)
steamSystem.key('Visibility', tl0)
steamSystem2.set('Visibility', True)
steamSystem2.key('Visibility', tl0)
tl0.advance(18)
sceneText.key('Visibility', tl0)
powerSystem.key('Visibility', tl0)
lavaSystem.key('Visibility', tl0)
lavaSystem2.key('Visibility', tl0)
steamSystem.key('Visibility', tl0)
steamSystem2.key('Visibility', tl0)
tl0.advance(1)
sceneText.set('Visibility', False)
sceneText.key('Visibility', tl0)
powerSystem.set('Visibility', False)
powerSystem.key('Visibility', tl0)
lavaSystem.set('Visibility', False)
lavaSystem.key('Visibility', tl0)
lavaSystem2.set('Visibility', False)
lavaSystem2.key('Visibility', tl0)
steamSystem.set('Visibility', False)
steamSystem.key('Visibility', tl0)
steamSystem2.set('Visibility', False)
steamSystem2.key('Visibility', tl0)
return {
'name': 'stillyoung',
'longname': 'The Battles',
'timeline': tl0
}
d = core.Document()
d.setTitleScene(genScene0(d))
scenes = [genSceneInTheBeginning(d),
genSceneRain(d),
genSceneLands(d),
genSceneStillYoung(d)]
for s in scenes:
d.addScene(s)
d.addScene(genSceneEnd(d))
# Scenes don't have a good method for resetting
#d.addScene(genSceneList(d))
d.save('exnihilo.xml')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import errno
import gc
import itertools
import os
import re
import shutil
import tempfile
import warnings
import numpy as np
import six
from tensorflow.compiler.tf2tensorrt.wrap_py_utils import is_tensorrt_enabled
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.eager import def_function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.tools import saved_model_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
TfTrtIntegrationTestParams = namedtuple(
"TfTrtIntegrationTestParams",
[
# A function that creates the TF graph for testing.
"graph_fn",
# A list of specifications for input tensors.
"input_specs",
# A list of specifications for output tensors.
"output_specs",
        # A list of lists of input shapes. Each shape must match the
        # corresponding element in `input_specs`.
        "input_dims",
        # A list of lists of expected output shapes. Each shape must match the
        # corresponding element in `output_specs`.
"expected_output_dims"
])
RunParams = namedtuple(
"RunParams",
[
# Whether to run the conversion online with RewriterConfig, or offline
# with TrtGraphConverter.
"convert_online",
"precision_mode",
"dynamic_engine",
"use_calibration",
"test_name",
# Is this test for TF 2.0?
"is_v2",
])
FP32 = "FP32"
FP16 = "FP16"
INT8 = "INT8"
PRECISION_MODES = [FP32, FP16, INT8]
def IsQuantizationMode(mode):
return mode == "INT8"
def IsQuantizationWithCalibration(params):
return IsQuantizationMode(params.precision_mode) and params.use_calibration
class GraphState(object):
ORIGINAL = 0
CALIBRATE = 1
INFERENCE = 2
def OptimizerDisabledRewriterConfig():
"""Returns a RewriterConfig with all default Grappler optimizers disabled."""
rewriter_config = rewriter_config_pb2.RewriterConfig()
# Turn off all default Grappler optimizers.
off = rewriter_config_pb2.RewriterConfig.OFF
rewriter_config.layout_optimizer = off
rewriter_config.constant_folding = off
rewriter_config.shape_optimization = off
rewriter_config.remapping = off
rewriter_config.arithmetic_optimization = off
rewriter_config.dependency_optimization = off
rewriter_config.loop_optimization = off
rewriter_config.function_optimization = off
rewriter_config.debug_stripper = off
rewriter_config.disable_model_pruning = True
rewriter_config.scoped_allocator_optimization = off
rewriter_config.memory_optimization = (
rewriter_config_pb2.RewriterConfig.NO_MEM_OPT)
rewriter_config.pin_to_host_optimization = off
rewriter_config.auto_parallel.enable = False
# Run only once for each enabled optimizer.
rewriter_config.meta_optimizer_iterations = (
rewriter_config_pb2.RewriterConfig.ONE)
return rewriter_config
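# Illustrative sketch (not used by the tests below): embedding the rewriter
# config above into a session ConfigProto, mirroring how _GetConfigProto()
# later wires the TensorRT rewriter config into GraphOptions. The helper name
# is hypothetical.
def ExampleOptimizerDisabledConfigProto():
  """Returns a ConfigProto that runs graphs with Grappler effectively disabled."""
  graph_options = config_pb2.GraphOptions(
      rewrite_options=OptimizerDisabledRewriterConfig())
  return config_pb2.ConfigProto(graph_options=graph_options)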
class TfTrtIntegrationTestBase(test_util.TensorFlowTestCase):
"""Class to test Tensorflow-TensorRT integration."""
@property
def trt_incompatible_op(self):
return math_ops.erf
@property
def precision_modes(self):
return ["FP32", "FP16", "INT8"]
# str is bytes in py2, but unicode in py3.
def _ToUnicode(self, s):
if six.PY2:
if isinstance(s, unicode):
return s
return s.decode("utf-8")
else:
if isinstance(s, str):
return s
return s.decode("utf-8")
def _ToBytes(self, s):
if six.PY2:
if isinstance(s, unicode):
return s.encode("utf-8")
return s
else:
if isinstance(s, str):
return s.encode("utf-8")
return s
def _ToString(self, s):
if six.PY2:
if isinstance(s, unicode):
return s.encode("utf-8")
return s
else:
if isinstance(s, str):
return s
return s.decode("utf-8")
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TfTrtIntegrationTestBase, self).__init__(methodName)
self._trt_test_params = None
def setUp(self):
"""Setup method."""
super(TfTrtIntegrationTestBase, self).setUp()
warnings.simplefilter("always")
def BuildParams(self, graph_fn, dtype, input_shapes, output_shapes):
"""Build test parameters when not considering dynamic shapes."""
def _Validate(shapes):
# Make sure all the shapes are fully specified.
for shape in shapes:
assert all(shape)
_Validate(input_shapes)
_Validate(output_shapes)
return TfTrtIntegrationTestParams(
graph_fn=graph_fn,
        # Unset the batch dim of the specs to make sure TRT can tolerate changes
        # to it.
input_specs=[
tensor_spec.TensorSpec([None] + shape[1:], dtype, "input_%d" % i)
for i, shape in enumerate(input_shapes)
],
output_specs=[
tensor_spec.TensorSpec([None] + shape[1:], dtype, "output_%d" % i)
for i, shape in enumerate(output_shapes)
],
input_dims=[input_shapes],
expected_output_dims=[output_shapes])
def GetParams(self):
"""Return a TfTrtIntegrationTestParams for test, implemented by subclass."""
raise NotImplementedError()
def GetConversionParams(self, run_params):
"""Return a TrtConversionParams for test."""
batch_list = []
for dims_list in self._GetParamsCached().input_dims:
assert dims_list
      # Each list of shapes should have the same batch size.
input_batches = [dims[0] for dims in dims_list]
assert max(input_batches) == min(input_batches)
batch_list.append(input_batches[0])
conversion_params = trt_convert.TrtConversionParams(
# We use the minimum of all the batch sizes, so when multiple different
# input shapes are provided it'll always create new engines in the
# cache, and we can therefore test the cache behavior.
rewriter_config_template=None,
max_workspace_size_bytes=1 << 25,
precision_mode=run_params.precision_mode,
minimum_segment_size=2,
is_dynamic_op=run_params.dynamic_engine,
maximum_cached_engines=1,
use_calibration=run_params.use_calibration,
max_batch_size=min(batch_list))
return conversion_params
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# Ensure use_calibration=True in case of INT8 precision
return (run_params.use_calibration or
not IsQuantizationMode(run_params.precision_mode))
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build, implemented by subclass."""
raise NotImplementedError()
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-02
def _GetParamsCached(self):
if self._trt_test_params is None:
self._trt_test_params = self.GetParams()
return self._trt_test_params
def _GetGPUOptions(self):
gpu_options = config_pb2.GPUOptions()
gpu_options.allow_growth = True
return gpu_options
def _GetConfigProto(self, run_params, graph_state):
"""Get config proto based on specific settings."""
conversion_params = self.GetConversionParams(run_params)
if graph_state == GraphState.INFERENCE and run_params.convert_online:
rewriter_cfg = trt_convert.get_tensorrt_rewriter_config(conversion_params)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_cfg)
else:
graph_options = config_pb2.GraphOptions()
if conversion_params.rewriter_config_template is not None:
graph_options.rewrite_options.CopyFrom(
conversion_params.rewriter_config_template)
config = config_pb2.ConfigProto(
gpu_options=self._GetGPUOptions(), graph_options=graph_options)
return config
def _GetFeedNames(self):
params = self._GetParamsCached()
    # Construct the feed tensor names by appending :0 to the node names.
return [spec.name + ":0" for spec in params.input_specs]
def _GetFetchNames(self):
params = self._GetParamsCached()
    # Construct the fetch tensor names by appending :0 to the node names.
return [spec.name + ":0" for spec in params.output_specs]
def _GetFeedDict(self, inputs_data):
return {name: data for name, data in zip(self._GetFeedNames(), inputs_data)}
def _RunGraphV1(self, saved_model_dir, inputs_data, config, num_runs=2):
"""Run given graphdef multiple times using TF 1.x runtime."""
params = self._GetParamsCached()
fetches = self._GetFetchNames()
g = ops.Graph()
with g.as_default():
with self.session(graph=g, config=config, use_gpu=True) as sess:
loader.load(sess, [tag_constants.SERVING], saved_model_dir)
vals = []
        # Run for each set of input shapes.
for expected_shapes, current_input_data in zip(
params.expected_output_dims, inputs_data):
val = None
for _ in range(num_runs):
new_val = sess.run(fetches, self._GetFeedDict(current_input_data))
self.assertEqual(len(expected_shapes), len(new_val))
for expected_shape, actual_val in zip(expected_shapes, new_val):
self.assertEqual(list(expected_shape), list(actual_val.shape))
if val is not None:
              # Some ops may have nondeterministic output, e.g. Conv2D may use
              # the Winograd algorithm, so we set atol/rtol to be larger than
              # 1.e-06.
self.assertAllClose(val, new_val, atol=1.e-05, rtol=1.e-05)
val = new_val
vals.append(val)
return vals
def _RunGraphV2(self, saved_model_dir, inputs_data, graph_state, num_runs=2):
"""Run given graphdef multiple times using TF 2.0 runtime."""
params = self._GetParamsCached()
root = load.load(saved_model_dir)
func = root.signatures[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
results = []
for expected_shapes, current_input_data in zip(params.expected_output_dims,
inputs_data):
val = None
for _ in range(num_runs):
feed_dict = {
params.input_specs[i].name: current_input_data[i]
for i in range(len(params.input_specs))
}
new_val = func(**feed_dict)
assert isinstance(new_val, dict)
# The key of the output map is always like output_i.
new_val = [new_val[key] for key in sorted(new_val)]
# Each element is an eager Tensor, and accessing individual elements is
# very expensive, so we convert them to a numpy array first.
new_val = [v.numpy() for v in new_val]
self.assertEqual(len(expected_shapes), len(new_val))
for expected_shape, actual_val in zip(expected_shapes, new_val):
self.assertEqual(list(expected_shape), list(actual_val.shape))
if val is not None:
self.assertAllClose(val, new_val, atol=1.e-06, rtol=1.e-06)
val = new_val
results.append(val)
return results
def _RunGraph(self,
run_params,
saved_model_dir,
inputs_data,
config,
graph_state,
num_runs=2):
params = self._GetParamsCached()
for data in inputs_data:
assert len(params.input_specs) == len(data)
if run_params.is_v2:
results = self._RunGraphV2(saved_model_dir, inputs_data, graph_state,
num_runs)
gc.collect() # Force GC to destroy the TRT engine cache.
return results
return self._RunGraphV1(saved_model_dir, inputs_data, config, num_runs)
def _CreateConverter(self, run_params, saved_model_dir, session_config,
conversion_params):
"""Return a TrtGraphConverter."""
if run_params.is_v2:
return trt_convert.TrtGraphConverterV2(
input_saved_model_dir=saved_model_dir,
conversion_params=conversion_params)
return trt_convert.TrtGraphConverter(
input_saved_model_dir=saved_model_dir,
session_config=session_config,
max_batch_size=conversion_params.max_batch_size,
max_workspace_size_bytes=conversion_params.max_workspace_size_bytes,
precision_mode=conversion_params.precision_mode,
minimum_segment_size=conversion_params.minimum_segment_size,
is_dynamic_op=conversion_params.is_dynamic_op,
maximum_cached_engines=conversion_params.maximum_cached_engines,
use_calibration=conversion_params.use_calibration)
def _GetCalibratedInferGraph(self, run_params, saved_model_dir, inputs_data):
"""Return trt converted graphdef in INT8 mode."""
conversion_params = self.GetConversionParams(run_params)
logging.info(conversion_params)
assert conversion_params.precision_mode == "INT8"
assert conversion_params.is_dynamic_op
assert conversion_params.maximum_cached_engines == 1
assert conversion_params.use_calibration
    # We only support calibrating a single engine.
# TODO(aaroey): fix this.
assert len(inputs_data) == 1
session_config = self._GetConfigProto(run_params, GraphState.CALIBRATE)
logging.info("Running calibration graph, config:\n%s", str(session_config))
converter = self._CreateConverter(run_params, saved_model_dir,
session_config, conversion_params)
int8_gdef = converter.convert()
self._VerifyGraphDef(run_params, saved_model_dir, int8_gdef,
GraphState.CALIBRATE)
converter.calibrate(
fetch_names=self._GetFetchNames(),
num_runs=5,
feed_dict_fn=lambda: self._GetFeedDict(inputs_data[0]))
trt_saved_model_dir = self._GetSavedModelDir(run_params,
GraphState.CALIBRATE)
converter.save(trt_saved_model_dir)
return trt_saved_model_dir
def _GetInferGraph(self, run_params, saved_model_dir):
"""Return trt converted graphdef."""
conversion_params = self.GetConversionParams(run_params)
logging.info(conversion_params)
session_config = self._GetConfigProto(run_params, GraphState.INFERENCE)
logging.info("Creating TRT graph for inference, config\n%s",
str(session_config))
converter = self._CreateConverter(run_params, saved_model_dir,
session_config, conversion_params)
converter.convert()
trt_saved_model_dir = self._GetSavedModelDir(run_params,
GraphState.INFERENCE)
converter.save(trt_saved_model_dir)
return trt_saved_model_dir
def _GetGraphStateLabel(self, graph_state):
if graph_state == GraphState.ORIGINAL:
return "Original"
elif graph_state == GraphState.CALIBRATE:
return "CalibEngine"
elif graph_state == GraphState.INFERENCE:
return "InferEngine"
else:
return "UnknownState"
def _WriteGraph(self, run_params, gdef, graph_state):
temp_dir = os.getenv("TRT_TEST_TMPDIR")
if not temp_dir:
return
graph_name = (
self.__class__.__name__ + "_" + run_params.test_name + "_" +
self._GetGraphStateLabel(graph_state) + ".pbtxt")
logging.info("Writing graph to %s/%s", temp_dir, graph_name)
graph_io.write_graph(gdef, temp_dir, graph_name)
def _VerifyConnections(self, expected_engines, original_gdef, converted_gdef):
old_to_new_node_map = {
self._ToString(node.name): self._ToString(node.name)
for node in original_gdef.node
}
for engine_name, node_names in expected_engines.items():
for node_name in node_names:
old_to_new_node_map[node_name] = engine_name
name_to_node_map = {
self._ToString(node.name): node for node in original_gdef.node
}
def _InputName(inp):
inp = self._ToString(inp)
prefix = ""
if inp[0] == "^":
prefix = "^"
inp = inp[1:]
parts = inp.split(":")
if len(parts) > 1 and parts[-1].isdigit():
inp = inp[:-len(parts[-1]) - 1]
return (prefix, inp)
# Compute the expected mapping from each node to its input nodes.
expected_input_map = {}
removed_const_nodes = set([
self._ToString(node.name)
for node in original_gdef.node
if node.op == "Const"
])
for node in original_gdef.node:
name_str = self._ToString(node.name)
target_node_name = old_to_new_node_map[name_str]
is_engine_op = (target_node_name != name_str)
if target_node_name not in expected_input_map:
expected_input_map[target_node_name] = set()
input_set = expected_input_map[target_node_name]
for inp in node.input:
(prefix, inp_name) = _InputName(inp)
mapped_input = old_to_new_node_map[inp_name]
# Add the input only if it's outside the segment (note that it could be
# in a different engine).
if not is_engine_op or (mapped_input != target_node_name and
name_to_node_map[inp_name].op != "Const"):
input_set.add(prefix + mapped_input)
if mapped_input in removed_const_nodes:
removed_const_nodes.remove(mapped_input)
# Remove const nodes that have no outputs.
expected_input_map = {
k: v
for k, v in expected_input_map.items()
if k not in removed_const_nodes
}
# Compute the actual mapping from each node to its input nodes.
actual_input_map = {}
for node in converted_gdef.node:
name_str = self._ToString(node.name)
actual_input_map[name_str] = set()
input_set = actual_input_map[name_str]
for inp in node.input:
(prefix, node_name) = _InputName(inp)
input_set.add(prefix + node_name)
self.assertEqual(
expected_input_map,
actual_input_map,
msg="\nexpected:\n%s\nvs actual:\n%s" %
(sorted(expected_input_map.items()), sorted(actual_input_map.items())))
def _GetGraphDef(self, run_params, gdef_or_saved_model_dir):
if isinstance(gdef_or_saved_model_dir, str):
if run_params.is_v2:
root = load.load(gdef_or_saved_model_dir)
func = root.signatures[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
gdef = func.graph.as_graph_def()
# Manually unref the loaded saved model and force GC to destroy the TRT
# engine cache after load(). There is currently a reference cycle in 2.0
# which prevents auto deletion of the resource.
# TODO(laigd): fix this.
del func
del root
gc.collect()
return gdef
return saved_model_utils.get_meta_graph_def(
gdef_or_saved_model_dir, tag_constants.SERVING).graph_def
assert isinstance(gdef_or_saved_model_dir, graph_pb2.GraphDef)
return gdef_or_saved_model_dir
def _VerifyGraphDefV1(self, run_params, original_gdef, gdef_to_verify,
graph_state):
expected_engines = self.ExpectedEnginesToBuild(run_params)
num_engines = 0
functions = [f.signature.name for f in gdef_to_verify.library.function]
for node in gdef_to_verify.node:
if node.op == "TRTEngineOp":
logging.info("Found TRTEngineOp: " + node.name)
num_engines += 1
segment_funcdef_name = node.attr["segment_func"].func.name
function_name = node.name + "_native_segment"
is_dynamic_engine = not node.attr["static_engine"].b
self.assertNotEmpty(segment_funcdef_name, node.name)
self.assertIn(function_name, functions)
        if (not IsQuantizationWithCalibration(run_params) and
            not is_dynamic_engine):
self.assertTrue(len(node.attr["serialized_segment"].s), node.name)
self.assertIn(node.name, expected_engines)
self.assertEqual(
self._ToBytes(run_params.precision_mode),
node.attr["precision_mode"].s, node.name)
self.assertEqual(run_params.dynamic_engine, is_dynamic_engine,
node.name)
self.assertEqual(node.attr["use_calibration"].b,
run_params.use_calibration, node.name)
has_calibration_data = len(node.attr["calibration_data"].s)
if (IsQuantizationWithCalibration(run_params) and
graph_state == GraphState.INFERENCE):
self.assertTrue(has_calibration_data, node.name)
else:
self.assertFalse(has_calibration_data, node.name)
if graph_state == GraphState.ORIGINAL:
self.assertEqual(0, num_engines)
else:
self.assertEqual(num_engines, len(expected_engines))
if isinstance(expected_engines, dict):
self._VerifyConnections(expected_engines, original_gdef, gdef_to_verify)
# TODO(aaroey): consider verifying the corresponding TF function.
def _VerifyGraphDefV2(self, run_params, original_gdef, gdef_to_verify,
graph_state):
if graph_state == GraphState.ORIGINAL:
return
expected_engines = self.ExpectedEnginesToBuild(run_params)
all_op_names = [node.name for node in gdef_to_verify.node]
trt_op_names = [
node.name for node in gdef_to_verify.node if node.op == "TRTEngineOp"
]
for func in gdef_to_verify.library.function:
if not re.search(r"TRTEngineOp_\d+_native_segment", func.signature.name):
for node in func.node_def:
all_op_names.append(node.name)
if node.op == "TRTEngineOp":
trt_op_names.append(node.name)
# Remove the function name prefix.
def _Canonicalize(names):
return set([self._ToString(name.split("/")[-1]) for name in names])
all_op_names = _Canonicalize(all_op_names)
trt_op_names = _Canonicalize(trt_op_names)
if isinstance(expected_engines, dict):
# For simplicity we don't verify the connections inside the engine in
# 2.0, but we still make sure that the converted ops are gone from the
# graph.
unexpected_names = set(nest.flatten(expected_engines.values()))
self.assertEmpty(
[name for name in unexpected_names if name in all_op_names])
expected_engines = set(expected_engines.keys())
self.assertEqual(set(expected_engines), trt_op_names)
def _VerifyGraphDef(self, run_params, original_gdef_or_saved_model_dir,
gdef_or_saved_model_dir_to_verify, graph_state):
original_gdef = self._GetGraphDef(run_params,
original_gdef_or_saved_model_dir)
gdef_to_verify = self._GetGraphDef(run_params,
gdef_or_saved_model_dir_to_verify)
self._WriteGraph(run_params, gdef_to_verify, graph_state)
if run_params.is_v2:
self._VerifyGraphDefV2(run_params, original_gdef, gdef_to_verify,
graph_state)
else:
self._VerifyGraphDefV1(run_params, original_gdef, gdef_to_verify,
graph_state)
def _GetSavedModelDir(self, run_params, graph_state):
test_tmpdir = os.getenv("TRT_TEST_TMPDIR")
if test_tmpdir:
saved_model_dir = os.path.join(
test_tmpdir, self.__class__.__name__ + "_" + run_params.test_name +
"_" + self._GetGraphStateLabel(graph_state))
try:
# For TF 1.x we need to make sure the output directory doesn't exist
# before exporting the saved model.
shutil.rmtree(saved_model_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return saved_model_dir
return tempfile.mkdtemp(dir=self.get_temp_dir())
def _MakeSavedModelV1(self, run_params):
"""Write the saved model as an input for testing."""
params = self._GetParamsCached()
g = ops.Graph()
with g.as_default():
inputs = []
for spec in params.input_specs:
inp = array_ops.placeholder(
dtype=spec.dtype, shape=spec.shape, name=spec.name)
inputs.append(inp)
outputs = params.graph_fn(*inputs)
if not isinstance(outputs, list) and not isinstance(outputs, tuple):
outputs = [outputs]
signature_def = signature_def_utils.build_signature_def(
inputs={inp.op.name: utils.build_tensor_info(inp) for inp in inputs},
outputs={out.op.name: utils.build_tensor_info(out) for out in outputs},
method_name=signature_constants.PREDICT_METHOD_NAME)
saved_model_dir = self._GetSavedModelDir(run_params, GraphState.ORIGINAL)
saved_model_builder = builder.SavedModelBuilder(saved_model_dir)
with self.session(
graph=g, config=self._GetConfigProto(run_params,
GraphState.ORIGINAL)) as sess:
saved_model_builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature_def
})
saved_model_builder.save()
return saved_model_dir
def _MakeSavedModelV2(self, run_params):
params = self._GetParamsCached()
root = tracking.AutoTrackable()
root.run = def_function.function(
params.graph_fn, input_signature=params.input_specs)
saved_model_dir = self._GetSavedModelDir(run_params, GraphState.ORIGINAL)
logging.info("Saving input SavedModel to %s", saved_model_dir)
save.save(root, saved_model_dir,
{signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: root.run})
return saved_model_dir
def _MakeSavedModel(self, run_params):
if run_params.is_v2:
return self._MakeSavedModelV2(run_params)
return self._MakeSavedModelV1(run_params)
def RunTest(self, run_params):
if not self.ShouldRunTest(run_params):
return
saved_model_dir = self._MakeSavedModel(run_params)
np.random.seed(12345) # Fix the seed so the test is deterministic.
inputs_data = []
input_specs = self._GetParamsCached().input_specs
for dim_list in self._GetParamsCached().input_dims:
assert len(input_specs) == len(dim_list)
current_input_data = []
for spec, np_shape in zip(input_specs, dim_list):
np_dtype = spec.dtype.as_numpy_dtype()
# Multiply the input by some constant to avoid all zeros input for
# integer types.
scale = 10.0 if np.issubdtype(np_dtype, np.integer) else 1.0
# TODO(laigd): add debug options. E.g. we can set the input data to be
# continuous natural numbers:
# seq = np.arange(np.prod(np_shape))
# seq.resize(np_shape)
        # current_input_data.append(scale * seq.astype(np_dtype))
data = (scale * np.random.random_sample(np_shape)).astype(np_dtype)
if run_params.is_v2:
with ops.device("/GPU:0"):
data = ops.convert_to_tensor(data)
current_input_data.append(data)
inputs_data.append(current_input_data)
# Verify original graph.
self._VerifyGraphDef(run_params, saved_model_dir, saved_model_dir,
GraphState.ORIGINAL)
# Run original graph without trt to get reference result.
config_no_trt = self._GetConfigProto(run_params, GraphState.ORIGINAL)
logging.info("Running original graph w/o trt, config:\n%s",
str(config_no_trt))
ref_result = self._RunGraph(run_params, saved_model_dir, inputs_data,
config_no_trt, GraphState.ORIGINAL)
# Run calibration if necessary.
if IsQuantizationWithCalibration(run_params):
infer_saved_model_dir = self._GetCalibratedInferGraph(
run_params, saved_model_dir, inputs_data)
self._VerifyGraphDef(run_params, saved_model_dir, infer_saved_model_dir,
GraphState.INFERENCE)
elif not run_params.convert_online:
infer_saved_model_dir = self._GetInferGraph(run_params, saved_model_dir)
self._VerifyGraphDef(run_params, saved_model_dir, infer_saved_model_dir,
GraphState.INFERENCE)
else:
infer_saved_model_dir = saved_model_dir
# Run inference.
infer_config = self._GetConfigProto(run_params, GraphState.INFERENCE)
logging.info("Running final inference graph, config:\n%s",
str(infer_config))
result = self._RunGraph(run_params, infer_saved_model_dir, inputs_data,
infer_config, GraphState.INFERENCE)
self.assertAllClose(
ref_result,
result,
atol=self.ExpectedAbsoluteTolerance(run_params),
rtol=self.ExpectedRelativeTolerance(run_params))
def testIdempotence(self):
    # Test that applying the TensorRT optimizer or the offline conversion tool
    # multiple times to the same graph results in the same graph.
#
# TODO(aaroey): implement this.
pass
def _GetTestConfigsV1():
"""Returns the config combinations to run the test."""
convert_online, convert_offline = True, False
dynamic_engine, static_engine = True, False
use_calibration, no_calibration = True, False
  # Add all possible test cases and let the derived test class decide
  # whether to run specific ones with ShouldRunTest().
#
# Note: INT8 without calibration behaves like FP32/FP16.
opts = list(
itertools.product([FP32, FP16, INT8], [convert_online, convert_offline],
[dynamic_engine, static_engine], [no_calibration]))
  # We always run calibration with the offline tool.
# TODO(aaroey): static calibration engine is not supported yet.
opts.append((INT8, convert_offline, dynamic_engine, use_calibration))
return opts
def _GetTestConfigsV2():
"""Returns the config combinations to run the test."""
convert_offline = False
# TODO(laigd): add support for static_engine.
dynamic_engine = True
# TODO(laigd): add support for calibration.
no_calibration = False
  # Add all possible test cases and let the derived test class decide
  # whether to run specific ones with ShouldRunTest().
#
# Note:
  # - In TF2.0 the conversion always produces a dynamic engine, and we don't
  #   test the offline mode here.
# - For simplicity we don't test online conversion which requires setting the
# Grappler config in default eager context.
# - INT8 without calibration behaves like FP32/FP16.
opts = list(
itertools.product([FP32, FP16, INT8], [convert_offline], [dynamic_engine],
[no_calibration]))
  # We always run calibration with the offline tool.
# TODO(aaroey): INT8+calibration is not supported yet in V2.
# opts.append((INT8, convert_offline, dynamic_engine, use_calibration))
return opts
def _GetTest(run_params):
"""Gets a single test method based on the parameters."""
def _Test(self):
logging.info(
"Running test %s with parameters: convert_online=%s, "
"precision_mode=%s, dynamic_engine=%s", run_params.test_name,
run_params.convert_online, run_params.precision_mode,
run_params.dynamic_engine)
self.RunTest(run_params)
return _Test
def _AddTestsFor(test_class, is_v2):
"""Adds test methods to TfTrtIntegrationTestBase for specific TF version."""
opts = _GetTestConfigsV2() if is_v2 else _GetTestConfigsV1()
for (precision_mode, convert_online, dynamic_engine, use_calibration) in opts:
conversion = "OnlineConversion" if convert_online else "OfflineConversion"
engine_type = "DynamicEngine" if dynamic_engine else "StaticEngine"
calibration_type = "UseCalibration" if use_calibration else "NoCalibration"
test_name = "%s_%s_%s_%s_%s" % ("testTfTrtV2" if is_v2 else "testTfTrt",
conversion, engine_type, precision_mode,
calibration_type)
run_params = RunParams(
convert_online=convert_online,
precision_mode=precision_mode,
dynamic_engine=dynamic_engine,
test_name=test_name,
use_calibration=use_calibration,
is_v2=is_v2)
if is_v2:
setattr(test_class, test_name,
test_util.run_v2_only(_GetTest(run_params)))
else:
setattr(test_class, test_name,
test_util.run_v1_only("", _GetTest(run_params)))
def _AddTests(test_class):
"""Adds test methods to TfTrtIntegrationTestBase."""
_AddTestsFor(test_class, is_v2=False)
_AddTestsFor(test_class, is_v2=True)
if is_tensorrt_enabled():
_AddTests(TfTrtIntegrationTestBase)
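# Illustrative only: a minimal sketch of a concrete test written against
# TfTrtIntegrationTestBase. The graph, shapes and the expected engine name are
# assumptions for demonstration and are not part of this module.
class _ExampleMatMulTest(TfTrtIntegrationTestBase):
  """Hypothetical test: two fusable ops should be converted into one engine."""
  def GraphFn(self, inp):
    # MatMul followed by Mul forms a single TRT-convertible segment (it meets
    # the minimum_segment_size of 2 set in GetConversionParams above).
    weights = array_ops.ones([32, 32], dtype=inp.dtype)
    x = math_ops.matmul(inp, weights)
    x = math_ops.multiply(x, 2.0)
    # The output node name must follow the "output_%d" convention assumed by
    # BuildParams()/_GetFetchNames().
    return array_ops.identity(x, name="output_0")
  def GetParams(self):
    # np.float32 is accepted by TensorSpec; it avoids an extra dtypes import.
    return self.BuildParams(self.GraphFn, np.float32, [[2, 32]], [[2, 32]])
  def ExpectedEnginesToBuild(self, run_params):
    # The engine name is illustrative; real tests list the exact engines they
    # expect the converter to create.
    return ["TRTEngineOp_0"]
# The testTfTrt*/testTfTrtV2* methods that _AddTests() attached to the base
# class are inherited by subclasses, so no extra registration is needed.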
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class ntp_key(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-system - based on the path /system/ntp/ntp-keys/ntp-key. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: List of NTP authentication keys
"""
__slots__ = ("_path_helper", "_extmethods", "__key_id", "__config", "__state")
_yang_name = "ntp-key"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__key_id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="key-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="leafref",
is_config=True,
)
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["system", "ntp", "ntp-keys", "ntp-key"]
def _get_key_id(self):
"""
Getter method for key_id, mapped from YANG variable /system/ntp/ntp_keys/ntp_key/key_id (leafref)
YANG Description: Reference to auth key-id list key
"""
return self.__key_id
def _set_key_id(self, v, load=False):
"""
Setter method for key_id, mapped from YANG variable /system/ntp/ntp_keys/ntp_key/key_id (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_key_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_key_id() directly.
YANG Description: Reference to auth key-id list key
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="key-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """key_id must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="key-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='leafref', is_config=True)""",
}
)
self.__key_id = t
if hasattr(self, "_set"):
self._set()
def _unset_key_id(self):
self.__key_id = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="key-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="leafref",
is_config=True,
)
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /system/ntp/ntp_keys/ntp_key/config (container)
YANG Description: Configuration data for NTP auth keys
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /system/ntp/ntp_keys/ntp_key/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration data for NTP auth keys
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /system/ntp/ntp_keys/ntp_key/state (container)
YANG Description: Operational state data for NTP auth keys
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /system/ntp/ntp_keys/ntp_key/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state data for NTP auth keys
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="container",
is_config=True,
)
key_id = __builtin__.property(_get_key_id, _set_key_id)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict(
[("key_id", key_id), ("config", config), ("state", state)]
)
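# Illustrative usage sketch (assumes the generated openconfig-system package,
# including the sibling `config` and `state` modules imported above, is
# importable; the key value "1" below is a placeholder).
def _example_ntp_key_usage():
    key = ntp_key()
    print(key._path())  # ['system', 'ntp', 'ntp-keys', 'ntp-key']
    # The list key is settable while the object is standalone, i.e. not yet
    # placed inside an instantiated list (see _set_key_id above).
    key.key_id = "1"
    print(key.key_id)
    return key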
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rewritepolicy_rewriteglobal_binding(base_resource) :
""" Binding class showing the rewriteglobal that can be bound to rewritepolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._labeltype = ""
self._labelname = ""
self._name = ""
self.___count = 0
@property
def boundto(self) :
ur"""Location where policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
ur"""Location where policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the rewrite policy.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the rewrite policy.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def priority(self) :
ur"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to invoke if the current policy rule evaluates to TRUE.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@property
def activepolicy(self) :
ur"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rewritepolicy_rewriteglobal_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rewritepolicy_rewriteglobal_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch rewritepolicy_rewriteglobal_binding resources.
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of rewritepolicy_rewriteglobal_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count rewritepolicy_rewriteglobal_binding resources configued on NetScaler.
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of rewritepolicy_rewriteglobal_binding resources.
        Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = rewritepolicy_rewriteglobal_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class rewritepolicy_rewriteglobal_binding_response(base_response) :
def __init__(self, length=1) :
self.rewritepolicy_rewriteglobal_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rewritepolicy_rewriteglobal_binding = [rewritepolicy_rewriteglobal_binding() for _ in range(length)]
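# --- Illustrative usage sketch (not part of the generated SDK file) ---
# A minimal, hypothetical example of fetching the global bindings of a
# rewrite policy. The NSIP address, credentials and policy name below are
# placeholders, and the nitro_service login/logout calls follow the usual
# NITRO SDK pattern (assumed here, not defined in this file).
def _example_fetch_rewriteglobal_bindings():
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("10.0.0.1", "http")   # placeholder NSIP
    client.login("nsroot", "password")           # placeholder credentials
    try:
        bindings = rewritepolicy_rewriteglobal_binding.get(client, "my_rewrite_policy")
        for binding in bindings or []:
            print(binding.boundto, binding.priority, binding.activepolicy)
    finally:
        client.logout()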
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.prolog
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Prolog and Prolog-like languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['PrologLexer', 'LogtalkLexer']
class PrologLexer(RegexLexer):
"""
Lexer for Prolog files.
"""
name = 'Prolog'
aliases = ['prolog']
filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl']
mimetypes = ['text/x-prolog']
flags = re.UNICODE
tokens = {
'root': [
(r'^#.*', Comment.Single),
(r'/\*', Comment.Multiline, 'nested-comment'),
(r'%.*', Comment.Single),
# character literal
(r'0\'.', String.Char),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
# literal with prepended base
(r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer),
(r'[\[\](){}|.,;!]', Punctuation),
(r':-|-->', Punctuation),
(r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double),
(r"'(?:''|[^'])*'", String.Atom), # quoted atom
# Needs to not be followed by an atom.
# (r'=(?=\s|[a-zA-Z\[])', Operator),
(r'is\b', Operator),
(r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
Operator),
(r'(mod|div|not)\b', Operator),
(r'_', Keyword), # The don't-care variable
(r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(:-|-->)',
bygroups(Name.Function, Text, Operator)), # function defn
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(\\()',
bygroups(Name.Function, Text, Punctuation)),
(u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
String.Atom), # atom, characters
# This one includes !
(u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
String.Atom), # atom, graphics
(r'[A-Z_]\w*', Name.Variable),
(u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
],
'nested-comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'/\*', Comment.Multiline, '#push'),
(r'[^*/]+', Comment.Multiline),
(r'[*/]', Comment.Multiline),
],
}
def analyse_text(text):
return ':-' in text
class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
.. versionadded:: 0.10
"""
name = 'Logtalk'
aliases = ['logtalk']
filenames = ['*.lgt', '*.logtalk']
mimetypes = ['text/x-logtalk']
tokens = {
'root': [
# Directives
(r'^\s*:-\s',Punctuation,'directive'),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/',Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Event handlers
(r'(after|before)(?=[(])', Keyword),
# Message forwarding handler
(r'forward(?=[(])', Keyword),
# Execution-context methods
(r'(parameter|this|se(lf|nder))(?=[(])', Keyword),
# Reflection
(r'(current_predicate|predicate_property)(?=[(])', Keyword),
# DCGs and term expansion
(r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword),
# Entity
(r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword),
(r'(object|protocol|category)_property(?=[(])', Keyword),
# Entity relations
(r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
(r'extends_(object|protocol|category)(?=[(])', Keyword),
(r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
(r'(instantiat|specializ)es_class(?=[(])', Keyword),
# Events
(r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
# Flags
(r'(current|set)_logtalk_flag(?=[(])', Keyword),
# Compiling, loading, and library paths
(r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make)(?=[(])', Keyword),
(r'\blogtalk_make\b', Keyword),
# Database
(r'(clause|retract(all)?)(?=[(])', Keyword),
(r'a(bolish|ssert(a|z))(?=[(])', Keyword),
# Control constructs
(r'(ca(ll|tch)|throw)(?=[(])', Keyword),
(r'(fa(il|lse)|true)\b', Keyword),
# All solutions
(r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
# Multi-threading meta-predicates
(r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword),
# Term unification
(r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword),
# Term creation and decomposition
(r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
# Evaluable functors
(r'(rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
(r'(floor|truncate|round|ceiling)(?=[(])', Keyword),
# Other arithmetic functors
(r'(cos|a(cos|sin|tan)|exp|log|s(in|qrt))(?=[(])', Keyword),
# Term testing
(r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|ground|acyclic_term)(?=[(])', Keyword),
# Term comparison
(r'compare(?=[(])', Keyword),
# Stream selection and control
(r'(curren|se)t_(in|out)put(?=[(])', Keyword),
(r'(open|close)(?=[(])', Keyword),
(r'flush_output(?=[(])', Keyword),
(r'(at_end_of_stream|flush_output)\b', Keyword),
(r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword),
# Character and byte input/output
(r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
(r'\bnl\b', Keyword),
# Term input/output
(r'read(_term)?(?=[(])', Keyword),
(r'write(q|_(canonical|term))?(?=[(])', Keyword),
(r'(current_)?op(?=[(])', Keyword),
(r'(current_)?char_conversion(?=[(])', Keyword),
# Atomic term processing
(r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
(r'(char_code|sub_atom)(?=[(])', Keyword),
(r'number_c(har|ode)s(?=[(])', Keyword),
            # Implementation-defined hook functions
(r'(se|curren)t_prolog_flag(?=[(])', Keyword),
(r'\bhalt\b', Keyword),
(r'halt(?=[(])', Keyword),
# Message sending operators
(r'(::|:|\^\^)', Operator),
# External call
(r'[{}]', Keyword),
# Logic and control
(r'(ignore|once)(?=[(])', Keyword),
(r'\brepeat\b', Keyword),
# Sorting
(r'(key)?sort(?=[(])', Keyword),
# Bitwise functors
(r'(>>|<<|/\\|\\\\|\\)', Operator),
# Predicate aliases
(r'\bas\b', Operator),
            # Arithmetic evaluation
            (r'\bis\b', Keyword),
            # Arithmetic comparison
(r'(=:=|=\\=|<|=<|>=|>)', Operator),
# Term creation and decomposition
(r'=\.\.', Operator),
# Term unification
(r'(=|\\=)', Operator),
# Term comparison
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
# Evaluable functors
(r'(//|[-+*/])', Operator),
(r'\b(e|pi|mod|rem)\b', Operator),
            # Other arithmetic functors
(r'\b\*\*\b', Operator),
# DCG rules
(r'-->', Operator),
# Control constructs
(r'([!;]|->)', Operator),
# Logic and control
(r'\\+', Operator),
# Mode operators
(r'[?@]', Operator),
# Existential quantifier
(r'\^', Operator),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
            # Punctuation
(r'[()\[\],.|]', Text),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
],
'quoted_atom': [
(r"['][']", String),
(r"[']", String, '#pop'),
(r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
(r"[^\\'\n]+", String),
(r'\\', String),
],
'directive': [
# Conditional compilation directives
(r'(el)?if(?=[(])', Keyword, 'root'),
(r'(e(lse|ndif))[.]', Keyword, 'root'),
# Entity directives
(r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
(r'(end_(category|object|protocol))[.]',Keyword, 'root'),
# Predicate scope directives
(r'(public|protected|private)(?=[(])', Keyword, 'root'),
# Other directives
(r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
(r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
(r'(built_in|dynamic|synchronized|threaded)[.]', Keyword, 'root'),
(r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|s(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
(r'op(?=[(])', Keyword, 'root'),
(r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
(r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
(r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'),
],
'entityrelations': [
(r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# End of entity-opening directive
(r'([)]\.)', Text, 'root'),
# Scope operator
(r'(::)', Operator),
            # Punctuation
(r'[()\[\],.|]', Text),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/',Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
]
}
def analyse_text(text):
if ':- object(' in text:
return 1.0
elif ':- protocol(' in text:
return 1.0
elif ':- category(' in text:
return 1.0
elif re.search('^:-\s[a-z]', text, re.M):
return 0.9
else:
return 0.0
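# --- Illustrative usage sketch (not part of the original lexer module) ---
# Highlighting a small Prolog snippet with the PrologLexer defined above,
# via pygments' standard highlight()/formatter API.
def _example_highlight_prolog():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    code = "father(X, Y) :- parent(X, Y), male(X).\n"
    return highlight(code, PrologLexer(), HtmlFormatter())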
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceAction(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'action': fields.StringField(nullable=True),
'instance_uuid': fields.UUIDField(nullable=True),
'request_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'start_time': fields.DateTimeField(nullable=True),
'finish_time': fields.DateTimeField(nullable=True),
'message': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, action, db_action):
for field in action.fields:
action[field] = db_action[field]
action._context = context
action.obj_reset_changes()
return action
@staticmethod
def pack_action_start(context, instance_uuid, action_name):
values = {'request_id': context.request_id,
'instance_uuid': instance_uuid,
'user_id': context.user_id,
'project_id': context.project_id,
'action': action_name,
'start_time': context.timestamp}
return values
@staticmethod
def pack_action_finish(context, instance_uuid):
values = {'request_id': context.request_id,
'instance_uuid': instance_uuid,
'finish_time': timeutils.utcnow()}
return values
@base.remotable_classmethod
def get_by_request_id(cls, context, instance_uuid, request_id):
db_action = db.action_get_by_request_id(context, instance_uuid,
request_id)
if db_action:
return cls._from_db_object(context, cls(), db_action)
@base.remotable_classmethod
def action_start(cls, context, instance_uuid, action_name,
want_result=True):
values = cls.pack_action_start(context, instance_uuid, action_name)
db_action = db.action_start(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_action)
@base.remotable_classmethod
def action_finish(cls, context, instance_uuid, want_result=True):
values = cls.pack_action_finish(context, instance_uuid)
db_action = db.action_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_action)
@base.remotable
def finish(self, context):
values = self.pack_action_finish(context, self.instance_uuid)
db_action = db.action_finish(context, values)
self._from_db_object(context, self, db_action)
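# --- Illustrative sketch (not part of the original nova module) ---
# pack_action_start() only reads a handful of attributes from the request
# context, so a hypothetical stand-in object is enough to show the values
# dict it builds before the row is handed to db.action_start().
def _example_pack_action_start():
    import collections
    FakeContext = collections.namedtuple(
        'FakeContext', ['request_id', 'user_id', 'project_id', 'timestamp'])
    ctx = FakeContext('req-123', 'user-1', 'proj-1', timeutils.utcnow())
    values = InstanceAction.pack_action_start(ctx, 'an-instance-uuid', 'reboot')
    # values contains request_id, instance_uuid, user_id, project_id,
    # action and start_time.
    return values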
class InstanceActionList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceAction <= version 1.1
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('InstanceAction'),
}
child_versions = {
'1.0': '1.1',
# NOTE(danms): InstanceAction was at 1.1 before we added this
}
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_actions = db.actions_get(context, instance_uuid)
return base.obj_make_list(context, cls(), InstanceAction, db_actions)
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceActionEvent(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: event_finish_with_failure decorated with serialize_args
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'event': fields.StringField(nullable=True),
'action_id': fields.IntegerField(nullable=True),
'start_time': fields.DateTimeField(nullable=True),
'finish_time': fields.DateTimeField(nullable=True),
'result': fields.StringField(nullable=True),
'traceback': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, event, db_event):
for field in event.fields:
event[field] = db_event[field]
event._context = context
event.obj_reset_changes()
return event
@staticmethod
def pack_action_event_start(context, instance_uuid, event_name):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'start_time': timeutils.utcnow()}
return values
@staticmethod
def pack_action_event_finish(context, instance_uuid, event_name,
exc_val=None, exc_tb=None):
values = {'event': event_name,
'instance_uuid': instance_uuid,
'request_id': context.request_id,
'finish_time': timeutils.utcnow()}
if exc_tb is None:
values['result'] = 'Success'
else:
values['result'] = 'Error'
values['message'] = exc_val
values['traceback'] = exc_tb
return values
@base.remotable_classmethod
def get_by_id(cls, context, action_id, event_id):
db_event = db.action_event_get_by_id(context, action_id, event_id)
return cls._from_db_object(context, cls(), db_event)
@base.remotable_classmethod
def event_start(cls, context, instance_uuid, event_name, want_result=True):
values = cls.pack_action_event_start(context, instance_uuid,
event_name)
db_event = db.action_event_start(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_event)
@base.serialize_args
@base.remotable_classmethod
def event_finish_with_failure(cls, context, instance_uuid, event_name,
exc_val=None, exc_tb=None, want_result=None):
values = cls.pack_action_event_finish(context, instance_uuid,
event_name, exc_val=exc_val,
exc_tb=exc_tb)
db_event = db.action_event_finish(context, values)
if want_result:
return cls._from_db_object(context, cls(), db_event)
@base.remotable_classmethod
def event_finish(cls, context, instance_uuid, event_name,
want_result=True):
return cls.event_finish_with_failure(context, instance_uuid,
event_name, exc_val=None,
exc_tb=None,
want_result=want_result)
@base.remotable
def finish_with_failure(self, context, exc_val, exc_tb):
values = self.pack_action_event_finish(context, self.instance_uuid,
self.event, exc_val=exc_val,
exc_tb=exc_tb)
db_event = db.action_event_finish(context, values)
self._from_db_object(context, self, db_event)
@base.remotable
def finish(self, context):
self.finish_with_failure(context, exc_val=None, exc_tb=None)
class InstanceActionEventList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('InstanceActionEvent'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
}
@base.remotable_classmethod
def get_by_action(cls, context, action_id):
db_events = db.action_events_get(context, action_id)
return base.obj_make_list(context, cls(context),
objects.InstanceActionEvent, db_events)
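# --- Illustrative sketch (not part of the original nova module) ---
# pack_action_event_finish() records 'Success' when no traceback is passed
# and 'Error' (plus message and traceback) otherwise. A hypothetical
# stand-in context with only a request_id is enough to show both branches.
def _example_pack_action_event_finish():
    import collections
    FakeContext = collections.namedtuple('FakeContext', ['request_id'])
    ctx = FakeContext('req-123')
    ok = InstanceActionEvent.pack_action_event_finish(
        ctx, 'an-instance-uuid', 'compute_reboot_instance')
    failed = InstanceActionEvent.pack_action_event_finish(
        ctx, 'an-instance-uuid', 'compute_reboot_instance',
        exc_val='boom', exc_tb='Traceback (most recent call last): ...')
    return ok['result'], failed['result']   # ('Success', 'Error')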
|
|
"""
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = u'<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
u'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
u'<html></html>'
"""
from __future__ import unicode_literals
import inspect
import logging
import re
import warnings
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.inspect import getargspec
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import pgettext_lazy, ugettext_lazy
from .exceptions import TemplateSyntaxError
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
TOKEN_MAPPING = {
TOKEN_TEXT: 'Text',
TOKEN_VAR: 'Var',
TOKEN_BLOCK: 'Block',
TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
ALLOWED_VARIABLE_CHARS = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.')
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
(re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
logger = logging.getLogger('django.template')
class TemplateEncodingError(Exception):
pass
@python_2_unicode_compatible
class VariableDoesNotExist(Exception):
def __init__(self, msg, params=()):
self.msg = msg
self.params = params
def __str__(self):
return self.msg % tuple(force_text(p, errors='replace') for p in self.params)
class Origin(object):
def __init__(self, name, template_name=None, loader=None):
self.name = name
self.template_name = template_name
self.loader = loader
def __str__(self):
return self.name
def __eq__(self, other):
if not isinstance(other, Origin):
return False
return (
self.name == other.name and
self.loader == other.loader
)
@property
def loader_name(self):
if self.loader:
return '%s.%s' % (
self.loader.__module__, self.loader.__class__.__name__,
)
class Template(object):
def __init__(self, template_string, origin=None, name=None, engine=None):
try:
template_string = force_text(template_string)
except UnicodeDecodeError:
raise TemplateEncodingError("Templates can only be constructed "
"from unicode or UTF-8 strings.")
# If Template is instantiated directly rather than from an Engine and
# exactly one Django template engine is configured, use that engine.
# This is required to preserve backwards-compatibility for direct use
# e.g. Template('...').render(Context({...}))
if engine is None:
from .engine import Engine
engine = Engine.get_default()
if origin is None:
origin = Origin(UNKNOWN_SOURCE)
self.name = name
self.origin = origin
self.engine = engine
self.source = template_string
self.nodelist = self.compile_nodelist()
def __iter__(self):
for node in self.nodelist:
for subnode in node:
yield subnode
def _render(self, context):
return self.nodelist.render(context)
def render(self, context):
"Display stage -- can be called many times"
context.render_context.push()
try:
if context.template is None:
with context.bind_template(self):
context.template_name = self.name
return self._render(context)
else:
return self._render(context)
finally:
context.render_context.pop()
def compile_nodelist(self):
"""
Parse and compile the template source into a nodelist. If debug
is True and an exception occurs during parsing, the exception is
        annotated with contextual line information where it occurred in the
template source.
"""
if self.engine.debug:
lexer = DebugLexer(self.source)
else:
lexer = Lexer(self.source)
tokens = lexer.tokenize()
parser = Parser(
tokens, self.engine.template_libraries, self.engine.template_builtins,
)
try:
return parser.parse()
except Exception as e:
if self.engine.debug:
e.template_debug = self.get_exception_info(e, e.token)
raise
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = escape(self.source[upto:start])
during = escape(self.source[start:end])
after = escape(self.source[end:next])
source_lines.append((num, escape(self.source[upto:next])))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# unicode string.
try:
message = force_text(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = '(Could not get exception message)'
return {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': self.origin.name,
'start': start,
'end': end,
}
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
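# --- Illustrative sketch (not part of the original module) ---
# linebreak_iter() yields the start offset of each line plus one final
# offset past the end of the source; get_exception_info() uses these
# offsets to slice out the lines surrounding a failing token.
def _demo_linebreak_iter():
    return list(linebreak_iter("first\nsecond\n"))  # [0, 6, 13, 14]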
class Token(object):
def __init__(self, token_type, contents, position=None, lineno=None):
"""
A token representing a string from the template.
token_type
One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.
contents
The token source string.
position
An optional tuple containing the start and end index of the token
in the template source. This is used for traceback information
when debug is on.
lineno
The line number the token appears on in the template source.
This is used for traceback information and gettext files.
"""
self.token_type, self.contents = token_type, contents
self.lineno = lineno
self.position = position
def __str__(self):
token_name = TOKEN_MAPPING[self.token_type]
return ('<%s token: "%s...">' %
(token_name, self.contents[:20].replace('\n', '')))
def split_contents(self):
split = []
bits = iter(smart_split(self.contents))
for bit in bits:
# Handle translation-marked template pieces
if bit.startswith(('_("', "_('")):
                sentinel = bit[2] + ')'
                trans_bit = [bit]
                while not bit.endswith(sentinel):
bit = next(bits)
trans_bit.append(bit)
bit = ' '.join(trans_bit)
split.append(bit)
return split
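# --- Illustrative sketch (not part of the original module) ---
# split_contents() splits a tag's contents on whitespace while keeping
# quoted strings (and translation-marked pieces) together as single bits.
def _demo_split_contents():
    token = Token(TOKEN_BLOCK, 'url "some/path with spaces" as target')
    return token.split_contents()
    # ['url', '"some/path with spaces"', 'as', 'target']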
class Lexer(object):
def __init__(self, template_string):
self.template_string = template_string
self.verbatim = False
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
lineno = 1
result = []
for bit in tag_re.split(self.template_string):
if bit:
result.append(self.create_token(bit, None, lineno, in_tag))
in_tag = not in_tag
lineno += bit.count('\n')
return result
def create_token(self, token_string, position, lineno, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag and token_string.startswith(BLOCK_TAG_START):
# The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
# We could do len(BLOCK_TAG_START) to be more "correct", but we've
# hard-coded the 2s here for performance. And it's not like
# the TAG_START values are going to change anytime, anyway.
block_content = token_string[2:-2].strip()
if self.verbatim and block_content == self.verbatim:
self.verbatim = False
if in_tag and not self.verbatim:
if token_string.startswith(VARIABLE_TAG_START):
token = Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
elif token_string.startswith(BLOCK_TAG_START):
if block_content[:9] in ('verbatim', 'verbatim '):
self.verbatim = 'end%s' % block_content
token = Token(TOKEN_BLOCK, block_content, position, lineno)
elif token_string.startswith(COMMENT_TAG_START):
content = ''
if token_string.find(TRANSLATOR_COMMENT_MARK):
content = token_string[2:-2].strip()
token = Token(TOKEN_COMMENT, content, position, lineno)
else:
token = Token(TOKEN_TEXT, token_string, position, lineno)
return token
class DebugLexer(Lexer):
def tokenize(self):
"""
        Split a template string into tokens and annotate each token with its
start and end position in the source. This is slower than the default
lexer so we only use it when debug is True.
"""
lineno = 1
result = []
upto = 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
token_string = self.template_string[upto:start]
result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))
lineno += token_string.count('\n')
upto = start
token_string = self.template_string[start:end]
result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))
lineno += token_string.count('\n')
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))
return result
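# --- Illustrative sketch (not part of the original module) ---
# Tokenizing a template string needs only the Lexer above; no engine or
# settings are involved at this stage.
def _demo_lexer_tokenize():
    tokens = Lexer('{% if user %}Hello, {{ user }}!{% endif %}').tokenize()
    return [(TOKEN_MAPPING[t.token_type], t.contents) for t in tokens]
    # [('Block', 'if user'), ('Text', 'Hello, '), ('Var', 'user'),
    #  ('Text', '!'), ('Block', 'endif')]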
class Parser(object):
def __init__(self, tokens, libraries=None, builtins=None):
self.tokens = tokens
self.tags = {}
self.filters = {}
self.command_stack = []
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.libraries = libraries
for builtin in builtins:
self.add_library(builtin)
def parse(self, parse_until=None):
"""
        Iterate through the parser tokens and compile each one into a node.
If parse_until is provided, parsing will stop once one of the
specified tokens has been reached. This is formatted as a list of
tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
reached, raise an exception with the unclosed block tag details.
"""
if parse_until is None:
parse_until = []
nodelist = NodeList()
while self.tokens:
token = self.next_token()
# Use the raw values here for TOKEN_* for a tiny performance boost.
if token.token_type == 0: # TOKEN_TEXT
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == 1: # TOKEN_VAR
if not token.contents:
raise self.error(token, 'Empty variable tag')
try:
filter_expression = self.compile_filter(token.contents)
except TemplateSyntaxError as e:
raise self.error(token, e)
var_node = VariableNode(filter_expression)
self.extend_nodelist(nodelist, var_node, token)
elif token.token_type == 2: # TOKEN_BLOCK
try:
command = token.contents.split()[0]
except IndexError:
raise self.error(token, 'Empty block tag')
if command in parse_until:
# A matching token has been reached. Return control to
# the caller. Put the token back on the token list so the
# caller knows where it terminated.
self.prepend_token(token)
return nodelist
# Add the token to the command stack. This is used for error
# messages if further parsing fails due to an unclosed block
# tag.
self.command_stack.append((command, token))
# Get the tag callback function from the ones registered with
# the parser.
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command, parse_until)
# Compile the callback into a node object and add it to
# the node list.
try:
compiled_result = compile_func(self, token)
except Exception as e:
raise self.error(token, e)
self.extend_nodelist(nodelist, compiled_result, token)
# Compile success. Remove the token from the command stack.
self.command_stack.pop()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist
def skip_past(self, endtag):
while self.tokens:
token = self.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == endtag:
return
self.unclosed_block_tag([endtag])
def extend_nodelist(self, nodelist, node, token):
# Check that non-text nodes don't appear before an extends tag.
if node.must_be_first and nodelist.contains_nontext:
raise self.error(
token, '%r must be the first tag in the template.' % node,
)
if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
nodelist.contains_nontext = True
# Set token here since we can't modify the node __init__ method
node.token = token
nodelist.append(node)
def error(self, token, e):
"""
Return an exception annotated with the originating token. Since the
parser can be called recursively, check if a token is already set. This
ensures the innermost token is highlighted if an exception occurs,
e.g. a compile error within the body of an if statement.
"""
if not isinstance(e, Exception):
e = TemplateSyntaxError(e)
if not hasattr(e, 'token'):
e.token = token
return e
def invalid_block_tag(self, token, command, parse_until=None):
if parse_until:
raise self.error(token, "Invalid block tag: '%s', expected %s" %
(command, get_text_list(["'%s'" % p for p in parse_until])))
raise self.error(token, "Invalid block tag: '%s'" % command)
def unclosed_block_tag(self, parse_until):
command, token = self.command_stack.pop()
msg = "Unclosed tag '%s'. Looking for one of: %s." % (command, ', '.join(parse_until))
raise self.error(token, msg)
def next_token(self):
return self.tokens.pop(0)
def prepend_token(self, token):
self.tokens.insert(0, token)
def delete_first_token(self):
del self.tokens[0]
def add_library(self, lib):
self.tags.update(lib.tags)
self.filters.update(lib.filters)
def compile_filter(self, token):
"""
Convenient wrapper for FilterExpression
"""
return FilterExpression(token, self)
def find_filter(self, filter_name):
if filter_name in self.filters:
return self.filters[filter_name]
else:
raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': "\w\.",
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
"""
    Parses a variable token and its optional filters (all as a single string),
    and returns a list of tuples of the filter name and arguments.
Sample::
>>> token = 'variable|default:"Default value"|date:"Y-m-d"'
>>> p = Parser('')
>>> fe = FilterExpression(token, p)
>>> len(fe.filters)
2
>>> fe.var
<Variable: 'variable'>
"""
def __init__(self, token, parser):
self.token = token
matches = filter_re.finditer(token)
var_obj = None
filters = []
upto = 0
for match in matches:
start = match.start()
if upto != start:
raise TemplateSyntaxError("Could not parse some characters: "
"%s|%s|%s" %
(token[:upto], token[upto:start],
token[start:]))
if var_obj is None:
var, constant = match.group("var", "constant")
if constant:
try:
var_obj = Variable(constant).resolve({})
except VariableDoesNotExist:
var_obj = None
elif var is None:
raise TemplateSyntaxError("Could not find variable at "
"start of %s." % token)
else:
var_obj = Variable(var)
else:
filter_name = match.group("filter_name")
args = []
constant_arg, var_arg = match.group("constant_arg", "var_arg")
if constant_arg:
args.append((False, Variable(constant_arg).resolve({})))
elif var_arg:
args.append((True, Variable(var_arg)))
filter_func = parser.find_filter(filter_name)
self.args_check(filter_name, filter_func, args)
filters.append((filter_func, args))
upto = match.end()
if upto != len(token):
raise TemplateSyntaxError("Could not parse the remainder: '%s' "
"from '%s'" % (token[upto:], token))
self.filters = filters
self.var = var_obj
def resolve(self, context, ignore_failures=False):
if isinstance(self.var, Variable):
try:
obj = self.var.resolve(context)
except VariableDoesNotExist:
if ignore_failures:
obj = None
else:
string_if_invalid = context.template.engine.string_if_invalid
if string_if_invalid:
if '%s' in string_if_invalid:
return string_if_invalid % self.var
else:
return string_if_invalid
else:
obj = string_if_invalid
else:
obj = self.var
for func, args in self.filters:
arg_vals = []
for lookup, arg in args:
if not lookup:
arg_vals.append(mark_safe(arg))
else:
arg_vals.append(arg.resolve(context))
if getattr(func, 'expects_localtime', False):
obj = template_localtime(obj, context.use_tz)
if getattr(func, 'needs_autoescape', False):
new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
else:
new_obj = func(obj, *arg_vals)
if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
obj = mark_safe(new_obj)
elif isinstance(obj, EscapeData):
obj = mark_for_escaping(new_obj)
else:
obj = new_obj
return obj
def args_check(name, func, provided):
provided = list(provided)
# First argument, filter input, is implied.
plen = len(provided) + 1
# Check to see if a decorator is providing the real function.
func = getattr(func, '_decorated_function', func)
args, _, _, defaults = getargspec(func)
alen = len(args)
dlen = len(defaults or [])
# Not enough OR Too many
if plen < (alen - dlen) or plen > alen:
raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
(name, alen - dlen, plen))
return True
args_check = staticmethod(args_check)
def __str__(self):
return self.token
def resolve_variable(path, context):
"""
Returns the resolved variable, which may contain attribute syntax, within
the given context.
Deprecated; use the Variable class instead.
"""
warnings.warn("resolve_variable() is deprecated. Use django.template."
"Variable(path).resolve(context) instead",
RemovedInDjango20Warning, stacklevel=2)
return Variable(path).resolve(context)
class Variable(object):
"""
A template variable, resolvable against a given context. The variable may
be a hard-coded string (if it begins and ends with single or double quote
marks)::
>>> c = {'article': {'section':u'News'}}
>>> Variable('article.section').resolve(c)
u'News'
>>> Variable('article').resolve(c)
{'section': u'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = u'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
def __init__(self, var):
self.var = var
self.literal = None
self.lookups = None
self.translate = False
self.message_context = None
if not isinstance(var, six.string_types):
raise TypeError(
"Variable must be a string or number, got %s" % type(var))
try:
# First try to treat this variable as a number.
#
# Note that this could cause an OverflowError here that we're not
# catching. Since this should only happen at compile time, that's
# probably OK.
self.literal = float(var)
# So it's a float... is it an int? If the original value contained a
# dot or an "e" then it was a float, not an int.
if '.' not in var and 'e' not in var.lower():
self.literal = int(self.literal)
# "2." is invalid
if var.endswith('.'):
raise ValueError
except ValueError:
# A ValueError means that the variable isn't a number.
if var.startswith('_(') and var.endswith(')'):
# The result of the lookup should be translated at rendering
# time.
self.translate = True
var = var[2:-1]
# If it's wrapped with quotes (single or double), then
# we're also dealing with a literal.
try:
self.literal = mark_safe(unescape_string_literal(var))
except ValueError:
# Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bona fide variable
if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
raise TemplateSyntaxError("Variables and attributes may "
"not begin with underscores: '%s'" %
var)
self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
def resolve(self, context):
"""Resolve this variable against a given context."""
if self.lookups is not None:
# We're dealing with a variable that needs to be resolved
value = self._resolve_lookup(context)
else:
# We're dealing with a literal, so it's already been "resolved"
value = self.literal
if self.translate:
if self.message_context:
return pgettext_lazy(self.message_context, value)
else:
return ugettext_lazy(value)
return value
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.var)
def __str__(self):
return self.var
def _resolve_lookup(self, context):
"""
Performs resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
"""
current = context
try: # catch-all for silent variable failures
for bit in self.lookups:
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup on
# numpy < 1.9 and 1.9+ respectively
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try: # attribute lookup
# Don't return class attributes if the class is the context:
if isinstance(current, BaseContext) and getattr(type(current), bit):
raise AttributeError
current = getattr(current, bit)
except (TypeError, AttributeError) as e:
# Reraise an AttributeError raised by a @property
if (isinstance(e, AttributeError) and
not isinstance(current, BaseContext) and bit in dir(current)):
raise
try: # list-index lookup
current = current[int(bit)]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError): # unsubscriptable object
raise VariableDoesNotExist("Failed lookup for key "
"[%s] in %r",
(bit, current)) # missing attribute
if callable(current):
if getattr(current, 'do_not_call_in_templates', False):
pass
elif getattr(current, 'alters_data', False):
current = context.template.engine.string_if_invalid
else:
try: # method call (assuming no args required)
current = current()
except TypeError:
try:
inspect.getcallargs(current)
except TypeError: # arguments *were* required
current = context.template.engine.string_if_invalid # invalid method call
else:
raise
except Exception as e:
template_name = getattr(context, 'template_name', 'unknown')
logger.debug('{} - {}'.format(template_name, e))
if getattr(e, 'silent_variable_failure', False):
current = context.template.engine.string_if_invalid
else:
raise
return current
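# --- Illustrative sketch (not part of the original module) ---
# Variable distinguishes three cases at compile time: numbers become
# numeric literals, quoted strings become (safe) string literals, and
# everything else becomes a dotted lookup resolved against the context.
def _demo_variable_resolve():
    context = {'article': {'section': 'News'}}
    return (
        Variable('article.section').resolve(context),  # 'News'
        Variable('2.5').resolve(context),              # 2.5
        Variable('"hello"').resolve(context),          # 'hello'
    )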
class Node(object):
# Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes).
must_be_first = False
child_nodelists = ('nodelist',)
token = None
def render(self, context):
"""
Return the node rendered as a string.
"""
pass
def render_annotated(self, context):
"""
Render the node. If debug is True and an exception occurs during
rendering, the exception is annotated with contextual line information
where it occurred in the template. For internal usage this method is
preferred over using the render method directly.
"""
try:
return self.render(context)
except Exception as e:
if context.template.engine.debug and not hasattr(e, 'template_debug'):
e.template_debug = context.template.get_exception_info(e, self.token)
raise
def __iter__(self):
yield self
def get_nodes_by_type(self, nodetype):
"""
Return a list of all nodes (within this node and its nodelist)
of the given type
"""
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
for attr in self.child_nodelists:
nodelist = getattr(self, attr, None)
if nodelist:
nodes.extend(nodelist.get_nodes_by_type(nodetype))
return nodes
class NodeList(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
bits = []
for node in self:
if isinstance(node, Node):
bit = node.render_annotated(context)
else:
bit = node
bits.append(force_text(bit))
return mark_safe(''.join(bits))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
class TextNode(Node):
def __init__(self, s):
self.s = s
def __repr__(self):
return force_str("<Text Node: '%s'>" % self.s[:25], 'ascii',
errors='replace')
def render(self, context):
return self.s
def render_value_in_context(value, context):
"""
Converts any value to a string to become part of a rendered template. This
means escaping, if required, and conversion to a unicode object. If value
is a string, it is expected to have already been translated.
"""
value = template_localtime(value, use_tz=context.use_tz)
value = localize(value, use_l10n=context.use_l10n)
value = force_text(value)
if ((context.autoescape and not isinstance(value, SafeData)) or
isinstance(value, EscapeData)):
return conditional_escape(value)
else:
return value
class VariableNode(Node):
def __init__(self, filter_expression):
self.filter_expression = filter_expression
def __repr__(self):
return "<Variable Node: %s>" % self.filter_expression
def render(self, context):
try:
output = self.filter_expression.resolve(context)
except UnicodeDecodeError:
# Unicode conversion can fail sometimes for reasons out of our
# control (e.g. exception rendering). In that case, we fail
# quietly.
return ''
return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
"""
A utility method for parsing token keyword arguments.
:param bits: A list containing remainder of the token (split by spaces)
that is to be checked for arguments. Valid arguments will be removed
from this list.
    :param support_legacy: If set to ``True``, the legacy format
``1 as foo`` will be accepted. Otherwise, only the standard ``foo=1``
format is allowed.
:returns: A dictionary of the arguments retrieved from the ``bits`` token
list.
There is no requirement for all remaining token ``bits`` to be keyword
arguments, so the dictionary will be returned as soon as an invalid
argument format is reached.
"""
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
kwargs = {}
while bits:
if kwarg_format:
match = kwarg_re.match(bits[0])
if not match or not match.group(1):
return kwargs
key, value = match.groups()
del bits[:1]
else:
if len(bits) < 3 or bits[1] != 'as':
return kwargs
key, value = bits[2], bits[0]
del bits[:3]
kwargs[key] = parser.compile_filter(value)
if bits and not kwarg_format:
if bits[0] != 'and':
return kwargs
del bits[:1]
return kwargs
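# --- Illustrative sketch (not part of the original module) ---
# token_kwargs() consumes leading key=value bits from a tag's argument list
# and stops at the first bit that is not a keyword argument.
def _demo_token_kwargs():
    parser = Parser([])
    bits = ['greeting="hi"', 'count=3', 'positional']
    kwargs = token_kwargs(bits, parser)
    # kwargs maps 'greeting' and 'count' to FilterExpressions;
    # 'positional' is left in bits for the caller to handle.
    return sorted(kwargs), bits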
|
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals)
"""
====================
Metabolic Components
====================
:Authors:
Moritz Emanuel Beber
Nikolaus Sonnenschein
:Date:
2011-04-07
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
elements.py
"""
__all__ = ["BasicCompound", "BasicReaction", "BasicCompartment",
"BasicCompartmentCompound", "SBMLCompound", "SBMLCompartment",
"SBMLCompartmentCompound", "SBMLReaction", "KEGGReaction"]
import logging
import re
from itertools import chain
from builtins import (str, dict)
from .. import miscellaneous as misc
from ..errors import PyOrganismError
from ..base import UniqueBase
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(misc.NullHandler())
class BasicCompound(UniqueBase):
"""
The simplest form of representing a metabolic compound - just its class and
name.
"""
def __init__(self, unique_id="", **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string uniquely identifying the compound among its class.
"""
super(BasicCompound, self).__init__(unique_id=unique_id, **kw_args)
class BasicReaction(UniqueBase):
"""
The simplest form of representing a biochemical reaction - just its class
and name with substrates and products.
Notes
-----
This is useful in metabolic networks where substrates and products are
determined by the topology. Stoichiometric information is then stored on the
links themselves. The only other information stored about the reaction is
its reversibility.
"""
def __init__(self, unique_id="", reversible=False, substrates=None,
products=None, **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string uniquely identifying the reaction among its class.
reversible: bool (optional)
Reversibility information of the reaction.
substrates: dict (optional)
A map from the reaction educts to the absolute value of their
stoichiometric factors in the reaction.
products: dict (optional)
A map from the reaction products to the absolute value of their
stoichiometric factors in the reaction.
"""
super(BasicReaction, self).__init__(unique_id=unique_id, **kw_args)
self.reversible = bool(reversible)
self.substrates = misc.convert(substrates, dict, dict())
self.products = misc.convert(products, dict, dict())
def __contains__(self, compound):
"""
Parameters
----------
compound: SBMLCompound
A compound instance whose participation in the reaction is tested.
"""
return (compound in self.substrates or compound in self.products)
def __len__(self):
return len(self.substrates) + len(self.products)
def is_substrate(self, compound):
"""
Parameters
----------
compound: SBMLCompound
A compound instance whose status as educt or product is queried.
"""
return compound in self.substrates
def compounds_iter(self, coefficients=False):
"""
        Parameters
        ----------
        coefficients: bool (optional)
            Specifies whether the returned iterator should contain pairs of
            compounds with stoichiometric coefficients.
        Returns
        -------
        iterator:
            An iterator over all compounds partaking in the reaction.
"""
if coefficients:
educts_iter = ((cmpd, -factor) for (cmpd, factor) in
self.substrates.items())
products_iter = ((cmpd, factor) for (cmpd, factor) in
self.products.items())
return chain(educts_iter, products_iter)
else:
return chain(self.substrates.keys(), self.products.keys())
def stoichiometric_coefficient(self, compound):
"""
Parameters
----------
compound: SBMLCompound
A compound instance whose stoichiometric coefficient is sought for.
Returns
-------
float:
The stoichiometric coefficient of a compound in the reaction.
Coefficients of substrates are negative.
Exceptions
----------
KeyError:
In case compound is not part of the reaction.
"""
if compound in self.substrates:
return -self.substrates[compound]
elif compound in self.products:
return self.products[compound]
else:
raise KeyError("'{0}' is not participating in reaction"\
" '{1}'".format(str(compound), str(self)))
def full_form(self):
"""
Returns
-------
str:
A string representation of the reaction, e.g., '2 A + 4 B -> 1 C'
or '2 A + 4 B <=> 1 C' for a reversible reaction.
"""
        def util(compounds):
            # Materialize the compounds so the last-element check below works
            # under both Python 2 and 3 (dict views do not support indexing).
            compounds = list(compounds)
            for cmpd in compounds:
                yield str(abs(self.stoichiometric_coefficient(cmpd)))
                yield str(cmpd)
                if not (cmpd == compounds[-1]):
                    yield "+"
rxn = ["%s:" % str(self.unique_id)]
rxn.extend([e for e in util(self.substrates.keys())])
if self.reversible:
rxn.append("<=>")
else:
rxn.append("->")
rxn.extend([e for e in util(self.products.keys())])
return " ".join(rxn)
class BasicCompartment(UniqueBase):
"""
The simplest form of representing a cellular compartment - just its class
and name.
"""
def __init__(self, unique_id="", suffix="", **kw_args):
"""
Parameters
----------
unique_id: str
A string uniquely identifying the compartment among its class.
suffix: str (optional)
A string appended to compounds for input/output.
"""
super(BasicCompartment, self).__init__(unique_id=unique_id, **kw_args)
self.suffix = suffix
self._compounds = set()
self._compartmentalized = set()
def __contains__(self, element):
"""
        Tests for the existence of `element` in this compartment.
"""
if isinstance(element, BasicCompartmentCompound):
return element.compartment == self
elif isinstance(element, BasicCompound):
            return element in self._compounds
elif isinstance(element, BasicReaction):
return all(cmpd in self for cmpd in element.compounds_iter())
else:
raise PyOrganismError("unrecognised metabolic component '{0}'",
element)
def __len__(self):
return len(self._compounds)
def __iter__(self):
return iter(self._compounds)
def register(self, element):
"""
Parameters
----------
element: `BasicCompound`
Compound that is found in this compartment.
"""
if isinstance(element, BasicCompartmentCompound):
self._compounds.add(element.compound)
self._compartmentalized.add(element)
else:
self._compounds.add(element)
def iter_compounds(self):
return self.__iter__()
def iter_compartmentalized(self):
return iter(self._compartmentalized)
class BasicCompartmentCompound(BasicCompound):
"""
A compartment specific compound.
Often it is desirable to identify compounds on a per compartment basis, for
example, in FBA experiments. This class is a simple container for both the
compound instance that already exists and the compartment.
"""
def __init__(self, unique_id="", compound=None, compartment=None, **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string uniquely identifying the compartmentalized compound among
its class.
compound: BasicCompound
An instance of BasicCompound that is then attached to a compartment.
compartment: BasicCompartment
An instance of BasicCompartment in which the compound is located.
"""
super(BasicCompartmentCompound, self).__init__(unique_id=unique_id, **kw_args)
self.compound = compound
self.compartment = compartment
if not self.compartment is None:
self.compartment.register(self)
def __getattr__(self, attr):
"""
Defer unsuccessful attribute access to the compound instance.
Note
----
        The if-clause and the raised AttributeError guard against infinite
        recursion, e.g., when unpickling this object.
"""
if "compound" in self.__dict__:
return self.compound.__getattribute__(attr)
raise AttributeError("'{0}' object has no attribute '{1}'".format(
self.__class__.__name__, attr))
class SBMLCompartment(BasicCompartment):
"""
A cellular compartment as defined per SBML standard.
"""
def __init__(self, unique_id="", name="", outside=None, constant=True,
spatial_dimensions=None, size=None, units=None, **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string uniquely identifying the compartment among its class.
name: str (optional)
The full name of the compartment.
outside: str (optional)
The name of the compartment that surrounds this one.
constant: bool (optional)
Determines whether the size attribute is allowed to change during
model simulation.
spatial_dimensions: int (optional)
From 0 to 3, normal models have three dimensions.
size: float (optional)
The magnitude of the spatial_dimension in units.
units: str (optional)
A string identifying the unit in which size is measured.
Notes
-----
        The constant attribute is so far only kept for compatibility with SBML;
        it is not actually required. This behaviour may change in future.
"""
super(SBMLCompartment, self).__init__(unique_id=unique_id, **kw_args)
self.name = name
self.outside = misc.convert(outside, SBMLCompartment)
self.constant = bool(constant)
self.spatial_dimensions = misc.convert(spatial_dimensions, int)
self.size = size
self.units = units
class SBMLCompound(BasicCompound):
"""
A molecular compound as defined per SBML standard.
"""
atomic_pattern = re.compile(r"([A-Z][a-z]?)(\d*)", re.UNICODE)
def __init__(self, unique_id="", name="", formula=None, kegg_id=None,
cas_id=None, in_chl=None, in_chl_key=None, smiles=None, charge=None,
mass=None, notes=dict(), **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string uniquely identifying the compound among its class.
name: str (optional)
A string uniquely identifying the compound.
formula: str (optional)
Molecular formula as a simple string, e.g., C6H12O6.
kegg_id: str (optional)
The KEGG id of the compound.
cas_id: str (optional)
The CAS id of the compound.
        in_chl: str (optional)
            An IUPAC-compliant identifier in InChI format.
        in_chl_key: int (optional)
            A hashed key of the InChI string.
smiles: str (optional)
A SMILES representation of the compound.
charge: int (optional)
Electric charge on the compound (may be pH dependent).
mass: float (optional)
A unit-less magnitude determining the mass of the compound.
        notes: dict (optional)
Other notes about this compound usually parsed from an SBML
document.
"""
super(SBMLCompound, self).__init__(unique_id=unique_id, **kw_args)
self.name = name
self._parse_formula(formula)
self.kegg_id = kegg_id
self.cas_id = cas_id
self.in_chl = in_chl
self.in_chl_key = in_chl_key
self.smiles = smiles
self.charge = misc.convert(charge, int)
self.mass = misc.convert(mass, float)
self.notes = notes
def __contains__(self, element):
"""
        Checks for the existence of an atomic element in the compound.
"""
if len(self.formula) == 0:
LOGGER.warn("testing element against empty formula")
return element in self.formula
def _parse_formula(self, formula):
self.formula = dict()
if not formula:
return
for mobj in self.atomic_pattern.finditer(formula):
if mobj.group(2):
self.formula[mobj.group(1)] = int(mobj.group(2))
else:
self.formula[mobj.group(1)] = 1
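# A minimal, self-contained sketch of how the ``atomic_pattern`` regular
# expression above tokenizes a molecular formula; the helper name and the
# sample formula are illustrative only and not part of the original API.
def _example_parse_formula(formula="C6H12O6"):
    atomic_pattern = re.compile(r"([A-Z][a-z]?)(\d*)", re.UNICODE)
    counts = dict()
    for mobj in atomic_pattern.finditer(formula):
        counts[mobj.group(1)] = int(mobj.group(2)) if mobj.group(2) else 1
    return counts  # e.g. {'C': 6, 'H': 12, 'O': 6}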
class SBMLCompartmentCompound(BasicCompartmentCompound):
"""
A compartment specific compound.
Often it is desirable to identify compounds on a per compartment basis, for
example, in FBA experiments. This class is a simple container for both the
compound instance that already exists and the compartment.
"""
def __init__(self, unique_id="", compound=None, compartment=None, **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string uniquely identifying the compartmentalized compound among
its class.
compound: SBMLCompound
An instance of SBMLCompound that is then attached to a compartment.
compartment: SBMLCompartment
An instance of SBMLCompartment in which the compound is located.
"""
super(SBMLCompartmentCompound, self).__init__(unique_id=unique_id,
compound=compound, compartment=compartment, **kw_args)
class SBMLReaction(BasicReaction):
"""
A biochemical reaction as defined per SBML standard.
"""
def __init__(self, unique_id="", reversible=False, substrates=None,
products=None, name="", synonyms=None, rate_constant=None,
lower_bound=None, upper_bound=None, objective_coefficient=None,
flux_value=None, reduced_cost=None, notes=None, **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string uniquely identifying the reaction among its class.
reversible: bool (optional)
Whether this reaction is known to occur in both directions in an
organism.
substrates: dict (optional)
A map from the reaction educts to the absolute value of their
stoichiometric factors in the reaction.
products: dict (optional)
A map from the reaction products to the absolute value of their
stoichiometric factors in the reaction.
name: str (optional)
A string uniquely identifying the reaction.
synonyms: str (optional)
Additional identifiers of the reaction.
rate_constant: float (optional)
Unit-less specifier of the rate of the reaction at model conditions.
notes: dict (optional)
Additional notes, for example, from parsing an SBML model.
"""
super(SBMLReaction, self).__init__(unique_id=unique_id, reversible=reversible,
substrates=substrates, products=products, **kw_args)
self.name = name
self.synonyms = misc.convert(synonyms, list, list())
self.rate_constant = misc.convert(rate_constant, float)
self.lower_bound = misc.convert(lower_bound, float)
self.upper_bound = misc.convert(upper_bound, float)
self.objective_coefficient = misc.convert(objective_coefficient, float)
self.flux_value = misc.convert(flux_value, float)
self.notes = misc.convert(notes, dict, dict())
# self._consistency_check()
def _consistency_check(self):
"""
Asserts some basic consistency of the SBMLReaction instance.
With enough meta data (SBMLCompound formula, charge, or mass)
stoichiometric balancing is checked.
        Raises
        ------
        AssertionError
            If any of the given conditions is not true.
"""
# elemental balancing
if all(cmpd.formula for cmpd in self.substrates.keys() +
self.products.keys()):
pass # not implemented yet
# mass balancing
if all(cmpd.mass for cmpd in self.substrates.keys() +
self.products.keys()):
assert sum(cmpd.mass * coeff for (cmpd, coeff) in
self.substrates.items()) == sum(cmpd.mass * coeff
for (cmpd, coeff) in self.products.items()),\
"There is a mass imbalance in reaction '{0}'".format(\
self.unique_id)
# charge balancing
if all(cmpd.charge for cmpd in self.substrates.keys() +
self.products.keys()):
assert sum(cmpd.charge * coeff for (cmpd, coeff) in
self.substrates.items()) == sum(cmpd.charge * coeff
for (cmpd, coeff) in self.products.items()),\
"There is a charge imbalance in reaction '{0}'".format(\
self.unique_id)
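# A minimal, self-contained sketch of the mass-balancing test performed by
# SBMLReaction._consistency_check, using plain dictionaries instead of
# SBMLCompound instances; the compounds and masses are illustrative only.
def _example_mass_balance():
    substrates = {"glucose": 1}      # compound name -> stoichiometric factor
    products = {"fructose": 1}
    masses = {"glucose": 180.16, "fructose": 180.16}
    lhs = sum(masses[cmpd] * coeff for (cmpd, coeff) in substrates.items())
    rhs = sum(masses[cmpd] * coeff for (cmpd, coeff) in products.items())
    assert lhs == rhs, "There is a mass imbalance in the example reaction"
    return (lhs, rhs)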
class KEGGReaction(BasicReaction):
"""
    A biochemical reaction as defined per the KEGG standard.
"""
def __init__(self, unique_id="", name="", definition=None, equation=None,
rpair=None, enzyme=None, pathway=None, orthology=None, comment=None,
remark=None, reference=None, **kw_args):
"""
Parameters
----------
unique_id: str (optional)
A string, preferably the KEGG ID, uniquely identifying the reaction
among its class.
name: str (optional)
A string identifying the reaction.
rpair: dict (optional)
A dictionary whose keys are KEGG RPAIR classifiers and whose values
are lists of pairs of compounds.
"""
if kw_args:
LOGGER.debug(kw_args)
super(KEGGReaction, self).__init__(unique_id=unique_id, **kw_args)
self.name = name
self.definition = definition
self.equation = equation
self.rpair = rpair
self.enzyme = enzyme
self.pathway = pathway
self.orthology = orthology
self.comment = comment
self.remark = remark
self.reference = reference
|
|
"""
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
from __future__ import unicode_literals
import datetime
import os
import tempfile
import uuid
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six.moves import range
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
class Person(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
@python_2_unicode_compatible
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer, models.CASCADE)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self, *args, **kwargs):
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save(*args, **kwargs)
def __str__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article, models.CASCADE)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, models.CASCADE, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
@python_2_unicode_compatible
class Publication(models.Model):
title = models.CharField(max_length=30)
date_published = models.DateField()
def __str__(self):
return self.title
def default_mode():
return 'di'
def default_category():
return 3
class PublicationDefaults(models.Model):
MODE_CHOICES = (('di', 'direct'), ('de', 'delayed'))
CATEGORY_CHOICES = ((1, 'Games'), (2, 'Comics'), (3, 'Novel'))
title = models.CharField(max_length=30)
date_published = models.DateField(default=datetime.date.today)
datetime_published = models.DateTimeField(default=datetime.datetime(2000, 1, 1))
mode = models.CharField(max_length=2, choices=MODE_CHOICES, default=default_mode)
category = models.IntegerField(choices=CATEGORY_CHOICES, default=default_category)
active = models.BooleanField(default=True)
file = models.FileField(default='default.txt')
class Author(models.Model):
publication = models.OneToOneField(Publication, models.SET_NULL, null=True, blank=True)
full_name = models.CharField(max_length=255)
class Author1(models.Model):
publication = models.OneToOneField(Publication, models.CASCADE, null=False)
full_name = models.CharField(max_length=255)
@python_2_unicode_compatible
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, models.CASCADE, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %s" % (self.writer, self.age)
class Document(models.Model):
myfile = models.FileField(upload_to='unused', blank=True)
@python_2_unicode_compatible
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __str__(self):
return self.description
class CustomFileField(models.FileField):
def save_form_data(self, instance, data):
been_here = getattr(self, 'been_saved', False)
assert not been_here, "save_form_data called more than once"
setattr(self, 'been_saved', True)
class CustomFF(models.Model):
f = CustomFileField(upload_to='unused', blank=True)
class FilePathModel(models.Model):
path = models.FilePathField(path=os.path.dirname(upath(__file__)), match=r".*\.py$", blank=True)
try:
from PIL import Image # NOQA: detect if Pillow is installed
test_images = True
@python_2_unicode_compatible
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
@python_2_unicode_compatible
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
except ImportError:
test_images = False
@python_2_unicode_compatible
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __str__(self):
return self.field
class Homepage(models.Model):
url = models.URLField()
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class Triple(models.Model):
left = models.IntegerField()
middle = models.IntegerField()
right = models.IntegerField()
class Meta:
unique_together = (('left', 'middle'), ('middle', 'right'))
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, models.SET_NULL, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
@python_2_unicode_compatible
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __str__(self):
return self.key
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __str__(self):
return self.title
@python_2_unicode_compatible
class DateTimePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateTimeField(editable=False)
def __str__(self):
return self.title
class DerivedPost(Post):
pass
@python_2_unicode_compatible
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __str__(self):
return six.text_type(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super(MarkupField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
# don't allow this field to be used in form (real use-case might be
# that you know the markup will always be X, but it is among an app
# that allows the user to say it could be something else)
# regressed at r10062
return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
@python_2_unicode_compatible
class Colour(models.Model):
name = models.CharField(max_length=50)
def __iter__(self):
for number in range(5):
yield number
def __str__(self):
return self.name
class ColourfulItem(models.Model):
name = models.CharField(max_length=50)
colours = models.ManyToManyField(Colour)
class CustomErrorMessage(models.Model):
name1 = models.CharField(
max_length=50,
validators=[validators.validate_slug],
error_messages={'invalid': 'Model custom error message.'},
)
name2 = models.CharField(
max_length=50,
validators=[validators.validate_slug],
error_messages={'invalid': 'Model custom error message.'},
)
def clean(self):
if self.name1 == 'FORBIDDEN_VALUE':
raise ValidationError({'name1': [ValidationError('Model.clean() error messages.')]})
elif self.name1 == 'FORBIDDEN_VALUE2':
raise ValidationError({'name1': 'Model.clean() error messages (simpler syntax).'})
elif self.name1 == 'GLOBAL_ERROR':
raise ValidationError("Global error message.")
def today_callable_dict():
return {"last_action__gte": datetime.datetime.today()}
def today_callable_q():
return models.Q(last_action__gte=datetime.datetime.today())
class Character(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
class StumpJoke(models.Model):
most_recently_fooled = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=today_callable_dict,
related_name="+",
)
has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
# Model for #13776
class Student(models.Model):
character = models.ForeignKey(Character, models.CASCADE)
study = models.CharField(max_length=30)
# Model for #639
class Photo(models.Model):
title = models.CharField(max_length=30)
image = models.FileField(storage=temp_storage, upload_to='tests')
# Support code for the tests; this keeps track of how many times save()
# gets called on each instance.
def __init__(self, *args, **kwargs):
super(Photo, self).__init__(*args, **kwargs)
self._savecount = 0
def save(self, force_insert=False, force_update=False):
super(Photo, self).save(force_insert, force_update)
self._savecount += 1
class UUIDPK(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=30)
# Models for #24706
class StrictAssignmentFieldSpecific(models.Model):
title = models.CharField(max_length=30)
_should_error = False
def __setattr__(self, key, value):
if self._should_error is True:
raise ValidationError(message={key: "Cannot set attribute"}, code='invalid')
super(StrictAssignmentFieldSpecific, self).__setattr__(key, value)
class StrictAssignmentAll(models.Model):
title = models.CharField(max_length=30)
_should_error = False
def __setattr__(self, key, value):
if self._should_error is True:
raise ValidationError(message="Cannot set attribute", code='invalid')
super(StrictAssignmentAll, self).__setattr__(key, value)
# A model with ForeignKey(blank=False, null=True)
class Award(models.Model):
name = models.CharField(max_length=30)
character = models.ForeignKey(Character, models.SET_NULL, blank=False, null=True)
class NullableUniqueCharFieldModel(models.Model):
codename = models.CharField(max_length=50, blank=True, null=True, unique=True)
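# A minimal sketch of how a ``ModelForm`` can be derived from one of the
# models above, as described in the module docstring; ``ArticleForm`` is a
# hypothetical name used for illustration and is not part of this module.
def example_article_form():
    from django.forms import ModelForm

    class ArticleForm(ModelForm):
        class Meta:
            model = Article
            fields = ['headline', 'slug', 'pub_date', 'writer', 'article',
                      'categories', 'status']

    return ArticleForm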
|
|
"""
Wishart Random Variates
TODO want to move the specific implementation of posteriors outside of the
general `stats.rvs` libraries, since e.g. the below Wishart implementation
is only valid for vector autoregressions.
"""
from __future__ import division
import numpy as np
from rvs import RandomVariable
# Shim for old SciPy versions; the unconditional ImportError below forces
# use of the bundled _wishart implementation.
try:
raise ImportError
from scipy.stats import wishart, invwishart
except ImportError:
from _wishart import wishart, invwishart
from scipy.linalg import lapack
class Wishart(RandomVariable):
"""
Posterior calculation is for a VAR(1) process, where the endogenous
variable is shaped (M, T) where M is the number of endogenous variables
(and so also the number of equations) and T is the number of observations.
The posterior degrees of freedom calculation is simple:
.. math::
posterior\_df = prior\_df + T
The posterior scale matrix is calculated as follows:
Since each of the M equations has the same right-hand-side variables, the
provided exogenous array is also shaped (M, T) and is just the first lag of
the endogenous variable.
The M equations can be written as:
.. math::
y_i = L y' \hat \phi_i + \varepsilon_i
where :math:`y_i` is shaped :math:`T \times 1`, L is the lag operator,
:math:`y` is shaped :math:`M \times T` and :math:`\hat \phi_i` is a vector
holding the coefficients in the ith equation, shaped :math:`M \times 1`.
M residual vectors are created as :math:`e_i = y_i - L y' \hat \phi_i`
(each with shape :math:`T \times 1`) and stacked as columns to create an
:math:`M \times T` matrix :math:`e`.
Alternatively, given the :math:`M \times M` matrix :math:`\phi`, the
residual vector can be calculated as:
.. math::
e = y - \phi L y
Then the posterior scale matrix is constructed in the following way:
.. math::
        posterior\_scale = [prior\_scale^{-1} + e e']^{-1}
The result is the following:
Presuming the prior for the precision matrix of the VAR was given by
W(prior_df, prior_scale), the posterior for the precision matrix of the
VAR is given by W(posterior_df, posterior_scale).
**Inverse Wishart**
Drawing from the Wishart distribution provides draws of the precision
matrix, but what is usually required in other steps of, for example, a
Gibbs-sampling procedure is a covariance matrix (i.e. the inverse of the
precision matrix). Instead of drawing a precision matrix and then inverting
it, it is instead possible, and more computationally convenient, to draw
covariance matrices directly from the inverse Wishart distribution.
Deriving the posterior degrees of freedom and posterior scale for the case
of an inverse Wishart is not difficult due to the following result:
If :math:`S \sim W(df, scale)`, then :math:`S^{-1} \sim iW(df, scale^{-1})`
    If we had specified :math:`prior\_df_W` and :math:`prior\_scale_W` for a Wishart
distribution, then we could alternatively draw from an inverse Wishart
with parameters :math:`prior\_df_{iW} = prior\_df_{W}` and
:math:`prior\_scale_{iW} = prior\_scale_{W}^{-1}`.
The posterior degrees of freedom is the same as for the Wishart
distribution.
.. math::
posterior\_df_{*} = prior\_df_{*} + T
The posterior scale is simply the inverse of the Wishart posterior scale:
.. math::
posterior\_scale_{iW} & = posterior\_scale_W^{-1} \\
& = \left \{[ prior\_scale_W^{-1} + e e']^{-1} \right \}^{-1} \\
& = prior\_scale_{iW} + e e' \\
This saves us two matrix inversions: one in calculating the posterior
scale, and one in retrieving a draw of a covariance matrix from the draw
of a precision matrix.
**Sampling from Wishart vs. inverse Wishart**
The actual situation is more complicated than it might appear above,
because the process of sampling from an inverse Wishart distribution is
the following:
    Suppose a draw :math:`T \sim iW(df_{iW}, scale_{iW})` is required.
    1. :math:`scale_W = scale_{iW}^{-1}` is calculated.
2. :math:`S \sim W(df, scale_W)` is drawn
3. :math:`T \sim iW(df, scale_{iW})` is calculated as :math:`T = S^{-1}`
Thus it appears that by drawing from an inverse Wishart, we have simply
traded the point at which we perform the two inversions (that we had
thought we could avoid by drawing from an inverse Wishart).
However, we can further break down the steps of drawing from an inverse
Wishart as follows:
    Suppose a draw :math:`T \sim iW(df_{iW}, scale_{iW})` is required.
    1. :math:`scale_W = scale_{iW}^{-1}` is calculated.
2. :math:`S \sim W(df, scale_W)` is drawn
a. The lower triangular matrix :math:`A` is created by filling the
diagonal with the square roots of :math:`\chi^2` random variables,
          and the lower triangle with independent N(0,1) draws.
b. The Cholesky factorization :math:`DD' = scale_W` is calculated.
c. By the Bartlett (1933) decomposition, :math:`S = D A A' D'`
3. :math:`T \sim iW(df, scale_{iW})` is calculated as :math:`T = S^{-1}`
a. However, instead of calculating :math:`S` and then separately
calculating :math:`T = S^{-1}`, notice that
:math:`T = (DA)^{-1'} (DA)^{-1}`, where :math:`DA` is lower
triangular.
b. Thus, :math:`T` is constructed directly, which only requires
inverting a single lower triangular matrix.
Steps 3a-3b show the performance improvement that can be achieved by
drawing directly from the inverse Wishart rather than from the Wishart:
    we have replaced the inverse of a dense precision matrix with the
    easier inverse of a lower triangular matrix.
"""
def __init__(self, df, scale, size=1, preload=1, *args, **kwargs):
# Initialize parameters
self._frozen = wishart(df, scale)
self._rvs = self._frozen._wishart
self.df = self.prior_df = self._frozen.df
self.scale = self.prior_scale = self._frozen.scale
# Calculated quantities
self._inv_prior_scale = np.linalg.inv(self.prior_scale)
# Setup holder variables for posterior-related quantities
self._phi = None # (M x M)
self._lagged = None # (M x T)
self._endog = None # (M x T)
# Setup holder variables for calculated quantities
self._philagged = None
self._posterior_df = None
self._posterior_scale = None
self._posterior_cholesky = None
# Set the flag to use the prior
self._use_posterior = False
# Initialize the distribution
super(Wishart, self).__init__(None, size=size, preload=preload,
*args, **kwargs)
@property
def phi(self):
return self._phi
@phi.setter
def phi(self, value):
# Save the value
value = np.array(value)
# Check that dimensions match
if not value.ndim == 2:
raise ValueError('Invalid phi array dimensions. Required '
' 2-dim, got %d-dim.' % value.ndim)
if self._lagged is not None:
if not value.shape[1] == self._lagged.shape[0]:
raise ValueError('Invalid phi array dimension. Required'
' (n, %d), got %s'
% (self._lagged.shape[0], str(value.shape)))
elif self._endog is not None:
if not value.shape[0] == self._endog.shape[0]:
raise ValueError('Invalid phi array dimension. Required'
' (%d, k), got %s'
% (self._endog.shape[0], str(value.shape)))
# Set the underlying value
self._phi = value
# Clear calculated quantities
self._philagged = None
self._posterior_scale = None
self._posterior_cholesky = None
# Set the posterior flag
self._use_posterior = True
@phi.deleter
def phi(self):
# Clear phi
self._phi = None
# Clear calculated quantities
self._philagged = None
self._posterior_scale = None
self._posterior_cholesky = None
# Recalculate posterior flag
self._use_posterior = not (
self._lagged is None and
self._endog is None and
self._phi is None
)
@property
def lagged(self):
return self._lagged
@lagged.setter
def lagged(self, value):
        # Save the lagged dataset
value = np.array(value)
# Check that dimensions match
if not value.ndim == 2:
raise ValueError('Invalid lagged array dimensions. Required '
' (k, nobs), got %s' % str(value.shape))
if self._phi is not None:
if not value.shape[0] == self._phi.shape[1]:
raise ValueError('Invalid lagged array dimensions. Required'
' (%d, nobs), got %s'
% (self._phi.shape[1], str(value.shape)))
if self._endog is not None:
if not value.shape[1] == self._endog.shape[1]:
raise ValueError('Invalid lagged array dimensions.'
' Required (k, %d), got %s'
% (self._endog.shape[1], str(value.shape)))
# Set the underlying value
self._lagged = value
# Clear calculated quantities
self._philagged = None
self._posterior_scale = None
# Set the posterior flag
self._use_posterior = True
@lagged.deleter
def lagged(self):
# Clear lagged
self._lagged = None
# Clear calculated quantities
self._philagged = None
self._posterior_scale = None
self._posterior_cholesky = None
# Recalculate posterior flag
self._use_posterior = not (
self._lagged is None and
self._endog is None and
self._phi is None
)
@property
def endog(self):
return self._endog
@endog.setter
def endog(self, value):
# Save the endogenous dataset
value = np.array(value)
# Record the old nobs (so that we avoid re-caching if we don't need to)
nobs = None
if self._endog is not None:
nobs = self._endog.shape[1]
# Check that dimensions match
if not value.ndim == 2:
raise ValueError('Invalid endogenous array dimension.'
' Required (k, nobs), got %s' % str(value.shape))
if self._phi is not None:
if not value.shape[0] == self._phi.shape[0]:
raise ValueError('Invalid endogenous array dimensions.'
' Required (%d, nobs), got %s'
% (self._phi.shape[0], str(value.shape)))
if self._lagged is not None:
if not value.shape[1] == self._lagged.shape[1]:
raise ValueError('Invalid endogenous array dimensions.'
' Required (n, %d), got %s'
                                 % (self._lagged.shape[1],
                                    str(value.shape)))
# Set the underlying value
self._endog = value
# Clear calculated quantities
self._posterior_df = None
self._posterior_scale = None
self._posterior_cholesky = None
# Clear the cache (if the scale changed)
if not self._endog.shape[1] == nobs:
self._cache = None
self._cache_index = None
# Set the posterior flag
self._use_posterior = True
@endog.deleter
def endog(self):
# Clear endog
self._endog = None
# Clear calculated quantities
self._posterior_df = None
self._posterior_scale = None
self._posterior_cholesky = None
# Clear the cache (because scale will change)
self._cache = None
self._cache_index = None
# Recalculate posterior flag
self._use_posterior = not (
self._lagged is None and
self._endog is None and
self._phi is None
)
@property
def posterior_df(self):
if self._posterior_df is None:
# Get intermediate calculated quantity
if self._endog is None:
raise RuntimeError('Endogenous array is not set; cannot'
' calculate posterior degrees of freedom.')
self._posterior_df = self.prior_df + self._endog.shape[1]
return self._posterior_df
@property
def _resid(self):
# Note: does no caching, should not be called twice
# Make sure we have required quantities
if self._endog is None:
raise RuntimeError('Endogenous array is not set; cannot'
' calculate posterior scale.')
if self._lagged is None:
raise RuntimeError('Lagged array is not set; cannot'
' calculate posterior scale.')
if self._phi is None:
raise RuntimeError('Phi array is not set; cannot'
' calculate posterior scale.')
# This corresponds to a SUR model, where the residuals will
# be shaped (k x T)
if self._philagged is None:
self._philagged = np.dot(self._phi, self._lagged)
return self._endog - self._philagged
@property
def posterior_scale(self):
if self._posterior_scale is None:
resid = self._resid
# Calculate the posterior scale
# TODO inverse via Cholesky (?)
self._posterior_scale = np.linalg.inv(
self._inv_prior_scale + np.inner(resid, resid)
)
return self._posterior_scale
@property
def posterior_cholesky(self):
if self._posterior_cholesky is None:
self._posterior_cholesky = (
np.linalg.cholesky(self.posterior_scale)
)
return self._posterior_cholesky
def recache(self):
# Set the appropriate degrees of freedom parameter
if self._use_posterior:
self._frozen.df = self.posterior_df
else:
self._frozen.df = self.prior_df
# All of the cached draws are from a "standard" Wishart - meaning with
# an identity scale matrix, but with the degrees of freedom set above.
# In the `next` function, the cached variables are transformed to the
# appropriate Wishart random variable
# Re-create the cache
del self._cache
self._cache = self._rvs._standard_rvs(
# n, shape, dim, df
self._cache_n, self._cache_size, self._frozen.dim,
self._frozen.df
)
# Re-initialize the index
self._cache_index = np.ndindex(self.preload_size)
# Return the first index element
return next(self._cache_index)
def next(self):
rvs = super(Wishart, self).next()
# Transformation
if self._use_posterior:
D = self.posterior_cholesky
else:
D = self._frozen.C
if self.size == (1,):
DA = np.dot(D, rvs[0])
rvs = np.dot(DA, DA.T)
else:
for index in np.ndindex(rvs.shape[:-2]):
DA = np.dot(D, rvs[index])
rvs[index] = np.dot(DA, DA.T)
return rvs
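# A minimal NumPy sketch of the posterior quantities described in the
# Wishart docstring above (posterior_df = prior_df + T and
# posterior_scale = [prior_scale^{-1} + e e']^{-1}); the VAR(1) data,
# coefficients and prior values are illustrative only.
def _example_wishart_posterior():
    rng = np.random.RandomState(0)
    M, T = 2, 50
    endog = rng.standard_normal((M, T))    # y, shaped (M, T)
    lagged = rng.standard_normal((M, T))   # L y, shaped (M, T)
    phi = rng.standard_normal((M, M))      # VAR(1) coefficient matrix
    prior_df, prior_scale = M + 2, np.eye(M)
    resid = endog - np.dot(phi, lagged)    # e = y - phi L y
    posterior_df = prior_df + T
    posterior_scale = np.linalg.inv(
        np.linalg.inv(prior_scale) + np.inner(resid, resid))
    return posterior_df, posterior_scale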
class InverseWishart(Wishart):
# TODO: this probably should subclass RandomVariable directly, with the
# common functions separated into helpers or a second common
# superclass
def __init__(self, df, scale, size=1, preload=1, *args, **kwargs):
# Initialize the Wishart
super(InverseWishart, self).__init__(df, scale, size, preload,
*args, **kwargs)
# Replace the wishart _rvs with an invwishart
self._frozen = invwishart(self.df, self.scale)
self._rvs = self._frozen._invwishart
# df, scale are the same
# Helpers for the triangular matrix inversion
        self._trtri = lapack.get_lapack_funcs('trtri', (self.scale,))
@property
def posterior_scale(self):
if self._posterior_scale is None:
resid = self._resid
# Calculate the posterior scale
self._posterior_scale = (
self._inv_prior_scale + np.inner(resid, resid)
)
return self._posterior_scale
def next(self):
# Don't want to call the Wishart next, want to call the RandomVariable
# next independently to get the standard Wishart distributed rvs
rvs = RandomVariable.next(self)
# Transformation
if self._use_posterior:
D = self.posterior_cholesky
else:
D = self._frozen.C
if self.size == (1,):
DA = np.dot(D, rvs[0])
DA, info = self._trtri(DA, lower=True)
if info > 0:
raise np.linalg.LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
                                 ' internal trtri' % -info)
rvs = np.dot(DA.T, DA)
else:
for index in np.ndindex(rvs.shape[:-2]):
                # Calculate DA
                DA = np.dot(D, rvs[index])
                DA, info = self._trtri(DA, lower=True)
if info > 0:
raise np.linalg.LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
                                     ' internal trtri' % -info)
# Get SA
rvs[index] = np.dot(DA.T, DA)
return rvs
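# A minimal sketch of the inverse Wishart draw described in steps 2a-3b of
# the Wishart docstring (Bartlett factor plus a single triangular
# inversion); it assumes scipy.stats.chi2 and scipy.linalg.solve_triangular
# are available, and the helper name and seed are illustrative only.
def _example_invwishart_draw(df, scale_iw, seed=0):
    from scipy.stats import chi2
    from scipy.linalg import solve_triangular
    rng = np.random.RandomState(seed)
    dim = scale_iw.shape[0]
    D = np.linalg.cholesky(np.linalg.inv(scale_iw))    # DD' = scale_W
    A = np.zeros((dim, dim))
    A[np.diag_indices(dim)] = np.sqrt(
        chi2.rvs(df - np.arange(dim), random_state=rng))
    A[np.tril_indices(dim, -1)] = rng.standard_normal(dim * (dim - 1) // 2)
    DA_inv = solve_triangular(np.dot(D, A), np.eye(dim), lower=True)
    return np.dot(DA_inv.T, DA_inv)                    # T = (DA)^{-1}' (DA)^{-1}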
|
|
# This code comes from ABE. It can probably be simplified.
#
#
import mmap
import string
import struct
import types
from utils import hash_160_to_pubkey_address, hash_160_to_script_address, public_key_to_pubkey_address, hash_encode,\
hash_160
class SerializationError(Exception):
"""Thrown when there's a problem deserializing or serializing."""
class BCDataStream(object):
"""Workalike python implementation of Bitcoin's CDataStream class."""
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def map_file(self, file, start): # Initialize with bytes from file
self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
self.read_cursor = start
def seek_file(self, position):
self.read_cursor = position
def close_file(self):
self.input.close()
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self):
return self.read_bytes(1)[0] != chr(0)
def read_int16(self):
return self._read_num('<h')
def read_uint16(self):
return self._read_num('<H')
def read_int32(self):
return self._read_num('<i')
def read_uint32(self):
return self._read_num('<I')
def read_int64(self):
return self._read_num('<q')
def read_uint64(self):
return self._read_num('<Q')
def write_boolean(self, val):
return self.write(chr(1) if val else chr(0))
def write_int16(self, val):
return self._write_num('<h', val)
def write_uint16(self, val):
return self._write_num('<H', val)
def write_int32(self, val):
return self._write_num('<i', val)
def write_uint32(self, val):
return self._write_num('<I', val)
def write_int64(self, val):
return self._write_num('<q', val)
def write_uint64(self, val):
return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
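# A minimal sketch of the compact-size (variable-length integer) encoding
# documented in read_string above, round-tripped through BCDataStream; the
# sample values are illustrative and rely on the Python 2 byte-string
# semantics used throughout this module.
def _example_compact_size():
    vds = BCDataStream()
    for value in (0, 252, 253, 65535, 65536):
        vds.clear()
        vds.write_compact_size(value)    # 1-, 3- or 5-byte encoding
        assert vds.read_compact_size() == value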
class EnumException(Exception):
pass
class Enumeration:
"""enum-like type
From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
"""
def __init__(self, name, enumList):
self.__doc__ = name
lookup = {}
reverseLookup = {}
i = 0
uniqueNames = []
uniqueValues = []
for x in enumList:
if isinstance(x, types.TupleType):
x, i = x
if not isinstance(x, types.StringType):
raise EnumException("enum name is not a string: %r" % x)
if not isinstance(i, types.IntType):
raise EnumException("enum value is not an integer: %r" % i)
if x in uniqueNames:
raise EnumException("enum name is not unique: %r" % x)
if i in uniqueValues:
raise EnumException("enum value is not unique for %r" % x)
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if attr not in self.lookup:
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
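# A minimal sketch of the Enumeration helper above; the names and values
# are illustrative only (the real use is the ``opcodes`` table below).
def _example_enumeration():
    colours = Enumeration("Colours", ["RED", ("GREEN", 5), "BLUE"])
    assert colours.RED == 0
    assert colours.GREEN == 5
    assert colours.whatis(6) == "BLUE"
    return colours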
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def parse_TxIn(vds):
d = {}
d['prevout_hash'] = hash_encode(vds.read_bytes(32))
d['prevout_n'] = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
d['sequence'] = vds.read_uint32()
if scriptSig:
pubkeys, signatures, address = get_address_from_input_script(scriptSig)
else:
pubkeys = []
signatures = []
address = None
d['address'] = address
d['signatures'] = signatures
return d
def parse_TxOut(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['address'] = get_address_from_output_script(scriptPubKey)
d['raw_output_script'] = scriptPubKey.encode('hex')
d['index'] = i
return d
def parse_Transaction(vds, is_coinbase):
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = []
for i in xrange(n_vin):
o = parse_TxIn(vds)
if not is_coinbase:
d['inputs'].append(o)
n_vout = vds.read_compact_size()
d['outputs'] = []
for i in xrange(n_vout):
o = parse_TxOut(vds, i)
#if o['address'] == "None" and o['value']==0:
# print("skipping strange tx output with zero value")
# continue
# if o['address'] != "None":
d['outputs'].append(o)
d['lockTime'] = vds.read_uint32()
return d
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1", 76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
"OP_NOP1", "OP_NOP2", "OP_NOP3", "OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10",
("OP_INVALIDOPCODE", 0xFF),
])
def script_GetOp(bytes):
i = 0
while i < len(bytes):
vch = None
opcode = ord(bytes[i])
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = ord(bytes[i])
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', bytes, i)
i += 4
if i+nSize > len(bytes):
vch = "_INVALID_"+bytes[i:]
i = len(bytes)
else:
vch = bytes[i:i+nSize]
i += nSize
yield (opcode, vch, i)
def script_GetOpName(opcode):
try:
return (opcodes.whatis(opcode)).replace("OP_", "")
except KeyError:
return "InvalidOp_"+str(opcode)
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0:
result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:" % (opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
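# A minimal sketch of how script_GetOp and match_decoded recognise a
# standard pay-to-pubkey-hash output script; the 20 zero bytes stand in
# for a real hash160 and are illustrative only (Python 2 byte strings).
def _example_match_p2pkh():
    script = (chr(opcodes.OP_DUP) + chr(opcodes.OP_HASH160) + chr(20) +
              ('\x00' * 20) + chr(opcodes.OP_EQUALVERIFY) +
              chr(opcodes.OP_CHECKSIG))
    decoded = [x for x in script_GetOp(script)]
    match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4,
             opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
    return match_decoded(decoded, match)    # True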
def get_address_from_input_script(bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except:
# coinbase transactions raise an exception
return [], [], None
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (33 or 65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
return None, None, public_key_to_pubkey_address(decoded[1][1])
# p2sh transaction, 2 of n
match = [ opcodes.OP_0 ]
while len(match) < len(decoded):
match.append(opcodes.OP_PUSHDATA4)
if match_decoded(decoded, match):
redeemScript = decoded[-1][1]
num = len(match) - 2
signatures = map(lambda x:x[1].encode('hex'), decoded[1:-1])
dec2 = [ x for x in script_GetOp(redeemScript) ]
# 2 of 2
match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec2, match2):
pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex') ]
return pubkeys, signatures, hash_160_to_script_address(hash_160(redeemScript))
# 2 of 3
match2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec2, match2):
pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex'), dec2[3][1].encode('hex') ]
return pubkeys, signatures, hash_160_to_script_address(hash_160(redeemScript))
return [], [], None
def get_address_from_output_script(bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except:
return None
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG]
if match_decoded(decoded, match):
return public_key_to_pubkey_address(decoded[0][1])
# coins sent to black hole
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_0, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
if match_decoded(decoded, match):
return None
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
if match_decoded(decoded, match):
return hash_160_to_pubkey_address(decoded[2][1])
# strange tx
match = [opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG, opcodes.OP_NOP]
if match_decoded(decoded, match):
return hash_160_to_pubkey_address(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
addr = hash_160_to_script_address(decoded[1][1])
return addr
return None
|
|
#Pyjsdl - Copyright (C) 2013 James Garnon <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from pyjsdl import env
from math import sqrt, sin, cos, atan2, pi, floor
class Vector2(object):
"""
Vector2 - 2-dimensional vector.
    Builds in --optimized mode (-O) lack type enforcement
    on attribute assignment, so ensure that floats are used.
    To enable type enforcement, use --enable-descriptor-proto;
    doing so has a moderate performance impact.
    To enable arithmetic operator functionality, use
    --enable-operator-funcs, which has a minimal performance impact.
"""
__slots__ = ['x', 'y']
def __init__(self, *args, **kwargs):
l = len(args)
if l == 2:
self.x = float(args[0])
self.y = float(args[1])
elif l == 1:
if isinstance(args[0], (int, float)):
self.x = float(args[0])
self.y = float(args[0])
else:
self.x = float(args[0][0])
self.y = float(args[0][1])
else:
if kwargs:
if 'x' in kwargs and 'y' in kwargs:
self.x = float(kwargs['x'])
self.y = float(kwargs['y'])
elif 'x' in kwargs:
self.x = float(kwargs['x'])
self.y = float(kwargs['x'])
else:
self.x = float(kwargs['y'])
self.y = float(kwargs['y'])
else:
self.x = 0.0
self.y = 0.0
def __str__(self):
return '[%g, %g]' % (self.x, self.y)
def __repr__(self):
return '<%s(%g, %g)>' % (self.__class__.__name__,
self.x, self.y)
def __getitem__(self, index):
if index in (0, -2):
return self.x
elif index in (1, -1):
return self.y
elif isinstance(index, slice):
return [self.x, self.y][index]
else:
raise IndexError
def __setitem__(self, index, val):
if index == 0:
try:
self.x = float(val)
except ValueError:
raise TypeError
elif index == 1:
try:
self.y = float(val)
except ValueError:
raise TypeError
elif isinstance(index, slice):
l = [self.x, self.y]
l[index] = val
if len(l) != 2:
raise ValueError
self.x = float(l[0])
self.y = float(l[1])
else:
raise IndexError
def __delitem__(self, index):
raise TypeError(
'Deletion of vector components is not supported')
def __getslice__(self, start, stop):
return [self.x, self.y][start:stop]
def __setslice__(self, lower, upper, val):
l = [self.x, self.y]
l[lower:upper] = val
if len(l) != 2:
raise ValueError
self.x = float(l[0])
self.y = float(l[1])
def __iter__(self):
for val in (self.x, self.y):
yield val
def __len__(self):
return 2
def __bool__(self):
return bool(self.x or self.y)
def __nonzero__(self):
return bool(self.x or self.y)
def dot(self, vector):
"""
Return dot product with other vector.
"""
return (self.x * vector[0]) + (self.y * vector[1])
def cross(self, vector):
"""
Return cross product with other vector.
"""
return (self.x * vector[1]) - (self.y * vector[0])
def magnitude(self):
"""
Return magnitude of vector.
"""
return sqrt((self.x**2) + (self.y**2))
def magnitude_squared(self):
"""
Return squared magnitude of vector.
"""
return ((self.x**2) + (self.y**2))
def length(self):
        # 'length' is a JS keyword; use magnitude.
"""
Return length of vector.
"""
return sqrt((self.x**2) + (self.y**2))
def length_squared(self):
"""
Return squared length of vector.
"""
return ((self.x**2) + (self.y**2))
def normalize(self):
"""
Return normalized vector.
"""
mag = self.magnitude()
if mag == 0:
raise ValueError('Cannot normalize vector of zero length')
return Vector2(self.x / mag, self.y / mag)
def normalize_ip(self):
"""
        Normalize this vector in place.
"""
mag = self.magnitude()
if mag == 0:
raise ValueError('Cannot normalize vector of zero length')
self.x /= mag
self.y /= mag
return None
def is_normalized(self):
"""
Check whether vector is normalized.
"""
return self.magnitude() == 1
def scale_to_length(self, length):
"""
Scale vector to length.
"""
mag = self.magnitude()
if mag == 0:
raise ValueError('Cannot scale vector of zero length')
self.x = (self.x / mag) * length
self.y = (self.y / mag) * length
return None
def reflect(self, vector):
"""
        Return the vector reflected about the given normal vector.
"""
vn = (self.x * vector[0]) + (self.y * vector[1])
nn = (vector[0] * vector[0]) + (vector[1] * vector[1])
if nn == 0:
raise ValueError('Cannot reflect from normal of zero length')
c = 2 * vn / nn
return Vector2(self.x - (vector[0] * c),
self.y - (vector[1] * c))
def reflect_ip(self, vector):
"""
        Reflect the vector about the given normal vector in place.
"""
vn = (self.x * vector[0]) + (self.y * vector[1])
nn = (vector[0] * vector[0]) + (vector[1] * vector[1])
if nn == 0:
raise ValueError('Cannot reflect from normal of zero length')
c = 2 * vn / nn
self.x -= (vector[0] * c)
self.y -= (vector[1] * c)
return None
def distance_to(self, vector):
"""
Return distance to given vector.
"""
return sqrt((self.x - vector[0])**2
+ (self.y - vector[1])**2)
def distance_squared_to(self, vector):
"""
Return squared distance to given vector.
"""
return ((self.x - vector[0])**2
+ (self.y - vector[1])**2)
def lerp(self, vector, t):
"""
Return vector linear interpolated by t to the given vector.
"""
if t < 0.0 or t > 1.0:
raise ValueError('Argument t must be in range 0 to 1')
return Vector2(self.x * (1-t) + vector[0] * t,
self.y * (1-t) + vector[1] * t)
def slerp(self, vector, t):
"""
Return vector spherical interpolated by t to the given vector.
"""
if t < -1.0 or t > 1.0:
raise ValueError('Argument t must be in range -1 to 1')
if not hasattr(vector, '__len__') or len(vector) != 2:
raise TypeError('The first argument must be a vector')
smag = sqrt((self.x**2) + (self.y**2))
vmag = sqrt((vector[0]**2) + (vector[1]**2))
if smag==0 or vmag==0:
raise ValueError('Cannot use slerp with zero-vector')
sx = self.x / smag
sy = self.y / smag
vx = vector[0] / vmag
vy = vector[1] / vmag
theta = atan2(vy, vx) - atan2(sy, sx)
_theta = abs(theta)
if _theta-pi > 0.000001:
theta -= (2*pi) * (theta / _theta)
elif -0.000001 < _theta-pi < 0.000001:
raise ValueError('Cannot use slerp on 180 degrees')
if t < 0.0:
t = -t
theta -= (2*pi) * (theta / abs(theta))
sin_theta = sin(theta)
if sin_theta:
a = sin((1.0-t) * theta) / sin_theta
b = sin(t * theta) / sin_theta
else:
a = 1.0
b = 0.0
v = Vector2((sx * a) + (vx * b),
(sy * a) + (vy * b))
smag = ((1.0-t) * smag) + (t * vmag)
v.x *= smag
v.y *= smag
return v
def elementwise(self):
"""
        Return a proxy for elementwise operations.
"""
return VectorElementwiseProxy(self.x, self.y)
def rotate(self, angle):
"""
Return vector rotated by angle in degrees.
"""
rad = angle / 180.0 * pi
c = round(cos(rad), 6)
s = round(sin(rad), 6)
return Vector2((c * self.x) - (s * self.y),
(s * self.x) + (c * self.y))
def rotate_rad(self, angle):
"""
Return vector rotated by angle in radians.
"""
c = cos(angle)
s = sin(angle)
return Vector2((c * self.x) - (s * self.y),
(s * self.x) + (c * self.y))
def rotate_ip(self, angle):
"""
Rotate vector by angle in degrees.
"""
r = angle / 180.0 * pi
c = round(cos(r), 6)
s = round(sin(r), 6)
x = self.x
y = self.y
self.x = (c * x) - (s * y)
self.y = (s * x) + (c * y)
return None
def rotate_ip_rad(self, angle):
"""
Rotate vector by angle in radians.
"""
c = cos(angle)
s = sin(angle)
x = self.x
y = self.y
self.x = (c * x) - (s * y)
self.y = (s * x) + (c * y)
return None
def angle_to(self, vector):
"""
Return angle to given vector.
"""
return (atan2(vector[1], vector[0])
- atan2(self.y, self.x)) * (180.0 / pi)
def as_polar(self):
"""
Return radial distance and azimuthal angle.
"""
r = self.magnitude()
phi = atan2(self.y, self.x) * (180.0 / pi)
return (r, phi)
def from_polar(self, coordinate):
"""
Set vector with polar coordinate tuple.
"""
if len(coordinate) != 2:
            raise TypeError('coordinate must be of length 2')
r = coordinate[0]
phi = coordinate[1] * (pi / 180.0)
self.x = round(r * cos(phi), 6)
self.y = round(r * sin(phi), 6)
return None
def update(self, *args, **kwargs):
"""
Update vector.
"""
l = len(args)
if l == 2:
self.x = float(args[0])
self.y = float(args[1])
elif l == 1:
if isinstance(args[0], (int, float)):
self.x = float(args[0])
self.y = float(args[0])
else:
self.x = float(args[0][0])
self.y = float(args[0][1])
else:
if kwargs:
if 'x' in kwargs and 'y' in kwargs:
self.x = float(kwargs['x'])
self.y = float(kwargs['y'])
elif 'x' in kwargs:
self.x = float(kwargs['x'])
self.y = float(kwargs['x'])
else:
self.x = float(kwargs['y'])
self.y = float(kwargs['y'])
else:
self.x = 0.0
self.y = 0.0
def __pos__(self):
return Vector2(self.x, self.y)
def __neg__(self):
return Vector2(-self.x, -self.y)
def __add__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self.x + other[0], self.y + other[1])
else:
return Vector2(self.x + other, self.y + other)
def __sub__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self.x - other[0], self.y - other[1])
else:
return Vector2(self.x - other, self.y - other)
def __mul__(self, other):
if hasattr(other, '__iter__'):
if not isinstance(other, VectorElementwiseProxy):
return (self.x * other[0]) + (self.y * other[1])
else:
return Vector2(self.x * other[0], self.y * other[1])
else:
return Vector2(self.x * other, self.y * other)
def __div__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self.x / other[0], self.y / other[1])
else:
return Vector2(self.x / other, self.y / other)
def __truediv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self.x / other[0], self.y / other[1])
else:
return Vector2(self.x / other, self.y / other)
def __floordiv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(floor(self.x/other[0]), floor(self.y/other[1]))
else:
return Vector2(floor(self.x/other), floor(self.y/other))
def __eq__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
return ( abs(self.x - other[0]) < 0.000001 and
abs(self.y - other[1]) < 0.000001 )
else:
return False
else:
return ( abs(self.x - other) < 0.000001 and
abs(self.y - other) < 0.000001 )
def __ne__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
return ( abs(self.x - other[0]) > 0.000001 or
abs(self.y - other[1]) > 0.000001 )
else:
return True
else:
return ( abs(self.x - other) > 0.000001 or
abs(self.y - other) > 0.000001 )
def __gt__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return NotImplemented
def __ge__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return NotImplemented
def __lt__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return NotImplemented
def __le__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self.x + other[0], self.y + other[1])
else:
return Vector2(self.x + other, self.y + other)
def __rsub__(self, other):
if hasattr(other, '__iter__'):
return Vector2(other[0] - self.x, other[1] - self.y)
else:
return Vector2(other - self.x, other - self.y)
def __rmul__(self, other):
if hasattr(other, '__iter__'):
if not isinstance(other, VectorElementwiseProxy):
return (self.x * other[0]) + (self.y * other[1])
else:
return Vector2(self.x * other[0], self.y * other[1])
else:
return Vector2(self.x * other, self.y * other)
def __rdiv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(other[0] / self.x, other[1] / self.y)
else:
return Vector2(other / self.x, other / self.y)
def __rtruediv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(other[0] / self.x, other[1] / self.y)
else:
return Vector2(other / self.x, other / self.y)
def __rfloordiv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(floor(other[0]/self.x), floor(other[1]/self.y))
else:
return Vector2(floor(other/self.x), floor(other/self.y))
def __iadd__(self, other):
if hasattr(other, '__iter__'):
self.x += other[0]
self.y += other[1]
else:
self.x += other
self.y += other
return self
def __isub__(self, other):
if hasattr(other, '__iter__'):
self.x -= other[0]
self.y -= other[1]
else:
self.x -= other
self.y -= other
return self
def __imul__(self, other):
if hasattr(other, '__iter__'):
self.x *= other[0]
self.y *= other[1]
else:
self.x *= other
self.y *= other
return self
def __idiv__(self, other):
if hasattr(other, '__iter__'):
self.x /= other[0]
self.y /= other[1]
else:
self.x /= other
self.y /= other
return self
def __itruediv__(self, other):
if hasattr(other, '__iter__'):
self.x /= other[0]
self.y /= other[1]
else:
self.x /= other
self.y /= other
return self
def __ifloordiv__(self, other):
if hasattr(other, '__iter__'):
self.x = float(floor(self.x / other[0]))
self.y = float(floor(self.y / other[1]))
else:
self.x = float(floor(self.x / other))
self.y = float(floor(self.y / other))
return self
class _Vector2(Vector2):
"""
Vector2 - 2-dimensional vector.
"""
__slots__ = ['_x', '_y']
def __init__(self, *args, **kwargs):
l = len(args)
if l == 2:
self._x = float(args[0])
self._y = float(args[1])
elif l == 1:
if isinstance(args[0], (int, float)):
self._x = float(args[0])
self._y = float(args[0])
else:
self._x = float(args[0][0])
self._y = float(args[0][1])
else:
if kwargs:
if 'x' in kwargs and 'y' in kwargs:
self._x = float(kwargs['x'])
self._y = float(kwargs['y'])
elif 'x' in kwargs:
self._x = float(kwargs['x'])
self._y = float(kwargs['x'])
else:
self._x = float(kwargs['y'])
self._y = float(kwargs['y'])
else:
self._x = 0.0
self._y = 0.0
@property
def x(self):
return self._x
@x.setter
def x(self, val):
try:
self._x = float(val)
except ValueError:
raise TypeError('float is required')
@x.deleter
def x(self):
raise TypeError('Cannot delete the x attribute')
@property
def y(self):
return self._y
@y.setter
def y(self, val):
try:
self._y = float(val)
except ValueError:
raise TypeError('float is required')
@y.deleter
def y(self):
raise TypeError('Cannot delete the y attribute')
def __repr__(self):
return '<%s(%g, %g)>' % (self.__class__.__name__[1:],
self.x, self.y)
if env.pyjs_mode.test_getattr():
#-S mode or -O --enable-descriptor-proto mode.
Vector2 = _Vector2
class VectorElementwiseProxy(object):
def __init__(self, x, y):
self._x = x
self._y = y
def __getitem__(self, index):
if index in (0, -2):
return self._x
elif index in (1, -1):
return self._y
def __iter__(self):
for val in (self._x, self._y):
yield val
def __len__(self):
return 2
def __bool__(self):
return bool(self._x or self._y)
def __nonzero__(self):
return bool(self._x or self._y)
def __pos__(self):
return Vector2(self._x, self._y)
def __neg__(self):
return Vector2(-self._x, -self._y)
def __abs__(self):
return (abs(self._x), abs(self._y))
def __add__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x + other[0], self._y + other[1])
else:
return Vector2(self._x + other, self._y + other)
def __sub__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x - other[0], self._y - other[1])
else:
return Vector2(self._x - other, self._y - other)
def __mul__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x * other[0], self._y * other[1])
else:
return Vector2(self._x * other, self._y * other)
def __div__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x / other[0], self._y / other[1])
else:
return Vector2(self._x / other, self._y / other)
def __truediv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x / other[0], self._y / other[1])
else:
return Vector2(self._x / other, self._y / other)
def __floordiv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(floor(self._x/other[0]), floor(self._y/other[1]))
else:
return Vector2(floor(self._x/other), floor(self._y/other))
def __pow__(self, other):
if hasattr(other, '__iter__'):
if (other[0]%1 and self._x<0) or (other[1]%1 and self._y<0):
raise ValueError(
'negative number cannot be raised to a fractional power')
return Vector2(self._x ** other[0], self._y ** other[1])
else:
if other%1 and (self._x<0 or self._y<0):
raise ValueError(
'negative number cannot be raised to a fractional power')
return Vector2(self._x ** other, self._y ** other)
def __mod__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x % other[0], self._y % other[1])
else:
return Vector2(self._x % other, self._y % other)
def __eq__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
return ( abs(self._x - other[0]) < 0.000001 and
abs(self._y - other[1]) < 0.000001 )
else:
return False
else:
return ( abs(self._x - other) < 0.000001 and
abs(self._y - other) < 0.000001 )
def __ne__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
return ( abs(self._x - other[0]) > 0.000001 or
abs(self._y - other[1]) > 0.000001 )
else:
return True
else:
return ( abs(self._x - other) > 0.000001 or
abs(self._y - other) > 0.000001 )
def __gt__(self, other):
if hasattr(other, '__iter__'):
return bool(self._x > other[0] and self._y > other[1])
else:
return bool(self._x > other and self._y > other)
def __ge__(self, other):
if hasattr(other, '__iter__'):
return bool(self._x >= other[0] and self._y >= other[1])
else:
return bool(self._x >= other and self._y >= other)
def __lt__(self, other):
if hasattr(other, '__iter__'):
return bool(self._x < other[0] and self._y < other[1])
else:
return bool(self._x < other and self._y < other)
def __le__(self, other):
if hasattr(other, '__iter__'):
return bool(self._x <= other[0] and self._y <= other[1])
else:
return bool(self._x <= other and self._y <= other)
def __radd__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x + other[0], self._y + other[1])
else:
return Vector2(self._x + other, self._y + other)
def __rsub__(self, other):
if hasattr(other, '__iter__'):
return Vector2(other[0] - self._x, other[1] - self._y)
else:
return Vector2(other - self._x, other - self._y)
def __rmul__(self, other):
if hasattr(other, '__iter__'):
return Vector2(self._x * other[0], self._y * other[1])
else:
return Vector2(self._x * other, self._y * other)
def __rdiv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(other[0] / self._x, other[1] / self._y)
else:
return Vector2(other / self._x, other / self._y)
def __rtruediv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(other[0] / self._x, other[1] / self._y)
else:
return Vector2(other / self._x, other / self._y)
def __rfloordiv__(self, other):
if hasattr(other, '__iter__'):
return Vector2(floor(other[0]/self._x), floor(other[1]/self._y))
else:
return Vector2(floor(other/self._x), floor(other/self._y))
def __rpow__(self, other):
if hasattr(other, '__iter__'):
if (other[0]<0 and self._x%1) or (other[1]<0 and self._y%1):
raise ValueError(
'negative number cannot be raised to a fractional power')
return Vector2(other[0] ** self._x, other[1] ** self._y)
else:
if other<0 and (self._x%1 or self._y%1):
raise ValueError(
'negative number cannot be raised to a fractional power')
return Vector2(other ** self._x, other ** self._y)
def __rmod__(self, other):
if hasattr(other, '__iter__'):
return Vector2(other[0] % self._x, other[1] % self._y)
else:
return Vector2(other % self._x, other % self._y)
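# Illustrative sketch (not from the original module): the proxy makes
# arithmetic and comparisons component-wise, whereas Vector2 itself refuses
# ordering comparisons.  Assuming the usual elementwise() accessor returns
# this proxy:
#   v = Vector2(3, 4)
#   v.elementwise() * 2      # -> Vector2(6, 8)
#   v.elementwise() > 2      # -> True only if both components exceed 2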
|
|
import os
import Queue
import serial
import settings
import sys
import time
import threading
import traceback
import zmq
#BASE_PATH = os.path.dirname(os.path.realpath(__file__))
#UPPER_PATH = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
#DEVICES_PATH = "%s/Hosts/" % (BASE_PATH )
#THIRTYBIRDS_PATH = "%s/thirtybirds_2_0" % (UPPER_PATH )
#sys.path.append(BASE_PATH)
#sys.path.append(UPPER_PATH)
from thirtybirds_2_0.Network.manager import init as network_init
class Network(object):
def __init__(self, hostname, network_message_handler, network_status_handler):
self.hostname = hostname
self.thirtybirds = network_init(
hostname=hostname,
role="client",
discovery_multicastGroup=settings.discovery_multicastGroup,
discovery_multicastPort=settings.discovery_multicastPort,
discovery_responsePort=settings.discovery_responsePort,
pubsub_pubPort=settings.pubsub_pubPort,
message_callback=network_message_handler,
status_callback=network_status_handler
)
class Mandala(threading.Thread):
def __init__(self, serial_device_path='/dev/ttyUSB0'):
threading.Thread.__init__(self)
self.ser = serial.Serial(serial_device_path, 115200, timeout=10) # Establish the connection on a specific port
self.delay = 0.015
self.level_range = 4096
self.queue = Queue.Queue()
#print self.ser
self.topics_d = {
"controller":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":26,
"dependencies":[
"controller_pedals_connected"
]
},
"controller_pedals_connected":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":9,
"dependencies":[]
},
"layer_1":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":29,
"dependencies":[]
},
"layer_2":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":30,
"dependencies":[]
},
"layer_3":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":31,
"dependencies":[]
},
"medulla":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":24,
"dependencies":[]
},
"pitch_keys":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":28,
"dependencies":[
"pitch_keys_sensor_1_present",
"pitch_keys_sensor_2_present",
"pitch_keys_sensor_3_present",
"pitch_keys_sensor_4_present",
]
},
"pitch_keys_sensor_1_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":11,
"dependencies":[]
},
"pitch_keys_sensor_2_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":12,
"dependencies":[]
},
"pitch_keys_sensor_3_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":13,
"dependencies":[]
},
"pitch_keys_sensor_4_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":14,
"dependencies":[]
},
"preamp_1":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":33,
"dependencies":[]
},
"preamp_2":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":34,
"dependencies":[]
},
"preamp_3":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":35,
"dependencies":[]
},
"settings":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":32,
"dependencies":[
"settings_adc_1_present",
"settings_adc_2_present",
"settings_adc_3_present",
"settings_adc_4_present",
"settings_adc_5_present",
"settings_adc_6_present",
]
},
"settings_adc_1_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":15,
"dependencies":[]
},
"settings_adc_2_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":16,
"dependencies":[]
},
"settings_adc_3_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":17,
"dependencies":[]
},
"settings_adc_4_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":18,
"dependencies":[]
},
"settings_adc_5_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":19,
"dependencies":[]
},
"settings_adc_6_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":20,
"dependencies":[]
},
"transport":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":27,
"dependencies":[
"transport_encoder_present"
]
},
"transport_encoder_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":10,
"dependencies":[]
},
"voice_1":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":21,
"dependencies":[
"voice_1_oscillator_present",
"voice_1_tuning_complete",
]
},
"voice_1_oscillator_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":0,
"dependencies":[]
},
"voice_1_tuning_complete":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":1,
"dependencies":[]
},
"voice_2":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":22,
"dependencies":[
"voice_2_oscillator_present",
"voice_2_tuning_complete",
]
},
"voice_2_oscillator_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":2,
"dependencies":[]
},
"voice_2_tuning_complete":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":3,
"dependencies":[]
},
"voice_3":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":23,
"dependencies":[
"voice_3_oscillator_present",
"voice_3_tuning_complete",
]
},
"voice_3_oscillator_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":4,
"dependencies":[]
},
"voice_3_tuning_complete":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":5,
"dependencies":[]
},
"voice_keys":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":25,
"dependencies":[
"voice_keys_encoder_1_present",
"voice_keys_encoder_2_present",
"voice_keys_encoder_3_present",
]
},
"voice_keys_encoder_1_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":6,
"dependencies":[]
},
"voice_keys_encoder_2_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":7,
"dependencies":[]
},
"voice_keys_encoder_3_present":{
"reported_status":False,
"computed_status":False,
"led_status":"off",
"address":8,
"dependencies":[]
},
"ready":{
"computed_status":False,
"led_status":"off",
"address":36,
"dependencies":[
"controller",
"layer_1",
"layer_2",
"layer_3",
"medulla",
"pitch_keys",
"preamp_1",
"preamp_2",
"preamp_3",
"settings",
"transport",
"voice_1",
"voice_2",
"voice_3",
"voice_keys",
]
}
}
self.computer_topics = [key for key, val in self.topics_d.items() if len(val["dependencies"]) > 0 and key != "ready"]
self.peripheral_topics = [key for key, val in self.topics_d.items() if len(val["dependencies"]) == 0]
self.led_level_d = {
"off":0.0,
"low":0.02,
"high":0.9
}
def get_ready_state(self):
return self.topics_d["ready"]["computed_status"]
def get_dependent_topic(self, dependency_topic):
for computer_topic in self.computer_topics:
            if dependency_topic in self.topics_d[computer_topic]["dependencies"]:
return computer_topic
return False
def get_if_dependencies_are_all_true(self, topic):
for dependency_topic in self.topics_d[topic]["dependencies"]:
if self.topics_d[dependency_topic]["computed_status"] == False:
return False
return True
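    # Illustrative sketch: with the topic table above, a peripheral such as
    # "voice_1_oscillator_present" maps back to the computer topic "voice_1",
    # and "voice_1" only computes True once every dependency has come online:
    #   self.get_dependent_topic("voice_1_oscillator_present")  # -> "voice_1"
    #   self.get_if_dependencies_are_all_true("voice_1")        # -> True/False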
def update_computed_status(self, topic):
if self.get_ready_state(): #if already ready
if topic in self.peripheral_topics:
if self.topics_d[topic]["reported_status"]: # this status should already be True if ready is True
print topic, "reports True but should already be True"
self.topics_d[topic]["computed_status"] = True
else:
self.topics_d[topic]["computed_status"] = False
dependent_topic = self.get_dependent_topic(topic)
self.topics_d[dependent_topic]["computed_status"] = False
if topic in self.computer_topics:
if self.topics_d[topic]["reported_status"]: # this status should already be True if ready is True
print topic, "reports True but should already be True"
self.topics_d[topic]["computed_status"] = True
else: # computer is offline. set dependent peripherals offline, too
self.topics_d[topic]["computed_status"] = False
for dependency_topic in self.topics_d[topic]["dependencies"]:
self.topics_d[dependency_topic]["reported_status"] = False
self.topics_d[dependency_topic]["computed_status"] = False
else: # if not ready
if topic in self.peripheral_topics:
if self.topics_d[topic]["reported_status"]: # peripheral coming online
self.topics_d[topic]["computed_status"] = True
dependent_topic = self.get_dependent_topic(topic)
if self.get_if_dependencies_are_all_true(dependent_topic):
self.topics_d[dependent_topic]["computed_status"] = True
else:
self.topics_d[dependent_topic]["computed_status"] = False
else: # peripheral coming offline
self.topics_d[topic]["computed_status"] = False
dependent_topic = self.get_dependent_topic(topic)
self.topics_d[dependent_topic]["computed_status"] = False
if topic in self.computer_topics:
if self.topics_d[topic]["reported_status"]: # computer coming online
self.topics_d[topic]["computed_status"] = True
else: # computer coming offline
self.topics_d[topic]["computed_status"] = False
for dependency_topic in self.topics_d[topic]["dependencies"]:
self.topics_d[dependency_topic]["computed_status"] = False
self.topics_d[dependency_topic]["reported_status"] = False
ready_state_test = True
for dependency_topic in self.topics_d['ready']["dependencies"]:
if self.topics_d[dependency_topic]["computed_status"] == False:
ready_state_test = False
break
self.topics_d['ready']["computed_status"] = ready_state_test
def update_led_states(self):
if self.topics_d['ready']["computed_status"]:
self.topics_d['ready']["led_status"] = "high"
for topic in self.computer_topics:
self.topics_d[topic]["led_status"] = "low"
for topic in self.peripheral_topics:
self.topics_d[topic]["led_status"] = "low"
else:
self.topics_d['ready']["led_status"] = "off"
for topic in self.peripheral_topics:
if self.topics_d[topic]["computed_status"]:
self.topics_d[topic]["led_status"] = "high"
else:
self.topics_d[topic]["led_status"] = "off"
for topic in self.computer_topics:
if self.topics_d[topic]["reported_status"] == False:
self.topics_d[topic]["led_status"] = "off"
else:
if self.topics_d[topic]["computed_status"]:
self.topics_d[topic]["led_status"] = "high"
else:
self.topics_d[topic]["led_status"] = "low"
for topic in self.topics_d.keys():
led_position = self.topics_d[topic]["address"]
led_level = self.led_level_d[self.topics_d[topic]["led_status"]]
self.add_to_queue(led_position, led_level)
def update(self, topic, status): # status_str = True|False
try:
self.topics_d[topic]["reported_status"] = status
self.update_computed_status(topic)
self.update_led_states()
except Exception as e:
print "exception in Mandala.update", e
def add_to_queue(self, position, level):
self.queue.put((position, level))
def run(self):
while True:
time.sleep(self.delay)
position, level = self.queue.get(True)
print position, level
if position < 38 and level <= 1.0:
led_pos_str = str(position+5000)
led_level_str = str(int(level*4096))
print led_pos_str, led_level_str
self.ser.write('<' + led_pos_str + '>')
time.sleep(self.delay)
self.ser.write('<' + led_level_str + '>')
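# Illustrative note: each queued LED update is sent as two framed integers,
# '<5000+position>' to address the LED, then '<int(level*4096)>' for its
# 12-bit brightness; e.g. position 26 at level 0.9 goes out as '<5026>' '<3686>'.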
#mandala = Mandala('/dev/ttyACM0')
#mandala.daemon = True
#mandala.start()
# Main handles network send/recv and can see all other classes directly
class Main(threading.Thread):
def __init__(self, hostname):
threading.Thread.__init__(self)
self.network = Network(hostname, self.network_message_handler, self.network_status_handler)
self.queue = Queue.Queue()
self.mandala = Mandala('/dev/ttyACM0')
self.mandala.daemon = True
self.mandala.start()
self.mandala_topics = self.mandala.topics_d.keys()
for topic in self.mandala_topics:
self.network.thirtybirds.subscribe_to_topic(topic)
def network_message_handler(self, topic_msg):
        # this method runs in the thread of the caller, not the thread of Main
        topic, msg = topic_msg  # unpack early so msg can be eval'd here; ideally thirtybirds would handle this
if len(msg) > 0:
msg = eval(msg)
self.add_to_queue(topic, msg)
def network_status_handler(self, topic_msg):
        # this method runs in the thread of the caller, not the thread of Main
print "Main.network_status_handler", topic_msg
def add_to_queue(self, topic, msg):
self.queue.put((topic, msg))
def run(self):
while True:
#try:
topic, msg = self.queue.get(True)
if topic in self.mandala_topics:
self.mandala.update(topic, msg)
#self.socket.send("{} {}".format(topic, msg))
print "main Main.run topic/queue", topic, msg
#except Exception as e:
# exc_type, exc_value, exc_traceback = sys.exc_info()
# print e, repr(traceback.format_exception(exc_type, exc_value,exc_traceback))
def init(hostname):
main = Main(hostname)
main.daemon = True
main.start()
return main
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runs a development application server for an application.
%(script)s [options] <application root>
Application root must be the path to the application to run in this server.
Must contain a valid app.yaml or app.yml file.
Options:
--address=ADDRESS, -a ADDRESS
Address to which this server should bind. (Default
%(address)s).
--clear_datastore, -c Clear the Datastore on startup. (Default false)
--debug, -d Use debug logging. (Default false)
--help, -h View this helpful message.
--port=PORT, -p PORT Port for the server to run on. (Default %(port)s)
--allow_skipped_files Allow access to files matched by app.yaml's
skipped_files (default False)
--auth_domain Authorization domain that this app runs in.
(Default gmail.com)
--backends Run the dev_appserver with backends support
(multiprocess mode).
--blobstore_path=DIR Path to directory to use for storing Blobstore
file stub data.
--clear_prospective_search Clear the Prospective Search subscription index
(Default false).
--clear_search_indexes Clear the Full Text Search indexes (Default false).
--datastore_path=DS_FILE Path to file to use for storing Datastore file
stub data.
(Default %(datastore_path)s)
--debug_imports Enables debug logging for module imports, showing
search paths used for finding modules and any
errors encountered during the import process.
--default_partition Default partition to use in the APPLICATION_ID.
(Default dev)
--disable_static_caching Never allow the browser to cache static files.
(Default enable if expiration set in app.yaml)
--disable_task_running When supplied, tasks will not be automatically
run after submission and must be run manually
in the local admin console.
--enable_sendmail Enable sendmail when SMTP not configured.
(Default false)
--high_replication Use the high replication datastore consistency
model. (Default false).
--history_path=PATH Path to use for storing Datastore history.
(Default %(history_path)s)
--multiprocess_min_port When running in multiprocess mode, specifies the
lowest port value to use when choosing ports. If
set to 0, select random ports.
(Default 9000)
--mysql_host=HOSTNAME MySQL database host.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_host)s')
--mysql_port=PORT MySQL port to connect to.
Used by the Cloud SQL (rdbms) stub.
(Default %(mysql_port)s)
--mysql_user=USER MySQL user to connect as.
Used by the Cloud SQL (rdbms) stub.
(Default %(mysql_user)s)
--mysql_password=PASSWORD MySQL password to use.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_password)s')
--mysql_socket=PATH MySQL Unix socket file path.
Used by the Cloud SQL (rdbms) stub.
(Default '%(mysql_socket)s')
--persist_logs Enables storage of all request and application
logs to enable later access. (Default false).
--require_indexes Disallows queries that require composite indexes
not defined in index.yaml.
--search_indexes_path=PATH Path to file to use for storing Full Text Search
indexes (Default %(search_indexes_path)s).
--show_mail_body Log the body of emails in mail stub.
(Default false)
--skip_sdk_update_check Skip checking for SDK updates. If false, fall back
to opt_in setting specified in .appcfg_nag
(Default false)
--smtp_host=HOSTNAME SMTP host to send test mail to. Leaving this
unset will disable SMTP mail sending.
(Default '%(smtp_host)s')
--smtp_port=PORT SMTP port to send test mail to.
(Default %(smtp_port)s)
--smtp_user=USER SMTP user to connect as. Stub will only attempt
to login if this field is non-empty.
(Default '%(smtp_user)s').
--smtp_password=PASSWORD Password for SMTP server.
(Default '%(smtp_password)s')
--task_retry_seconds How long to wait in seconds before retrying a
task after it fails during execution.
(Default '%(task_retry_seconds)s')
--use_sqlite Use the new, SQLite based datastore stub.
(Default false)
"""
from google.appengine.tools import os_compat
import getopt
import logging
import os
import signal
import sys
import tempfile
import traceback
logging.basicConfig(
level=logging.INFO,
format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
from google.appengine.api import yaml_errors
from google.appengine.dist import py_zipimport
from google.appengine.tools import appcfg
from google.appengine.tools import appengine_rpc
from google.appengine.tools import dev_appserver
from google.appengine.tools import dev_appserver_multiprocess as multiprocess
DEFAULT_ADMIN_CONSOLE_SERVER = 'appengine.google.com'
ARG_ADDRESS = 'address'
ARG_ADMIN_CONSOLE_HOST = 'admin_console_host'
ARG_ADMIN_CONSOLE_SERVER = 'admin_console_server'
ARG_ALLOW_SKIPPED_FILES = 'allow_skipped_files'
ARG_AUTH_DOMAIN = 'auth_domain'
ARG_BACKENDS = 'backends'
ARG_BLOBSTORE_PATH = 'blobstore_path'
ARG_CLEAR_DATASTORE = 'clear_datastore'
ARG_CLEAR_PROSPECTIVE_SEARCH = 'clear_prospective_search'
ARG_CLEAR_SEARCH_INDEX = 'clear_search_indexes'
ARG_DATASTORE_PATH = 'datastore_path'
ARG_DEBUG_IMPORTS = 'debug_imports'
ARG_DEFAULT_PARTITION = 'default_partition'
ARG_DISABLE_TASK_RUNNING = 'disable_task_running'
ARG_ENABLE_SENDMAIL = 'enable_sendmail'
ARG_HIGH_REPLICATION = 'high_replication'
ARG_HISTORY_PATH = 'history_path'
ARG_LOGIN_URL = 'login_url'
ARG_LOG_LEVEL = 'log_level'
ARG_MULTIPROCESS = multiprocess.ARG_MULTIPROCESS
ARG_MULTIPROCESS_API_PORT = multiprocess.ARG_MULTIPROCESS_API_PORT
ARG_MULTIPROCESS_API_SERVER = multiprocess.ARG_MULTIPROCESS_API_SERVER
ARG_MULTIPROCESS_APP_INSTANCE_ID = multiprocess.ARG_MULTIPROCESS_APP_INSTANCE_ID
ARG_MULTIPROCESS_BACKEND_ID = multiprocess.ARG_MULTIPROCESS_BACKEND_ID
ARG_MULTIPROCESS_BACKEND_INSTANCE_ID = multiprocess.ARG_MULTIPROCESS_BACKEND_INSTANCE_ID
ARG_MULTIPROCESS_MIN_PORT = multiprocess.ARG_MULTIPROCESS_MIN_PORT
ARG_MYSQL_HOST = 'mysql_host'
ARG_MYSQL_PASSWORD = 'mysql_password'
ARG_MYSQL_PORT = 'mysql_port'
ARG_MYSQL_SOCKET = 'mysql_socket'
ARG_MYSQL_USER = 'mysql_user'
ARG_PERSIST_LOGS = 'persist_logs'
ARG_PORT = 'port'
ARG_PROSPECTIVE_SEARCH_PATH = 'prospective_search_path'
ARG_REQUIRE_INDEXES = 'require_indexes'
ARG_SEARCH_INDEX_PATH = 'search_indexes_path'
ARG_SHOW_MAIL_BODY = 'show_mail_body'
ARG_SKIP_SDK_UPDATE_CHECK = 'skip_sdk_update_check'
ARG_SMTP_HOST = 'smtp_host'
ARG_SMTP_PASSWORD = 'smtp_password'
ARG_SMTP_PORT = 'smtp_port'
ARG_SMTP_USER = 'smtp_user'
ARG_STATIC_CACHING = 'static_caching'
ARG_TASK_RETRY_SECONDS = 'task_retry_seconds'
ARG_TRUSTED = 'trusted'
ARG_USE_SQLITE = 'use_sqlite'
SDK_PATH = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(os_compat.__file__)
)
)
)
PRODUCTION_VERSION = (2, 5)
WARN_ABOUT_PYTHON_VERSION = True
DEFAULT_ARGS = {
ARG_ADDRESS: 'localhost',
ARG_ADMIN_CONSOLE_HOST: None,
ARG_ADMIN_CONSOLE_SERVER: DEFAULT_ADMIN_CONSOLE_SERVER,
ARG_ALLOW_SKIPPED_FILES: False,
ARG_AUTH_DOMAIN: 'gmail.com',
ARG_BLOBSTORE_PATH: os.path.join(tempfile.gettempdir(),
'dev_appserver.blobstore'),
ARG_CLEAR_DATASTORE: False,
ARG_CLEAR_PROSPECTIVE_SEARCH: False,
ARG_CLEAR_SEARCH_INDEX: False,
ARG_DATASTORE_PATH: os.path.join(tempfile.gettempdir(),
'dev_appserver.datastore'),
ARG_DEFAULT_PARTITION: 'dev',
ARG_DISABLE_TASK_RUNNING: False,
ARG_ENABLE_SENDMAIL: False,
ARG_HIGH_REPLICATION: False,
ARG_HISTORY_PATH: os.path.join(tempfile.gettempdir(),
'dev_appserver.datastore.history'),
ARG_LOGIN_URL: '/_ah/login',
ARG_LOG_LEVEL: logging.INFO,
ARG_MYSQL_HOST: 'localhost',
ARG_MYSQL_PASSWORD: '',
ARG_MYSQL_PORT: 3306,
ARG_MYSQL_SOCKET: '',
ARG_MYSQL_USER: '',
ARG_PERSIST_LOGS: False,
ARG_PORT: 8080,
ARG_PROSPECTIVE_SEARCH_PATH: os.path.join(tempfile.gettempdir(),
'dev_appserver.prospective_search'),
ARG_REQUIRE_INDEXES: False,
ARG_SEARCH_INDEX_PATH: os.path.join(tempfile.gettempdir(),
'dev_appserver.searchindexes'),
ARG_SHOW_MAIL_BODY: False,
ARG_SKIP_SDK_UPDATE_CHECK: False,
ARG_SMTP_HOST: '',
ARG_SMTP_PASSWORD: '',
ARG_SMTP_PORT: 25,
ARG_SMTP_USER: '',
ARG_STATIC_CACHING: True,
ARG_TASK_RETRY_SECONDS: 30,
ARG_TRUSTED: False,
ARG_USE_SQLITE: False,
}
OPTIONS = 'a:cdhp:'
LONG_OPTIONS = [
'address=',
'admin_console_host=',
'admin_console_server=',
'allow_skipped_files',
'auth_domain=',
'backends',
'blobstore_path=',
'clear_datastore',
'clear_prospective_search',
'clear_search_indexes',
'datastore_path=',
'debug',
'debug_imports',
'default_partition=',
'disable_static_caching',
'disable_task_running',
'enable_sendmail',
'help',
'high_replication',
'history_path=',
'multiprocess',
'multiprocess_api_port=',
'multiprocess_api_server',
'multiprocess_app_instance_id=',
'multiprocess_backend_id=',
'multiprocess_backend_instance_id=',
'multiprocess_min_port=',
'mysql_host=',
'mysql_password=',
'mysql_port=',
'mysql_socket=',
'mysql_user=',
'persist_logs',
'port=',
'require_indexes',
'search_indexes_path=',
'show_mail_body',
'skip_sdk_update_check',
'smtp_host=',
'smtp_password=',
'smtp_port=',
'smtp_user=',
'task_retry_seconds=',
'trusted',
'use_sqlite',
]
def PrintUsageExit(code):
"""Prints usage information and exits with a status code.
Args:
code: Status code to pass to sys.exit() after displaying usage information.
"""
render_dict = DEFAULT_ARGS.copy()
render_dict['script'] = os.path.basename(sys.argv[0])
print sys.modules['__main__'].__doc__ % render_dict
sys.stdout.flush()
sys.exit(code)
def ParseArguments(argv):
"""Parses command-line arguments.
Args:
argv: Command-line arguments, including the executable name, used to
execute this application.
Returns:
Tuple (args, option_dict) where:
args: List of command-line arguments following the executable name.
option_dict: Dictionary of parsed flags that maps keys from DEFAULT_ARGS
to their values, which are either pulled from the defaults, or from
command-line flags.
"""
option_dict = DEFAULT_ARGS.copy()
try:
opts, args = getopt.gnu_getopt(argv[1:], OPTIONS, LONG_OPTIONS)
except getopt.GetoptError, e:
print >>sys.stderr, 'Error: %s' % e
PrintUsageExit(1)
for option, value in opts:
if option in ('-h', '--help'):
PrintUsageExit(0)
if option in ('-d', '--debug'):
option_dict[ARG_LOG_LEVEL] = logging.DEBUG
if option in ('-p', '--port'):
try:
option_dict[ARG_PORT] = int(value)
if not (65535 > option_dict[ARG_PORT] > 0):
raise ValueError
except ValueError:
print >>sys.stderr, 'Invalid value supplied for port'
PrintUsageExit(1)
def expand_path(s):
return os.path.abspath(os.path.expanduser(s))
if option in ('-a', '--address'):
option_dict[ARG_ADDRESS] = value
if option == '--blobstore_path':
option_dict[ARG_BLOBSTORE_PATH] = expand_path(value)
if option == '--datastore_path':
option_dict[ARG_DATASTORE_PATH] = expand_path(value)
if option == '--search_indexes_path':
option_dict[ARG_SEARCH_INDEX_PATH] = expand_path(value)
if option == '--prospective_search_path':
option_dict[ARG_PROSPECTIVE_SEARCH_PATH] = expand_path(value)
if option == '--skip_sdk_update_check':
option_dict[ARG_SKIP_SDK_UPDATE_CHECK] = True
if option == '--use_sqlite':
option_dict[ARG_USE_SQLITE] = True
if option == '--high_replication':
option_dict[ARG_HIGH_REPLICATION] = True
if option == '--history_path':
option_dict[ARG_HISTORY_PATH] = expand_path(value)
if option in ('-c', '--clear_datastore'):
option_dict[ARG_CLEAR_DATASTORE] = True
if option == '--clear_prospective_search':
option_dict[ARG_CLEAR_PROSPECTIVE_SEARCH] = True
if option == '--clear_search_indexes':
option_dict[ARG_CLEAR_SEARCH_INDEX] = True
if option == '--require_indexes':
option_dict[ARG_REQUIRE_INDEXES] = True
if option == '--mysql_host':
option_dict[ARG_MYSQL_HOST] = value
if option == '--mysql_port':
option_dict[ARG_MYSQL_PORT] = _ParsePort(value, '--mysql_port')
if option == '--mysql_user':
option_dict[ARG_MYSQL_USER] = value
if option == '--mysql_password':
option_dict[ARG_MYSQL_PASSWORD] = value
if option == '--mysql_socket':
option_dict[ARG_MYSQL_SOCKET] = value
if option == '--smtp_host':
option_dict[ARG_SMTP_HOST] = value
if option == '--smtp_port':
option_dict[ARG_SMTP_PORT] = _ParsePort(value, '--smtp_port')
if option == '--smtp_user':
option_dict[ARG_SMTP_USER] = value
if option == '--smtp_password':
option_dict[ARG_SMTP_PASSWORD] = value
if option == '--enable_sendmail':
option_dict[ARG_ENABLE_SENDMAIL] = True
if option == '--show_mail_body':
option_dict[ARG_SHOW_MAIL_BODY] = True
if option == '--auth_domain':
option_dict['_DEFAULT_ENV_AUTH_DOMAIN'] = value
if option == '--debug_imports':
option_dict['_ENABLE_LOGGING'] = True
if option == '--admin_console_server':
option_dict[ARG_ADMIN_CONSOLE_SERVER] = value.strip()
if option == '--admin_console_host':
option_dict[ARG_ADMIN_CONSOLE_HOST] = value
if option == '--allow_skipped_files':
option_dict[ARG_ALLOW_SKIPPED_FILES] = True
if option == '--disable_static_caching':
option_dict[ARG_STATIC_CACHING] = False
if option == '--disable_task_running':
option_dict[ARG_DISABLE_TASK_RUNNING] = True
if option == '--task_retry_seconds':
try:
option_dict[ARG_TASK_RETRY_SECONDS] = int(value)
if option_dict[ARG_TASK_RETRY_SECONDS] < 0:
raise ValueError
except ValueError:
print >>sys.stderr, 'Invalid value supplied for task_retry_seconds'
PrintUsageExit(1)
if option == '--trusted':
option_dict[ARG_TRUSTED] = True
if option == '--persist_logs':
option_dict[ARG_PERSIST_LOGS] = True
if option == '--backends':
option_dict[ARG_BACKENDS] = value
if option == '--multiprocess':
option_dict[ARG_MULTIPROCESS] = value
if option == '--multiprocess_min_port':
option_dict[ARG_MULTIPROCESS_MIN_PORT] = value
if option == '--multiprocess_api_server':
option_dict[ARG_MULTIPROCESS_API_SERVER] = value
if option == '--multiprocess_api_port':
option_dict[ARG_MULTIPROCESS_API_PORT] = value
if option == '--multiprocess_app_instance_id':
option_dict[ARG_MULTIPROCESS_APP_INSTANCE_ID] = value
if option == '--multiprocess_backend_id':
option_dict[ARG_MULTIPROCESS_BACKEND_ID] = value
if option == '--multiprocess_backend_instance_id':
option_dict[ARG_MULTIPROCESS_BACKEND_INSTANCE_ID] = value
if option == '--default_partition':
option_dict[ARG_DEFAULT_PARTITION] = value
return args, option_dict
def _ParsePort(port, description):
"""Parses a port number from a string.
Args:
port: string
description: string to use in error messages.
Returns: integer between 0 and 65535
Raises:
ValueError if port is not a valid port number.
"""
try:
port = int(port)
if not (65535 > port > 0):
raise ValueError
return port
except ValueError:
print >>sys.stderr, 'Invalid value %s supplied for %s' % (port, description)
PrintUsageExit(1)
def MakeRpcServer(option_dict):
"""Create a new HttpRpcServer.
Creates a new HttpRpcServer to check for updates to the SDK.
Args:
option_dict: The dict of command line options.
Returns:
A HttpRpcServer.
"""
server = appengine_rpc.HttpRpcServer(
option_dict[ARG_ADMIN_CONSOLE_SERVER],
lambda: ('unused_email', 'unused_password'),
appcfg.GetUserAgent(),
appcfg.GetSourceName(),
host_override=option_dict[ARG_ADMIN_CONSOLE_HOST])
server.authenticated = True
return server
def SigTermHandler(signum, frame):
"""Handler for TERM signal.
Raises a KeyboardInterrupt to perform a graceful shutdown on SIGTERM signal.
"""
raise KeyboardInterrupt()
def main(argv):
"""Runs the development application server."""
args, option_dict = ParseArguments(argv)
if len(args) != 1:
print >>sys.stderr, 'Invalid arguments'
PrintUsageExit(1)
root_path = args[0]
if '_DEFAULT_ENV_AUTH_DOMAIN' in option_dict:
auth_domain = option_dict['_DEFAULT_ENV_AUTH_DOMAIN']
dev_appserver.DEFAULT_ENV['AUTH_DOMAIN'] = auth_domain
if '_ENABLE_LOGGING' in option_dict:
enable_logging = option_dict['_ENABLE_LOGGING']
dev_appserver.HardenedModulesHook.ENABLE_LOGGING = enable_logging
log_level = option_dict[ARG_LOG_LEVEL]
option_dict['root_path'] = os.path.realpath(root_path)
logging.getLogger().setLevel(log_level)
default_partition = option_dict[ARG_DEFAULT_PARTITION]
appinfo = None
try:
appinfo, _, _ = dev_appserver.LoadAppConfig(
root_path, {}, default_partition=default_partition)
except yaml_errors.EventListenerError, e:
logging.error('Fatal error when loading application configuration:\n%s', e)
return 1
except dev_appserver.InvalidAppConfigError, e:
logging.error('Application configuration file invalid:\n%s', e)
return 1
version_tuple = tuple(sys.version_info[:2])
expected_version = PRODUCTION_VERSION
if appinfo.runtime == 'python27':
expected_version = (2, 7)
if ARG_MULTIPROCESS not in option_dict and WARN_ABOUT_PYTHON_VERSION:
if version_tuple < expected_version:
sys.stderr.write('Warning: You are using a Python runtime (%d.%d) that '
'is older than the production runtime environment '
'(%d.%d). Your application may be dependent on Python '
'behaviors that have changed and may not work correctly '
'when deployed to production.\n' % (
version_tuple[0], version_tuple[1],
expected_version[0], expected_version[1]))
if version_tuple > expected_version:
sys.stderr.write('Warning: You are using a Python runtime (%d.%d) that '
'is more recent than the production runtime environment '
'(%d.%d). Your application may use features that are '
'not available in the production environment and may '
'not work correctly when deployed to production.\n' % (
version_tuple[0], version_tuple[1],
expected_version[0], expected_version[1]))
multiprocess.Init(argv, option_dict, root_path, appinfo)
dev_process = multiprocess.GlobalProcess()
port = option_dict[ARG_PORT]
login_url = option_dict[ARG_LOGIN_URL]
address = option_dict[ARG_ADDRESS]
allow_skipped_files = option_dict[ARG_ALLOW_SKIPPED_FILES]
static_caching = option_dict[ARG_STATIC_CACHING]
persist_logs = option_dict[ARG_PERSIST_LOGS]
skip_sdk_update_check = option_dict[ARG_SKIP_SDK_UPDATE_CHECK]
if (option_dict[ARG_ADMIN_CONSOLE_SERVER] != '' and
not dev_process.IsSubprocess()):
server = MakeRpcServer(option_dict)
if skip_sdk_update_check:
logging.info('Skipping update check.')
else:
update_check = appcfg.UpdateCheck(server, appinfo)
update_check.CheckSupportedVersion()
if update_check.AllowedToCheckForUpdates():
update_check.CheckForUpdates()
if dev_process.IsSubprocess():
logging.getLogger().setLevel(logging.WARNING)
try:
dev_appserver.SetupStubs(appinfo.application, **option_dict)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.error(str(exc_type) + ': ' + str(exc_value))
logging.debug(''.join(traceback.format_exception(
exc_type, exc_value, exc_traceback)))
return 1
http_server = dev_appserver.CreateServer(
root_path,
login_url,
port,
sdk_dir=SDK_PATH,
serve_address=address,
allow_skipped_files=allow_skipped_files,
static_caching=static_caching,
default_partition=default_partition,
persist_logs=persist_logs)
signal.signal(signal.SIGTERM, SigTermHandler)
dev_process.PrintStartMessage(appinfo.application, address, port)
if dev_process.IsInstance():
logging.getLogger().setLevel(logging.INFO)
try:
try:
http_server.serve_forever()
except KeyboardInterrupt:
if not dev_process.IsSubprocess():
logging.info('Server interrupted by user, terminating')
except:
exc_info = sys.exc_info()
info_string = '\n'.join(traceback.format_exception(*exc_info))
logging.error('Error encountered:\n%s\nNow terminating.', info_string)
return 1
finally:
http_server.server_close()
finally:
done = False
while not done:
try:
multiprocess.Shutdown()
done = True
except KeyboardInterrupt:
pass
dev_appserver.TearDownStubs()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
import gspread
import config
import datetime
import uuid
import httplib
import httplib2
import os
from oauth2client.file import Storage
from oauth2client.client import Credentials
def refresh_access_token():
if creds.access_token_expired:
creds._do_refresh_request(httplib2.Http().request)
print "Loading DB..."
if os.environ.get("GOOGLE_CREDS", None):
print "Loading google credentials from environment"
creds = Credentials.new_from_json(os.environ.get("GOOGLE_CREDS",""))
else:
print "Loading google credentials from file"
storage = Storage('credentials-nhshd.dat')
creds = storage.get()
refresh_access_token()
log_worksheet = None
student_worksheet = None
procedures_worksheet = None
timeframes_worksheet = None
locations_worksheet = None
doctors_worksheet = None
sms_log_worksheet = None
def reconnect():
global log_worksheet, student_worksheet, procedures_worksheet, timeframes_worksheet, locations_worksheet, doctors_worksheet, sms_log_worksheet
gs = gspread.authorize(creds)
sheet = gs.open_by_key(config.google_sheet_key)
log_worksheet = sheet.worksheet("log")
student_worksheet = sheet.worksheet("students")
procedures_worksheet = sheet.worksheet("procedures")
timeframes_worksheet = sheet.worksheet("timeframes")
locations_worksheet = sheet.worksheet("locations")
doctors_worksheet = sheet.worksheet("doctors")
sms_log_worksheet = sheet.worksheet("sms_log")
reconnect()
print "Complete"
def do_with_retry(f):
try:
return f()
except httplib.BadStatusLine:
print "Got BadStatusLine. Retrying"
reconnect()
return f()
def to_timestamp(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
def get_all_students():
refresh_access_token()
def doit():
return student_worksheet.get_all_records()
return do_with_retry(doit)
def get_all_opportunities():
refresh_access_token()
def f():
rs = log_worksheet.get_all_records()
for r in rs:
if r["outcome"] == "ATTENDED":
r["status"] = "Attended"
elif r["outcome"] == "NOT_ATTENDED":
r["status"] = "Not Attended"
elif r["student"]:
r["status"] = "Accepted"
elif to_timestamp(datetime.datetime.utcnow()) > int(r["expiry_time"]):
r["status"] = "Expired"
else:
r["status"] = "Offered"
r["time"] = datetime.datetime.fromtimestamp(r["time_sent"])
return rs
return do_with_retry(f)
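# Illustrative sketch of a record returned by get_all_opportunities(); the
# "status" and "time" keys are derived above, the rest mirror the "log"
# worksheet columns (the values shown here are made up):
#   {"id": "9f6c...", "doctor": "Dr Thing", "procedure": "do thing",
#    "location": "the ward", "time_sent": 1489998800, "expiry_time": 1490000000,
#    "student": "", "outcome": "", "status": "Expired", "time": datetime(...)}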
## Call this function with a new opportunity:
# db.add_opportunity({
# "doctor": "Dr Thing",
# "procedure":"do thing",
# "location": "the ward",
# "duration": 20
# })
#
# Returns the GUID of the created opportunity
def add_opportunity(op):
refresh_access_token()
def f():
        vs = [str(uuid.uuid4())]  # store the GUID as a string so it matches what the sheet returns
now = to_timestamp(datetime.datetime.utcnow())
vs.append(op["doctor"])
vs.append(now)
vs.append(op["procedure"])
vs.append(int(now + int(op["duration"]) * 60))
vs.append(op["location"])
log_worksheet.append_row(vs)
return vs[0]
return do_with_retry(f)
def get_opportunity(guid):
refresh_access_token()
def f():
ops = get_all_opportunities()
for op in ops:
if op["id"] == guid:
return op
return do_with_retry(f)
def update_opportunity(guid, student_name):
refresh_access_token()
def f():
ops = get_all_opportunities()
i = 1
x = None
for op in ops:
i += 1
if op["id"] == guid:
x = op
break
if x["student"]:
return False
log_worksheet.update_cell(i, 7, student_name)
log_worksheet.update_cell(i, 8, to_timestamp(datetime.datetime.utcnow()))
return True
return do_with_retry(f)
def complete_opportunity(guid, attended):
refresh_access_token()
def f():
ops = get_all_opportunities()
i = 1
x = None
for op in ops:
i += 1
if op["id"] == guid:
x = op
break
        if x is None:
            # unknown guid; nothing to mark
            return False
        if attended:
log_worksheet.update_cell(i, 9, "ATTENDED")
else:
log_worksheet.update_cell(i, 9, "NOT_ATTENDED")
return True
return do_with_retry(f)
def get_procedures():
refresh_access_token()
def f():
return [p['procedure'] for p in procedures_worksheet.get_all_records()]
return do_with_retry(f)
def get_locations():
refresh_access_token()
def f():
return [l['location'] for l in locations_worksheet.get_all_records()]
return do_with_retry(f)
def get_timeframes():
refresh_access_token()
def f():
return [t['timeframe'] for t in timeframes_worksheet.get_all_records()]
return do_with_retry(f)
def get_doctors():
refresh_access_token()
def f():
return [d['doctor'] for d in doctors_worksheet.get_all_records()]
return do_with_retry(f)
def log_sms(from_number, to_number, body, direction):
refresh_access_token()
def f():
vs = []
now = to_timestamp(datetime.datetime.utcnow())
vs.append(now)
vs.append(from_number)
vs.append(to_number)
vs.append(body)
vs.append(direction)
sms_log_worksheet.append_row(vs)
return do_with_retry(f)
|
|
"""
Convenience forms for adding and updating ``Event``s and ``Occurrence``s.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime, date, time, timedelta
from django import forms
from django.utils.translation import ugettext_lazy as _
#from django.forms.extras.widgets import SelectDateWidget
from dateutil import rrule
from temporale import settings as temporale_settings
from temporale import utils
from temporale.models import Event, Occurrence
from dorsale.forms import DorsaleBaseModelForm
#from dorsale.tools import assert_on_exception
from dorsale.widgets import DatePickerWidget
WEEKDAY_SHORT = (
(7, _('Sun')),
(1, _('Mon')),
(2, _('Tue')),
(3, _('Wed')),
(4, _('Thu')),
(5, _('Fri')),
(6, _('Sat'))
)
WEEKDAY_LONG = (
(7, _('Sunday')),
(1, _('Monday')),
(2, _('Tuesday')),
(3, _('Wednesday')),
(4, _('Thursday')),
(5, _('Friday')),
(6, _('Saturday'))
)
MONTH_LONG = (
(1, _('January')),
(2, _('February')),
(3, _('March')),
(4, _('April')),
(5, _('May')),
(6, _('June')),
(7, _('July')),
(8, _('August')),
(9, _('September')),
(10, _('October')),
(11, _('November')),
(12, _('December')),
)
MONTH_SHORT = (
(1, _('Jan')),
(2, _('Feb')),
(3, _('Mar')),
(4, _('Apr')),
(5, _('May')),
(6, _('Jun')),
(7, _('Jul')),
(8, _('Aug')),
(9, _('Sep')),
(10, _('Oct')),
(11, _('Nov')),
(12, _('Dec')),
)
ORDINAL = (
(1, _('first')),
(2, _('second')),
(3, _('third')),
(4, _('fourth')),
(-1, _('last'))
)
FREQUENCY_CHOICES = (
(rrule.DAILY, _('Day(s)')),
(rrule.WEEKLY, _('Week(s)')),
(rrule.MONTHLY, _('Month(s)')),
(rrule.YEARLY, _('Year(s)')),
)
REPEAT_CHOICES = (
('count', _('By count')),
('until', _('Until date')),
)
ISO_WEEKDAYS_MAP = (
None,
rrule.MO,
rrule.TU,
rrule.WE,
rrule.TH,
rrule.FR,
rrule.SA,
rrule.SU
)
MINUTES_INTERVAL = temporale_settings.TIMESLOT_INTERVAL.seconds // 60
SECONDS_INTERVAL = utils.time_delta_total_seconds(temporale_settings.DEFAULT_OCCURRENCE_DURATION)
def timeslot_options(interval=temporale_settings.TIMESLOT_INTERVAL,
start_time=temporale_settings.TIMESLOT_START_TIME,
end_delta=temporale_settings.TIMESLOT_END_TIME_DURATION,
fmt=temporale_settings.TIMESLOT_TIME_FORMAT):
"""
Create a list of time slot options for use in temporale forms.
    The list is comprised of 2-tuples containing a 24-hour time string and a
    12-hour temporal representation of that time.
"""
dt = datetime.combine(date.today(), time(0))
dtstart = datetime.combine(dt.date(), start_time)
dtend = dtstart + end_delta
options = []
while dtstart <= dtend:
options.append((str(dtstart.time()), dtstart.strftime(fmt)))
dtstart += interval
return options
def timeslot_offset_options(interval=temporale_settings.TIMESLOT_INTERVAL,
start_time=temporale_settings.TIMESLOT_START_TIME,
end_delta=temporale_settings.TIMESLOT_END_TIME_DURATION,
fmt=temporale_settings.TIMESLOT_TIME_FORMAT):
"""
Create a list of time slot options for use in temporale forms.
The list is comprised of 2-tuples containing the number of seconds since the
start of the day and a 12-hour temporal representation of that offset.
"""
dt = datetime.combine(date.today(), time(0))
dtstart = datetime.combine(dt.date(), start_time)
dtend = dtstart + end_delta
options = []
delta = utils.time_delta_total_seconds(dtstart - dt)
seconds = utils.time_delta_total_seconds(interval)
while dtstart <= dtend:
options.append((delta, dtstart.strftime(fmt)))
dtstart += interval
delta += seconds
return options
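# Illustrative sketch: with a 30-minute TIMESLOT_INTERVAL and a 09:00 start
# (both come from the temporale settings, so these values are assumptions),
# timeslot_options() yields pairs like ('09:00:00', '9:00 AM') while
# timeslot_offset_options() yields (32400, '9:00 AM'), (34200, '9:30 AM'), ...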
default_timeslot_options = timeslot_options()
default_timeslot_offset_options = timeslot_offset_options()
class SplitDateTimeWidget(forms.MultiWidget):
"""
A Widget that splits datetime input into a DatePickerWidget for dates and
Select widget for times.
"""
def __init__(self, attrs=None):
widgets = (
DatePickerWidget(attrs=attrs),
forms.Select(choices=default_timeslot_options, attrs=attrs)
)
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class MultipleIntegerField(forms.MultipleChoiceField):
"""
A form field for handling multiple integers.
"""
def __init__(self, choices, size=None, label=None, widget=None):
if widget is None:
widget = forms.SelectMultiple(attrs={'size' : size or len(choices)})
super(MultipleIntegerField, self).__init__(
required=False,
choices=choices,
label=label,
widget=widget,
)
def clean(self, value):
return [int(i) for i in super(MultipleIntegerField, self).clean(value)]
class MultipleOccurrenceForm(forms.Form):
day = forms.DateField(
label=_('Date'),
initial=date.today,
widget=DatePickerWidget(),
localize=True,
)
day.help_text = _('-')
start_time_delta = forms.IntegerField(
label=_('Start time'),
widget=forms.Select(choices=default_timeslot_offset_options)
)
start_time_delta.help_text = _('-')
end_time_delta = forms.IntegerField(
label=_('End time'),
widget=forms.Select(choices=default_timeslot_offset_options)
)
end_time_delta.help_text = _('-')
# recurrence options
repeats = forms.ChoiceField(
choices=REPEAT_CHOICES,
initial='count',
label=_('Occurrences'),
widget=forms.RadioSelect(),
localize=True,
)
repeats.help_text = _('-')
count = forms.IntegerField(
label=_('Total Occurrences'),
initial=1,
required=False,
widget=forms.TextInput(attrs=dict(size=2, max_length=2))
)
    count.help_text = _('-')
until = forms.DateField(
label=_('until'),
required=False,
initial=date.today,
widget=DatePickerWidget(),
localize=True,
)
until.help_text = _('-')
freq = forms.IntegerField(
label=_('Frequency'),
initial=rrule.WEEKLY,
widget=forms.RadioSelect(choices=FREQUENCY_CHOICES),
)
    freq.help_text = _('-')
interval = forms.IntegerField(
label=_('Interval'),
required=False,
initial='1',
widget=forms.TextInput(attrs=dict(size=3, max_length=3))
)
interval.help_text = _('-')
# weekly options
week_days = MultipleIntegerField(
WEEKDAY_SHORT,
label=_('Weekly options'),
widget=forms.CheckboxSelectMultiple
)
week_days.help_text = _('-')
# monthly options
month_option = forms.ChoiceField(
choices=(('on',_('On the')), ('each',_('Each:'))),
initial='each',
widget=forms.RadioSelect(),
label=_('Monthly options'),
localize=True,
)
month_option.help_text = _('-')
month_ordinal = forms.IntegerField(
widget=forms.Select(choices=ORDINAL),
)
month_ordinal.help_text = _('-')
month_ordinal_day = forms.IntegerField(
widget=forms.Select(choices=WEEKDAY_LONG),
)
month_ordinal_day.help_text = _('-')
each_month_day = MultipleIntegerField(
[(i,i) for i in range(1,32)],
widget=forms.CheckboxSelectMultiple,
)
each_month_day.help_text = _('-')
# yearly options
year_months = MultipleIntegerField(
MONTH_SHORT,
label=_('Yearly options'),
widget=forms.CheckboxSelectMultiple,
)
year_months.help_text = _('-')
is_year_month_ordinal = forms.BooleanField(required=False)
is_year_month_ordinal.help_text = _('-')
year_month_ordinal = forms.IntegerField(widget=forms.Select(choices=ORDINAL))
year_month_ordinal.help_text = _('-')
year_month_ordinal_day = forms.IntegerField(widget=forms.Select(choices=WEEKDAY_LONG))
year_month_ordinal_day.help_text = _('-')
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(MultipleOccurrenceForm, self).__init__(*args, **kwargs)
dtstart = self.initial.get('dtstart', None)
if dtstart:
dtstart = dtstart.replace(
minute=((dtstart.minute // MINUTES_INTERVAL) * MINUTES_INTERVAL),
second=0,
microsecond=0
)
weekday = dtstart.isoweekday()
ordinal = dtstart.day // 7
ordinal = u'%d' % (-1 if ordinal > 3 else ordinal + 1,)
self.initial.setdefault('week_days', u'%d' % weekday)
self.initial.setdefault('month_ordinal', ordinal)
self.initial.setdefault('month_ordinal_day', u'%d' % weekday)
self.initial.setdefault('each_month_day', [u'%d' % dtstart.day])
self.initial.setdefault('year_months', [u'%d' % dtstart.month])
self.initial.setdefault('year_month_ordinal', ordinal)
self.initial.setdefault('year_month_ordinal_day', u'%d' % weekday)
offset = (dtstart - datetime.combine(dtstart.date(), time(0))).seconds
self.initial.setdefault('start_time_delta', u'%d' % offset)
self.initial.setdefault('end_time_delta', u'%d' % (offset + SECONDS_INTERVAL,))
def clean(self):
day = datetime.combine(self.cleaned_data['day'], time(0))
self.cleaned_data['start_time'] = day + timedelta(
seconds=self.cleaned_data['start_time_delta']
)
self.cleaned_data['end_time'] = day + timedelta(
seconds=self.cleaned_data['end_time_delta']
)
return self.cleaned_data
def save(self, event):
if self.cleaned_data['repeats'] == 'no':
params = {}
else:
params = self._build_rrule_params()
event.add_occurrences(
self.cleaned_data['start_time'],
self.cleaned_data['end_time'],
**params
)
return event
def _build_rrule_params(self):
iso = ISO_WEEKDAYS_MAP
data = self.cleaned_data
params = dict(
freq=data['freq'],
interval=data['interval'] or 1
)
if self.cleaned_data['repeats'] == 'count':
params['count'] = data['count']
elif self.cleaned_data['repeats'] == 'until':
params['until'] = data['until']
if params['freq'] == rrule.WEEKLY:
params['byweekday'] = [iso[n] for n in data['week_days']]
elif params['freq'] == rrule.MONTHLY:
if 'on' == data['month_option']:
ordinal = data['month_ordinal']
day = iso[data['month_ordinal_day']]
params['byweekday'] = day(ordinal)
else:
params['bymonthday'] = data['each_month_day']
elif params['freq'] == rrule.YEARLY:
params['bymonth'] = data['year_months']
if data['is_year_month_ordinal']:
ordinal = data['year_month_ordinal']
day = iso[data['year_month_ordinal_day']]
params['byweekday'] = day(ordinal)
elif params['freq'] != rrule.DAILY:
raise NotImplementedError(_('Unknown interval rule %s') % params['freq'])
return params
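    # Illustrative sketch: a weekly recurrence on Monday and Wednesday repeated
    # 5 times would come out of _build_rrule_params() roughly as
    #   {'freq': rrule.WEEKLY, 'interval': 1, 'count': 5,
    #    'byweekday': [rrule.MO, rrule.WE]}
    # which save() then hands to event.add_occurrences() as rrule keywords.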
class EventForm(DorsaleBaseModelForm):
"""
A simple form for adding and updating Event attributes
"""
class Meta:
model = Event
fields = ['title', 'description', 'event_type', 'content_type', ] # does this make sense at all?
def __init__(self, *args, **kws):
"""
Required keyword parameter: `user`
"""
super(EventForm, self).__init__(*args, **kws)
self.fields['description'].required = False
class SingleOccurrenceForm(DorsaleBaseModelForm):
"""
A simple form for adding and updating single Occurrence attributes
Required keyword parameter: `user`
"""
start_time = forms.DateTimeField(widget=SplitDateTimeWidget, localize=True, label=_('start time'))
start_time.help_text = _('-')
end_time = forms.DateTimeField(widget=SplitDateTimeWidget, localize=True, label=_('end time'))
end_time.help_text = _('-')
class Meta:
model = Occurrence
fields = ['start_time', 'end_time', ] # 'event',
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
endpoint for heat AWS-compatible CloudWatch API
"""
from heat.api.aws import exception
from heat.api.aws import utils as api_utils
from heat.common import wsgi
from heat.common import policy
from heat.common import exception as heat_exception
from heat.rpc import client as rpc_client
from heat.rpc import api as engine_api
import heat.openstack.common.rpc.common as rpc_common
from heat.openstack.common import log as logging
from heat.openstack.common.gettextutils import _
logger = logging.getLogger(__name__)
class WatchController(object):
"""
WSGI controller for CloudWatch resource in heat API
Implements the API actions
"""
def __init__(self, options):
self.options = options
self.engine_rpcapi = rpc_client.EngineClient()
self.policy = policy.Enforcer(scope='cloudwatch')
def _enforce(self, req, action):
"""Authorize an action against the policy.json."""
try:
self.policy.enforce(req.context, action)
except heat_exception.Forbidden:
msg = _("Action %s not allowed for user") % action
raise exception.HeatAccessDeniedError(msg)
except Exception:
# We expect policy.enforce to either pass or raise Forbidden
# however, if anything else happens, we want to raise
# HeatInternalFailureError, failure to do this results in
# the user getting a big stacktrace spew as an API response
msg = _("Error authorizing action %s") % action
raise exception.HeatInternalFailureError(msg)
@staticmethod
def _reformat_dimensions(dims):
'''
Reformat dimensions list into AWS API format
Parameter dims is a list of dicts
'''
newdims = []
        for d in dims:
            for key in d.keys():
                newdims.append({'Name': key, 'Value': d[key]})
return newdims
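    # Illustrative sketch:
    #   [{'InstanceId': 'i-0001'}, {'StackId': 'arn:...'}]
    # is reformatted to
    #   [{'Name': 'InstanceId', 'Value': 'i-0001'},
    #    {'Name': 'StackId', 'Value': 'arn:...'}]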
def delete_alarms(self, req):
"""
Implements DeleteAlarms API action
"""
self._enforce(req, 'DeleteAlarms')
return exception.HeatAPINotImplementedError()
def describe_alarm_history(self, req):
"""
Implements DescribeAlarmHistory API action
"""
self._enforce(req, 'DescribeAlarmHistory')
return exception.HeatAPINotImplementedError()
def describe_alarms(self, req):
"""
Implements DescribeAlarms API action
"""
self._enforce(req, 'DescribeAlarms')
def format_metric_alarm(a):
"""
Reformat engine output into the AWS "MetricAlarm" format
"""
keymap = {
engine_api.WATCH_ACTIONS_ENABLED: 'ActionsEnabled',
engine_api.WATCH_ALARM_ACTIONS: 'AlarmActions',
engine_api.WATCH_TOPIC: 'AlarmArn',
engine_api.WATCH_UPDATED_TIME:
'AlarmConfigurationUpdatedTimestamp',
engine_api.WATCH_DESCRIPTION: 'AlarmDescription',
engine_api.WATCH_NAME: 'AlarmName',
engine_api.WATCH_COMPARISON: 'ComparisonOperator',
engine_api.WATCH_DIMENSIONS: 'Dimensions',
engine_api.WATCH_PERIODS: 'EvaluationPeriods',
engine_api.WATCH_INSUFFICIENT_ACTIONS:
'InsufficientDataActions',
engine_api.WATCH_METRIC_NAME: 'MetricName',
engine_api.WATCH_NAMESPACE: 'Namespace',
engine_api.WATCH_OK_ACTIONS: 'OKActions',
engine_api.WATCH_PERIOD: 'Period',
engine_api.WATCH_STATE_REASON: 'StateReason',
engine_api.WATCH_STATE_REASON_DATA: 'StateReasonData',
engine_api.WATCH_STATE_UPDATED_TIME: 'StateUpdatedTimestamp',
engine_api.WATCH_STATE_VALUE: 'StateValue',
engine_api.WATCH_STATISTIC: 'Statistic',
engine_api.WATCH_THRESHOLD: 'Threshold',
engine_api.WATCH_UNIT: 'Unit'}
# AWS doesn't return StackId in the main MetricAlarm
# structure, so we add StackId as a dimension to all responses
a[engine_api.WATCH_DIMENSIONS].append({'StackId':
a[engine_api.WATCH_STACK_ID]
})
# Reformat dimensions list into AWS API format
a[engine_api.WATCH_DIMENSIONS] = self._reformat_dimensions(
a[engine_api.WATCH_DIMENSIONS])
return api_utils.reformat_dict_keys(keymap, a)
con = req.context
parms = dict(req.params)
try:
name = parms['AlarmName']
except KeyError:
name = None
try:
watch_list = self.engine_rpcapi.show_watch(con, watch_name=name)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
res = {'MetricAlarms': [format_metric_alarm(a)
for a in watch_list]}
result = api_utils.format_response("DescribeAlarms", res)
return result
def describe_alarms_for_metric(self, req):
"""
Implements DescribeAlarmsForMetric API action
"""
self._enforce(req, 'DescribeAlarmsForMetric')
return exception.HeatAPINotImplementedError()
def disable_alarm_actions(self, req):
"""
Implements DisableAlarmActions API action
"""
self._enforce(req, 'DisableAlarmActions')
return exception.HeatAPINotImplementedError()
def enable_alarm_actions(self, req):
"""
Implements EnableAlarmActions API action
"""
self._enforce(req, 'EnableAlarmActions')
return exception.HeatAPINotImplementedError()
def get_metric_statistics(self, req):
"""
Implements GetMetricStatistics API action
"""
self._enforce(req, 'GetMetricStatistics')
return exception.HeatAPINotImplementedError()
def list_metrics(self, req):
"""
Implements ListMetrics API action
Lists metric datapoints associated with a particular alarm,
or all alarms if none specified
"""
self._enforce(req, 'ListMetrics')
def format_metric_data(d, fil={}):
"""
Reformat engine output into the AWS "Metric" format
Takes an optional filter dict, which is traversed
so a metric dict is only returned if all keys match
the filter dict
"""
dimensions = [
{'AlarmName': d[engine_api.WATCH_DATA_ALARM]},
{'Timestamp': d[engine_api.WATCH_DATA_TIME]}
]
for key in d[engine_api.WATCH_DATA]:
dimensions.append({key: d[engine_api.WATCH_DATA][key]})
newdims = self._reformat_dimensions(dimensions)
result = {
'MetricName': d[engine_api.WATCH_DATA_METRIC],
'Dimensions': newdims,
'Namespace': d[engine_api.WATCH_DATA_NAMESPACE],
}
for f in fil:
try:
value = result[f]
if value != fil[f]:
# Filter criteria not met, return None
return
except KeyError:
logger.warning(_("Invalid filter key %s, ignoring") % f)
return result
con = req.context
parms = dict(req.params)
# FIXME : Don't yet handle filtering by Dimensions
filter_result = dict((k, v) for (k, v) in parms.iteritems() if k in
("MetricName", "Namespace"))
logger.debug(_("filter parameters : %s") % filter_result)
try:
# Engine does not currently support query by namespace/metric
# so we pass None/None and do any filtering locally
null_kwargs = {'metric_namespace': None,
'metric_name': None}
watch_data = self.engine_rpcapi.show_watch_metric(con,
**null_kwargs)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
res = {'Metrics': []}
for d in watch_data:
metric = format_metric_data(d, filter_result)
if metric:
res['Metrics'].append(metric)
result = api_utils.format_response("ListMetrics", res)
return result
def put_metric_alarm(self, req):
"""
Implements PutMetricAlarm API action
"""
self._enforce(req, 'PutMetricAlarm')
return exception.HeatAPINotImplementedError()
def put_metric_data(self, req):
"""
Implements PutMetricData API action
"""
self._enforce(req, 'PutMetricData')
con = req.context
parms = dict(req.params)
namespace = api_utils.get_param_value(parms, 'Namespace')
# Extract data from the request so we can pass it to the engine
# We have to do this in two passes, because the AWS
# query format nests the dimensions within the MetricData
# query-parameter-list (see AWS PutMetricData docs)
# extract_param_list gives a list-of-dict, which we then
# need to process (each dict) for dimensions
metric_data = api_utils.extract_param_list(parms, prefix='MetricData')
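        # Illustrative sketch of the shapes involved (assumed example values):
        # the AWS query parameters arrive flattened, roughly as
        #   MetricData.member.1.MetricName=ServiceFailure
        #   MetricData.member.1.Unit=Counter
        #   MetricData.member.1.Value=1
        #   MetricData.member.1.Dimensions.member.1.Name=AlarmName
        #   MetricData.member.1.Dimensions.member.1.Value=my-alarm
        # and extract_param_list() is expected to return one dict per
        # MetricData member, whose Dimensions sub-keys are unpacked in the
        # second pass below.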
        if not metric_data:
logger.error(_("Request does not contain required MetricData"))
return exception.HeatMissingParameterError("MetricData list")
watch_name = None
dimensions = []
for p in metric_data:
dimension = api_utils.extract_param_pairs(p,
prefix='Dimensions',
keyname='Name',
valuename='Value')
if 'AlarmName' in dimension:
watch_name = dimension['AlarmName']
else:
dimensions.append(dimension)
# Extract the required data from the metric_data
# and format dict to pass to engine
data = {'Namespace': namespace,
api_utils.get_param_value(metric_data[0], 'MetricName'): {
'Unit': api_utils.get_param_value(metric_data[0], 'Unit'),
'Value': api_utils.get_param_value(metric_data[0],
'Value'),
'Dimensions': dimensions}}
try:
self.engine_rpcapi.create_watch_data(con, watch_name, data)
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
result = {'ResponseMetadata': None}
return api_utils.format_response("PutMetricData", result)
def set_alarm_state(self, req):
"""
Implements SetAlarmState API action
"""
self._enforce(req, 'SetAlarmState')
# Map from AWS state names to those used in the engine
state_map = {'OK': engine_api.WATCH_STATE_OK,
'ALARM': engine_api.WATCH_STATE_ALARM,
'INSUFFICIENT_DATA': engine_api.WATCH_STATE_NODATA}
con = req.context
parms = dict(req.params)
# Get mandatory parameters
name = api_utils.get_param_value(parms, 'AlarmName')
state = api_utils.get_param_value(parms, 'StateValue')
if state not in state_map:
msg = _('Invalid state %(state)s, '
'expecting one of %(expect)s') % {
'state': state,
'expect': state_map.keys()}
logger.error(msg)
return exception.HeatInvalidParameterValueError(msg)
logger.debug(_("setting %(name)s to %(state)s") % {
'name': name, 'state': state_map[state]})
try:
self.engine_rpcapi.set_watch_state(con, watch_name=name,
state=state_map[state])
except rpc_common.RemoteError as ex:
return exception.map_remote_error(ex)
return api_utils.format_response("SetAlarmState", "")
def create_resource(options):
"""
Watch resource factory method.
"""
deserializer = wsgi.JSONRequestDeserializer()
return wsgi.Resource(WatchController(options), deserializer)
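# Minimal usage sketch (illustrative; the empty options dict and the router
# wiring are assumptions, not part of this module): a deployment pipeline
# obtains the WSGI resource from the factory above and maps AWS-style
# ?Action=... requests onto the controller methods, e.g.
#
#   resource = create_resource(options={})
#   # GET /?Action=DescribeAlarms  ->  WatchController.describe_alarms(req)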
|
|
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PyCOMPSs Worker Commons
=======================
This file contains the common code of all workers.
"""
import sys
import signal
import traceback
import base64
from pycompss.api.parameter import TaskParameter
from pycompss.api.exceptions import COMPSsException
from pycompss.runtime.commons import IS_PYTHON3
from pycompss.runtime.commons import STR_ESCAPE
from pycompss.util.serialization.serializer import deserialize_from_string
from pycompss.util.serialization.serializer import deserialize_from_file
from pycompss.util.serialization.serializer import serialize_to_file
from pycompss.util.serialization.serializer import SerializerException
from pycompss.util.storages.persistent import storage_task_context
from pycompss.util.storages.persistent import is_psco
from pycompss.util.storages.persistent import get_by_id
import pycompss.api.parameter as parameter
def build_task_parameter(p_type, p_stream, p_prefix, p_name, p_value,
args=None, pos=None):
"""
Build task parameter object from the given parameters.
:param p_type: Parameter type
:param p_stream: Parameter stream
:param p_prefix: Parameter prefix
:param p_name: Parameter name
:param p_value: Parameter value
:param args: Arguments (Default: None)
:param pos: Position (Default: None)
:return: Parameter object
"""
num_substrings = 0
if p_type in [parameter.TYPE.FILE, parameter.TYPE.COLLECTION]:
        # Maybe the file is an object; we don't care about that here.
# We will decide whether to deserialize or to forward the value
# when processing parameters in the task decorator
return TaskParameter(
p_type=p_type,
stream=p_stream,
prefix=p_prefix,
name=p_name,
file_name=p_value
), 0
elif p_type == parameter.TYPE.EXTERNAL_PSCO:
# Next position contains R/W but we do not need it. Currently skipped.
return TaskParameter(
p_type=p_type,
stream=p_stream,
prefix=p_prefix,
name=p_name,
key=p_value
), 1
elif p_type == parameter.TYPE.EXTERNAL_STREAM:
# Next position contains R/W but we do not need it. Currently skipped.
return TaskParameter(
p_type=p_type,
stream=p_stream,
prefix=p_prefix,
name=p_name,
file_name=p_value
), 1
elif p_type == parameter.TYPE.STRING:
if args is not None:
num_substrings = int(p_value)
aux = ''
first_substring = True
for j in range(5, num_substrings + 5):
if not first_substring:
aux += ' '
first_substring = False
aux += args[pos + j]
else:
aux = str(p_value)
# Decode the received string
        # Note that we prepend a '#' character to all strings in order to
        # avoid getting empty encodings in the case of empty strings, so we
        # need to remove it when decoding
aux = base64.b64decode(aux.encode())[1:]
if aux:
#######
# Check if the string is really an object
# Required in order to recover objects passed as parameters.
# - Option object_conversion
real_value = aux
try:
# try to recover the real object
if IS_PYTHON3:
# decode removes double backslash, and encode returns
# the result as binary
p_bin_str = aux.decode(STR_ESCAPE).encode()
aux = deserialize_from_string(p_bin_str)
else:
# decode removes double backslash, and str casts the output
aux = deserialize_from_string(str(aux.decode(STR_ESCAPE)))
except (SerializerException, ValueError, EOFError):
# was not an object
aux = str(real_value.decode())
#######
if IS_PYTHON3 and isinstance(aux, bytes):
aux = aux.decode('utf-8')
return TaskParameter(
p_type=p_type,
stream=p_stream,
prefix=p_prefix,
name=p_name,
content=aux
), num_substrings
else:
# Basic numeric types. These are passed as command line arguments
# and only a cast is needed
val = None
if p_type == parameter.TYPE.INT:
val = int(p_value)
elif p_type == parameter.TYPE.LONG:
val = parameter.PYCOMPSS_LONG(p_value)
if val > parameter.JAVA_MAX_INT or val < parameter.JAVA_MIN_INT:
                # A Python int parameter was converted to a Java long to prevent
# overflow. We are sure we will not overflow Python int,
# otherwise this would have been passed as a serialized object.
val = int(val)
elif p_type == parameter.TYPE.DOUBLE:
val = float(p_value)
elif p_type == parameter.TYPE.BOOLEAN:
val = (p_value == 'true')
return TaskParameter(
p_type=p_type,
stream=p_stream,
prefix=p_prefix,
name=p_name,
content=val
), 0
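# Minimal usage sketch (illustrative only, not part of the original worker):
# a basic numeric parameter needs only the five positional fields, and the
# returned offset of 0 means no extra command-line slots were consumed:
#
#   param, extra = build_task_parameter(parameter.TYPE.INT, 0, '#', 'n', '42')
#   # param.content == 42 and extra == 0
#
# The '#' prefix and the stream value 0 are assumed placeholder values here.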
def get_input_params(num_params, logger, args):
"""
Get and prepare the input parameters from string to lists.
:param num_params: Number of parameters
:param logger: Logger
:param args: Arguments (complete list of parameters with type, stream,
prefix and value)
:return: A list of TaskParameter objects
"""
pos = 0
ret = []
for i in range(0, num_params):
p_type = int(args[pos])
p_stream = int(args[pos + 1])
p_prefix = args[pos + 2]
p_name = args[pos + 3]
p_value = args[pos + 4]
if __debug__:
logger.debug("Parameter : %s" % str(i))
logger.debug("\t * Type : %s" % str(p_type))
logger.debug("\t * Std IO Stream : %s" % str(p_stream))
logger.debug("\t * Prefix : %s" % str(p_prefix))
logger.debug("\t * Name : %s" % str(p_name))
logger.debug("\t * Value: %r" % p_value)
task_param, offset = build_task_parameter(p_type, p_stream, p_prefix,
p_name, p_value, args, pos)
ret.append(task_param)
pos += offset + 5
return ret
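# Illustrative note on the layout consumed above (assumed, simplified):
# the flat args list carries five consecutive slots per parameter,
#   [<type>, <stream>, <prefix>, <name>, <value>, ...]
# and STRING parameters may be followed by extra substring slots, which is
# why build_task_parameter() also returns an offset that is added to the
# base stride of 5 when advancing `pos`.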
def task_execution(logger, process_name, module, method_name, time_out,
types, values, compss_kwargs,
persistent_storage, storage_conf):
"""
Task execution function.
:param logger: Logger
:param process_name: Process name
:param module: Module which contains the function
:param method_name: Function to invoke
:param time_out: Time out
:param types: List of the parameter's types
:param values: List of the parameter's values
:param compss_kwargs: PyCOMPSs keywords
:param persistent_storage: If persistent storage is enabled
:param storage_conf: Persistent storage configuration file
:return: exit_code, new types, new_values, and target_direction
"""
if __debug__:
logger.debug("Starting task execution")
logger.debug("module : %s " % str(module))
logger.debug("method_name: %s " % str(method_name))
logger.debug("time_out : %s " % str(time_out))
logger.debug("Types : %s " % str(types))
logger.debug("Values : %s " % str(values))
logger.debug("P. storage : %s " % str(persistent_storage))
logger.debug("Storage cfg: %s " % str(storage_conf))
new_types = []
new_values = []
try:
# WARNING: the following call will not work if a user decorator
# overrides the return of the task decorator.
# new_types, new_values = getattr(module, method_name)
# (*values, compss_types=types, **compss_kwargs)
        # If the @task is decorated with a user decorator, it may include
        # more return values, and consequently the new_types and new_values
        # will be within a tuple at position 0.
# Force users that use decorators on top of @task to return the task
# results first. This is tested with the timeit decorator in test 19.
signal.signal(signal.SIGALRM, task_timed_out)
signal.signal(signal.SIGUSR2, task_cancel)
signal.alarm(time_out)
if persistent_storage:
with storage_task_context(logger, values,
config_file_path=storage_conf):
task_output = getattr(module, method_name)(*values,
compss_types=types,
**compss_kwargs)
else:
task_output = getattr(module, method_name)(*values,
compss_types=types,
**compss_kwargs)
except TimeOutError:
logger.exception("TIMEOUT ERROR IN %s - Time Out Exception" %
process_name)
logger.exception("Task has taken too much time to process")
return task_returns(3,
types,
values,
None,
True,
"",
logger)
except COMPSsException as compss_exception:
logger.exception("COMPSS EXCEPTION IN %s" % process_name)
return_message = "No message"
        if compss_exception.message is not None:
return_message = compss_exception.message
return task_returns(2,
new_types,
new_values,
None, False,
return_message,
logger)
except AttributeError:
# Appears with functions that have not been well defined.
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.exception("WORKER EXCEPTION IN %s - Attribute Error Exception" %
process_name)
logger.exception(''.join(line for line in lines))
logger.exception("Check that all parameters have been defined with " +
"an absolute import path (even if in the same file)")
# If exception is raised during the task execution, new_types and
# new_values are empty and target_direction is None
return task_returns(1,
new_types,
new_values,
None,
False,
"",
logger)
except BaseException:
# Catch any other user/decorators exception.
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.exception("WORKER EXCEPTION IN %s" % process_name)
logger.exception(''.join(line for line in lines))
# If exception is raised during the task execution, new_types and
# new_values are empty and target_direction is None
return task_returns(1,
new_types,
new_values,
None,
False,
"",
logger)
finally:
signal.alarm(0)
if isinstance(task_output[0], tuple):
        # Weak but effective way to check, without resorting to inspect,
        # whether another decorator has added extra return values.
# TODO: Should we consider here to create a list with all elements and
# serialize it to a file with the real task output plus the decorator
# results? == task_output[1:]
# TODO: Currently, the extra result is ignored.
new_types = task_output[0][0]
new_values = task_output[0][1]
target_direction = task_output[0][2]
else:
# The task_output is composed by the new_types and new_values returned
# by the task decorator.
new_types = task_output[0]
new_values = task_output[1]
target_direction = task_output[2]
return task_returns(0, new_types, new_values, target_direction,
False, "", logger)
def task_returns(exit_code, new_types, new_values, target_direction,
timed_out, return_message, logger):
"""
Unified task return function
:param exit_code: Exit value (0 ok, 1 error)
:param new_types: New types to be returned
:param new_values: New values to be returned
:param target_direction: Target direction
    :param timed_out: If the task has reached the time out
    :param return_message: Return exception message
:param logger: Logger where to place the messages
:return: exit code, new types, new values, target direction and time out
"""
if __debug__:
# The types may change
# (e.g. if the user does a makePersistent within the task)
logger.debug("Exit code : %s " % str(exit_code))
logger.debug("Return Types : %s " % str(new_types))
logger.debug("Return Values: %s " % str(new_values))
logger.debug("Return target_direction: %s " % str(target_direction))
logger.debug("Return timed_out: %s " % str(timed_out))
logger.debug("Return exception_message: %s " % str(return_message))
logger.debug("Finished task execution")
return (exit_code,
new_types,
new_values,
target_direction,
timed_out,
return_message)
class TimeOutError(BaseException):
"""
Time out error exception
"""
pass
def task_timed_out(signum, frame):
"""
Task time out signal handler
:param signum: Signal number
:param frame: Frame
:raise: TimeOutError exception
"""
raise TimeOutError
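# Minimal sketch of the SIGALRM-based timeout pattern used by task_execution
# above. Illustrative only: `slow_call` and the 5-second budget are
# assumptions for the example, not part of the worker.
def _timeout_pattern_example(slow_call):
    signal.signal(signal.SIGALRM, task_timed_out)
    signal.alarm(5)           # the handler raises TimeOutError after 5s
    try:
        slow_call()
    except TimeOutError:
        pass                  # handled like the timeout branch above
    finally:
        signal.alarm(0)       # always disarm the alarm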
class CancelError(BaseException):
pass
def task_cancel(signum, frame):
raise CancelError
def execute_task(process_name, storage_conf, params, tracing, logger,
python_mpi=False):
"""
ExecuteTask main method.
:param process_name: Process name
:param storage_conf: Storage configuration file path
:param params: List of parameters
:param tracing: Tracing flag
:param logger: Logger to use
:param python_mpi: If it is a MPI task
:return: exit code, new types and new values
"""
if __debug__:
logger.debug("Begin task execution in %s" % process_name)
persistent_storage = False
if storage_conf != 'null':
persistent_storage = True
# Retrieve the parameters from the params argument
path = params[0]
method_name = params[1]
num_slaves = int(params[3])
time_out = int(params[2])
slaves = []
for i in range(3, 3 + num_slaves):
slaves.append(params[i])
arg_position = 4 + num_slaves
args = params[arg_position:]
cus = args[0]
args = args[1:]
has_target = args[0]
return_type = args[1]
return_length = int(args[2])
num_params = int(args[3])
args = args[4:]
# COMPSs keywords for tasks (ie: tracing, process name...)
# compss_key is included to be checked in the @task decorator, so that
# the task knows if it has been called from the worker or from the
# user code (reason: ignore @task decorator if called from another task).
compss_kwargs = {
'compss_key': True,
'compss_tracing': tracing,
'compss_process_name': process_name,
'compss_storage_conf': storage_conf,
'compss_return_length': return_length,
'python_MPI': python_mpi
}
if __debug__:
logger.debug("Storage conf: %s" % str(storage_conf))
logger.debug("Params: %s" % str(params))
logger.debug("Path: %s" % str(path))
logger.debug("Method name: %s" % str(method_name))
logger.debug("Num slaves: %s" % str(num_slaves))
logger.debug("Slaves: %s" % str(slaves))
logger.debug("Cus: %s" % str(cus))
logger.debug("Has target: %s" % str(has_target))
logger.debug("Num Params: %s" % str(num_params))
logger.debug("Return Length: %s" % str(return_length))
logger.debug("Args: %r" % args)
# Get all parameter values
if __debug__:
logger.debug("Processing parameters:")
values = get_input_params(num_params, logger, args)
types = [x.type for x in values]
if __debug__:
logger.debug("RUN TASK with arguments:")
logger.debug("\t- Path: %s" % path)
logger.debug("\t- Method/function name: %s" % method_name)
logger.debug("\t- Has target: %s" % str(has_target))
logger.debug("\t- # parameters: %s" % str(num_params))
logger.debug("\t- Values:")
for v in values:
logger.debug("\t\t %r" % v)
logger.debug("\t- COMPSs types:")
for t in types:
logger.debug("\t\t %s" % str(t))
import_error = False
new_types = []
new_values = []
timed_out = False
try:
# Try to import the module (for functions)
if __debug__:
logger.debug("Trying to import the user module: %s" % path)
py_version = sys.version_info
if py_version >= (2, 7):
import importlib
module = importlib.import_module(path) # Python 2.7
if path.startswith('InteractiveMode_'):
# Force reload in interactive mode. The user may have
# overwritten a function or task.
if py_version < (3, 0):
reload(module)
elif py_version < (3, 4):
import imp
imp.reload(module)
else:
importlib.reload(module)
if __debug__:
msg = "Module successfully loaded (Python version >= 2.7)"
logger.debug(msg)
else:
module = __import__(path, globals(), locals(), [path], -1)
if __debug__:
msg = "Module successfully loaded (Python version < 2.7"
logger.debug(msg)
except ImportError:
if __debug__:
msg = "Could not import the module. Reason: Method in class."
logger.debug(msg)
import_error = True
if not import_error:
# Module method declared as task
result = task_execution(logger,
process_name,
module,
method_name,
time_out,
types,
values,
compss_kwargs,
persistent_storage,
storage_conf)
exit_code = result[0]
new_types = result[1]
new_values = result[2]
target_direction = result[3]
timed_out = result[4]
except_msg = result[5]
if exit_code != 0:
return exit_code, new_types, new_values, timed_out, except_msg
else:
# Method declared as task in class
# Not the path of a module, it ends with a class name
class_name = path.split('.')[-1]
module_name = '.'.join(path.split('.')[0:-1])
if '.' in path:
module_name = '.'.join(path.split('.')[0:-1])
else:
module_name = path
try:
module = __import__(module_name, fromlist=[class_name])
klass = getattr(module, class_name)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.exception("EXCEPTION IMPORTING MODULE IN %s" % process_name)
logger.exception(''.join(line for line in lines))
return 1, [], [], False, None
if __debug__:
logger.debug("Method in class %s of module %s" % (class_name,
module_name))
logger.debug("Has target: %s" % str(has_target))
if has_target == 'true':
# Instance method
            # The self object needs to be an actual instance in order to call
            # the method, so it cannot be resolved in the @task decorator.
            # Since the args structure is parameters + self + returns, we pop
            # the corresponding element taking into account the return_length
            # notified by the runtime (-1 because the index starts at 0).
self_index = num_params - return_length - 1
self_elem = values.pop(self_index)
self_type = types.pop(self_index)
if self_type == parameter.TYPE.EXTERNAL_PSCO:
if __debug__:
logger.debug("Last element (self) is a PSCO with id: %s" %
str(self_elem.key))
obj = get_by_id(self_elem.key)
else:
obj = None
file_name = None
if self_elem.key is None:
file_name = self_elem.file_name.split(':')[-1]
if __debug__:
logger.debug("Deserialize self from file.")
try:
obj = deserialize_from_file(file_name)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.exception("EXCEPTION DESERIALIZING SELF IN %s" % process_name)
logger.exception(''.join(line for line in lines))
return 1, [], [], False, None
if __debug__:
logger.debug('Deserialized self object is: %s' %
self_elem.content)
logger.debug("Processing callee, a hidden object of %s in file %s" % # noqa: E501
(file_name, type(self_elem.content)))
values.insert(0, obj)
if not self_type == parameter.TYPE.EXTERNAL_PSCO:
types.insert(0, parameter.TYPE.OBJECT)
else:
types.insert(0, parameter.TYPE.EXTERNAL_PSCO)
result = task_execution(logger,
process_name,
klass,
method_name,
time_out,
types,
values,
compss_kwargs,
persistent_storage,
storage_conf)
exit_code = result[0]
new_types = result[1]
new_values = result[2]
target_direction = result[3]
timed_out = result[4]
except_msg = result[5]
if exit_code != 0:
return exit_code, new_types, new_values, timed_out, except_msg
# Depending on the target_direction option, it is necessary to
# serialize again self or not. Since this option is only visible
# within the task decorator, the task_execution returns the value
# of target_direction in order to know here if self has to be
            # serialized. This solution avoids using inspect.
if target_direction.direction == parameter.DIRECTION.INOUT or \
target_direction.direction == parameter.DIRECTION.COMMUTATIVE: # noqa: E501
if is_psco(obj):
# There is no explicit update if self is a PSCO.
# Consequently, the changes on the PSCO must have been
# pushed into the storage automatically on each PSCO
# modification.
if __debug__:
logger.debug("The changes on the PSCO must have been" +
" automatically updated by the storage.")
pass
else:
if __debug__:
logger.debug("Serializing self to file: %s" %
file_name)
try:
serialize_to_file(obj, file_name)
                    except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.exception("EXCEPTION SERIALIZING SELF IN %s" % process_name)
logger.exception(''.join(line for line in lines))
return 1, new_types, new_values, timed_out, except_msg
if __debug__:
logger.debug("Obj: %r" % obj)
else:
# Class method - class is not included in values (e.g. values=[7])
types.append(None) # class must be first type
result = task_execution(logger,
process_name,
klass,
method_name,
time_out,
types,
values,
compss_kwargs,
persistent_storage,
storage_conf)
exit_code = result[0]
new_types = result[1]
new_values = result[2]
target_direction = result[3]
timed_out = result[4]
except_msg = result[5]
if exit_code != 0:
return exit_code, new_types, new_values, timed_out, except_msg
# EVERYTHING OK
if __debug__:
logger.debug("End task execution. Status: Ok")
return exit_code, new_types, new_values, timed_out, except_msg
|
|
from sqlalchemy import schema as sa_schema, types as sqltypes
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy import event
from ..operations import ops
import logging
from .. import util
from ..util import compat
from ..util import sqla_compat
from sqlalchemy.util import OrderedSet
import re
from .render import _user_defined_render
import contextlib
from alembic.ddl.base import _fk_spec
log = logging.getLogger(__name__)
def _populate_migration_script(autogen_context, migration_script):
upgrade_ops = migration_script.upgrade_ops_list[-1]
downgrade_ops = migration_script.downgrade_ops_list[-1]
_produce_net_changes(autogen_context, upgrade_ops)
upgrade_ops.reverse_into(downgrade_ops)
comparators = util.Dispatcher(uselist=True)
def _produce_net_changes(autogen_context, upgrade_ops):
connection = autogen_context.connection
include_schemas = autogen_context.opts.get('include_schemas', False)
inspector = Inspector.from_engine(connection)
default_schema = connection.dialect.default_schema_name
if include_schemas:
schemas = set(inspector.get_schema_names())
        # remove the dialect's internal "information_schema" schema
schemas.discard("information_schema")
# replace the "default" schema with None
schemas.discard(default_schema)
schemas.add(None)
else:
schemas = [None]
comparators.dispatch("schema", autogen_context.dialect.name)(
autogen_context, upgrade_ops, schemas
)
@comparators.dispatch_for("schema")
def _autogen_for_tables(autogen_context, upgrade_ops, schemas):
inspector = autogen_context.inspector
metadata = autogen_context.metadata
conn_table_names = set()
version_table_schema = \
autogen_context.migration_context.version_table_schema
version_table = autogen_context.migration_context.version_table
for s in schemas:
tables = set(inspector.get_table_names(schema=s))
if s == version_table_schema:
tables = tables.difference(
[autogen_context.migration_context.version_table]
)
conn_table_names.update(zip([s] * len(tables), tables))
metadata_table_names = OrderedSet(
[(table.schema, table.name) for table in metadata.sorted_tables]
).difference([(version_table_schema, version_table)])
_compare_tables(conn_table_names, metadata_table_names,
inspector, metadata, upgrade_ops, autogen_context)
def _compare_tables(conn_table_names, metadata_table_names,
inspector, metadata, upgrade_ops, autogen_context):
default_schema = inspector.bind.dialect.default_schema_name
# tables coming from the connection will not have "schema"
# set if it matches default_schema_name; so we need a list
# of table names from local metadata that also have "None" if schema
# == default_schema_name. Most setups will be like this anyway but
# some are not (see #170)
metadata_table_names_no_dflt_schema = OrderedSet([
(schema if schema != default_schema else None, tname)
for schema, tname in metadata_table_names
])
# to adjust for the MetaData collection storing the tables either
# as "schemaname.tablename" or just "tablename", create a new lookup
# which will match the "non-default-schema" keys to the Table object.
tname_to_table = dict(
(
no_dflt_schema,
metadata.tables[sa_schema._get_table_key(tname, schema)]
)
for no_dflt_schema, (schema, tname) in zip(
metadata_table_names_no_dflt_schema,
metadata_table_names)
)
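    # Illustrative example (assumed names): with default_schema == 'public',
    # a Table declared with schema='public' was keyed above as (None, tname)
    # so it lines up with the connection-side names, while tname_to_table
    # still resolves that normalized key back to the original Table object.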
metadata_table_names = metadata_table_names_no_dflt_schema
for s, tname in metadata_table_names.difference(conn_table_names):
name = '%s.%s' % (s, tname) if s else tname
metadata_table = tname_to_table[(s, tname)]
if autogen_context.run_filters(
metadata_table, tname, "table", False, None):
upgrade_ops.ops.append(
ops.CreateTableOp.from_table(metadata_table))
log.info("Detected added table %r", name)
modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
comparators.dispatch("table")(
autogen_context, modify_table_ops,
s, tname, None, metadata_table
)
if not modify_table_ops.is_empty():
upgrade_ops.ops.append(modify_table_ops)
removal_metadata = sa_schema.MetaData()
for s, tname in conn_table_names.difference(metadata_table_names):
name = sa_schema._get_table_key(tname, s)
exists = name in removal_metadata.tables
t = sa_schema.Table(tname, removal_metadata, schema=s)
if not exists:
event.listen(
t,
"column_reflect",
autogen_context.migration_context.impl.
_compat_autogen_column_reflect(inspector))
inspector.reflecttable(t, None)
if autogen_context.run_filters(t, tname, "table", True, None):
upgrade_ops.ops.append(
ops.DropTableOp.from_table(t)
)
log.info("Detected removed table %r", name)
existing_tables = conn_table_names.intersection(metadata_table_names)
existing_metadata = sa_schema.MetaData()
conn_column_info = {}
for s, tname in existing_tables:
name = sa_schema._get_table_key(tname, s)
exists = name in existing_metadata.tables
t = sa_schema.Table(tname, existing_metadata, schema=s)
if not exists:
event.listen(
t,
"column_reflect",
autogen_context.migration_context.impl.
_compat_autogen_column_reflect(inspector))
inspector.reflecttable(t, None)
conn_column_info[(s, tname)] = t
for s, tname in sorted(existing_tables, key=lambda x: (x[0] or '', x[1])):
s = s or None
name = '%s.%s' % (s, tname) if s else tname
metadata_table = tname_to_table[(s, tname)]
conn_table = existing_metadata.tables[name]
if autogen_context.run_filters(
metadata_table, tname, "table", False,
conn_table):
modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
with _compare_columns(
s, tname,
conn_table,
metadata_table,
modify_table_ops, autogen_context, inspector):
comparators.dispatch("table")(
autogen_context, modify_table_ops,
s, tname, conn_table, metadata_table
)
if not modify_table_ops.is_empty():
upgrade_ops.ops.append(modify_table_ops)
def _make_index(params, conn_table):
# TODO: add .info such as 'duplicates_constraint'
return sa_schema.Index(
params['name'],
*[conn_table.c[cname] for cname in params['column_names']],
unique=params['unique']
)
def _make_unique_constraint(params, conn_table):
uq = sa_schema.UniqueConstraint(
*[conn_table.c[cname] for cname in params['column_names']],
name=params['name']
)
if 'duplicates_index' in params:
uq.info['duplicates_index'] = params['duplicates_index']
return uq
def _make_foreign_key(params, conn_table):
tname = params['referred_table']
if params['referred_schema']:
tname = "%s.%s" % (params['referred_schema'], tname)
options = params.get('options', {})
const = sa_schema.ForeignKeyConstraint(
[conn_table.c[cname] for cname in params['constrained_columns']],
["%s.%s" % (tname, n) for n in params['referred_columns']],
onupdate=options.get('onupdate'),
ondelete=options.get('ondelete'),
deferrable=options.get('deferrable'),
initially=options.get('initially'),
name=params['name']
)
# needed by 0.7
conn_table.append_constraint(const)
return const
@contextlib.contextmanager
def _compare_columns(schema, tname, conn_table, metadata_table,
modify_table_ops, autogen_context, inspector):
name = '%s.%s' % (schema, tname) if schema else tname
metadata_cols_by_name = dict((c.name, c) for c in metadata_table.c)
conn_col_names = dict((c.name, c) for c in conn_table.c)
metadata_col_names = OrderedSet(sorted(metadata_cols_by_name))
for cname in metadata_col_names.difference(conn_col_names):
if autogen_context.run_filters(
metadata_cols_by_name[cname], cname,
"column", False, None):
modify_table_ops.ops.append(
ops.AddColumnOp.from_column_and_tablename(
schema, tname, metadata_cols_by_name[cname])
)
log.info("Detected added column '%s.%s'", name, cname)
for colname in metadata_col_names.intersection(conn_col_names):
metadata_col = metadata_cols_by_name[colname]
conn_col = conn_table.c[colname]
if not autogen_context.run_filters(
metadata_col, colname, "column", False,
conn_col):
continue
alter_column_op = ops.AlterColumnOp(
tname, colname, schema=schema)
comparators.dispatch("column")(
autogen_context, alter_column_op,
schema, tname, colname, conn_col, metadata_col
)
if alter_column_op.has_changes():
modify_table_ops.ops.append(alter_column_op)
yield
for cname in set(conn_col_names).difference(metadata_col_names):
if autogen_context.run_filters(
conn_table.c[cname], cname,
"column", True, None):
modify_table_ops.ops.append(
ops.DropColumnOp.from_column_and_tablename(
schema, tname, conn_table.c[cname]
)
)
log.info("Detected removed column '%s.%s'", name, cname)
class _constraint_sig(object):
def __eq__(self, other):
return self.const == other.const
def __ne__(self, other):
return self.const != other.const
def __hash__(self):
return hash(self.const)
class _uq_constraint_sig(_constraint_sig):
is_index = False
is_unique = True
def __init__(self, const):
self.const = const
self.name = const.name
self.sig = tuple(sorted([col.name for col in const.columns]))
@property
def column_names(self):
return [col.name for col in self.const.columns]
class _ix_constraint_sig(_constraint_sig):
is_index = True
def __init__(self, const):
self.const = const
self.name = const.name
self.sig = tuple(sorted([col.name for col in const.columns]))
self.is_unique = bool(const.unique)
@property
def column_names(self):
return sqla_compat._get_index_column_names(self.const)
class _fk_constraint_sig(_constraint_sig):
def __init__(self, const, include_options=False):
self.const = const
self.name = const.name
(
self.source_schema, self.source_table,
self.source_columns, self.target_schema, self.target_table,
self.target_columns,
onupdate, ondelete,
deferrable, initially) = _fk_spec(const)
self.sig = (
self.source_schema, self.source_table, tuple(self.source_columns),
self.target_schema, self.target_table, tuple(self.target_columns)
)
if include_options:
self.sig += (
(None if onupdate.lower() == 'no action'
else onupdate.lower())
if onupdate else None,
(None if ondelete.lower() == 'no action'
else ondelete.lower())
if ondelete else None,
# convert initially + deferrable into one three-state value
"initially_deferrable"
if initially and initially.lower() == "deferred"
else "deferrable" if deferrable
else "not deferrable"
)
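# Illustrative sketch (assumed table `tbl`, not from the original module):
# the _constraint_sig subclasses reduce each constraint to a hashable
# signature so connection-side and metadata-side collections can be compared
# with plain set/dict operations, e.g.
#
#   _uq_constraint_sig(sa_schema.UniqueConstraint(tbl.c.email)).sig
#   # -> ('email',)
#   _ix_constraint_sig(sa_schema.Index('ix_email', tbl.c.email)).sig
#   # -> ('email',)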
@comparators.dispatch_for("table")
def _compare_indexes_and_uniques(
autogen_context, modify_ops, schema, tname, conn_table,
metadata_table):
inspector = autogen_context.inspector
is_create_table = conn_table is None
# 1a. get raw indexes and unique constraints from metadata ...
metadata_unique_constraints = set(
uq for uq in metadata_table.constraints
if isinstance(uq, sa_schema.UniqueConstraint)
)
metadata_indexes = set(metadata_table.indexes)
conn_uniques = conn_indexes = frozenset()
supports_unique_constraints = False
unique_constraints_duplicate_unique_indexes = False
if conn_table is not None:
# 1b. ... and from connection, if the table exists
if hasattr(inspector, "get_unique_constraints"):
try:
conn_uniques = inspector.get_unique_constraints(
tname, schema=schema)
supports_unique_constraints = True
except NotImplementedError:
pass
except TypeError:
# number of arguments is off for the base
# method in SQLAlchemy due to the cache decorator
# not being present
pass
else:
for uq in conn_uniques:
if uq.get('duplicates_index'):
unique_constraints_duplicate_unique_indexes = True
try:
conn_indexes = inspector.get_indexes(tname, schema=schema)
except NotImplementedError:
pass
# 2. convert conn-level objects from raw inspector records
# into schema objects
conn_uniques = set(_make_unique_constraint(uq_def, conn_table)
for uq_def in conn_uniques)
conn_indexes = set(_make_index(ix, conn_table) for ix in conn_indexes)
# 2a. if the dialect dupes unique indexes as unique constraints
# (mysql and oracle), correct for that
if unique_constraints_duplicate_unique_indexes:
_correct_for_uq_duplicates_uix(
conn_uniques, conn_indexes,
metadata_unique_constraints,
metadata_indexes
)
# 3. give the dialect a chance to omit indexes and constraints that
# we know are either added implicitly by the DB or that the DB
# can't accurately report on
autogen_context.migration_context.impl.\
correct_for_autogen_constraints(
conn_uniques, conn_indexes,
metadata_unique_constraints,
metadata_indexes)
# 4. organize the constraints into "signature" collections, the
# _constraint_sig() objects provide a consistent facade over both
# Index and UniqueConstraint so we can easily work with them
# interchangeably
metadata_unique_constraints = set(_uq_constraint_sig(uq)
for uq in metadata_unique_constraints
)
metadata_indexes = set(_ix_constraint_sig(ix) for ix in metadata_indexes)
conn_unique_constraints = set(
_uq_constraint_sig(uq) for uq in conn_uniques)
conn_indexes = set(_ix_constraint_sig(ix) for ix in conn_indexes)
# 5. index things by name, for those objects that have names
metadata_names = dict(
(c.name, c) for c in
metadata_unique_constraints.union(metadata_indexes)
if c.name is not None)
conn_uniques_by_name = dict((c.name, c) for c in conn_unique_constraints)
conn_indexes_by_name = dict((c.name, c) for c in conn_indexes)
conn_names = dict((c.name, c) for c in
conn_unique_constraints.union(conn_indexes)
if c.name is not None)
doubled_constraints = dict(
(name, (conn_uniques_by_name[name], conn_indexes_by_name[name]))
for name in set(
conn_uniques_by_name).intersection(conn_indexes_by_name)
)
# 6. index things by "column signature", to help with unnamed unique
# constraints.
conn_uniques_by_sig = dict((uq.sig, uq) for uq in conn_unique_constraints)
metadata_uniques_by_sig = dict(
(uq.sig, uq) for uq in metadata_unique_constraints)
metadata_indexes_by_sig = dict(
(ix.sig, ix) for ix in metadata_indexes)
unnamed_metadata_uniques = dict(
(uq.sig, uq) for uq in
metadata_unique_constraints if uq.name is None)
# assumptions:
# 1. a unique constraint or an index from the connection *always*
# has a name.
# 2. an index on the metadata side *always* has a name.
# 3. a unique constraint on the metadata side *might* have a name.
# 4. The backend may double up indexes as unique constraints and
# vice versa (e.g. MySQL, Postgresql)
def obj_added(obj):
if obj.is_index:
if autogen_context.run_filters(
obj.const, obj.name, "index", False, None):
modify_ops.ops.append(
ops.CreateIndexOp.from_index(obj.const)
)
log.info("Detected added index '%s' on %s",
obj.name, ', '.join([
"'%s'" % obj.column_names
]))
else:
if not supports_unique_constraints:
# can't report unique indexes as added if we don't
# detect them
return
if is_create_table:
# unique constraints are created inline with table defs
return
if autogen_context.run_filters(
obj.const, obj.name,
"unique_constraint", False, None):
modify_ops.ops.append(
ops.AddConstraintOp.from_constraint(obj.const)
)
log.info("Detected added unique constraint '%s' on %s",
obj.name, ', '.join([
"'%s'" % obj.column_names
]))
def obj_removed(obj):
if obj.is_index:
if obj.is_unique and not supports_unique_constraints:
# many databases double up unique constraints
# as unique indexes. without that list we can't
# be sure what we're doing here
return
if autogen_context.run_filters(
obj.const, obj.name, "index", True, None):
modify_ops.ops.append(
ops.DropIndexOp.from_index(obj.const)
)
log.info(
"Detected removed index '%s' on '%s'", obj.name, tname)
else:
if autogen_context.run_filters(
obj.const, obj.name,
"unique_constraint", True, None):
modify_ops.ops.append(
ops.DropConstraintOp.from_constraint(obj.const)
)
log.info("Detected removed unique constraint '%s' on '%s'",
obj.name, tname
)
def obj_changed(old, new, msg):
if old.is_index:
if autogen_context.run_filters(
new.const, new.name, "index",
False, old.const):
log.info("Detected changed index '%s' on '%s':%s",
old.name, tname, ', '.join(msg)
)
modify_ops.ops.append(
ops.DropIndexOp.from_index(old.const)
)
modify_ops.ops.append(
ops.CreateIndexOp.from_index(new.const)
)
else:
if autogen_context.run_filters(
new.const, new.name,
"unique_constraint", False, old.const):
log.info("Detected changed unique constraint '%s' on '%s':%s",
old.name, tname, ', '.join(msg)
)
modify_ops.ops.append(
ops.DropConstraintOp.from_constraint(old.const)
)
modify_ops.ops.append(
ops.AddConstraintOp.from_constraint(new.const)
)
for added_name in sorted(set(metadata_names).difference(conn_names)):
obj = metadata_names[added_name]
obj_added(obj)
for existing_name in sorted(set(metadata_names).intersection(conn_names)):
metadata_obj = metadata_names[existing_name]
if existing_name in doubled_constraints:
conn_uq, conn_idx = doubled_constraints[existing_name]
if metadata_obj.is_index:
conn_obj = conn_idx
else:
conn_obj = conn_uq
else:
conn_obj = conn_names[existing_name]
if conn_obj.is_index != metadata_obj.is_index:
obj_removed(conn_obj)
obj_added(metadata_obj)
else:
msg = []
if conn_obj.is_unique != metadata_obj.is_unique:
msg.append(' unique=%r to unique=%r' % (
conn_obj.is_unique, metadata_obj.is_unique
))
if conn_obj.sig != metadata_obj.sig:
msg.append(' columns %r to %r' % (
conn_obj.sig, metadata_obj.sig
))
if msg:
obj_changed(conn_obj, metadata_obj, msg)
for removed_name in sorted(set(conn_names).difference(metadata_names)):
conn_obj = conn_names[removed_name]
if not conn_obj.is_index and conn_obj.sig in unnamed_metadata_uniques:
continue
elif removed_name in doubled_constraints:
if conn_obj.sig not in metadata_indexes_by_sig and \
conn_obj.sig not in metadata_uniques_by_sig:
conn_uq, conn_idx = doubled_constraints[removed_name]
obj_removed(conn_uq)
obj_removed(conn_idx)
else:
obj_removed(conn_obj)
for uq_sig in unnamed_metadata_uniques:
if uq_sig not in conn_uniques_by_sig:
obj_added(unnamed_metadata_uniques[uq_sig])
def _correct_for_uq_duplicates_uix(
conn_unique_constraints,
conn_indexes,
metadata_unique_constraints,
metadata_indexes):
    # dedupe unique indexes vs. constraints, since MySQL / Oracle
    # don't really have unique constraints as a separate construct.
# but look in the metadata and try to maintain constructs
# that already seem to be defined one way or the other
# on that side. This logic was formerly local to MySQL dialect,
# generalized to Oracle and others. See #276
metadata_uq_names = set([
cons.name for cons in metadata_unique_constraints
if cons.name is not None])
unnamed_metadata_uqs = set([
_uq_constraint_sig(cons).sig
for cons in metadata_unique_constraints
if cons.name is None
])
metadata_ix_names = set([
cons.name for cons in metadata_indexes if cons.unique])
conn_ix_names = dict(
(cons.name, cons) for cons in conn_indexes if cons.unique
)
uqs_dupe_indexes = dict(
(cons.name, cons) for cons in conn_unique_constraints
if cons.info['duplicates_index']
)
for overlap in uqs_dupe_indexes:
if overlap not in metadata_uq_names:
if _uq_constraint_sig(uqs_dupe_indexes[overlap]).sig \
not in unnamed_metadata_uqs:
conn_unique_constraints.discard(uqs_dupe_indexes[overlap])
elif overlap not in metadata_ix_names:
conn_indexes.discard(conn_ix_names[overlap])
@comparators.dispatch_for("column")
def _compare_nullable(
autogen_context, alter_column_op, schema, tname, cname, conn_col,
metadata_col):
# work around SQLAlchemy issue #3023
if metadata_col.primary_key:
return
metadata_col_nullable = metadata_col.nullable
conn_col_nullable = conn_col.nullable
alter_column_op.existing_nullable = conn_col_nullable
if conn_col_nullable is not metadata_col_nullable:
alter_column_op.modify_nullable = metadata_col_nullable
log.info("Detected %s on column '%s.%s'",
"NULL" if metadata_col_nullable else "NOT NULL",
tname,
cname
)
@comparators.dispatch_for("column")
def _compare_type(
autogen_context, alter_column_op, schema, tname, cname, conn_col,
metadata_col):
conn_type = conn_col.type
alter_column_op.existing_type = conn_type
metadata_type = metadata_col.type
if conn_type._type_affinity is sqltypes.NullType:
log.info("Couldn't determine database type "
"for column '%s.%s'", tname, cname)
return
if metadata_type._type_affinity is sqltypes.NullType:
log.info("Column '%s.%s' has no type within "
"the model; can't compare", tname, cname)
return
isdiff = autogen_context.migration_context._compare_type(
conn_col, metadata_col)
if isdiff:
alter_column_op.modify_type = metadata_type
log.info("Detected type change from %r to %r on '%s.%s'",
conn_type, metadata_type, tname, cname
)
def _render_server_default_for_compare(metadata_default,
metadata_col, autogen_context):
rendered = _user_defined_render(
"server_default", metadata_default, autogen_context)
if rendered is not False:
return rendered
if isinstance(metadata_default, sa_schema.DefaultClause):
if isinstance(metadata_default.arg, compat.string_types):
metadata_default = metadata_default.arg
else:
metadata_default = str(metadata_default.arg.compile(
dialect=autogen_context.dialect))
if isinstance(metadata_default, compat.string_types):
if metadata_col.type._type_affinity is sqltypes.String:
metadata_default = re.sub(r"^'|'$", "", metadata_default)
return repr(metadata_default)
else:
return metadata_default
else:
return None
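# Illustrative example (assumed values): for a String column declared with
# server_default="'active'", the helper above strips the outer quotes and
# re-renders the value via repr(), so the metadata side compares as
# "'active'" against the default text reported by the database in
# _compare_server_default below.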
@comparators.dispatch_for("column")
def _compare_server_default(
autogen_context, alter_column_op, schema, tname, cname,
conn_col, metadata_col):
metadata_default = metadata_col.server_default
conn_col_default = conn_col.server_default
if conn_col_default is None and metadata_default is None:
return False
rendered_metadata_default = _render_server_default_for_compare(
metadata_default, metadata_col, autogen_context)
rendered_conn_default = conn_col.server_default.arg.text \
if conn_col.server_default else None
alter_column_op.existing_server_default = conn_col_default
isdiff = autogen_context.migration_context._compare_server_default(
conn_col, metadata_col,
rendered_metadata_default,
rendered_conn_default
)
if isdiff:
alter_column_op.modify_server_default = metadata_default
log.info(
"Detected server default on column '%s.%s'",
tname, cname)
@comparators.dispatch_for("table")
def _compare_foreign_keys(
autogen_context, modify_table_ops, schema, tname, conn_table,
metadata_table):
# if we're doing CREATE TABLE, all FKs are created
# inline within the table def
if conn_table is None:
return
inspector = autogen_context.inspector
metadata_fks = set(
fk for fk in metadata_table.constraints
if isinstance(fk, sa_schema.ForeignKeyConstraint)
)
conn_fks = inspector.get_foreign_keys(tname, schema=schema)
backend_reflects_fk_options = conn_fks and 'options' in conn_fks[0]
conn_fks = set(_make_foreign_key(const, conn_table) for const in conn_fks)
# give the dialect a chance to correct the FKs to match more
# closely
autogen_context.migration_context.impl.\
correct_for_autogen_foreignkeys(
conn_fks, metadata_fks,
)
metadata_fks = set(
_fk_constraint_sig(fk, include_options=backend_reflects_fk_options)
for fk in metadata_fks
)
conn_fks = set(
_fk_constraint_sig(fk, include_options=backend_reflects_fk_options)
for fk in conn_fks
)
conn_fks_by_sig = dict(
(c.sig, c) for c in conn_fks
)
metadata_fks_by_sig = dict(
(c.sig, c) for c in metadata_fks
)
metadata_fks_by_name = dict(
(c.name, c) for c in metadata_fks if c.name is not None
)
conn_fks_by_name = dict(
(c.name, c) for c in conn_fks if c.name is not None
)
def _add_fk(obj, compare_to):
if autogen_context.run_filters(
obj.const, obj.name, "foreign_key_constraint", False,
compare_to):
modify_table_ops.ops.append(
                ops.CreateForeignKeyOp.from_constraint(obj.const)
)
log.info(
"Detected added foreign key (%s)(%s) on table %s%s",
", ".join(obj.source_columns),
", ".join(obj.target_columns),
"%s." % obj.source_schema if obj.source_schema else "",
obj.source_table)
def _remove_fk(obj, compare_to):
if autogen_context.run_filters(
obj.const, obj.name, "foreign_key_constraint", True,
compare_to):
modify_table_ops.ops.append(
ops.DropConstraintOp.from_constraint(obj.const)
)
log.info(
"Detected removed foreign key (%s)(%s) on table %s%s",
", ".join(obj.source_columns),
", ".join(obj.target_columns),
"%s." % obj.source_schema if obj.source_schema else "",
obj.source_table)
# so far it appears we don't need to do this by name at all.
# SQLite doesn't preserve constraint names anyway
for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig):
const = conn_fks_by_sig[removed_sig]
if removed_sig not in metadata_fks_by_sig:
compare_to = metadata_fks_by_name[const.name].const \
if const.name in metadata_fks_by_name else None
_remove_fk(const, compare_to)
for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig):
const = metadata_fks_by_sig[added_sig]
if added_sig not in conn_fks_by_sig:
compare_to = conn_fks_by_name[const.name].const \
if const.name in conn_fks_by_name else None
_add_fk(const, compare_to)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
from .sites import foolSlide
from .sites import readcomicOnlineli
from .sites import comicNaver
from .sites import mangaHere
from .sites import rawSenManga
from .sites import mangaFox
from .sites import omgBeauPeep
from .sites import mangaReader
from .sites import mangaEden
from .sites import acQQ
from .sites import stripUtopia
from .sites import readComicBooksOnline
from .sites import readComicsWebsite
from .sites import batoto
from .sites import hqbr
from .sites import comicextra
from .sites import readComicsIO
from .sites import japscan
from .sites import manganelo
from .sites import webtoons
class Honcho(object):
def comic_language_resolver(self, language_code):
# Will return the Language Name corresponding to the language code.
language_dict = {
'0': 'English',
'1': 'Italian',
'2': 'Spanish',
'3': 'French',
'4': 'German',
'5': 'Portuguese',
'6': 'Turkish',
'7': 'Indonesian',
'8': 'Greek',
'9': 'Filipino',
'10': 'Polish',
'11': 'Thai',
'12': 'Malay',
            '13': 'Hungarian',
            '14': 'Romanian',
            '15': 'Arabic',
'16': 'Hebrew',
'17': 'Russian',
'18': 'Vietnamese',
'19': 'Dutch',
'20': 'Bengali',
'21': 'Persian',
'22': 'Czech',
'23': 'Brazilian',
'24': 'Bulgarian',
'25': 'Danish',
'26': 'Esperanto',
'27': 'Swedish',
'28': 'Lithuanian',
'29': 'Other'
}
return language_dict[language_code]
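    # Illustrative usage (assumed value): Honcho().comic_language_resolver('0')
    # returns 'English'; the code normally comes from the language option the
    # caller passes through to checker() as `comic_language`.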
def checker(self, comic_url, download_directory, chapter_range, **kwargs):
user_name = kwargs.get("username")
password = kwargs.get("password")
current_directory = kwargs.get("current_directory")
log_flag = kwargs.get("logger")
sorting = kwargs.get("sorting_order")
comic_language = kwargs.get("comic_language")
print_index = kwargs.get("print_index")
if log_flag is True:
logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
logging.debug("Comic Url : %s" % comic_url)
domain = urlparse(comic_url).netloc
logging.debug("Selected Domain : %s" % domain)
# Remove the "/" from ending to make checking URL for Full Series or Single Chapter easier.
if comic_url[-1] == "/":
comic_url = comic_url[:-1]
if domain in ["yomanga.co", "gomanga.co"]:
foolSlide.FoolSlide(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"))
return 0
elif domain in ["www.readcomiconline.li", "readcomiconline.li", "www.readcomicsonline.ru", "readcomicsonline.ru"]:
readcomicOnlineli.ReadComicOnlineLi(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
image_quality=kwargs.get("image_quality"),
print_index=print_index)
return 0
elif domain in ["www.comic.naver.com", "comic.naver.com"]:
comicNaver.ComicNaver(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangahere.co", "mangahere.co", "www.mangahere.cc", "mangahere.cc"]:
mangaHere.MangaHere(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.raw.senmanga.com", "raw.senmanga.com"]:
rawSenManga.RawSenaManga(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangafox.me", "mangafox.me", "www.mangafox.la", "mangafox.la", "www.fanfox.net",
"fanfox.net"]:
mangaFox.MangaFox(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.omgbeaupeep.com", "omgbeaupeep.com", "www.otakusmash.com", "otakusmash.com"]:
omgBeauPeep.OmgBeauPeep(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO --print-index -i http://ac.qq.com/Comic/comicInfo/id/547059?trace_id=907_27.156.162.231_1539265645 broken?
elif domain in ["www.ac.qq.com", "ac.qq.com"]:
acQQ.AcQq(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range,
print_index=print_index)
return 0
elif domain in ["www.striputopija.blogspot.in", "striputopija.blogspot.in", "www.striputopija.blogspot.com",
"striputopija.blogspot.com"]:
stripUtopia.StripUtopia(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range,
print_index=print_index)
return 0
elif domain in ["www.mangareader.net", "mangareader.net"]:
mangaReader.MangaReader(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.readcomicbooksonline.net", "readcomicbooksonline.net", "www.readcomicbooksonline.org",
"readcomicbooksonline.org"]:
readComicBooksOnline.ReadComicBooksOnline(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO seems broken
elif domain in ["www.readcomics.website", "readcomics.website"]:
readComicsWebsite.ReadComicsWebsite(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.japscan.to"]:
japscan.Japscan(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.hqbr.com.br", "hqbr.com.br"]:
hqbr.Hqbr(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.comicextra.com", "comicextra.com"]:
comicextra.ComicExtra(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO seems broken
elif domain in ["www.readcomics.io", "readcomics.io"]:
readComicsIO.ReadComicsIO(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.kissmanga.com", "kissmanga.com"]:
# kissManga.KissManga(manga_url = comic_url, logger = logging,
# current_directory = current_directory, sorting_order = sorting)
print("Under Development!")
return 0
elif domain in ["www.bato.to", "bato.to"]:
batoto.Batoto(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"), username=user_name, password=password,
comic_language=self.comic_language_resolver(comic_language),
print_index=print_index)
return 0
elif domain in ["manganelo.com", "mangakakalot.com", "manganato.com", "readmanganato.com"]:
manganelo.Manganelo(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangaeden.com"]:
if print_index:
print("please use -find and -cid instead!")
return -1
mangaEden.MangaEden(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"))
return 0
elif domain in ["www.webtoons.com", "webtoons.com"]:
webtoons.Webtoons(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"), image_quality=kwargs.get("image_quality"))
return 0
else:
print("%s is not supported at the moment. You can request it on the Github repository." % domain)
"""
dataset specification for JEDI
"""
import re
import math
from pandajedi.jediconfig import jedi_config
class JediDatasetSpec(object):
def __str__(self):
sb = []
for key in self.__dict__:
if key == 'Files':
sb.append("{key}='{value}'".format(key=key, value=len(self.__dict__[key])))
else:
sb.append("{key}='{value}'".format(key=key, value=self.__dict__[key]))
return ', '.join(sb)
def __repr__(self):
return self.__str__()
# attributes
_attributes = (
'jediTaskID','datasetID','datasetName','containerName',
'type','creationTime','modificationTime','vo','cloud',
'site','masterID','provenanceID','status','state',
'stateCheckTime','stateCheckExpiration','frozenTime',
'nFiles','nFilesToBeUsed','nFilesUsed',
'nFilesFinished','nFilesFailed','nFilesOnHold',
'nEvents','nEventsToBeUsed','nEventsUsed',
'lockedBy','lockedTime','attributes','streamName',
'storageToken','destination','templateID','nFilesWaiting'
)
# attributes which have 0 by default
_zeroAttrs = ()
# attributes to force update
_forceUpdateAttrs = ('lockedBy','lockedTime')
# mapping between sequence and attr
_seqAttrMap = {'datasetID':'{0}.JEDI_DATASETS_ID_SEQ.nextval'.format(jedi_config.db.schemaJEDI)}
# token for attributes
attrToken = {
'allowNoOutput' : 'an',
'consistencyCheck' : 'cc',
'eventRatio' : 'er',
'indexConsistent' : 'ic',
'mergeOnly' : 'mo',
'nFilesPerJob' : 'np',
'num_records' : 'nr',
'offset' : 'of',
'objectStore' : 'os',
'pseudo' : 'ps',
'random' : 'rd',
'reusable' : 'ru',
'transient' : 'tr',
'useDuplicated' : 'ud',
}
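    # The 'attributes' column stores these tokens as a comma-separated string, either as
    # bare flags or as 'token=value' pairs; e.g. (illustrative value only) an attributes
    # string of 'cc,np=5,of=10' means consistency check enabled, 5 files per job, and an
    # offset of 10. The is*/get*/set* helpers below parse and build this string.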
# constructor
def __init__(self):
# install attributes
for attr in self._attributes:
object.__setattr__(self,attr,None)
# file list
object.__setattr__(self,'Files',[])
# map of changed attributes
object.__setattr__(self,'_changedAttrs',{})
# distributed
object.__setattr__(self,'distributed',False)
    # override __setattr__ to collect the changed attributes
def __setattr__(self,name,value):
oldVal = getattr(self,name)
object.__setattr__(self,name,value)
newVal = getattr(self,name)
# collect changed attributes
if oldVal != newVal or name in self._forceUpdateAttrs:
self._changedAttrs[name] = value
# add File to files list
def addFile(self,fileSpec):
# append
self.Files.append(fileSpec)
# reset changed attribute list
def resetChangedList(self):
object.__setattr__(self,'_changedAttrs',{})
# force update
def forceUpdate(self,name):
if name in self._attributes:
self._changedAttrs[name] = getattr(self,name)
# return map of values
def valuesMap(self,useSeq=False,onlyChanged=False):
ret = {}
for attr in self._attributes:
# use sequence
if useSeq and attr in self._seqAttrMap:
continue
# only changed attributes
if onlyChanged:
if attr not in self._changedAttrs:
continue
val = getattr(self,attr)
            if val is None and attr in self._zeroAttrs:
                val = 0
ret[':%s' % attr] = val
return ret
    # pack tuple into DatasetSpec
def pack(self,values):
for i in range(len(self._attributes)):
            attr = self._attributes[i]
val = values[i]
object.__setattr__(self,attr,val)
# return column names for INSERT
def columnNames(cls,prefix=None):
ret = ""
for attr in cls._attributes:
if prefix is not None:
ret += '{0}.'.format(prefix)
ret += '{0},'.format(attr)
ret = ret[:-1]
return ret
columnNames = classmethod(columnNames)
# return expression of bind variables for INSERT
def bindValuesExpression(cls,useSeq=True):
ret = "VALUES("
for attr in cls._attributes:
if useSeq and attr in cls._seqAttrMap:
ret += "%s," % cls._seqAttrMap[attr]
else:
ret += ":%s," % attr
ret = ret[:-1]
ret += ")"
return ret
bindValuesExpression = classmethod(bindValuesExpression)
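    # Illustrative use (the table name below is an assumption, not taken from this module):
    #     sql  = "INSERT INTO {0}.JEDI_Datasets ({1}) ".format(
    #                jedi_config.db.schemaJEDI, JediDatasetSpec.columnNames())
    #     sql += JediDatasetSpec.bindValuesExpression(useSeq=True)
    #     # then executed with the bind map from datasetSpec.valuesMap(useSeq=True)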
# return an expression of bind variables for UPDATE to update only changed attributes
def bindUpdateChangesExpression(self):
ret = ""
for attr in self._attributes:
if attr in self._changedAttrs:
ret += '%s=:%s,' % (attr,attr)
ret = ret[:-1]
ret += ' '
return ret
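    # Illustrative use of the changed-attribute tracking (values are examples only):
    #     datasetSpec.status = 'ready'          # recorded in _changedAttrs by __setattr__
    #     setClause = datasetSpec.bindUpdateChangesExpression()  # -> "status=:status "
    #     binds = datasetSpec.valuesMap(onlyChanged=True)        # -> {':status': 'ready'}
    #     datasetSpec.resetChangedList()        # clear once the UPDATE is committed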
# set dataset attribute
def setDatasetAttribute(self,attr):
if self.attributes is None:
self.attributes = ''
else:
self.attributes += ','
self.attributes += attr
# set dataset attribute with label
def setDatasetAttributeWithLabel(self,label):
if label not in self.attrToken:
return
attr = self.attrToken[label]
if self.attributes is None:
self.attributes = ''
else:
self.attributes += ','
self.attributes += attr
# get the total size of files
def getSize(self):
totalSize = 0
checkedList = []
for tmpFileSpec in self.Files:
if tmpFileSpec.lfn not in checkedList:
totalSize += tmpFileSpec.fsize
checkedList.append(tmpFileSpec.lfn)
return totalSize
# return list of status to update contents
def statusToUpdateContents(cls):
return ['defined','toupdate']
statusToUpdateContents = classmethod(statusToUpdateContents)
# return list of types for input
def getInputTypes(cls):
return ['input','pseudo_input']
getInputTypes = classmethod(getInputTypes)
# return list of types to generate jobs
def getProcessTypes(cls):
return cls.getInputTypes() + ['pp_input'] + cls.getMergeProcessTypes()
getProcessTypes = classmethod(getProcessTypes)
# return list of types for merging
def getMergeProcessTypes(cls):
return ['trn_log','trn_output']
getMergeProcessTypes = classmethod(getMergeProcessTypes)
    # get type of unknown input
def getUnknownInputType(cls):
return 'trn_unknown'
getUnknownInputType = classmethod(getUnknownInputType)
# check if JEDI needs to keep track of file usage
def toKeepTrack(self):
if self.isNoSplit() and self.isRepeated():
return False
elif self.isReusable():
return False
else:
return True
# check if it is not split
def isNoSplit(self):
if self.attributes is not None and 'nosplit' in self.attributes:
return True
else:
return False
# check if it is repeatedly used
def isRepeated(self):
if self.attributes is not None and 'repeat' in self.attributes:
return True
else:
return False
# check if it is randomly used
def isRandom(self):
if self.attributes is not None and 'rd' in self.attributes.split(','):
return True
else:
return False
# check if it is reusable
def isReusable(self):
if self.attributes is not None and 'ru' in self.attributes.split(','):
return True
else:
return False
# check if consistency is checked
def checkConsistency(self):
if self.attributes is not None and 'cc' in self.attributes.split(','):
return True
else:
return False
# set consistency is checked
def enableCheckConsistency(self):
if self.attributes in [None,'']:
self.attributes = 'cc'
elif 'cc' not in self.attributes.split(','):
self.attributes += ',cc'
# check if it is pseudo
def isPseudo(self):
if self.datasetName in ['pseudo_dataset','seq_number'] \
or self.type in ['pp_input']:
return True
if self.attributes is not None and self.attrToken['pseudo'] in self.attributes.split(','):
return True
return False
# check if it is a many-time dataset which is treated as long-standing at T2s
def isManyTime(self):
if self.attributes is not None and 'manytime' in self.attributes:
return True
else:
return False
# check if it is seq number
def isSeqNumber(self):
if self.datasetName in ['seq_number']:
return True
else:
return False
# check if duplicated files are used
def useDuplicatedFiles(self):
if self.attributes is not None and ('usedup' in self.attributes or \
'ud' in self.attributes.split(',')):
return True
else:
return False
# check if it is a master dataset
def isMaster(self):
if self.masterID is None and self.type in self.getProcessTypes():
return True
else:
return False
# check if it is a master input dataset
def isMasterInput(self):
if self.masterID is None and self.type in self.getInputTypes():
return True
else:
return False
    # remove an attribute from the attributes string
def remAttribute(self,attrName):
if self.attributes is not None:
self.attributes = re.sub(attrName,'',self.attributes)
self.attributes = re.sub(',,',',',self.attributes)
self.attributes = re.sub('^,','',self.attributes)
self.attributes = re.sub(',$','',self.attributes)
if self.attributes == '':
self.attributes = None
# remove nosplit attribute
def remNoSplit(self):
self.remAttribute('nosplit')
# remove repeat attribute
def remRepeat(self):
self.remAttribute('repeat')
# get the ratio to master
def getRatioToMaster(self):
try:
            tmpMatch = re.search(r'ratio=(\d+(\.\d+)*)',self.attributes)
if tmpMatch is not None:
ratioStr = tmpMatch.group(1)
try:
# integer
return int(ratioStr)
except Exception:
pass
try:
# float
return float(ratioStr)
except Exception:
pass
except Exception:
pass
return 1
# get N multiplied by ratio
def getNumMultByRatio(self,num):
# no split
if self.isNoSplit():
return None
# get ratio
ratioVal = self.getRatioToMaster()
# integer or float
if isinstance(ratioVal,int):
retVal = num * ratioVal
else:
retVal = float(num) * ratioVal
retVal = int(math.ceil(retVal))
return retVal
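    # Example (illustrative): with 'ratio=2' in attributes, getNumMultByRatio(3) returns 6;
    # with 'ratio=1.5' it returns int(math.ceil(3 * 1.5)) == 5; with no ratio attribute the
    # default ratio of 1 is used, and None is returned for nosplit datasets.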
# unique map key for output
def outputMapKey(self):
mapKey = '{0}#{1}'.format(self.datasetName,self.provenanceID)
return mapKey
# unique map key
def uniqueMapKey(self):
mapKey = '{0}#{1}'.format(self.datasetName,self.datasetID)
return mapKey
# set offset
def setOffset(self,offset):
self.setDatasetAttribute('{0}={1}'.format(self.attrToken['offset'],offset))
# get offset
def getOffset(self):
if self.attributes is not None:
            tmpMatch = re.search(self.attrToken['offset']+r'=(\d+)',self.attributes)
if tmpMatch is not None:
offset = int(tmpMatch.group(1))
return offset
return 0
# set number of records
def setNumRecords(self,n):
self.setDatasetAttribute('{0}={1}'.format(self.attrToken['num_records'],n))
# get number of records
def getNumRecords(self):
if self.attributes is not None:
for item in self.attributes.split(','):
                tmpMatch = re.search(self.attrToken['num_records']+r'=(\d+)',item)
if tmpMatch is not None:
num_records = int(tmpMatch.group(1))
return num_records
return None
# set object store
def setObjectStore(self,objectStore):
self.setDatasetAttribute('{0}={1}'.format(self.attrToken['objectStore'],objectStore))
# get object store
def getObjectStore(self):
if self.attributes is not None:
tmpMatch = re.search(self.attrToken['objectStore']+'=([^,]+)',self.attributes)
if tmpMatch is not None:
return tmpMatch.group(1)
return None
# set the number of files per job
def setNumFilesPerJob(self,num):
self.setDatasetAttribute('{0}={1}'.format(self.attrToken['nFilesPerJob'],num))
# get the number of files per job
def getNumFilesPerJob(self):
if self.attributes is not None:
            tmpMatch = re.search(self.attrToken['nFilesPerJob']+r'=(\d+)',self.attributes)
if tmpMatch is not None:
num = int(tmpMatch.group(1))
return num
# use continuous numbers for seq_number
if self.isSeqNumber():
return 1
return None
# check if unmerged dataset
def toMerge(self):
if self.type.startswith('trn_'):
return True
return False
# set transient
def setTransient(self,val):
if val is True:
val = 1
else:
val = 0
self.setDatasetAttribute('{0}={1}'.format(self.attrToken['transient'],val))
# get transient
def getTransient(self):
if self.attributes is not None:
for item in self.attributes.split(','):
                tmpMatch = re.search(self.attrToken['transient']+r'=(\d+)',item)
if tmpMatch is not None:
val = int(tmpMatch.group(1))
if val == 1:
return True
else:
return False
return None
# check if no output is allowed
def isAllowedNoOutput(self):
if self.attributes is not None and self.attrToken['allowNoOutput'] in self.attributes.split(','):
return True
else:
return False
# allow no output
def allowNoOutput(self):
if self.attributes in [None,'']:
items = []
else:
items = self.attributes.split(',')
if self.attrToken['allowNoOutput'] not in items:
items.append(self.attrToken['allowNoOutput'])
self.attributes = ','.join(items)
# check if index consistency is required
def indexConsistent(self):
if self.attributes is not None and self.attrToken['indexConsistent'] in self.attributes.split(','):
return True
else:
return False
# set distributed
def setDistributed(self):
self.distributed = True
# reset distributed
def reset_distributed(self):
self.distributed = False
# check if distributed
def isDistributed(self):
return self.distributed
# set event ratio
def setEventRatio(self,num):
self.setDatasetAttribute('{0}={1}'.format(self.attrToken['eventRatio'],num))
# get event ratio
def getEventRatio(self):
if self.attributes is not None:
for item in self.attributes.split(','):
                tmpMatch = re.search(self.attrToken['eventRatio']+r'=(\d+(\.\d+)*)',item)
if tmpMatch is not None:
ratioStr = tmpMatch.group(1)
try:
# integer
return int(ratioStr)
except Exception:
pass
try:
# float
return float(ratioStr)
except Exception:
pass
return None
# set pseudo
def setPseudo(self):
if self.attributes in [None,'']:
items = []
else:
items = self.attributes.split(',')
if self.attrToken['pseudo'] not in items:
items.append(self.attrToken['pseudo'])
self.attributes = ','.join(items)
# merge only
def is_merge_only(self):
try:
return self.attrToken['mergeOnly'] in self.attributes.split(',')
except Exception:
return False
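# Illustrative lifecycle of a JediDatasetSpec (names and values are examples only):
#     datasetSpec = JediDatasetSpec()
#     datasetSpec.pack(rowFromDB)           # fill attributes from a DB row tuple
#     if datasetSpec.isMasterInput() and not datasetSpec.isPseudo():
#         nFilesPerJob = datasetSpec.getNumFilesPerJob()
#     datasetSpec.setNumRecords(100)        # stored as 'nr=100' in 'attributes'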