# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from itertools import cycle
import os
import tarfile
import threading
import zipfile
import numpy as np
from six.moves.urllib.parse import urljoin
from six.moves.urllib.request import pathname2url
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
class TestGetFileAndValidateIt(test.TestCase):
def test_get_file_and_validate_it(self):
"""Tests get_file from a url, plus extraction and validation.
"""
dest_dir = self.get_temp_dir()
orig_dir = self.get_temp_dir()
text_file_path = os.path.join(orig_dir, 'test.txt')
zip_file_path = os.path.join(orig_dir, 'test.zip')
tar_file_path = os.path.join(orig_dir, 'test.tar.gz')
with open(text_file_path, 'w') as text_file:
text_file.write('Float like a butterfly, sting like a bee.')
with tarfile.open(tar_file_path, 'w:gz') as tar_file:
tar_file.add(text_file_path)
with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
zip_file.write(text_file_path)
origin = urljoin('file://', pathname2url(os.path.abspath(tar_file_path)))
path = keras.utils.data_utils.get_file('test.txt', origin,
untar=True, cache_subdir=dest_dir)
filepath = path + '.tar.gz'
hashval_sha256 = keras.utils.data_utils._hash_file(filepath)
hashval_md5 = keras.utils.data_utils._hash_file(filepath, algorithm='md5')
path = keras.utils.data_utils.get_file(
'test.txt', origin, md5_hash=hashval_md5,
untar=True, cache_subdir=dest_dir)
path = keras.utils.data_utils.get_file(
filepath, origin, file_hash=hashval_sha256,
extract=True, cache_subdir=dest_dir)
self.assertTrue(os.path.exists(filepath))
self.assertTrue(keras.utils.data_utils.validate_file(filepath,
hashval_sha256))
self.assertTrue(keras.utils.data_utils.validate_file(filepath, hashval_md5))
os.remove(filepath)
origin = urljoin('file://', pathname2url(os.path.abspath(zip_file_path)))
hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path)
hashval_md5 = keras.utils.data_utils._hash_file(zip_file_path,
algorithm='md5')
path = keras.utils.data_utils.get_file(
'test', origin, md5_hash=hashval_md5,
extract=True, cache_subdir=dest_dir)
path = keras.utils.data_utils.get_file(
'test', origin, file_hash=hashval_sha256,
extract=True, cache_subdir=dest_dir)
self.assertTrue(os.path.exists(path))
self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_sha256))
self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_md5))
class ThreadsafeIter(object):
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self.lock:
return next(self.it)
def threadsafe_generator(f):
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
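# ThreadsafeIter and the threadsafe_generator decorator wrap a generator with a
# lock so that several worker threads can call next() on it concurrently without
# stepping on the underlying iterator; the process-based tests below use a plain
# generator because each worker process gets its own copy.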
class TestSequence(keras.utils.data_utils.Sequence):
def __init__(self, shape, value=1.):
self.shape = shape
self.inner = value
def __getitem__(self, item):
return np.ones(self.shape, dtype=np.uint32) * item * self.inner
def __len__(self):
return 100
def on_epoch_end(self):
self.inner *= 5.0
class FaultSequence(keras.utils.data_utils.Sequence):
def __getitem__(self, item):
raise IndexError(item, 'item is not present')
def __len__(self):
return 100
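# FaultSequence always raises, letting the tests below check that exceptions in
# the workers surface to the consumer as StopIteration from the enqueuer output.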
@threadsafe_generator
def create_generator_from_sequence_threads(ds):
for i in cycle(range(len(ds))):
yield ds[i]
def create_generator_from_sequence_pcs(ds):
for i in cycle(range(len(ds))):
yield ds[i]
class TestEnqueuers(test.TestCase):
def test_generator_enqueuer_threads(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(TestSequence([3, 200, 200, 3])),
use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(int(next(gen_output)[0, 0, 0, 0]))
self.assertEqual(len(set(acc) - set(range(100))), 0)
enqueuer.stop()
def test_generator_enqueuer_processes(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_pcs(TestSequence([3, 200, 200, 3])),
use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(int(next(gen_output)[0, 0, 0, 0]))
self.assertNotEqual(acc, list(range(100)))
enqueuer.stop()
def test_generator_enqueuer_fail_threads(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_threads(FaultSequence()),
use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
def test_generator_enqueuer_fail_processes(self):
enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
create_generator_from_sequence_pcs(FaultSequence()),
use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
def test_ordered_enqueuer_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc, list(range(100)))
enqueuer.stop()
def test_ordered_enqueuer_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc, list(range(100)))
enqueuer.stop()
def test_ordered_enqueuer_fail_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
FaultSequence(), use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
def test_ordered_enqueuer_fail_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
FaultSequence(), use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
with self.assertRaises(StopIteration):
next(gen_output)
def test_on_epoch_end_processes(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(200):
acc.append(next(gen_output)[0, 0, 0, 0])
# Check that order was kept by the OrderedEnqueuer with processes
self.assertEqual(acc[100:], list([k * 5 for k in range(100)]))
enqueuer.stop()
def test_context_switch(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
enqueuer2 = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3], value=15), use_multiprocessing=True)
enqueuer.start(3, 10)
enqueuer2.start(3, 10)
gen_output = enqueuer.get()
gen_output2 = enqueuer2.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
self.assertEqual(acc[-1], 99)
# One epoch has been completed, so the enqueuer will cycle its Sequence
acc = []
for _ in range(100):
acc.append(next(gen_output2)[0, 0, 0, 0])
self.assertEqual(acc[-1], 99 * 15)
# One epoch has been completed, so enqueuer2 will cycle its Sequence as well.
# Make sure that both Sequences were updated by on_epoch_end.
self.assertEqual(next(gen_output)[0, 0, 0, 0], 0)
self.assertEqual(next(gen_output)[0, 0, 0, 0], 5)
self.assertEqual(next(gen_output2)[0, 0, 0, 0], 0)
self.assertEqual(next(gen_output2)[0, 0, 0, 0], 15 * 5)
# Tear down everything
enqueuer.stop()
enqueuer2.stop()
def test_on_epoch_end_threads(self):
enqueuer = keras.utils.data_utils.OrderedEnqueuer(
TestSequence([3, 200, 200, 3]), use_multiprocessing=False)
enqueuer.start(3, 10)
gen_output = enqueuer.get()
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
acc = []
for _ in range(100):
acc.append(next(gen_output)[0, 0, 0, 0])
# Check that order was kept by the OrderedEnqueuer with threads
self.assertEqual(acc, list([k * 5 for k in range(100)]))
enqueuer.stop()
if __name__ == '__main__':
test.main()
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# Lots of different places that widgets could come from...
try:
from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
try:
from IPython.html.widgets import interact, FloatSlider, IntSlider
except ImportError:
try:
from IPython.html.widgets import (interact,
FloatSliderWidget as FloatSlider,
IntSliderWidget as IntSlider)
except ImportError:
pass
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
__all__ = ["choose_colorbrewer_palette", "choose_cubehelix_palette",
"choose_dark_palette", "choose_light_palette",
"choose_diverging_palette"]
def _init_mutable_colormap():
"""Create a matplotlib colormap that will be updated by the widgets."""
greys = color_palette("Greys", 256)
cmap = LinearSegmentedColormap.from_list("interactive", greys)
cmap._init()
cmap._set_extremes()
return cmap
def _update_lut(cmap, colors):
"""Change the LUT values in a matplotlib colormap in-place."""
cmap._lut[:256] = colors
cmap._set_extremes()
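# Note: these helpers reach into private LinearSegmentedColormap attributes
# (_init, _lut, _set_extremes) so that the widget callbacks can mutate the
# colormap in place while an existing plot still holds a reference to it.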
def _show_cmap(cmap):
"""Show a continuous matplotlib colormap."""
from .rcmod import axes_style # Avoid circular import
with axes_style("white"):
f, ax = plt.subplots(figsize=(8.25, .75))
ax.set(xticks=[], yticks=[])
x = np.linspace(0, 1, 256)[np.newaxis, :]
ax.pcolormesh(x, cmap=cmap)
def choose_colorbrewer_palette(data_type, as_cmap=False):
"""Select a palette from the ColorBrewer set.
These palettes are built into matplotlib and can be used by name in
many seaborn functions, or by passing the object returned by this function.
Parameters
----------
data_type : {'sequential', 'diverging', 'qualitative'}
This describes the kind of data you want to visualize. See the seaborn
color palette docs for more information about how to choose this value.
Note that you can pass substrings (e.g. 'q' for 'qualitative').
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
diverging_palette : Create a diverging palette from selected colors.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
if data_type.startswith("q") and as_cmap:
raise ValueError("Qualitative palettes cannot be colormaps.")
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if data_type.startswith("s"):
opts = ["Greys", "Reds", "Greens", "Blues", "Oranges", "Purples",
"BuGn", "BuPu", "GnBu", "OrRd", "PuBu", "PuRd", "RdPu", "YlGn",
"PuBuGn", "YlGnBu", "YlOrBr", "YlOrRd"]
variants = ["regular", "reverse", "dark"]
@interact
def choose_sequential(name=opts, n=(2, 18),
desat=FloatSlider(min=0, max=1, value=1),
variant=variants):
if variant == "reverse":
name += "_r"
elif variant == "dark":
name += "_d"
if as_cmap:
colors = color_palette(name, 256, desat)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = color_palette(name, n, desat)
palplot(pal)
elif data_type.startswith("d"):
opts = ["RdBu", "RdGy", "PRGn", "PiYG", "BrBG",
"RdYlBu", "RdYlGn", "Spectral"]
variants = ["regular", "reverse"]
@interact
def choose_diverging(name=opts, n=(2, 16),
desat=FloatSlider(min=0, max=1, value=1),
variant=variants):
if variant == "reverse":
name += "_r"
if as_cmap:
colors = color_palette(name, 256, desat)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = color_palette(name, n, desat)
palplot(pal)
elif data_type.startswith("q"):
opts = ["Set1", "Set2", "Set3", "Paired", "Accent",
"Pastel1", "Pastel2", "Dark2"]
@interact
def choose_qualitative(name=opts, n=(2, 16),
desat=FloatSlider(min=0, max=1, value=1)):
pal[:] = color_palette(name, n, desat)
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_dark_palette(input="husl", as_cmap=False):
"""Launch an interactive widget to create a dark sequential palette.
This corresponds with the :func:`dark_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
input : {'husl', 'hls', 'rgb'}
Color space for defining the seed value. Note that the default is
different than the default input for :func:`dark_palette`.
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if input == "rgb":
@interact
def choose_dark_palette_rgb(r=(0., 1.),
g=(0., 1.),
b=(0., 1.),
n=(3, 17)):
color = r, g, b
if as_cmap:
colors = dark_palette(color, 256, input="rgb")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="rgb")
palplot(pal)
elif input == "hls":
@interact
def choose_dark_palette_hls(h=(0., 1.),
l=(0., 1.),
s=(0., 1.),
n=(3, 17)):
color = h, l, s
if as_cmap:
colors = dark_palette(color, 256, input="hls")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="hls")
palplot(pal)
elif input == "husl":
@interact
def choose_dark_palette_husl(h=(0, 359),
s=(0, 99),
l=(0, 99),
n=(3, 17)):
color = h, s, l
if as_cmap:
colors = dark_palette(color, 256, input="husl")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = dark_palette(color, n, input="husl")
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_light_palette(input="husl", as_cmap=False):
"""Launch an interactive widget to create a light sequential palette.
This corresponds with the :func:`light_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
input : {'husl', 'hls', 'rgb'}
Color space for defining the seed value. Note that the default is
different than the default input for :func:`light_palette`.
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
light_palette : Create a sequential palette with bright low values.
dark_palette : Create a sequential palette with dark low values.
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
if input == "rgb":
@interact
def choose_light_palette_rgb(r=(0., 1.),
g=(0., 1.),
b=(0., 1.),
n=(3, 17)):
color = r, g, b
if as_cmap:
colors = light_palette(color, 256, input="rgb")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="rgb")
palplot(pal)
elif input == "hls":
@interact
def choose_light_palette_hls(h=(0., 1.),
l=(0., 1.),
s=(0., 1.),
n=(3, 17)):
color = h, l, s
if as_cmap:
colors = light_palette(color, 256, input="hls")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="hls")
palplot(pal)
elif input == "husl":
@interact
def choose_light_palette_husl(h=(0, 359),
s=(0, 99),
l=(0, 99),
n=(3, 17)):
color = h, s, l
if as_cmap:
colors = light_palette(color, 256, input="husl")
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = light_palette(color, n, input="husl")
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_diverging_palette(as_cmap=False):
"""Launch an interactive widget to choose a diverging color palette.
This corresponds with the :func:`diverging_palette` function. This kind
of palette is good for data that range between interesting low values
and interesting high values with a meaningful midpoint. (For example,
change scores relative to some baseline value).
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
diverging_palette : Create a diverging color palette or colormap.
choose_colorbrewer_palette : Interactively choose palettes from the
colorbrewer set, including diverging palettes.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
@interact
def choose_diverging_palette(h_neg=IntSlider(min=0,
max=359,
value=220),
h_pos=IntSlider(min=0,
max=359,
value=10),
s=IntSlider(min=0, max=99, value=74),
l=IntSlider(min=0, max=99, value=50),
sep=IntSlider(min=1, max=50, value=10),
n=(2, 16),
center=["light", "dark"]):
if as_cmap:
colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
_update_lut(cmap, colors)
_show_cmap(cmap)
else:
pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
palplot(pal)
if as_cmap:
return cmap
return pal
def choose_cubehelix_palette(as_cmap=False):
"""Launch an interactive widget to create a sequential cubehelix palette.
This corresponds with the :func:`cubehelix_palette` function. This kind
of palette is good for data that range between relatively uninteresting
low values and interesting high values. The cubehelix system allows the
palette to have more hue variance across the range, which can be helpful
for distinguishing a wider range of values.
Requires IPython 2+ and must be used in the notebook.
Parameters
----------
as_cmap : bool
If True, the return value is a matplotlib colormap rather than a
list of discrete colors.
Returns
-------
pal or cmap : list of colors or matplotlib colormap
Object that can be passed to plotting functions.
See Also
--------
cubehelix_palette : Create a sequential palette or colormap using the
cubehelix system.
"""
pal = []
if as_cmap:
cmap = _init_mutable_colormap()
@interact
def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),
start=FloatSlider(min=0, max=3, value=0),
rot=FloatSlider(min=-1, max=1, value=.4),
gamma=FloatSlider(min=0, max=5, value=1),
hue=FloatSlider(min=0, max=1, value=.8),
light=FloatSlider(min=0, max=1, value=.85),
dark=FloatSlider(min=0, max=1, value=.15),
reverse=False):
if as_cmap:
colors = cubehelix_palette(256, start, rot, gamma,
hue, light, dark, reverse)
_update_lut(cmap, np.c_[colors, np.ones(256)])
_show_cmap(cmap)
else:
pal[:] = cubehelix_palette(n_colors, start, rot, gamma,
hue, light, dark, reverse)
palplot(pal)
if as_cmap:
return cmap
return pal
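# Illustrative usage sketch (not part of the original module): in a Jupyter
# notebook with ipywidgets available, these choosers are typically driven
# interactively, e.g.
#
#     import seaborn as sns
#     pal = sns.choose_colorbrewer_palette("sequential")   # list of colors
#     cmap = sns.choose_cubehelix_palette(as_cmap=True)    # matplotlib colormap
#     sns.palplot(pal)
#
# The returned palette/colormap reflects the widget state when it is used and
# can be passed to plotting functions via ``palette=`` or ``cmap=``.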
from abc import ABCMeta
from ConfigParser import NoOptionError, NoSectionError
from functools import partial
import logging
from lxml import etree, objectify
from lxml.builder import ElementMaker
import requests
from .config import production_config, sandbox_config, CONFIG_PATH
from . import namespaces
from .utils import parser_from_schema
logging.basicConfig()
log = logging.getLogger(__name__)
# I wish I didn't have to hard-code this but there's no service to query, see:
# http://developer.ebay.com/DevZone/merchandising/docs/Concepts/SiteIDToGlobalID.html
SITE_ID_TO_GLOBAL_ID = {
0: 'EBAY-US',
2: 'EBAY-ENCA',
3: 'EBAY-GB',
15: 'EBAY-AU',
16: 'EBAY-AT',
23: 'EBAY-FRBE',
71: 'EBAY-FR',
77: 'EBAY-DE',
100: 'EBAY-MOTOR',
101: 'EBAY-IT',
123: 'EBAY-NLBE',
146: 'EBAY-NL',
186: 'EBAY-ES',
193: 'EBAY-CH',
201: 'EBAY-HK',
203: 'EBAY-IN',
205: 'EBAY-IE',
207: 'EBAY-MY',
210: 'EBAY-FRCA',
211: 'EBAY-PH',
212: 'EBAY-PL',
216: 'EBAY-SG',
}
NSMAP = {
'soap': namespaces.SOAP_1_2,
'soapenv': namespaces.SOAP_1_1,
'ebl': namespaces.EBAY,
'xsi': namespaces.XSI,
}
def dicttoxml(obj, factory):
elements = []
for key, value in obj.items():
if hasattr(value, 'items'):
elements.append(dicttoxml(value, factory))
elif isinstance(value, (list, tuple)):
elements.append([
dicttoxml(sub_val, factory)
for sub_val in value
])
else:
el = getattr(factory, key)(unicode(value))
elements.append(el)
return elements
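# e.g. for a flat dict, dicttoxml({'UserID': 'foo', 'DetailLevel': 'ReturnAll'},
# factory) returns roughly [<UserID>foo</UserID>, <DetailLevel>ReturnAll</DetailLevel>]
# as lxml elements in the factory's default namespace.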
class APIBase:
"""
Abstract: make a concrete sub-class (some APIs are provided below)
"""
__metaclass__ = ABCMeta
SCHEMA_URL = None
PRODUCTION_ENDPOINT = None
SANDBOX_ENDPOINT = None
SOAP_VERSION = None
DEFAULT_NAMESPACE = namespaces.EBAY
def __init__(self, schema_url=None, sandbox=False, **kwargs):
# eBay API methods are all CamelCase so it should be safe to set
# lowercase (or all-caps) attributes on self (see __getattr__)
self.CONF_PREFIX = self._get_conf_prefix()
self.sandbox = sandbox
if sandbox:
self.config = sandbox_config
else:
self.config = production_config
# use passed in schema first, else try config file,
# else use class default
if schema_url is not None:
self._schema = schema_url
else:
try:
self._schema = self.config.get('soap',
'%s_schema' % self.CONF_PREFIX)
except (NoOptionError, NoSectionError):
if self.SCHEMA_URL is None:
raise NotImplementedError(
'You must give a value for SCHEMA_URL on a sub-class,'
' or define <api name>_schema in the conf file'
)
self._schema = self.SCHEMA_URL
# make a schema-aware parser (for deserializing responses into objects)
self.parser, self.version = parser_from_schema(self._schema)
# determine the service endpoint URI
try:
self._endpoint = self.config.get('soap',
'%s_api' % self.CONF_PREFIX)
except (NoOptionError, NoSectionError):
if sandbox:
if self.SANDBOX_ENDPOINT is None:
raise NotImplementedError(
'You must give a value for SANDBOX_ENDPOINT on a sub-'
'class, or define <api name>_api in the conf file'
)
self._endpoint = self.SANDBOX_ENDPOINT
else:
if self.PRODUCTION_ENDPOINT is None:
raise NotImplementedError(
'You must give a value for PRODUCTION_ENDPOINT on a '
'sub-class, or define <api name>_api in the conf file'
)
self._endpoint = self.PRODUCTION_ENDPOINT
if self.SOAP_VERSION is None:
raise NotImplementedError(
'You must give a value for SOAP_VERSION on a sub-class'
)
self.client = requests.Session()
self.client.headers.update({
'Content-Type': 'application/soap+xml; charset=utf-8',
'SOAPAction': '',
})
log.info('CONFIG_PATH: %s', CONFIG_PATH)
self.site_id = (
kwargs.get('site_id') or self.config.get('site', 'site_id')
)
self.app_id = (
kwargs.get('app_id') or self.config.get('keys', 'app_id')
)
self.dev_id = (
kwargs.get('dev_id') or self.config.get('keys', 'dev_id')
)
self.cert_id = (
kwargs.get('cert_id') or self.config.get('keys', 'cert_id')
)
self.token = (
kwargs.get('token') or self.config.get('auth', 'token')
)
self._last_response = None
def __getattr__(self, name):
"""
make a SOAP request
Note:
eBay API methods are all CamelCase (or lowerCamelCase)
"""
if name.lower() == name or name.startswith('_'):
# avoid accidental API calls in ipython..!
raise AttributeError(name)  # normal lookup already failed; not an API call
return partial(self._execute, name=name)
def _get_conf_prefix(self):
# assume the class name ends in 'API'
return self.__class__.__name__[:-3].lower()
def _nsmap(self):
nsmap = NSMAP.copy()
nsmap[None] = self.DEFAULT_NAMESPACE
return nsmap
def get_soap_el_factory(self):
if self.SOAP_VERSION == '1.1':
return ElementMaker(
namespace=namespaces.SOAP_1_1,
nsmap=self._nsmap()
)
elif self.SOAP_VERSION == '1.2':
return ElementMaker(
namespace=namespaces.SOAP_1_2,
nsmap=self._nsmap()
)
else:
raise ValueError(
"Invalid SOAP_VERSION: {}".format(self.SOAP_VERSION)
)
def get_msg_el_factory(self):
return ElementMaker(nsmap=self._nsmap())
def get_http_headers(self, name):
return {}
def _request(self, name, envelope):
response = self.client.post(
url=self._endpoint,
data=etree.tostring(envelope),
params=self.get_query_params(name),
headers=self.get_http_headers(name),
)
response.raise_for_status()
self._last_response = response
return response
def _execute(self, name, **kwargs):
"""
this indirection gives us the same call signature as used by
ebaysdk-python, i.e. `api.execute('GetUser', {})`
(our __init__ signature and conf are different still at the moment)
"""
return self.execute(name, kwargs)
def execute(self, name, params):
request_name = '{}Request'.format(name) # can we rely on this?
factory = self.get_msg_el_factory()
body_el = getattr(factory, request_name)
envelope = self.make_envelope([
body_el(*dicttoxml(params, factory))
])
log.debug(etree.tostring(envelope, pretty_print=True))
response = self._request(name, envelope)
return self.objectify_response(name, response)
def objectify_response(self, name, response):
soap_response = etree.fromstring(
response.text.encode(response.encoding)
)
response_name = '{}Response'.format(name) # can we rely on this?
body_root = soap_response.xpath(
"//*[local-name() = '%s']" % response_name
)[0]
# tostring->fromstring roundtrip is ugly but otherwise the objectified
# tree is spoiled by namespaces (would have to use getattr everywhere)
return objectify.fromstring(
etree.tostring(body_root),
parser=self.parser
)
def get_soap_header(self):
return None
def get_query_params(self, name):
return {}
def make_envelope(self, body_elements=None):
"""
body_elements: <list> of etree.Elements or <None>
"""
soap = self.get_soap_el_factory()
body_elements = body_elements or []
body = soap.Body(*body_elements)
header = self.get_soap_header()
if header is not None:
elements = [header, body]
else:
elements = [body]
return soap.Envelope(
#{
# '{%s}encodingStyle' % namespaces.SOAP_1_2: (
# 'http://www.w3.org/2001/12/soap-encoding')
#},
*elements
)
class TradingAPI(APIBase):
SCHEMA_URL = 'http://developer.ebay.com/webservices/latest/ebaySvc.xsd'
PRODUCTION_ENDPOINT = 'https://api.ebay.com/wsapi'
SANDBOX_ENDPOINT = 'https://api.sandbox.ebay.com/wsapi'
SOAP_VERSION = '1.1'
def __getattr__(self, name):
method = super(TradingAPI, self).__getattr__(name=name)
# the method call has to include the API version in the body
# even though it's also provided on the querystring...
return partial(method, Version=self.version)
def get_soap_header(self):
soap = self.get_soap_el_factory()
payload = self.get_msg_el_factory()
return soap.Header(
payload.RequesterCredentials(
payload.Credentials(
payload.AppID(self.app_id),
payload.DevId(self.dev_id),
payload.AuthCert(self.cert_id),
),
payload.eBayAuthToken(self.token)
)
)
def get_query_params(self, name):
# for some reason eBay requires some fields from the SOAP request to be
# repeated as querystring args appended to the service url
return {
'callname': name,
'siteid': self.site_id,
'appid': self.app_id,
'version': self.version,
'routing': 'default',
}
class PlatformNotificationsAPI(TradingAPI):
"""
The calls to get and set platform notification preferences are actually
part of the TradingAPI.
The notifications received at your callback url can be decoded using the
`objectify_response` method.
"""
pass
class ShoppingAPI(APIBase):
SCHEMA_URL = (
'http://developer.ebay.com/webservices/latest/ShoppingService.xsd'
)
PRODUCTION_ENDPOINT = 'http://open.api.ebay.com/shopping'
SANDBOX_ENDPOINT = 'http://open.api.sandbox.ebay.com/shopping'
SOAP_VERSION = '1.1'
def get_query_params(self, name):
# for some reason eBay requires some fields from the SOAP request to be
# repeated as querystring args appended to the service url
return {
'callname': name,
'siteid': self.site_id,
'appid': self.app_id,
'version': self.version,
'responseencoding': 'SOAP',
'requestencoding': 'SOAP',
}
class FindingAPI(APIBase):
SCHEMA_URL = (
'http://developer.ebay.com/webservices/Finding/latest/'
'FindingService.wsdl'
)
PRODUCTION_ENDPOINT = (
'http://svcs.ebay.com/services/search/FindingService/v1'
)
SANDBOX_ENDPOINT = (
'http://svcs.sandbox.ebay.com/services/search/FindingService/v1'
)
SOAP_VERSION = '1.2'
DEFAULT_NAMESPACE = namespaces.EBAY_SEARCH
def get_http_headers(self, name):
site_id = int(self.site_id, 10)
return {
'X-EBAY-SOA-OPERATION-NAME': name,
'X-EBAY-SOA-SERVICE-NAME': 'FindingService', # this one is genius
'X-EBAY-SOA-SERVICE-VERSION': self.version,
'X-EBAY-SOA-GLOBAL-ID': SITE_ID_TO_GLOBAL_ID[site_id],
'X-EBAY-SOA-SECURITY-APPNAME': self.app_id,
'X-EBAY-SOA-REQUEST-DATA-FORMAT': 'SOAP',
'X-EBAY-SOA-MESSAGE-PROTOCOL': 'SOAP12',
}
class BusinessPoliciesAPI(APIBase):
SCHEMA_URL = (
'http://developer.ebay.com/webservices/business-policies/'
'latest/SellerProfilesManagementService.wsdl'
)
PRODUCTION_ENDPOINT = (
'https://svcs.ebay.com/services/selling/v1/'
'SellerProfilesManagementService'
)
SANDBOX_ENDPOINT = (
'http://svcs.sandbox.ebay.com/services/selling/v1/'
'SellerProfilesManagementService'
)
SOAP_VERSION = '1.2'
def get_http_headers(self, name):
site_id = int(self.site_id, 10)
return {
'X-EBAY-SOA-OPERATION-NAME': name,
'X-EBAY-SOA-SERVICE-VERSION': self.version,
'X-EBAY-SOA-GLOBAL-ID': SITE_ID_TO_GLOBAL_ID[site_id],
'X-EBAY-SOA-REQUEST-DATA-FORMAT': 'SOAP',
'X-EBAY-SOA-MESSAGE-PROTOCOL': 'SOAP12',
'X-EBAY-SOA-SECURITY-TOKEN': self.token,
}
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.app_engine import version_pb2
from google3.cloud.graphite.mmv2.services.google.app_engine import version_pb2_grpc
from typing import List
class Version(object):
def __init__(
self,
consumer_name: str = None,
name: str = None,
automatic_scaling: dict = None,
basic_scaling: dict = None,
manual_scaling: dict = None,
inbound_services: list = None,
instance_class: str = None,
network: dict = None,
zones: list = None,
resources: dict = None,
runtime: str = None,
runtime_channel: str = None,
threadsafe: bool = None,
vm: bool = None,
beta_settings: dict = None,
env: str = None,
serving_status: str = None,
created_by: str = None,
create_time: str = None,
disk_usage_bytes: int = None,
runtime_api_version: str = None,
runtime_main_executable_path: str = None,
handlers: list = None,
error_handlers: list = None,
libraries: list = None,
api_config: dict = None,
env_variables: dict = None,
default_expiration: str = None,
deployment: dict = None,
health_check: dict = None,
readiness_check: dict = None,
liveness_check: dict = None,
nobuild_files_regex: str = None,
version_url: str = None,
entrypoint: dict = None,
vpc_access_connector: dict = None,
app: str = None,
service: str = None,
service_account_file: str = "",
):
channel.initialize()
self.consumer_name = consumer_name
self.name = name
self.automatic_scaling = automatic_scaling
self.basic_scaling = basic_scaling
self.manual_scaling = manual_scaling
self.inbound_services = inbound_services
self.instance_class = instance_class
self.network = network
self.zones = zones
self.resources = resources
self.runtime = runtime
self.runtime_channel = runtime_channel
self.threadsafe = threadsafe
self.vm = vm
self.beta_settings = beta_settings
self.env = env
self.serving_status = serving_status
self.runtime_api_version = runtime_api_version
self.runtime_main_executable_path = runtime_main_executable_path
self.handlers = handlers
self.error_handlers = error_handlers
self.libraries = libraries
self.api_config = api_config
self.env_variables = env_variables
self.default_expiration = default_expiration
self.deployment = deployment
self.health_check = health_check
self.readiness_check = readiness_check
self.liveness_check = liveness_check
self.nobuild_files_regex = nobuild_files_regex
self.entrypoint = entrypoint
self.vpc_access_connector = vpc_access_connector
self.app = app
self.service = service
self.service_account_file = service_account_file
def apply(self):
stub = version_pb2_grpc.AppengineVersionServiceStub(channel.Channel())
request = version_pb2.ApplyAppengineVersionRequest()
if Primitive.to_proto(self.consumer_name):
request.resource.consumer_name = Primitive.to_proto(self.consumer_name)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if VersionAutomaticScaling.to_proto(self.automatic_scaling):
request.resource.automatic_scaling.CopyFrom(
VersionAutomaticScaling.to_proto(self.automatic_scaling)
)
else:
request.resource.ClearField("automatic_scaling")
if VersionBasicScaling.to_proto(self.basic_scaling):
request.resource.basic_scaling.CopyFrom(
VersionBasicScaling.to_proto(self.basic_scaling)
)
else:
request.resource.ClearField("basic_scaling")
if VersionManualScaling.to_proto(self.manual_scaling):
request.resource.manual_scaling.CopyFrom(
VersionManualScaling.to_proto(self.manual_scaling)
)
else:
request.resource.ClearField("manual_scaling")
if VersionInboundServicesEnumArray.to_proto(self.inbound_services):
request.resource.inbound_services.extend(
VersionInboundServicesEnumArray.to_proto(self.inbound_services)
)
if Primitive.to_proto(self.instance_class):
request.resource.instance_class = Primitive.to_proto(self.instance_class)
if VersionNetwork.to_proto(self.network):
request.resource.network.CopyFrom(VersionNetwork.to_proto(self.network))
else:
request.resource.ClearField("network")
if Primitive.to_proto(self.zones):
request.resource.zones.extend(Primitive.to_proto(self.zones))
if VersionResources.to_proto(self.resources):
request.resource.resources.CopyFrom(
VersionResources.to_proto(self.resources)
)
else:
request.resource.ClearField("resources")
if Primitive.to_proto(self.runtime):
request.resource.runtime = Primitive.to_proto(self.runtime)
if Primitive.to_proto(self.runtime_channel):
request.resource.runtime_channel = Primitive.to_proto(self.runtime_channel)
if Primitive.to_proto(self.threadsafe):
request.resource.threadsafe = Primitive.to_proto(self.threadsafe)
if Primitive.to_proto(self.vm):
request.resource.vm = Primitive.to_proto(self.vm)
if Primitive.to_proto(self.beta_settings):
request.resource.beta_settings = Primitive.to_proto(self.beta_settings)
if Primitive.to_proto(self.env):
request.resource.env = Primitive.to_proto(self.env)
if VersionServingStatusEnum.to_proto(self.serving_status):
request.resource.serving_status = VersionServingStatusEnum.to_proto(
self.serving_status
)
if Primitive.to_proto(self.runtime_api_version):
request.resource.runtime_api_version = Primitive.to_proto(
self.runtime_api_version
)
if Primitive.to_proto(self.runtime_main_executable_path):
request.resource.runtime_main_executable_path = Primitive.to_proto(
self.runtime_main_executable_path
)
if VersionHandlersArray.to_proto(self.handlers):
request.resource.handlers.extend(
VersionHandlersArray.to_proto(self.handlers)
)
if VersionErrorHandlersArray.to_proto(self.error_handlers):
request.resource.error_handlers.extend(
VersionErrorHandlersArray.to_proto(self.error_handlers)
)
if VersionLibrariesArray.to_proto(self.libraries):
request.resource.libraries.extend(
VersionLibrariesArray.to_proto(self.libraries)
)
if VersionApiConfig.to_proto(self.api_config):
request.resource.api_config.CopyFrom(
VersionApiConfig.to_proto(self.api_config)
)
else:
request.resource.ClearField("api_config")
if Primitive.to_proto(self.env_variables):
request.resource.env_variables = Primitive.to_proto(self.env_variables)
if Primitive.to_proto(self.default_expiration):
request.resource.default_expiration = Primitive.to_proto(
self.default_expiration
)
if VersionDeployment.to_proto(self.deployment):
request.resource.deployment.CopyFrom(
VersionDeployment.to_proto(self.deployment)
)
else:
request.resource.ClearField("deployment")
if VersionHealthCheck.to_proto(self.health_check):
request.resource.health_check.CopyFrom(
VersionHealthCheck.to_proto(self.health_check)
)
else:
request.resource.ClearField("health_check")
if VersionReadinessCheck.to_proto(self.readiness_check):
request.resource.readiness_check.CopyFrom(
VersionReadinessCheck.to_proto(self.readiness_check)
)
else:
request.resource.ClearField("readiness_check")
if VersionLivenessCheck.to_proto(self.liveness_check):
request.resource.liveness_check.CopyFrom(
VersionLivenessCheck.to_proto(self.liveness_check)
)
else:
request.resource.ClearField("liveness_check")
if Primitive.to_proto(self.nobuild_files_regex):
request.resource.nobuild_files_regex = Primitive.to_proto(
self.nobuild_files_regex
)
if VersionEntrypoint.to_proto(self.entrypoint):
request.resource.entrypoint.CopyFrom(
VersionEntrypoint.to_proto(self.entrypoint)
)
else:
request.resource.ClearField("entrypoint")
if VersionVPCAccessConnector.to_proto(self.vpc_access_connector):
request.resource.vpc_access_connector.CopyFrom(
VersionVPCAccessConnector.to_proto(self.vpc_access_connector)
)
else:
request.resource.ClearField("vpc_access_connector")
if Primitive.to_proto(self.app):
request.resource.app = Primitive.to_proto(self.app)
if Primitive.to_proto(self.service):
request.resource.service = Primitive.to_proto(self.service)
request.service_account_file = self.service_account_file
response = stub.ApplyAppengineVersion(request)
self.consumer_name = Primitive.from_proto(response.consumer_name)
self.name = Primitive.from_proto(response.name)
self.automatic_scaling = VersionAutomaticScaling.from_proto(
response.automatic_scaling
)
self.basic_scaling = VersionBasicScaling.from_proto(response.basic_scaling)
self.manual_scaling = VersionManualScaling.from_proto(response.manual_scaling)
self.inbound_services = VersionInboundServicesEnumArray.from_proto(
response.inbound_services
)
self.instance_class = Primitive.from_proto(response.instance_class)
self.network = VersionNetwork.from_proto(response.network)
self.zones = Primitive.from_proto(response.zones)
self.resources = VersionResources.from_proto(response.resources)
self.runtime = Primitive.from_proto(response.runtime)
self.runtime_channel = Primitive.from_proto(response.runtime_channel)
self.threadsafe = Primitive.from_proto(response.threadsafe)
self.vm = Primitive.from_proto(response.vm)
self.beta_settings = Primitive.from_proto(response.beta_settings)
self.env = Primitive.from_proto(response.env)
self.serving_status = VersionServingStatusEnum.from_proto(
response.serving_status
)
self.created_by = Primitive.from_proto(response.created_by)
self.create_time = Primitive.from_proto(response.create_time)
self.disk_usage_bytes = Primitive.from_proto(response.disk_usage_bytes)
self.runtime_api_version = Primitive.from_proto(response.runtime_api_version)
self.runtime_main_executable_path = Primitive.from_proto(
response.runtime_main_executable_path
)
self.handlers = VersionHandlersArray.from_proto(response.handlers)
self.error_handlers = VersionErrorHandlersArray.from_proto(
response.error_handlers
)
self.libraries = VersionLibrariesArray.from_proto(response.libraries)
self.api_config = VersionApiConfig.from_proto(response.api_config)
self.env_variables = Primitive.from_proto(response.env_variables)
self.default_expiration = Primitive.from_proto(response.default_expiration)
self.deployment = VersionDeployment.from_proto(response.deployment)
self.health_check = VersionHealthCheck.from_proto(response.health_check)
self.readiness_check = VersionReadinessCheck.from_proto(
response.readiness_check
)
self.liveness_check = VersionLivenessCheck.from_proto(response.liveness_check)
self.nobuild_files_regex = Primitive.from_proto(response.nobuild_files_regex)
self.version_url = Primitive.from_proto(response.version_url)
self.entrypoint = VersionEntrypoint.from_proto(response.entrypoint)
self.vpc_access_connector = VersionVPCAccessConnector.from_proto(
response.vpc_access_connector
)
self.app = Primitive.from_proto(response.app)
self.service = Primitive.from_proto(response.service)
def delete(self):
stub = version_pb2_grpc.AppengineVersionServiceStub(channel.Channel())
request = version_pb2.DeleteAppengineVersionRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.consumer_name):
request.resource.consumer_name = Primitive.to_proto(self.consumer_name)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if VersionAutomaticScaling.to_proto(self.automatic_scaling):
request.resource.automatic_scaling.CopyFrom(
VersionAutomaticScaling.to_proto(self.automatic_scaling)
)
else:
request.resource.ClearField("automatic_scaling")
if VersionBasicScaling.to_proto(self.basic_scaling):
request.resource.basic_scaling.CopyFrom(
VersionBasicScaling.to_proto(self.basic_scaling)
)
else:
request.resource.ClearField("basic_scaling")
if VersionManualScaling.to_proto(self.manual_scaling):
request.resource.manual_scaling.CopyFrom(
VersionManualScaling.to_proto(self.manual_scaling)
)
else:
request.resource.ClearField("manual_scaling")
if VersionInboundServicesEnumArray.to_proto(self.inbound_services):
request.resource.inbound_services.extend(
VersionInboundServicesEnumArray.to_proto(self.inbound_services)
)
if Primitive.to_proto(self.instance_class):
request.resource.instance_class = Primitive.to_proto(self.instance_class)
if VersionNetwork.to_proto(self.network):
request.resource.network.CopyFrom(VersionNetwork.to_proto(self.network))
else:
request.resource.ClearField("network")
if Primitive.to_proto(self.zones):
request.resource.zones.extend(Primitive.to_proto(self.zones))
if VersionResources.to_proto(self.resources):
request.resource.resources.CopyFrom(
VersionResources.to_proto(self.resources)
)
else:
request.resource.ClearField("resources")
if Primitive.to_proto(self.runtime):
request.resource.runtime = Primitive.to_proto(self.runtime)
if Primitive.to_proto(self.runtime_channel):
request.resource.runtime_channel = Primitive.to_proto(self.runtime_channel)
if Primitive.to_proto(self.threadsafe):
request.resource.threadsafe = Primitive.to_proto(self.threadsafe)
if Primitive.to_proto(self.vm):
request.resource.vm = Primitive.to_proto(self.vm)
if Primitive.to_proto(self.beta_settings):
request.resource.beta_settings = Primitive.to_proto(self.beta_settings)
if Primitive.to_proto(self.env):
request.resource.env = Primitive.to_proto(self.env)
if VersionServingStatusEnum.to_proto(self.serving_status):
request.resource.serving_status = VersionServingStatusEnum.to_proto(
self.serving_status
)
if Primitive.to_proto(self.runtime_api_version):
request.resource.runtime_api_version = Primitive.to_proto(
self.runtime_api_version
)
if Primitive.to_proto(self.runtime_main_executable_path):
request.resource.runtime_main_executable_path = Primitive.to_proto(
self.runtime_main_executable_path
)
if VersionHandlersArray.to_proto(self.handlers):
request.resource.handlers.extend(
VersionHandlersArray.to_proto(self.handlers)
)
if VersionErrorHandlersArray.to_proto(self.error_handlers):
request.resource.error_handlers.extend(
VersionErrorHandlersArray.to_proto(self.error_handlers)
)
if VersionLibrariesArray.to_proto(self.libraries):
request.resource.libraries.extend(
VersionLibrariesArray.to_proto(self.libraries)
)
if VersionApiConfig.to_proto(self.api_config):
request.resource.api_config.CopyFrom(
VersionApiConfig.to_proto(self.api_config)
)
else:
request.resource.ClearField("api_config")
if Primitive.to_proto(self.env_variables):
request.resource.env_variables = Primitive.to_proto(self.env_variables)
if Primitive.to_proto(self.default_expiration):
request.resource.default_expiration = Primitive.to_proto(
self.default_expiration
)
if VersionDeployment.to_proto(self.deployment):
request.resource.deployment.CopyFrom(
VersionDeployment.to_proto(self.deployment)
)
else:
request.resource.ClearField("deployment")
if VersionHealthCheck.to_proto(self.health_check):
request.resource.health_check.CopyFrom(
VersionHealthCheck.to_proto(self.health_check)
)
else:
request.resource.ClearField("health_check")
if VersionReadinessCheck.to_proto(self.readiness_check):
request.resource.readiness_check.CopyFrom(
VersionReadinessCheck.to_proto(self.readiness_check)
)
else:
request.resource.ClearField("readiness_check")
if VersionLivenessCheck.to_proto(self.liveness_check):
request.resource.liveness_check.CopyFrom(
VersionLivenessCheck.to_proto(self.liveness_check)
)
else:
request.resource.ClearField("liveness_check")
if Primitive.to_proto(self.nobuild_files_regex):
request.resource.nobuild_files_regex = Primitive.to_proto(
self.nobuild_files_regex
)
if VersionEntrypoint.to_proto(self.entrypoint):
request.resource.entrypoint.CopyFrom(
VersionEntrypoint.to_proto(self.entrypoint)
)
else:
request.resource.ClearField("entrypoint")
if VersionVPCAccessConnector.to_proto(self.vpc_access_connector):
request.resource.vpc_access_connector.CopyFrom(
VersionVPCAccessConnector.to_proto(self.vpc_access_connector)
)
else:
request.resource.ClearField("vpc_access_connector")
if Primitive.to_proto(self.app):
request.resource.app = Primitive.to_proto(self.app)
if Primitive.to_proto(self.service):
request.resource.service = Primitive.to_proto(self.service)
response = stub.DeleteAppengineVersion(request)
@classmethod
def list(self, app, service, service_account_file=""):
stub = version_pb2_grpc.AppengineVersionServiceStub(channel.Channel())
request = version_pb2.ListAppengineVersionRequest()
request.service_account_file = service_account_file
request.App = app
request.Service = service
return stub.ListAppengineVersion(request).items
def to_proto(self):
resource = version_pb2.AppengineVersion()
if Primitive.to_proto(self.consumer_name):
resource.consumer_name = Primitive.to_proto(self.consumer_name)
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if VersionAutomaticScaling.to_proto(self.automatic_scaling):
resource.automatic_scaling.CopyFrom(
VersionAutomaticScaling.to_proto(self.automatic_scaling)
)
else:
resource.ClearField("automatic_scaling")
if VersionBasicScaling.to_proto(self.basic_scaling):
resource.basic_scaling.CopyFrom(
VersionBasicScaling.to_proto(self.basic_scaling)
)
else:
resource.ClearField("basic_scaling")
if VersionManualScaling.to_proto(self.manual_scaling):
resource.manual_scaling.CopyFrom(
VersionManualScaling.to_proto(self.manual_scaling)
)
else:
resource.ClearField("manual_scaling")
if VersionInboundServicesEnumArray.to_proto(self.inbound_services):
resource.inbound_services.extend(
VersionInboundServicesEnumArray.to_proto(self.inbound_services)
)
if Primitive.to_proto(self.instance_class):
resource.instance_class = Primitive.to_proto(self.instance_class)
if VersionNetwork.to_proto(self.network):
resource.network.CopyFrom(VersionNetwork.to_proto(self.network))
else:
resource.ClearField("network")
if Primitive.to_proto(self.zones):
resource.zones.extend(Primitive.to_proto(self.zones))
if VersionResources.to_proto(self.resources):
resource.resources.CopyFrom(VersionResources.to_proto(self.resources))
else:
resource.ClearField("resources")
if Primitive.to_proto(self.runtime):
resource.runtime = Primitive.to_proto(self.runtime)
if Primitive.to_proto(self.runtime_channel):
resource.runtime_channel = Primitive.to_proto(self.runtime_channel)
if Primitive.to_proto(self.threadsafe):
resource.threadsafe = Primitive.to_proto(self.threadsafe)
if Primitive.to_proto(self.vm):
resource.vm = Primitive.to_proto(self.vm)
if Primitive.to_proto(self.beta_settings):
resource.beta_settings = Primitive.to_proto(self.beta_settings)
if Primitive.to_proto(self.env):
resource.env = Primitive.to_proto(self.env)
if VersionServingStatusEnum.to_proto(self.serving_status):
resource.serving_status = VersionServingStatusEnum.to_proto(
self.serving_status
)
if Primitive.to_proto(self.runtime_api_version):
resource.runtime_api_version = Primitive.to_proto(self.runtime_api_version)
if Primitive.to_proto(self.runtime_main_executable_path):
resource.runtime_main_executable_path = Primitive.to_proto(
self.runtime_main_executable_path
)
if VersionHandlersArray.to_proto(self.handlers):
resource.handlers.extend(VersionHandlersArray.to_proto(self.handlers))
if VersionErrorHandlersArray.to_proto(self.error_handlers):
resource.error_handlers.extend(
VersionErrorHandlersArray.to_proto(self.error_handlers)
)
if VersionLibrariesArray.to_proto(self.libraries):
resource.libraries.extend(VersionLibrariesArray.to_proto(self.libraries))
if VersionApiConfig.to_proto(self.api_config):
resource.api_config.CopyFrom(VersionApiConfig.to_proto(self.api_config))
else:
resource.ClearField("api_config")
if Primitive.to_proto(self.env_variables):
resource.env_variables = Primitive.to_proto(self.env_variables)
if Primitive.to_proto(self.default_expiration):
resource.default_expiration = Primitive.to_proto(self.default_expiration)
if VersionDeployment.to_proto(self.deployment):
resource.deployment.CopyFrom(VersionDeployment.to_proto(self.deployment))
else:
resource.ClearField("deployment")
if VersionHealthCheck.to_proto(self.health_check):
resource.health_check.CopyFrom(
VersionHealthCheck.to_proto(self.health_check)
)
else:
resource.ClearField("health_check")
if VersionReadinessCheck.to_proto(self.readiness_check):
resource.readiness_check.CopyFrom(
VersionReadinessCheck.to_proto(self.readiness_check)
)
else:
resource.ClearField("readiness_check")
if VersionLivenessCheck.to_proto(self.liveness_check):
resource.liveness_check.CopyFrom(
VersionLivenessCheck.to_proto(self.liveness_check)
)
else:
resource.ClearField("liveness_check")
if Primitive.to_proto(self.nobuild_files_regex):
resource.nobuild_files_regex = Primitive.to_proto(self.nobuild_files_regex)
if VersionEntrypoint.to_proto(self.entrypoint):
resource.entrypoint.CopyFrom(VersionEntrypoint.to_proto(self.entrypoint))
else:
resource.ClearField("entrypoint")
if VersionVPCAccessConnector.to_proto(self.vpc_access_connector):
resource.vpc_access_connector.CopyFrom(
VersionVPCAccessConnector.to_proto(self.vpc_access_connector)
)
else:
resource.ClearField("vpc_access_connector")
if Primitive.to_proto(self.app):
resource.app = Primitive.to_proto(self.app)
if Primitive.to_proto(self.service):
resource.service = Primitive.to_proto(self.service)
return resource
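# Illustrative usage sketch (field values are placeholders; requires the gRPC
# channel and service account wiring provided by connector.channel):
#
#     version = Version(
#         app="my-app",
#         service="default",
#         name="v1",
#         runtime="python39",
#         service_account_file="/path/to/service-account.json",
#     )
#     version.apply()    # create or update the App Engine version
#     versions = Version.list("my-app", "default", "/path/to/service-account.json")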
class VersionAutomaticScaling(object):
def __init__(
self,
cool_down_period: str = None,
cpu_utilization: dict = None,
max_concurrent_requests: int = None,
max_idle_instances: int = None,
max_total_instances: int = None,
max_pending_latency: str = None,
min_idle_instances: int = None,
min_total_instances: int = None,
min_pending_latency: str = None,
request_utilization: dict = None,
disk_utilization: dict = None,
network_utilization: dict = None,
standard_scheduler_settings: dict = None,
):
self.cool_down_period = cool_down_period
self.cpu_utilization = cpu_utilization
self.max_concurrent_requests = max_concurrent_requests
self.max_idle_instances = max_idle_instances
self.max_total_instances = max_total_instances
self.max_pending_latency = max_pending_latency
self.min_idle_instances = min_idle_instances
self.min_total_instances = min_total_instances
self.min_pending_latency = min_pending_latency
self.request_utilization = request_utilization
self.disk_utilization = disk_utilization
self.network_utilization = network_utilization
self.standard_scheduler_settings = standard_scheduler_settings
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionAutomaticScaling()
if Primitive.to_proto(resource.cool_down_period):
res.cool_down_period = Primitive.to_proto(resource.cool_down_period)
if VersionAutomaticScalingCpuUtilization.to_proto(resource.cpu_utilization):
res.cpu_utilization.CopyFrom(
VersionAutomaticScalingCpuUtilization.to_proto(resource.cpu_utilization)
)
else:
res.ClearField("cpu_utilization")
if Primitive.to_proto(resource.max_concurrent_requests):
res.max_concurrent_requests = Primitive.to_proto(
resource.max_concurrent_requests
)
if Primitive.to_proto(resource.max_idle_instances):
res.max_idle_instances = Primitive.to_proto(resource.max_idle_instances)
if Primitive.to_proto(resource.max_total_instances):
res.max_total_instances = Primitive.to_proto(resource.max_total_instances)
if Primitive.to_proto(resource.max_pending_latency):
res.max_pending_latency = Primitive.to_proto(resource.max_pending_latency)
if Primitive.to_proto(resource.min_idle_instances):
res.min_idle_instances = Primitive.to_proto(resource.min_idle_instances)
if Primitive.to_proto(resource.min_total_instances):
res.min_total_instances = Primitive.to_proto(resource.min_total_instances)
if Primitive.to_proto(resource.min_pending_latency):
res.min_pending_latency = Primitive.to_proto(resource.min_pending_latency)
if VersionAutomaticScalingRequestUtilization.to_proto(
resource.request_utilization
):
res.request_utilization.CopyFrom(
VersionAutomaticScalingRequestUtilization.to_proto(
resource.request_utilization
)
)
else:
res.ClearField("request_utilization")
if VersionAutomaticScalingDiskUtilization.to_proto(resource.disk_utilization):
res.disk_utilization.CopyFrom(
VersionAutomaticScalingDiskUtilization.to_proto(
resource.disk_utilization
)
)
else:
res.ClearField("disk_utilization")
if VersionAutomaticScalingNetworkUtilization.to_proto(
resource.network_utilization
):
res.network_utilization.CopyFrom(
VersionAutomaticScalingNetworkUtilization.to_proto(
resource.network_utilization
)
)
else:
res.ClearField("network_utilization")
if VersionAutomaticScalingStandardSchedulerSettings.to_proto(
resource.standard_scheduler_settings
):
res.standard_scheduler_settings.CopyFrom(
VersionAutomaticScalingStandardSchedulerSettings.to_proto(
resource.standard_scheduler_settings
)
)
else:
res.ClearField("standard_scheduler_settings")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionAutomaticScaling(
cool_down_period=Primitive.from_proto(resource.cool_down_period),
cpu_utilization=VersionAutomaticScalingCpuUtilization.from_proto(
resource.cpu_utilization
),
max_concurrent_requests=Primitive.from_proto(
resource.max_concurrent_requests
),
max_idle_instances=Primitive.from_proto(resource.max_idle_instances),
max_total_instances=Primitive.from_proto(resource.max_total_instances),
max_pending_latency=Primitive.from_proto(resource.max_pending_latency),
min_idle_instances=Primitive.from_proto(resource.min_idle_instances),
min_total_instances=Primitive.from_proto(resource.min_total_instances),
min_pending_latency=Primitive.from_proto(resource.min_pending_latency),
request_utilization=VersionAutomaticScalingRequestUtilization.from_proto(
resource.request_utilization
),
disk_utilization=VersionAutomaticScalingDiskUtilization.from_proto(
resource.disk_utilization
),
network_utilization=VersionAutomaticScalingNetworkUtilization.from_proto(
resource.network_utilization
),
standard_scheduler_settings=VersionAutomaticScalingStandardSchedulerSettings.from_proto(
resource.standard_scheduler_settings
),
)
class VersionAutomaticScalingArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionAutomaticScaling.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionAutomaticScaling.from_proto(i) for i in resources]
class VersionAutomaticScalingCpuUtilization(object):
def __init__(
self, aggregation_window_length: str = None, target_utilization: float = None
):
self.aggregation_window_length = aggregation_window_length
self.target_utilization = target_utilization
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionAutomaticScalingCpuUtilization()
if Primitive.to_proto(resource.aggregation_window_length):
res.aggregation_window_length = Primitive.to_proto(
resource.aggregation_window_length
)
if Primitive.to_proto(resource.target_utilization):
res.target_utilization = Primitive.to_proto(resource.target_utilization)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionAutomaticScalingCpuUtilization(
aggregation_window_length=Primitive.from_proto(
resource.aggregation_window_length
),
target_utilization=Primitive.from_proto(resource.target_utilization),
)
class VersionAutomaticScalingCpuUtilizationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionAutomaticScalingCpuUtilization.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionAutomaticScalingCpuUtilization.from_proto(i) for i in resources]
class VersionAutomaticScalingRequestUtilization(object):
def __init__(
self,
target_request_count_per_second: int = None,
target_concurrent_requests: int = None,
):
self.target_request_count_per_second = target_request_count_per_second
self.target_concurrent_requests = target_concurrent_requests
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionAutomaticScalingRequestUtilization()
if Primitive.to_proto(resource.target_request_count_per_second):
res.target_request_count_per_second = Primitive.to_proto(
resource.target_request_count_per_second
)
if Primitive.to_proto(resource.target_concurrent_requests):
res.target_concurrent_requests = Primitive.to_proto(
resource.target_concurrent_requests
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionAutomaticScalingRequestUtilization(
target_request_count_per_second=Primitive.from_proto(
resource.target_request_count_per_second
),
target_concurrent_requests=Primitive.from_proto(
resource.target_concurrent_requests
),
)
class VersionAutomaticScalingRequestUtilizationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
VersionAutomaticScalingRequestUtilization.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
VersionAutomaticScalingRequestUtilization.from_proto(i) for i in resources
]
class VersionAutomaticScalingDiskUtilization(object):
def __init__(
self,
target_write_bytes_per_second: int = None,
target_write_ops_per_second: int = None,
target_read_bytes_per_second: int = None,
target_read_ops_per_second: int = None,
):
self.target_write_bytes_per_second = target_write_bytes_per_second
self.target_write_ops_per_second = target_write_ops_per_second
self.target_read_bytes_per_second = target_read_bytes_per_second
self.target_read_ops_per_second = target_read_ops_per_second
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionAutomaticScalingDiskUtilization()
if Primitive.to_proto(resource.target_write_bytes_per_second):
res.target_write_bytes_per_second = Primitive.to_proto(
resource.target_write_bytes_per_second
)
if Primitive.to_proto(resource.target_write_ops_per_second):
res.target_write_ops_per_second = Primitive.to_proto(
resource.target_write_ops_per_second
)
if Primitive.to_proto(resource.target_read_bytes_per_second):
res.target_read_bytes_per_second = Primitive.to_proto(
resource.target_read_bytes_per_second
)
if Primitive.to_proto(resource.target_read_ops_per_second):
res.target_read_ops_per_second = Primitive.to_proto(
resource.target_read_ops_per_second
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionAutomaticScalingDiskUtilization(
target_write_bytes_per_second=Primitive.from_proto(
resource.target_write_bytes_per_second
),
target_write_ops_per_second=Primitive.from_proto(
resource.target_write_ops_per_second
),
target_read_bytes_per_second=Primitive.from_proto(
resource.target_read_bytes_per_second
),
target_read_ops_per_second=Primitive.from_proto(
resource.target_read_ops_per_second
),
)
class VersionAutomaticScalingDiskUtilizationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionAutomaticScalingDiskUtilization.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionAutomaticScalingDiskUtilization.from_proto(i) for i in resources]
class VersionAutomaticScalingNetworkUtilization(object):
def __init__(
self,
target_sent_bytes_per_second: int = None,
target_sent_packets_per_second: int = None,
target_received_bytes_per_second: int = None,
target_received_packets_per_second: int = None,
):
self.target_sent_bytes_per_second = target_sent_bytes_per_second
self.target_sent_packets_per_second = target_sent_packets_per_second
self.target_received_bytes_per_second = target_received_bytes_per_second
self.target_received_packets_per_second = target_received_packets_per_second
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionAutomaticScalingNetworkUtilization()
if Primitive.to_proto(resource.target_sent_bytes_per_second):
res.target_sent_bytes_per_second = Primitive.to_proto(
resource.target_sent_bytes_per_second
)
if Primitive.to_proto(resource.target_sent_packets_per_second):
res.target_sent_packets_per_second = Primitive.to_proto(
resource.target_sent_packets_per_second
)
if Primitive.to_proto(resource.target_received_bytes_per_second):
res.target_received_bytes_per_second = Primitive.to_proto(
resource.target_received_bytes_per_second
)
if Primitive.to_proto(resource.target_received_packets_per_second):
res.target_received_packets_per_second = Primitive.to_proto(
resource.target_received_packets_per_second
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionAutomaticScalingNetworkUtilization(
target_sent_bytes_per_second=Primitive.from_proto(
resource.target_sent_bytes_per_second
),
target_sent_packets_per_second=Primitive.from_proto(
resource.target_sent_packets_per_second
),
target_received_bytes_per_second=Primitive.from_proto(
resource.target_received_bytes_per_second
),
target_received_packets_per_second=Primitive.from_proto(
resource.target_received_packets_per_second
),
)
class VersionAutomaticScalingNetworkUtilizationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
VersionAutomaticScalingNetworkUtilization.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
VersionAutomaticScalingNetworkUtilization.from_proto(i) for i in resources
]
class VersionAutomaticScalingStandardSchedulerSettings(object):
def __init__(
self,
target_cpu_utilization: float = None,
target_throughput_utilization: float = None,
min_instances: int = None,
max_instances: int = None,
):
self.target_cpu_utilization = target_cpu_utilization
self.target_throughput_utilization = target_throughput_utilization
self.min_instances = min_instances
self.max_instances = max_instances
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionAutomaticScalingStandardSchedulerSettings()
if Primitive.to_proto(resource.target_cpu_utilization):
res.target_cpu_utilization = Primitive.to_proto(
resource.target_cpu_utilization
)
if Primitive.to_proto(resource.target_throughput_utilization):
res.target_throughput_utilization = Primitive.to_proto(
resource.target_throughput_utilization
)
if Primitive.to_proto(resource.min_instances):
res.min_instances = Primitive.to_proto(resource.min_instances)
if Primitive.to_proto(resource.max_instances):
res.max_instances = Primitive.to_proto(resource.max_instances)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionAutomaticScalingStandardSchedulerSettings(
target_cpu_utilization=Primitive.from_proto(
resource.target_cpu_utilization
),
target_throughput_utilization=Primitive.from_proto(
resource.target_throughput_utilization
),
min_instances=Primitive.from_proto(resource.min_instances),
max_instances=Primitive.from_proto(resource.max_instances),
)
class VersionAutomaticScalingStandardSchedulerSettingsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
VersionAutomaticScalingStandardSchedulerSettings.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
VersionAutomaticScalingStandardSchedulerSettings.from_proto(i)
for i in resources
]
class VersionBasicScaling(object):
def __init__(self, idle_timeout: str = None, max_instances: int = None):
self.idle_timeout = idle_timeout
self.max_instances = max_instances
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionBasicScaling()
if Primitive.to_proto(resource.idle_timeout):
res.idle_timeout = Primitive.to_proto(resource.idle_timeout)
if Primitive.to_proto(resource.max_instances):
res.max_instances = Primitive.to_proto(resource.max_instances)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionBasicScaling(
idle_timeout=Primitive.from_proto(resource.idle_timeout),
max_instances=Primitive.from_proto(resource.max_instances),
)
class VersionBasicScalingArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionBasicScaling.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionBasicScaling.from_proto(i) for i in resources]
class VersionManualScaling(object):
def __init__(self, instances: int = None):
self.instances = instances
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionManualScaling()
if Primitive.to_proto(resource.instances):
res.instances = Primitive.to_proto(resource.instances)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionManualScaling(instances=Primitive.from_proto(resource.instances),)
class VersionManualScalingArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionManualScaling.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionManualScaling.from_proto(i) for i in resources]
class VersionNetwork(object):
def __init__(
self,
forwarded_ports: list = None,
instance_tag: str = None,
name: str = None,
subnetwork_name: str = None,
session_affinity: bool = None,
):
self.forwarded_ports = forwarded_ports
self.instance_tag = instance_tag
self.name = name
self.subnetwork_name = subnetwork_name
self.session_affinity = session_affinity
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionNetwork()
if Primitive.to_proto(resource.forwarded_ports):
res.forwarded_ports.extend(Primitive.to_proto(resource.forwarded_ports))
if Primitive.to_proto(resource.instance_tag):
res.instance_tag = Primitive.to_proto(resource.instance_tag)
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.subnetwork_name):
res.subnetwork_name = Primitive.to_proto(resource.subnetwork_name)
if Primitive.to_proto(resource.session_affinity):
res.session_affinity = Primitive.to_proto(resource.session_affinity)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionNetwork(
forwarded_ports=Primitive.from_proto(resource.forwarded_ports),
instance_tag=Primitive.from_proto(resource.instance_tag),
name=Primitive.from_proto(resource.name),
subnetwork_name=Primitive.from_proto(resource.subnetwork_name),
session_affinity=Primitive.from_proto(resource.session_affinity),
)
class VersionNetworkArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionNetwork.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionNetwork.from_proto(i) for i in resources]
class VersionResources(object):
def __init__(
self,
cpu: float = None,
disk_gb: float = None,
memory_gb: float = None,
volumes: list = None,
):
self.cpu = cpu
self.disk_gb = disk_gb
self.memory_gb = memory_gb
self.volumes = volumes
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionResources()
if Primitive.to_proto(resource.cpu):
res.cpu = Primitive.to_proto(resource.cpu)
if Primitive.to_proto(resource.disk_gb):
res.disk_gb = Primitive.to_proto(resource.disk_gb)
if Primitive.to_proto(resource.memory_gb):
res.memory_gb = Primitive.to_proto(resource.memory_gb)
if VersionResourcesVolumesArray.to_proto(resource.volumes):
res.volumes.extend(VersionResourcesVolumesArray.to_proto(resource.volumes))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionResources(
cpu=Primitive.from_proto(resource.cpu),
disk_gb=Primitive.from_proto(resource.disk_gb),
memory_gb=Primitive.from_proto(resource.memory_gb),
volumes=VersionResourcesVolumesArray.from_proto(resource.volumes),
)
class VersionResourcesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionResources.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionResources.from_proto(i) for i in resources]
class VersionResourcesVolumes(object):
def __init__(
self, name: str = None, volume_type: str = None, size_gb: float = None
):
self.name = name
self.volume_type = volume_type
self.size_gb = size_gb
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionResourcesVolumes()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.volume_type):
res.volume_type = Primitive.to_proto(resource.volume_type)
if Primitive.to_proto(resource.size_gb):
res.size_gb = Primitive.to_proto(resource.size_gb)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionResourcesVolumes(
name=Primitive.from_proto(resource.name),
volume_type=Primitive.from_proto(resource.volume_type),
size_gb=Primitive.from_proto(resource.size_gb),
)
class VersionResourcesVolumesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionResourcesVolumes.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionResourcesVolumes.from_proto(i) for i in resources]
class VersionHandlers(object):
def __init__(
self,
url_regex: str = None,
static_files: dict = None,
script: dict = None,
api_endpoint: dict = None,
security_level: str = None,
login: str = None,
auth_fail_action: str = None,
redirect_http_response_code: str = None,
):
self.url_regex = url_regex
self.static_files = static_files
self.script = script
self.api_endpoint = api_endpoint
self.security_level = security_level
self.login = login
self.auth_fail_action = auth_fail_action
self.redirect_http_response_code = redirect_http_response_code
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionHandlers()
if Primitive.to_proto(resource.url_regex):
res.url_regex = Primitive.to_proto(resource.url_regex)
if VersionHandlersStaticFiles.to_proto(resource.static_files):
res.static_files.CopyFrom(
VersionHandlersStaticFiles.to_proto(resource.static_files)
)
else:
res.ClearField("static_files")
if VersionHandlersScript.to_proto(resource.script):
res.script.CopyFrom(VersionHandlersScript.to_proto(resource.script))
else:
res.ClearField("script")
if VersionHandlersApiEndpoint.to_proto(resource.api_endpoint):
res.api_endpoint.CopyFrom(
VersionHandlersApiEndpoint.to_proto(resource.api_endpoint)
)
else:
res.ClearField("api_endpoint")
if VersionHandlersSecurityLevelEnum.to_proto(resource.security_level):
res.security_level = VersionHandlersSecurityLevelEnum.to_proto(
resource.security_level
)
if VersionHandlersLoginEnum.to_proto(resource.login):
res.login = VersionHandlersLoginEnum.to_proto(resource.login)
if VersionHandlersAuthFailActionEnum.to_proto(resource.auth_fail_action):
res.auth_fail_action = VersionHandlersAuthFailActionEnum.to_proto(
resource.auth_fail_action
)
if VersionHandlersRedirectHttpResponseCodeEnum.to_proto(
resource.redirect_http_response_code
):
res.redirect_http_response_code = VersionHandlersRedirectHttpResponseCodeEnum.to_proto(
resource.redirect_http_response_code
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionHandlers(
url_regex=Primitive.from_proto(resource.url_regex),
static_files=VersionHandlersStaticFiles.from_proto(resource.static_files),
script=VersionHandlersScript.from_proto(resource.script),
api_endpoint=VersionHandlersApiEndpoint.from_proto(resource.api_endpoint),
security_level=VersionHandlersSecurityLevelEnum.from_proto(
resource.security_level
),
login=VersionHandlersLoginEnum.from_proto(resource.login),
auth_fail_action=VersionHandlersAuthFailActionEnum.from_proto(
resource.auth_fail_action
),
redirect_http_response_code=VersionHandlersRedirectHttpResponseCodeEnum.from_proto(
resource.redirect_http_response_code
),
)
class VersionHandlersArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionHandlers.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionHandlers.from_proto(i) for i in resources]
class VersionHandlersStaticFiles(object):
def __init__(
self,
path: str = None,
upload_path_regex: str = None,
http_headers: dict = None,
mime_type: str = None,
expiration: str = None,
require_matching_file: bool = None,
application_readable: bool = None,
):
self.path = path
self.upload_path_regex = upload_path_regex
self.http_headers = http_headers
self.mime_type = mime_type
self.expiration = expiration
self.require_matching_file = require_matching_file
self.application_readable = application_readable
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionHandlersStaticFiles()
if Primitive.to_proto(resource.path):
res.path = Primitive.to_proto(resource.path)
if Primitive.to_proto(resource.upload_path_regex):
res.upload_path_regex = Primitive.to_proto(resource.upload_path_regex)
if Primitive.to_proto(resource.http_headers):
res.http_headers = Primitive.to_proto(resource.http_headers)
if Primitive.to_proto(resource.mime_type):
res.mime_type = Primitive.to_proto(resource.mime_type)
if Primitive.to_proto(resource.expiration):
res.expiration = Primitive.to_proto(resource.expiration)
if Primitive.to_proto(resource.require_matching_file):
res.require_matching_file = Primitive.to_proto(
resource.require_matching_file
)
if Primitive.to_proto(resource.application_readable):
res.application_readable = Primitive.to_proto(resource.application_readable)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionHandlersStaticFiles(
path=Primitive.from_proto(resource.path),
upload_path_regex=Primitive.from_proto(resource.upload_path_regex),
http_headers=Primitive.from_proto(resource.http_headers),
mime_type=Primitive.from_proto(resource.mime_type),
expiration=Primitive.from_proto(resource.expiration),
require_matching_file=Primitive.from_proto(resource.require_matching_file),
application_readable=Primitive.from_proto(resource.application_readable),
)
class VersionHandlersStaticFilesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionHandlersStaticFiles.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionHandlersStaticFiles.from_proto(i) for i in resources]
class VersionHandlersScript(object):
def __init__(self, script_path: str = None):
self.script_path = script_path
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionHandlersScript()
if Primitive.to_proto(resource.script_path):
res.script_path = Primitive.to_proto(resource.script_path)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionHandlersScript(
script_path=Primitive.from_proto(resource.script_path),
)
class VersionHandlersScriptArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionHandlersScript.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionHandlersScript.from_proto(i) for i in resources]
class VersionHandlersApiEndpoint(object):
def __init__(self, script_path: str = None):
self.script_path = script_path
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionHandlersApiEndpoint()
if Primitive.to_proto(resource.script_path):
res.script_path = Primitive.to_proto(resource.script_path)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionHandlersApiEndpoint(
script_path=Primitive.from_proto(resource.script_path),
)
class VersionHandlersApiEndpointArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionHandlersApiEndpoint.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionHandlersApiEndpoint.from_proto(i) for i in resources]
class VersionErrorHandlers(object):
def __init__(
self, error_code: str = None, static_file: str = None, mime_type: str = None
):
self.error_code = error_code
self.static_file = static_file
self.mime_type = mime_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionErrorHandlers()
if VersionErrorHandlersErrorCodeEnum.to_proto(resource.error_code):
res.error_code = VersionErrorHandlersErrorCodeEnum.to_proto(
resource.error_code
)
if Primitive.to_proto(resource.static_file):
res.static_file = Primitive.to_proto(resource.static_file)
if Primitive.to_proto(resource.mime_type):
res.mime_type = Primitive.to_proto(resource.mime_type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionErrorHandlers(
error_code=VersionErrorHandlersErrorCodeEnum.from_proto(
resource.error_code
),
static_file=Primitive.from_proto(resource.static_file),
mime_type=Primitive.from_proto(resource.mime_type),
)
class VersionErrorHandlersArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionErrorHandlers.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionErrorHandlers.from_proto(i) for i in resources]
class VersionLibraries(object):
def __init__(self, name: str = None, version: str = None):
self.name = name
self.version = version
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionLibraries()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.version):
res.version = Primitive.to_proto(resource.version)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionLibraries(
name=Primitive.from_proto(resource.name),
version=Primitive.from_proto(resource.version),
)
class VersionLibrariesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionLibraries.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionLibraries.from_proto(i) for i in resources]
class VersionApiConfig(object):
def __init__(
self,
auth_fail_action: str = None,
login: str = None,
script: str = None,
security_level: str = None,
url: str = None,
):
self.auth_fail_action = auth_fail_action
self.login = login
self.script = script
self.security_level = security_level
self.url = url
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionApiConfig()
if VersionApiConfigAuthFailActionEnum.to_proto(resource.auth_fail_action):
res.auth_fail_action = VersionApiConfigAuthFailActionEnum.to_proto(
resource.auth_fail_action
)
if VersionApiConfigLoginEnum.to_proto(resource.login):
res.login = VersionApiConfigLoginEnum.to_proto(resource.login)
if Primitive.to_proto(resource.script):
res.script = Primitive.to_proto(resource.script)
if VersionApiConfigSecurityLevelEnum.to_proto(resource.security_level):
res.security_level = VersionApiConfigSecurityLevelEnum.to_proto(
resource.security_level
)
if Primitive.to_proto(resource.url):
res.url = Primitive.to_proto(resource.url)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionApiConfig(
auth_fail_action=VersionApiConfigAuthFailActionEnum.from_proto(
resource.auth_fail_action
),
login=VersionApiConfigLoginEnum.from_proto(resource.login),
script=Primitive.from_proto(resource.script),
security_level=VersionApiConfigSecurityLevelEnum.from_proto(
resource.security_level
),
url=Primitive.from_proto(resource.url),
)
class VersionApiConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionApiConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionApiConfig.from_proto(i) for i in resources]
class VersionDeployment(object):
def __init__(
self,
files: dict = None,
container: dict = None,
zip: dict = None,
cloud_build_options: dict = None,
):
self.files = files
self.container = container
self.zip = zip
self.cloud_build_options = cloud_build_options
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionDeployment()
if Primitive.to_proto(resource.files):
res.files = Primitive.to_proto(resource.files)
if VersionDeploymentContainer.to_proto(resource.container):
res.container.CopyFrom(
VersionDeploymentContainer.to_proto(resource.container)
)
else:
res.ClearField("container")
if VersionDeploymentZip.to_proto(resource.zip):
res.zip.CopyFrom(VersionDeploymentZip.to_proto(resource.zip))
else:
res.ClearField("zip")
if VersionDeploymentCloudBuildOptions.to_proto(resource.cloud_build_options):
res.cloud_build_options.CopyFrom(
VersionDeploymentCloudBuildOptions.to_proto(
resource.cloud_build_options
)
)
else:
res.ClearField("cloud_build_options")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionDeployment(
files=Primitive.from_proto(resource.files),
container=VersionDeploymentContainer.from_proto(resource.container),
zip=VersionDeploymentZip.from_proto(resource.zip),
cloud_build_options=VersionDeploymentCloudBuildOptions.from_proto(
resource.cloud_build_options
),
)
class VersionDeploymentArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionDeployment.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionDeployment.from_proto(i) for i in resources]
class VersionDeploymentFiles(object):
def __init__(
self, source_url: str = None, sha1_sum: str = None, mime_type: str = None
):
self.source_url = source_url
self.sha1_sum = sha1_sum
self.mime_type = mime_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionDeploymentFiles()
if Primitive.to_proto(resource.source_url):
res.source_url = Primitive.to_proto(resource.source_url)
if Primitive.to_proto(resource.sha1_sum):
res.sha1_sum = Primitive.to_proto(resource.sha1_sum)
if Primitive.to_proto(resource.mime_type):
res.mime_type = Primitive.to_proto(resource.mime_type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionDeploymentFiles(
source_url=Primitive.from_proto(resource.source_url),
sha1_sum=Primitive.from_proto(resource.sha1_sum),
mime_type=Primitive.from_proto(resource.mime_type),
)
class VersionDeploymentFilesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionDeploymentFiles.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionDeploymentFiles.from_proto(i) for i in resources]
class VersionDeploymentContainer(object):
def __init__(self, image: str = None):
self.image = image
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionDeploymentContainer()
if Primitive.to_proto(resource.image):
res.image = Primitive.to_proto(resource.image)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionDeploymentContainer(image=Primitive.from_proto(resource.image),)
class VersionDeploymentContainerArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionDeploymentContainer.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionDeploymentContainer.from_proto(i) for i in resources]
class VersionDeploymentZip(object):
def __init__(self, source_url: str = None, files_count: int = None):
self.source_url = source_url
self.files_count = files_count
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionDeploymentZip()
if Primitive.to_proto(resource.source_url):
res.source_url = Primitive.to_proto(resource.source_url)
if Primitive.to_proto(resource.files_count):
res.files_count = Primitive.to_proto(resource.files_count)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionDeploymentZip(
source_url=Primitive.from_proto(resource.source_url),
files_count=Primitive.from_proto(resource.files_count),
)
class VersionDeploymentZipArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionDeploymentZip.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionDeploymentZip.from_proto(i) for i in resources]
class VersionDeploymentCloudBuildOptions(object):
def __init__(self, app_yaml_path: str = None, cloud_build_timeout: str = None):
self.app_yaml_path = app_yaml_path
self.cloud_build_timeout = cloud_build_timeout
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionDeploymentCloudBuildOptions()
if Primitive.to_proto(resource.app_yaml_path):
res.app_yaml_path = Primitive.to_proto(resource.app_yaml_path)
if Primitive.to_proto(resource.cloud_build_timeout):
res.cloud_build_timeout = Primitive.to_proto(resource.cloud_build_timeout)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionDeploymentCloudBuildOptions(
app_yaml_path=Primitive.from_proto(resource.app_yaml_path),
cloud_build_timeout=Primitive.from_proto(resource.cloud_build_timeout),
)
class VersionDeploymentCloudBuildOptionsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionDeploymentCloudBuildOptions.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionDeploymentCloudBuildOptions.from_proto(i) for i in resources]
class VersionHealthCheck(object):
def __init__(
self,
disable_health_check: bool = None,
host: str = None,
healthy_threshold: int = None,
unhealthy_threshold: int = None,
restart_threshold: int = None,
check_interval: str = None,
timeout: str = None,
):
self.disable_health_check = disable_health_check
self.host = host
self.healthy_threshold = healthy_threshold
self.unhealthy_threshold = unhealthy_threshold
self.restart_threshold = restart_threshold
self.check_interval = check_interval
self.timeout = timeout
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionHealthCheck()
if Primitive.to_proto(resource.disable_health_check):
res.disable_health_check = Primitive.to_proto(resource.disable_health_check)
if Primitive.to_proto(resource.host):
res.host = Primitive.to_proto(resource.host)
if Primitive.to_proto(resource.healthy_threshold):
res.healthy_threshold = Primitive.to_proto(resource.healthy_threshold)
if Primitive.to_proto(resource.unhealthy_threshold):
res.unhealthy_threshold = Primitive.to_proto(resource.unhealthy_threshold)
if Primitive.to_proto(resource.restart_threshold):
res.restart_threshold = Primitive.to_proto(resource.restart_threshold)
if Primitive.to_proto(resource.check_interval):
res.check_interval = Primitive.to_proto(resource.check_interval)
if Primitive.to_proto(resource.timeout):
res.timeout = Primitive.to_proto(resource.timeout)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionHealthCheck(
disable_health_check=Primitive.from_proto(resource.disable_health_check),
host=Primitive.from_proto(resource.host),
healthy_threshold=Primitive.from_proto(resource.healthy_threshold),
unhealthy_threshold=Primitive.from_proto(resource.unhealthy_threshold),
restart_threshold=Primitive.from_proto(resource.restart_threshold),
check_interval=Primitive.from_proto(resource.check_interval),
timeout=Primitive.from_proto(resource.timeout),
)
class VersionHealthCheckArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionHealthCheck.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionHealthCheck.from_proto(i) for i in resources]
class VersionReadinessCheck(object):
def __init__(
self,
path: str = None,
host: str = None,
failure_threshold: int = None,
success_threshold: int = None,
check_interval: str = None,
timeout: str = None,
app_start_timeout: str = None,
):
self.path = path
self.host = host
self.failure_threshold = failure_threshold
self.success_threshold = success_threshold
self.check_interval = check_interval
self.timeout = timeout
self.app_start_timeout = app_start_timeout
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionReadinessCheck()
if Primitive.to_proto(resource.path):
res.path = Primitive.to_proto(resource.path)
if Primitive.to_proto(resource.host):
res.host = Primitive.to_proto(resource.host)
if Primitive.to_proto(resource.failure_threshold):
res.failure_threshold = Primitive.to_proto(resource.failure_threshold)
if Primitive.to_proto(resource.success_threshold):
res.success_threshold = Primitive.to_proto(resource.success_threshold)
if Primitive.to_proto(resource.check_interval):
res.check_interval = Primitive.to_proto(resource.check_interval)
if Primitive.to_proto(resource.timeout):
res.timeout = Primitive.to_proto(resource.timeout)
if Primitive.to_proto(resource.app_start_timeout):
res.app_start_timeout = Primitive.to_proto(resource.app_start_timeout)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionReadinessCheck(
path=Primitive.from_proto(resource.path),
host=Primitive.from_proto(resource.host),
failure_threshold=Primitive.from_proto(resource.failure_threshold),
success_threshold=Primitive.from_proto(resource.success_threshold),
check_interval=Primitive.from_proto(resource.check_interval),
timeout=Primitive.from_proto(resource.timeout),
app_start_timeout=Primitive.from_proto(resource.app_start_timeout),
)
class VersionReadinessCheckArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionReadinessCheck.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionReadinessCheck.from_proto(i) for i in resources]
class VersionLivenessCheck(object):
def __init__(
self,
path: str = None,
host: str = None,
failure_threshold: int = None,
success_threshold: int = None,
check_interval: str = None,
timeout: str = None,
initial_delay: str = None,
):
self.path = path
self.host = host
self.failure_threshold = failure_threshold
self.success_threshold = success_threshold
self.check_interval = check_interval
self.timeout = timeout
self.initial_delay = initial_delay
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionLivenessCheck()
if Primitive.to_proto(resource.path):
res.path = Primitive.to_proto(resource.path)
if Primitive.to_proto(resource.host):
res.host = Primitive.to_proto(resource.host)
if Primitive.to_proto(resource.failure_threshold):
res.failure_threshold = Primitive.to_proto(resource.failure_threshold)
if Primitive.to_proto(resource.success_threshold):
res.success_threshold = Primitive.to_proto(resource.success_threshold)
if Primitive.to_proto(resource.check_interval):
res.check_interval = Primitive.to_proto(resource.check_interval)
if Primitive.to_proto(resource.timeout):
res.timeout = Primitive.to_proto(resource.timeout)
if Primitive.to_proto(resource.initial_delay):
res.initial_delay = Primitive.to_proto(resource.initial_delay)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionLivenessCheck(
path=Primitive.from_proto(resource.path),
host=Primitive.from_proto(resource.host),
failure_threshold=Primitive.from_proto(resource.failure_threshold),
success_threshold=Primitive.from_proto(resource.success_threshold),
check_interval=Primitive.from_proto(resource.check_interval),
timeout=Primitive.from_proto(resource.timeout),
initial_delay=Primitive.from_proto(resource.initial_delay),
)
class VersionLivenessCheckArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionLivenessCheck.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionLivenessCheck.from_proto(i) for i in resources]
class VersionEntrypoint(object):
def __init__(self, shell: str = None):
self.shell = shell
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionEntrypoint()
if Primitive.to_proto(resource.shell):
res.shell = Primitive.to_proto(resource.shell)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionEntrypoint(shell=Primitive.from_proto(resource.shell),)
class VersionEntrypointArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionEntrypoint.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionEntrypoint.from_proto(i) for i in resources]
class VersionVPCAccessConnector(object):
def __init__(self, name: str = None):
self.name = name
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = version_pb2.AppengineVersionVPCAccessConnector()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return VersionVPCAccessConnector(name=Primitive.from_proto(resource.name),)
class VersionVPCAccessConnectorArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [VersionVPCAccessConnector.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [VersionVPCAccessConnector.from_proto(i) for i in resources]
class VersionInboundServicesEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionInboundServicesEnum.Value(
"AppengineVersionInboundServicesEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionInboundServicesEnum.Name(resource)[
len("AppengineVersionInboundServicesEnum") :
]
class VersionServingStatusEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionServingStatusEnum.Value(
"AppengineVersionServingStatusEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionServingStatusEnum.Name(resource)[
len("AppengineVersionServingStatusEnum") :
]
class VersionHandlersSecurityLevelEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersSecurityLevelEnum.Value(
"AppengineVersionHandlersSecurityLevelEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersSecurityLevelEnum.Name(resource)[
len("AppengineVersionHandlersSecurityLevelEnum") :
]
class VersionHandlersLoginEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersLoginEnum.Value(
"AppengineVersionHandlersLoginEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersLoginEnum.Name(resource)[
len("AppengineVersionHandlersLoginEnum") :
]
class VersionHandlersAuthFailActionEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersAuthFailActionEnum.Value(
"AppengineVersionHandlersAuthFailActionEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersAuthFailActionEnum.Name(resource)[
len("AppengineVersionHandlersAuthFailActionEnum") :
]
class VersionHandlersRedirectHttpResponseCodeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersRedirectHttpResponseCodeEnum.Value(
"AppengineVersionHandlersRedirectHttpResponseCodeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionHandlersRedirectHttpResponseCodeEnum.Name(
resource
)[len("AppengineVersionHandlersRedirectHttpResponseCodeEnum") :]
class VersionErrorHandlersErrorCodeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionErrorHandlersErrorCodeEnum.Value(
"AppengineVersionErrorHandlersErrorCodeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionErrorHandlersErrorCodeEnum.Name(resource)[
len("AppengineVersionErrorHandlersErrorCodeEnum") :
]
class VersionApiConfigAuthFailActionEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionApiConfigAuthFailActionEnum.Value(
"AppengineVersionApiConfigAuthFailActionEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionApiConfigAuthFailActionEnum.Name(resource)[
len("AppengineVersionApiConfigAuthFailActionEnum") :
]
class VersionApiConfigLoginEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionApiConfigLoginEnum.Value(
"AppengineVersionApiConfigLoginEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionApiConfigLoginEnum.Name(resource)[
len("AppengineVersionApiConfigLoginEnum") :
]
class VersionApiConfigSecurityLevelEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionApiConfigSecurityLevelEnum.Value(
"AppengineVersionApiConfigSecurityLevelEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return version_pb2.AppengineVersionApiConfigSecurityLevelEnum.Name(resource)[
len("AppengineVersionApiConfigSecurityLevelEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
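# Usage sketch (illustrative only, not part of the generated bindings): every
# wrapper class above exposes the same to_proto()/from_proto() pair, so plain
# Python objects can be round-tripped through their protobuf messages.
def _example_basic_scaling_round_trip():
    # Hypothetical helper; the field values are arbitrary examples.
    scaling = VersionBasicScaling(idle_timeout="300s", max_instances=10)
    pb = VersionBasicScaling.to_proto(scaling)  # AppengineVersionBasicScaling message
    restored = VersionBasicScaling.from_proto(pb)  # back to the Python wrapper
    return restored.max_instances == scaling.max_instances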
|
|
import unittest
from custom_set import CustomSet
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.3.0
class CustomSetTest(unittest.TestCase):
def test_sets_with_no_elements_are_empty(self):
sut = CustomSet()
self.assertIs(sut.isempty(), True)
def test_sets_with_elements_are_not_empty(self):
sut = CustomSet([1])
self.assertIs(sut.isempty(), False)
def test_empty_set_contains_nothing(self):
sut = CustomSet()
self.assertNotIn(1, sut)
def test_set_contains_when_element_in_set(self):
sut = CustomSet([1])
self.assertIn(1, sut)
    def test_set_does_not_contain_when_element_not_in_set(self):
sut = CustomSet([1, 2, 3])
self.assertNotIn(4, sut)
def test_empty_set_is_subset_of_another_empty_set(self):
set1 = CustomSet()
set2 = CustomSet()
self.assertIs(set1.issubset(set2), True)
def test_empty_set_is_subset_of_non_empty_set(self):
set1 = CustomSet()
set2 = CustomSet([1])
self.assertIs(set1.issubset(set2), True)
    def test_non_empty_set_is_not_subset_of_empty_set(self):
set1 = CustomSet([1])
set2 = CustomSet()
self.assertIs(set1.issubset(set2), False)
def test_set_is_subset_of_set_with_exact_same_elements(self):
set1 = CustomSet([1, 2, 3])
set2 = CustomSet([1, 2, 3])
self.assertIs(set1.issubset(set2), True)
def test_set_is_subset_of_larger_set_with_same_elements(self):
set1 = CustomSet([1, 2, 3])
set2 = CustomSet([4, 1, 2, 3])
self.assertIs(set1.issubset(set2), True)
def test_set_not_subset_of_set_that_does_not_contain_its_elements(self):
set1 = CustomSet([1, 2, 3])
set2 = CustomSet([4, 1, 3])
self.assertIs(set1.issubset(set2), False)
def test_empty_set_disjoint_with_itself(self):
set1 = CustomSet()
set2 = CustomSet()
self.assertIs(set1.isdisjoint(set2), True)
def test_empty_set_disjoint_with_non_empty_set(self):
set1 = CustomSet()
set2 = CustomSet([1])
self.assertIs(set1.isdisjoint(set2), True)
def test_non_empty_set_disjoint_with_empty_set(self):
set1 = CustomSet([1])
set2 = CustomSet()
self.assertIs(set1.isdisjoint(set2), True)
def test_sets_not_disjoint_if_element_is_shared(self):
set1 = CustomSet([1, 2])
set2 = CustomSet([2, 3])
self.assertIs(set1.isdisjoint(set2), False)
    def test_sets_disjoint_if_no_elements_are_shared(self):
set1 = CustomSet([1, 2])
set2 = CustomSet([3, 4])
self.assertIs(set1.isdisjoint(set2), True)
def test_empty_sets_are_equal(self):
set1 = CustomSet()
set2 = CustomSet()
self.assertEqual(set1, set2)
def test_empty_set_not_equal_to_non_empty_set(self):
set1 = CustomSet()
set2 = CustomSet([1, 2, 3])
self.assertNotEqual(set1, set2)
def test_non_empty_set_not_equal_to_empty_set(self):
set1 = CustomSet([1, 2, 3])
set2 = CustomSet()
self.assertNotEqual(set1, set2)
    def test_sets_with_exact_same_elements_are_equal(self):
set1 = CustomSet([1, 2])
set2 = CustomSet([2, 1])
self.assertEqual(set1, set2)
def test_sets_with_different_elements_are_not_equal(self):
set1 = CustomSet([1, 2, 3])
set2 = CustomSet([1, 2, 4])
self.assertNotEqual(set1, set2)
def test_set_is_not_equal_to_larger_set_with_same_elements(self):
set1 = CustomSet([1, 2, 3])
set2 = CustomSet([1, 2, 3, 4])
self.assertNotEqual(set1, set2)
def test_add_to_empty_set(self):
sut = CustomSet()
sut.add(1)
expected = CustomSet([1])
self.assertEqual(sut, expected)
def test_add_to_non_empty_set(self):
sut = CustomSet([1, 2, 4])
sut.add(3)
expected = CustomSet([1, 2, 3, 4])
self.assertEqual(sut, expected)
def test_adding_existing_element_does_not_change_set(self):
sut = CustomSet([1, 2, 3])
sut.add(3)
expected = CustomSet([1, 2, 3])
self.assertEqual(sut, expected)
def test_intersection_of_two_empty_sets_is_empty_set(self):
set1 = CustomSet()
set2 = CustomSet()
expected = CustomSet()
self.assertEqual(set1.intersection(set2), expected)
def test_intersection_of_empty_set_and_non_empty_set_is_empty_set(self):
set1 = CustomSet()
set2 = CustomSet([3, 2, 5])
expected = CustomSet()
self.assertEqual(set1.intersection(set2), expected)
def test_intersection_of_non_empty_set_and_empty_set_is_empty_set(self):
set1 = CustomSet([1, 2, 3, 4])
set2 = CustomSet()
expected = CustomSet()
self.assertEqual(set1.intersection(set2), expected)
def test_intersection_of_sets_with_no_shared_elements_is_empty_set(self):
set1 = CustomSet([1, 2, 3])
set2 = CustomSet([4, 5, 6])
expected = CustomSet()
self.assertEqual(set1.intersection(set2), expected)
def test_intersection_contains_shared_elements_only(self):
set1 = CustomSet([1, 2, 3, 4])
set2 = CustomSet([3, 2, 5])
expected = CustomSet([2, 3])
self.assertEqual(set1.intersection(set2), expected)
def test_difference_of_two_empty_sets_is_empty_set(self):
set1 = CustomSet()
set2 = CustomSet()
expected = CustomSet()
self.assertEqual(set1 - set2, expected)
def test_difference_of_empty_set_and_non_empty_set_is_empty_set(self):
set1 = CustomSet()
set2 = CustomSet([3, 2, 5])
expected = CustomSet()
self.assertEqual(set1 - set2, expected)
def test_difference_of_non_empty_set_and_empty_set_is_non_empty_set(self):
set1 = CustomSet([1, 2, 3, 4])
set2 = CustomSet()
expected = CustomSet([1, 2, 3, 4])
self.assertEqual(set1 - set2, expected)
def test_difference_of_non_empty_sets_elements_in_first_set_only(self):
set1 = CustomSet([3, 2, 1])
set2 = CustomSet([2, 4])
expected = CustomSet([1, 3])
self.assertEqual(set1 - set2, expected)
def test_union_of_empty_sets_is_empty_set(self):
set1 = CustomSet()
set2 = CustomSet()
expected = CustomSet()
self.assertEqual(set1 + set2, expected)
def test_union_of_empty_set_and_non_empty_set_is_the_non_empty_set(self):
set1 = CustomSet()
set2 = CustomSet([2])
expected = CustomSet([2])
self.assertEqual(set1 + set2, expected)
def test_union_of_non_empty_set_and_empty_set_is_the_non_empty_set(self):
set1 = CustomSet([1, 3])
set2 = CustomSet()
expected = CustomSet([1, 3])
self.assertEqual(set1 + set2, expected)
def test_union_of_non_empty_sets_contains_all_unique_elements(self):
set1 = CustomSet([1, 3])
set2 = CustomSet([2, 3])
expected = CustomSet([1, 2, 3])
self.assertEqual(set1 + set2, expected)
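# For reference, a minimal sketch of the interface these tests exercise
# (hypothetical, for illustration only; the implementation under test lives in
# custom_set.py and is imported above, so this class is deliberately named
# _CustomSetSketch to avoid shadowing it).
class _CustomSetSketch(object):
    def __init__(self, elements=None):
        self.elements = set(elements or [])
    def isempty(self):
        return not self.elements
    def __contains__(self, element):
        return element in self.elements
    def issubset(self, other):
        return self.elements <= other.elements
    def isdisjoint(self, other):
        return self.elements.isdisjoint(other.elements)
    def __eq__(self, other):
        return self.elements == other.elements
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(other)
    def add(self, element):
        self.elements.add(element)
    def intersection(self, other):
        return _CustomSetSketch(self.elements & other.elements)
    def __sub__(self, other):
        return _CustomSetSketch(self.elements - other.elements)
    def __add__(self, other):
        return _CustomSetSketch(self.elements | other.elements)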
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/local/bin/python2.7
from sys import exit, stdout, argv
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
import utils
import signal
from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from adversarial import Adversary
import obj
import config
#config.DEBUG = True
#config.n_truth = 5
#config.truth = 'resonanceType'
#config.adversary_mask = 0
'''
some global definitions
'''
obj.limit = 20
LEARNMASS = True
LEARNRHO = False
LEARNPT = True
DECORRMASS = True
DECORRRHO = False
DECORRPT = False
adv_loss_weights = [0.0001, 100]
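# ADV selects the training mode used further down: ADV > 0 runs the adversarial
# (pivoter) training block, ADV % 2 == 0 runs the classifier-only block, and
# ADV == 2 shortens the classifier-only stage because the network is pretrained.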
ADV = 0
NEPOCH = 20
APOSTLE = 'panda_5_akt'
system('cp %s models/train_%s.py'%(argv[0], APOSTLE))
'''
instantiate data loaders
'''
def make_coll(fpath):
coll = obj.PFSVCollection()
coll.add_categories(['singletons', 'pf'], fpath)
return coll
top = make_coll('/fastscratch/snarayan/pandaarrays/v1_akt//PARTITION/Top_*_CATEGORY.npy')
qcd = make_coll('/fastscratch/snarayan/pandaarrays/v1_akt//PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
# preload some data just to get the dimensions
if obj.limit is None:
data[0].objects['train']['pf'].load(memory=False)
dims = data[0].objects['train']['pf'].data.data.shape
else:
dims = (None, obj.limit, 9) # override
'''
first build the classifier!
'''
# set up data
opts = {'learn_mass':LEARNMASS,
'learn_pt':LEARNPT,
'learn_rho':LEARNRHO,
'normalize':False}
classifier_train_gen = obj.generatePF(data, partition='train', batch=502, **opts)
classifier_validation_gen = obj.generatePF(data, partition='validate', batch=1002, **opts)
classifier_test_gen = obj.generatePF(data, partition='test', batch=2, **opts)
test_i, test_o, test_w = next(classifier_test_gen)
#print test_i
inputs = Input(shape=(dims[1], dims[2]), name='input')
mass_inputs = Input(shape=(1,), name='mass_input')
rho_inputs = Input(shape=(1,), name='rho_input')
pt_inputs = Input(shape=(1,), name='pt_input')
norm = BatchNormalization(momentum=0.6, name='input_bnorm')(inputs)
conv = Conv1D(32, 2, activation='relu', name='conv0', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv0_bnorm')(conv)
conv = Conv1D(16, 4, activation='relu', name='conv1', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv1_bnorm')(conv)
lstm = LSTM(100, go_backwards=True, implementation=2, name='lstm')(norm)
norm = BatchNormalization(momentum=0.6, name='lstm_norm')(lstm)
# drop = Dropout(0.1)(norm)
drop = norm
dense = Dense(100, activation='relu', name='lstmdense', kernel_initializer='lecun_uniform')(drop)
norm = BatchNormalization(momentum=0.6, name='lstmdense_norm')(dense)
for i in xrange(1, 5):
    dense = Dense(50, activation='relu', name='dense%i' % i)(norm)
    norm = BatchNormalization(momentum=0.6, name='dense%i_norm' % i)(dense)
if LEARNMASS or LEARNPT or LEARNRHO:
    to_merge = [norm]
    if LEARNMASS:
        to_merge.append(mass_inputs)
    if LEARNRHO:
        to_merge.append(rho_inputs)
    if LEARNPT:
        to_merge.append(pt_inputs)
    merge = concatenate(to_merge)
    dense = Dense(50, activation='tanh', name='dense5a')(merge)
    norm = BatchNormalization(momentum=0.6, name='dense5a_norm')(dense)
    # dense = Dense(50, activation='tanh', name='dense5')(norm)
    # norm = BatchNormalization(momentum=0.6, name='dense5_norm')(dense)
else:
    dense = Dense(50, activation='tanh', name='dense5')(norm)
    norm = BatchNormalization(momentum=0.6, name='dense5_norm')(dense)
y_hat = Dense(config.n_truth, activation='softmax')(norm)
i = [inputs]
if LEARNMASS:
i.append(mass_inputs)
if LEARNRHO:
i.append(rho_inputs)
if LEARNPT:
i.append(pt_inputs)
classifier = Model(inputs=i, outputs=y_hat)
classifier.compile(optimizer=Adam(lr=0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
# print '########### CLASSIFIER ############'
# classifier.summary()
# print '###################################'
pred = classifier.predict(test_i)
# make ctrl+C trigger a graceful exit: save the current model, then quit
def save_classifier(name='classifier_conv', model=classifier):
model.save('models/%s_%s.h5'%(name, APOSTLE))
def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier):
save_classifier(name, model)
    # flog is only opened later, inside the training blocks, so guard the close
    if 'flog' in globals():
        flog.close()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
'''
now build the adversarial setup
'''
# set up data
opts = {'decorr_mass':DECORRMASS,
'decorr_rho':DECORRRHO,
'decorr_pt':DECORRPT,
'learn_mass':LEARNMASS,
'learn_pt':LEARNPT,
'learn_rho':LEARNRHO}
train_gen = obj.generatePF(data, partition='train', batch=502, **opts)
validation_gen = obj.generatePF(data, partition='validate', batch=1002, **opts)
test_gen = obj.generatePF(data, partition='test', batch=1, **opts)
# build the model
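# The Adversary head (imported from the local adversarial module) is expected to
# produce one decorrelation output per kinematic variable being decorrelated,
# which is why n_outputs below sums the DECORR* flags.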
kin_hats = Adversary(config.n_decorr_bins, n_outputs=(int(DECORRMASS)+int(DECORRPT)+int(DECORRRHO)), scale=0.0001)(y_hat)
# kin_hats = Adversary(config.n_decorr_bins, n_outputs=2, scale=0.01)(y_hat)
i = [inputs]
if LEARNMASS:
i.append(mass_inputs)
if LEARNRHO:
i.append(rho_inputs)
if LEARNPT:
i.append(pt_inputs)
pivoter = Model(inputs=i,
outputs=[y_hat]+kin_hats)
pivoter.compile(optimizer=Adam(lr=0.01),
loss=['categorical_crossentropy'] + ['categorical_crossentropy' for _ in kin_hats],
loss_weights=adv_loss_weights)
print '############# ARCHITECTURE #############'
pivoter.summary()
print '###################################'
'''
Now we train both models
'''
if ADV > 0:
print 'TRAINING ADVERSARIAL NETWORK'
system('mv logs/train_conv_adv.log logs/train_conv_adv.log.old')
flog = open('logs/train_conv_adv.log','w')
callback = LambdaCallback(
on_batch_end=lambda batch, logs: flog.write('%i,%f,%f,%f,%f\n'%(batch,logs['loss'],logs['dense_6_loss'],logs['dense_7_loss'],logs['dense_1_loss'])),
on_epoch_end=lambda epoch, logs: save_classifier(name='regularized_conv')
)
tb = TensorBoard(
log_dir = './logs/conv_logs',
write_graph = True,
write_images = True
)
print ' -Pre-training the classifier'
    # a bit of pre-training to get the classifier in the right place
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=200,
epochs=1)
save_classifier(name='pretrained_conv')
# np.set_printoptions(threshold='nan')
# print test_o
# print classifier.predict(test_i)
def save_and_exit(signal=None, frame=None, name='regularized_conv', model=classifier):
save_classifier(name, model)
flog.close()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
print ' -Training the adversarial stack'
# now train the model for real
system('rm -f models/regularized_conv_%s_*_*.h5'%(APOSTLE)) # clean checkpoints
pivoter.fit_generator(train_gen,
steps_per_epoch=1000,
epochs=NEPOCH*2,
callbacks = [ModelCheckpoint('models/regularized_conv_%s_{epoch:02d}_{val_loss:.5f}.h5'%APOSTLE)],
validation_data=validation_gen,
validation_steps=100
)
save_classifier(name='regularized_conv')
save_classifier(name='pivoter_conv', model=pivoter)
flog.close()
if ADV % 2 == 0:
print 'TRAINING CLASSIFIER ONLY'
system('mv logs/train_conv.log logs/train_conv.log.old')
flog = open('logs/train_conv.log','w')
callback = LambdaCallback(
on_batch_end=lambda batch, logs: flog.write('%i,%f\n'%(batch,logs['loss'])),
on_epoch_end=lambda epoch, logs: save_classifier(name='classifier_conv')
)
tb = TensorBoard(
log_dir = './logs/lstmnoreg_logs',
write_graph = True,
write_images = True
)
n_epochs = 1 if (ADV == 2) else 2 # fewer epochs if network is pretrained
n_epochs *= NEPOCH
def save_and_exit(signal=None, frame=None, name='classifier_conv', model=classifier):
save_classifier(name, model)
flog.close()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
system('rm -f models/classifier_conv_%s_*_*.h5'%(APOSTLE)) # clean checkpoints
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=1000,
epochs=n_epochs,
callbacks = [ModelCheckpoint('models/classifier_conv_%s_{epoch:02d}_{val_loss:.5f}.h5'%APOSTLE)],
validation_data=classifier_validation_gen,
validation_steps=100
)
save_classifier(name='classifier_conv')
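# Hedged standalone sketch (not part of the original script) of the ctrl+C
# pattern used above: signal.signal(signal.SIGINT, handler) installs a handler
# that checkpoints the model before exiting, so an interrupted run still
# leaves a usable .h5 file behind. The handler name below is illustrative.
#
#     def _graceful_exit(signum, frame):
#         save_classifier(name='interrupted_conv', model=classifier)
#         exit(1)
#     signal.signal(signal.SIGINT, _graceful_exit)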
|
|
#!/usr/bin/python
""" dicom_dao
Data Access Objects for persisting PyDicom DataSet objects.
Currently we support couchdb through the DicomCouch class.
Limitations:
- Private tags are discarded
TODO:
- Unit tests with multiple objects open at a time
- Unit tests with rtstruct objects
- Support for mongodb (mongo has more direct support for binary data)
Dependencies:
- PyDicom
- python-couchdb
Tested with:
- PyDicom 0.9.4-1
- python-couchdb 0.6
- couchdb 0.10.1
"""
#
# Copyright (c) 2010 Michael Wallace
# This file is released under the pydicom license.
# See the file license.txt included with the pydicom distribution, also
# available at https://github.com/darcymason/pydicom
#
import hashlib
import os
import string
import couchdb
import pydicom
def uid2str(uid):
""" Convert PyDicom uid to a string """
return repr(uid).strip("'")
# When reading files a VR of 'US or SS' is left as binary, because we don't
# know whether to interpret the values as unsigned or signed numbers. We
# therefore treat it as binary and will continue to do so until either pydicom
# works it out for us or we figure out a test.
BINARY_VR_VALUES = ['OW', 'OB', 'OW/OB', 'US or SS']
class DicomCouch(dict):
""" A Data Access Object for persisting PyDicom objects into CouchDB
We follow the same pattern as the python-couchdb library for getting and
    setting documents; for example, storing a pydicom.dataset.Dataset object dcm:
db = DicomCouch('http://localhost:5984/', 'dbname')
db[dcm.SeriesInstanceUID] = dcm
The only constraints on the key are that it must be json-serializable and
unique within the database instance. In theory it should be possible to
use any DICOM UID. Unfortunately I have written this code under the
assumption that SeriesInstanceUID will always be used. This will be fixed.
Retrieving object with key 'foo':
dcm = db['foo']
Deleting object with key 'foo':
dcm = db['foo']
db.delete(dcm)
TODO:
- It is possible to have couchdb assign a uid when adding objects. This
should be supported.
"""
def __init__(self, server, db):
""" Create connection to couchdb server/db """
super(DicomCouch, self).__init__()
self._meta = {}
server = couchdb.Server(server)
try:
self._db = server[db]
except couchdb.client.ResourceNotFound:
self._db = server.create(db)
def __getitem__(self, key):
""" Retrieve DICOM object with specified SeriesInstanceUID """
doc = self._db[key]
dcm = json2pydicom(doc)
if dcm.SeriesInstanceUID not in self._meta:
self._meta[dcm.SeriesInstanceUID] = {}
self._meta[dcm.SeriesInstanceUID]['hashes'] = {}
if '_attachments' in doc:
self.__get_attachments(dcm, doc)
_set_meta_info_dcm(dcm)
# Keep a copy of the couch doc for use in DELETE operations
self._meta[dcm.SeriesInstanceUID]['doc'] = doc
return dcm
def __setitem__(self, key, dcm):
""" Write the supplied DICOM object to the database """
try:
dcm.PixelData = dcm.pixel_array.tostring()
except AttributeError:
pass # Silently ignore errors due to pixel_array not existing
except NotImplementedError:
pass # Silently ignore attempts to modify compressed pixel data
except TypeError:
pass # Silently ignore errors due to PixelData not existing
jsn, binary_elements, file_meta_binary_elements = pydicom2json(dcm)
_strip_elements(jsn, binary_elements)
_strip_elements(jsn['file_meta'], file_meta_binary_elements)
if dcm.SeriesInstanceUID in self._meta:
self.__set_meta_info_jsn(jsn, dcm)
try: # Actually write to the db
self._db[key] = jsn
except TypeError as type_error:
if str(type_error) == 'string indices must be integers, not str':
pass
if dcm.SeriesInstanceUID not in self._meta:
self._meta[dcm.SeriesInstanceUID] = {}
self._meta[dcm.SeriesInstanceUID]['hashes'] = {}
self.__put_attachments(dcm, binary_elements, jsn)
# Get a local copy of the document
# We get this from couch because we get the _id, _rev and _attachments
# keys which will ensure we don't overwrite the attachments we just
# uploaded.
# I don't really like the extra HTTP GET and I think we can generate
# what we need without doing it. Don't have time to work out how yet.
self._meta[dcm.SeriesInstanceUID]['doc'] = \
self._db[dcm.SeriesInstanceUID]
def __str__(self):
""" Return the string representation of the couchdb client """
return str(self._db)
def __repr__(self):
""" Return the canonical string representation of the couchdb client """
return repr(self._db)
def __get_attachments(self, dcm, doc):
""" Set binary tags by retrieving attachments from couchdb.
Values are hashed so they are only updated if they have changed.
"""
for id in doc['_attachments'].keys():
tagstack = id.split(':')
value = self._db.get_attachment(doc['_id'], id)
_add_element(dcm, tagstack, value)
self._meta[dcm.SeriesInstanceUID]['hashes'][id] = hashlib.md5(value)
def __put_attachments(self, dcm, binary_elements, jsn):
""" Upload all new and modified attachments """
elements_to_update = \
[(tagstack, item)
for tagstack, item in binary_elements
if self.__attachment_update_needed(dcm,
_tagstack2id(tagstack + [item.tag]), item)
] # nopep8
for tagstack, element in elements_to_update:
id = _tagstack2id(tagstack + [element.tag])
self._db.put_attachment(jsn, element.value, id)
self._meta[dcm.SeriesInstanceUID]['hashes'][id] = \
hashlib.md5(element.value)
def delete(self, dcm):
""" Delete from database and remove meta info from the DAO """
self._db.delete(self._meta[dcm.SeriesInstanceUID]['doc'])
self._meta.pop(dcm.SeriesInstanceUID)
def __set_meta_info_jsn(self, jsn, dcm):
""" Set the couch-specific meta data for supplied dict """
jsn['_rev'] = self._meta[dcm.SeriesInstanceUID]['doc']['_rev']
if '_attachments' in self._meta[dcm.SeriesInstanceUID]['doc']:
jsn['_attachments'] = \
self._meta[dcm.SeriesInstanceUID]['doc']['_attachments']
def __attachment_update_needed(self, dcm, id, binary_element):
""" Compare hashes for binary element and return true if different """
try:
hashes = self._meta[dcm.SeriesInstanceUID]['hashes']
except KeyError:
return True # If no hashes dict then attachments do not exist
if id not in hashes or hashes[id].digest() != \
hashlib.md5(binary_element.value).digest():
return True
else:
return False
def _add_element(dcm, tagstack, value):
""" Add element with tag, vr and value to dcm at location tagstack """
current_node = dcm
for item in tagstack[:-1]:
try:
address = int(item)
except ValueError:
address = pydicom.tag.Tag(__str2tag(item))
current_node = current_node[address]
tag = __str2tag(tagstack[-1])
vr = pydicom.datadict.dictionaryVR(tag)
current_node[tag] = pydicom.dataelem.DataElement(tag, vr, value)
def _tagstack2id(tagstack):
""" Convert a list of tags to a unique (within document) attachment id """
return string.join([str(tag) for tag in tagstack], ':')
def _strip_elements(jsn, elements):
""" Remove supplied elements from the dict object
We use this with a list of binary elements so that we don't store
empty tags in couchdb when we are already storing the binary data as
attachments.
"""
for tagstack, element in elements:
if len(tagstack) == 0:
jsn.pop(element.tag)
else:
current_node = jsn
for tag in tagstack:
current_node = current_node[tag]
current_node.pop(element.tag)
def _set_meta_info_dcm(dcm):
""" Set the file metadata DataSet attributes
    This is done by PyDicom when we call pydicom.read_file(foo), but we need to
    do it ourselves when creating a DataSet from scratch; otherwise we cannot
    use foo.pixel_array or pydicom.write_file(foo).
This code is lifted from PyDicom.
"""
TransferSyntax = dcm.file_meta.TransferSyntaxUID
if TransferSyntax == pydicom.uid.ExplicitVRLittleEndian:
dcm.is_implicit_vr = False
dcm.is_little_endian = True # This line not in PyDicom
elif TransferSyntax == pydicom.uid.ImplicitVRLittleEndian:
dcm.is_implicit_vr = True
dcm.is_little_endian = True
elif TransferSyntax == pydicom.uid.ExplicitVRBigEndian:
dcm.is_implicit_vr = False
dcm.is_little_endian = False
elif TransferSyntax == pydicom.uid.DeflatedExplicitVRLittleEndian:
dcm.is_implicit_vr = False # Deleted lines above as it relates
dcm.is_little_endian = True # to reading compressed file data.
else:
# Any other syntax should be Explicit VR Little Endian,
# e.g. all Encapsulated (JPEG etc) are ExplVR-LE by
# Standard PS 3.5-2008 A.4 (p63)
dcm.is_implicit_vr = False
dcm.is_little_endian = True
def pydicom2json(dcm):
""" Convert the supplied PyDicom object into a json-serializable dict
    Binary elements cannot be represented in json, so we return these as a
    separate list of (tagstack, element) tuples, where:
- element = pydicom.dataelem.DataElement
- tagstack = list of tags/sequence IDs that address the element
The tagstack variable means we know the absolute address of each binary
element. We then use this as the attachment id in couchdb - when we
retrieve the attachment we can then insert it at the appropriate point in
the tree.
"""
dcm.remove_private_tags() # No support for now
dcm.decode() # Convert to unicode
binary_elements = []
tagstack = []
jsn = dict((key, __jsonify(dcm[key], binary_elements, tagstack))
for key in dcm.keys())
file_meta_binary_elements = []
jsn['file_meta'] = dict((key, __jsonify(dcm.file_meta[key],
file_meta_binary_elements, tagstack))
for key in dcm.file_meta.keys())
return jsn, binary_elements, file_meta_binary_elements
def __jsonify(element, binary_elements, tagstack):
""" Convert key, value to json-serializable types
Recursive, so if value is key/value pairs then all children will get
converted
"""
value = element.value
if element.VR in BINARY_VR_VALUES:
binary_elements.append((tagstack[:], element))
return ''
elif type(value) == list:
new_list = [__typemap(listvalue) for listvalue in value]
return new_list
elif type(value) == pydicom.sequence.Sequence:
tagstack.append(element.tag)
nested_data = []
for i in range(0, len(value)):
tagstack.append(i)
nested_data.append(dict(
(subkey, __jsonify(value[i][subkey], binary_elements, tagstack))
for subkey in value[i].keys()))
tagstack.pop()
tagstack.pop()
return nested_data
else:
return __typemap(value)
def __typemap(value):
""" Map PyDicom types that won't serialise to JSON types """
if type(value) == pydicom.uid.UID:
return uid2str(value)
elif isinstance(value, pydicom.tag.BaseTag):
return int(value)
else:
return value
def json2pydicom(jsn):
""" Convert the supplied json dict into a PyDicom object """
dataset = pydicom.dataset.Dataset()
# Don't try to convert couch specific tags
dicom_keys = [key for key in jsn.keys()
if key not in ['_rev', '_id', '_attachments', 'file_meta']]
for key in dicom_keys:
dataset.add(__dicomify(key, jsn[key]))
file_meta = pydicom.dataset.Dataset()
for key in jsn['file_meta']:
file_meta.add(__dicomify(key, jsn['file_meta'][key]))
dataset.file_meta = file_meta
return dataset
def __dicomify(key, value):
""" Convert a json key, value to a PyDicom DataElement """
tag = __str2tag(key)
    if tag.element == 0:  # 0 tag implies group length (filereader.py, pydicom)
vr = 'UL'
else:
vr = pydicom.datadict.dictionaryVR(tag)
if vr == 'OW/OB': # Always write pixel data as bytes
vr = 'OB' # rather than words
if vr == 'US or SS': # US or SS is up to us as the data is already
if value < 0: # decoded. We therefore choose US, unless we
vr = 'SS' # need a signed value.
else:
vr = 'US'
if vr == 'SQ': # We have a sequence of datasets, so we recurse
seq_list = [__make_dataset([__dicomify(subkey, listvalue[subkey])
for subkey in listvalue.keys()])
for listvalue in value
]
seq = pydicom.sequence.Sequence(seq_list)
return pydicom.dataelem.DataElement(tag, vr, seq)
else:
return pydicom.dataelem.DataElement(tag, vr, value)
def __make_dataset(data_elements):
""" Create a Dataset from a list of DataElement objects """
dataset = pydicom.dataset.Dataset()
for element in data_elements:
dataset.add(element)
return dataset
def __str2tag(key):
""" Convert string representation of a tag into a Tag """
return pydicom.tag.Tag((int(key[1:5], 16), int(key[7:-1], 16)))
if __name__ == '__main__':
TESTDB = 'dicom_test'
SERVER = 'http://127.0.0.1:5984'
# Delete test database if it already exists
couch = couchdb.Server(SERVER)
try:
couch.delete(TESTDB)
except couchdb.client.ResourceNotFound:
pass # Don't worry if it didn't exist
db = DicomCouch(SERVER, TESTDB)
testfiles_dir = '../testfiles'
testfiles = os.listdir('../testfiles')
testfiles = [x for x in testfiles if x.endswith('dcm')]
testfiles = [os.path.join('../testfiles', x) for x in testfiles]
for dcmfile in testfiles:
dcm = pydicom.read_file(dcmfile)
db[dcm.SeriesInstanceUID] = dcm
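    # Hedged round-trip sketch (not in the original script): read one stored
    # dataset back and delete it, exercising DicomCouch.__getitem__ and
    # DicomCouch.delete(). Assumes at least one test file was stored above.
    if testfiles:
        stored_uid = pydicom.read_file(testfiles[0]).SeriesInstanceUID
        stored_dcm = db[stored_uid]
        db.delete(stored_dcm)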
|
|
import datetime
from unittest.mock import patch
from django.apps import apps
from django.urls import reverse
from django.test import TestCase
from ditto.core.utils import datetime_from_str
from ditto.flickr import factories as flickrfactories
from ditto.lastfm import factories as lastfmfactories
from ditto.pinboard import factories as pinboardfactories
from ditto.twitter import factories as twitterfactories
class DittoViewTests(TestCase):
def test_home_templates(self):
"Overall home page uses the correct templates"
response = self.client.get(reverse("ditto:home"))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, "ditto/home.html")
self.assertTemplateUsed(response, "ditto/base.html")
def test_home_context(self):
flickr_accounts = flickrfactories.AccountFactory.create_batch(2)
flickrfactories.PhotoFactory.create_batch(
2, user=flickr_accounts[0].user
)
flickrfactories.PhotoFactory.create_batch(
2, user=flickr_accounts[1].user
)
lastfmfactories.ScrobbleFactory.create_batch(2)
pinboard_accounts = pinboardfactories.AccountFactory.create_batch(2)
pinboardfactories.BookmarkFactory.create_batch(
2, account=pinboard_accounts[0]
)
pinboardfactories.BookmarkFactory.create_batch(
2, account=pinboard_accounts[1]
)
twitter_accounts = twitterfactories.AccountFactory.create_batch(2)
twitterfactories.TweetFactory.create_batch(
2, user=twitter_accounts[0].user
)
twitterfactories.TweetFactory.create_batch(
2, user=twitter_accounts[1].user
)
response = self.client.get(reverse("ditto:home"))
self.assertTrue("object_list" in response.context)
# 4 photos, 4 bookmarks, 4 tweets:
self.assertTrue(len(response.context), 12)
def test_home_privacy_flickr(self):
"Overall home page does not display private Photos"
public_photo = flickrfactories.PhotoFactory(is_private=False)
flickrfactories.PhotoFactory(is_private=True)
response = self.client.get(reverse("ditto:home"))
self.assertEqual(len(response.context["object_list"]), 1)
        self.assertEqual(response.context["object_list"][0].pk, public_photo.pk)
def test_home_privacy_pinboard(self):
"Overall home page does not display private Bookmarks"
public_bookmark = pinboardfactories.BookmarkFactory(is_private=False)
pinboardfactories.BookmarkFactory(is_private=True)
response = self.client.get(reverse("ditto:home"))
self.assertEqual(len(response.context["object_list"]), 1)
        self.assertEqual(response.context["object_list"][0].pk, public_bookmark.pk)
def test_home_privacy_twitter_recent(self):
"Overall home page does not display private Tweets"
private_user = twitterfactories.UserFactory(is_private=True)
public_user = twitterfactories.UserFactory(is_private=False)
twitterfactories.AccountFactory(user=private_user)
twitterfactories.AccountFactory(user=public_user)
public_tweet_1 = twitterfactories.TweetFactory(user=public_user)
public_tweet_2 = twitterfactories.TweetFactory(user=public_user)
twitterfactories.TweetFactory(user=private_user)
response = self.client.get(reverse("ditto:home"))
tweets = response.context["object_list"]
self.assertEqual(len(tweets), 2)
self.assertEqual(tweets[0].pk, public_tweet_2.pk)
self.assertEqual(tweets[1].pk, public_tweet_1.pk)
def test_home_no_flickr(self):
"Shouldn't try to get photos if flickr app isn't installed"
with patch.object(apps, "is_installed") as mock_method:
# Fake it so it looks like ditto.flickr isn't installed:
mock_method.side_effect = lambda x: {
"ditto.flickr": False,
"ditto.lastfm": True,
"ditto.pinboard": True,
"ditto.twitter": True,
# Without this Django 1.10 throws an error for some reason:
"django.contrib.staticfiles": True,
}[x]
response = self.client.get(reverse("ditto:home"))
self.assertFalse("flickr_photo_list" in response.context)
def test_home_no_lastfm(self):
"Shouldn't try to get scrobbles if lastfm app isn't installed"
with patch.object(apps, "is_installed") as mock_method:
            # Fake it so it looks like ditto.lastfm isn't installed:
mock_method.side_effect = lambda x: {
"ditto.flickr": True,
"ditto.lastfm": False,
"ditto.pinboard": True,
"ditto.twitter": True,
# Without this Django 1.10 throws an error for some reason:
"django.contrib.staticfiles": True,
}[x]
response = self.client.get(reverse("ditto:home"))
self.assertFalse("lastfm_scrobble_list" in response.context)
def test_home_no_pinboard(self):
"Shouldn't try to get bookmarks if pinboard app isn't installed"
with patch.object(apps, "is_installed") as mock_method:
# Fake it so it looks like ditto.pinboard isn't installed:
mock_method.side_effect = lambda x: {
"ditto.flickr": True,
"ditto.lastfm": True,
"ditto.pinboard": False,
"ditto.twitter": True,
# Without this Django 1.10 throws an error for some reason:
"django.contrib.staticfiles": True,
}[x]
response = self.client.get(reverse("ditto:home"))
self.assertFalse("pinboard_bookmark_list" in response.context)
def test_home_no_twitter(self):
"Shouldn't try to get tweets if twitter app isn't installed"
with patch.object(apps, "is_installed") as mock_method:
# Fake it so it looks like ditto.twitter isn't installed:
mock_method.side_effect = lambda x: {
"ditto.flickr": True,
"ditto.lastfm": True,
"ditto.pinboard": True,
"ditto.twitter": False,
# Without this Django 1.10 throws an error for some reason:
"django.contrib.staticfiles": True,
}[x]
response = self.client.get(reverse("ditto:home"))
self.assertFalse("twitter_tweet_list" in response.context)
# def test_tag_list_templates(self):
# "Uses the correct templates"
# response = self.client.get(reverse('ditto:tag_list'))
# self.assertEquals(response.status_code, 200)
# self.assertTemplateUsed(response, 'ditto/tag_list.html')
# self.assertTemplateUsed(response, 'ditto/base.html')
# def test_tag_detail_templates(self):
# "Uses the correct templates"
# bookmark = pinboardfactories.BookmarkFactory.create()
# bookmark.tags.set(['fish'])
# response = self.client.get(reverse('ditto:tag_detail',
# kwargs={'slug': 'fish'}))
# self.assertEquals(response.status_code, 200)
# self.assertTemplateUsed(response, 'ditto/tag_detail.html')
# self.assertTemplateUsed(response, 'ditto/base.html')
# def test_tag_detail_context(self):
# "Sends correct data to templates"
# bookmark_1 = pinboardfactories.BookmarkFactory.create(title='Carp')
# bookmark_1.tags.set(['Fish', 'carp'])
# bookmark_2 = pinboardfactories.BookmarkFactory.create(title='Cod')
# bookmark_2.tags.set(['Fish', 'cod'])
# bookmark_3 = pinboardfactories.BookmarkFactory.create(title='Dog')
# bookmark_3.tags.set(['mammals', 'dog'])
# response = self.client.get(reverse('ditto:tag_detail',
# kwargs={'slug': 'fish'}))
# self.assertTrue('tag' in response.context)
# self.assertEqual(response.context['tag'], 'fish')
# self.assertTrue('bookmark_list' in response.context)
# self.assertEqual(len(response.context['bookmark_list']), 2)
# self.assertEqual(response.context['bookmark_list'][0].title, 'Cod')
# self.assertEqual(response.context['bookmark_list'][1].title, 'Carp')
# def test_tag_detail_privacy(self):
# "Does not display private bookmarks"
# bookmark_1 = pinboardfactories.BookmarkFactory.create(is_private=True)
# bookmark_1.tags.set(['fish'])
# bookmark_2 = pinboardfactories.BookmarkFactory.create(is_private=False)
# bookmark_2.tags.set(['fish'])
# response = self.client.get(reverse('ditto:tag_detail',
# kwargs={'slug': 'fish'}))
# self.assertTrue('bookmark_list' in response.context)
# self.assertEqual(len(response.context['bookmark_list']), 1)
# self.assertEqual(response.context['bookmark_list'][0].pk, bookmark_2.pk)
class DittoDayArchiveTestCase(TestCase):
def setUp(self):
self.today = datetime_from_str("2015-11-10 12:00:00")
self.tomorrow = self.today + datetime.timedelta(days=1)
self.yesterday = self.today - datetime.timedelta(days=1)
fl_account = flickrfactories.AccountFactory()
self.photo_1 = flickrfactories.PhotoFactory(
post_time=self.today, user=fl_account.user
)
self.photo_2 = flickrfactories.PhotoFactory(
post_time=self.tomorrow, user=fl_account.user
)
self.scrobble_1 = lastfmfactories.ScrobbleFactory(post_time=self.today)
self.scrobble_2 = lastfmfactories.ScrobbleFactory(post_time=self.tomorrow)
self.bookmark_1 = pinboardfactories.BookmarkFactory(post_time=self.today)
self.bookmark_2 = pinboardfactories.BookmarkFactory(post_time=self.tomorrow)
tw_account = twitterfactories.AccountFactory()
self.tweet_1 = twitterfactories.TweetFactory(
post_time=self.today, user=tw_account.user
)
self.tweet_2 = twitterfactories.TweetFactory(
post_time=self.tomorrow, user=tw_account.user
)
self.favorite_1 = twitterfactories.TweetFactory(post_time=self.today)
self.favorite_2 = twitterfactories.TweetFactory(post_time=self.tomorrow)
tw_account.user.favorites.add(self.favorite_1)
def make_url(self, app_slug=None, variety_slug=None):
kwargs = {
"year": 2015,
"month": "11",
"day": "10",
}
if app_slug is not None:
kwargs["app"] = app_slug
if variety_slug is not None:
kwargs["variety"] = variety_slug
return reverse("ditto:day_archive", kwargs=kwargs)
def test_no_app(self):
"Should redirect to default app and variety."
response = self.client.get(self.make_url())
self.assertRedirects(response, "/2015/11/10/flickr/photos")
def test_success_flickr_photos(self):
response = self.client.get(self.make_url("flickr", "photos"))
self.assertEquals(response.status_code, 200)
def test_success_lastfm(self):
response = self.client.get(self.make_url("lastfm", "listens"))
self.assertEquals(response.status_code, 200)
def test_success_pinboard(self):
response = self.client.get(self.make_url("pinboard", "bookmarks"))
self.assertEquals(response.status_code, 200)
def test_success_twitter_tweets(self):
response = self.client.get(self.make_url("twitter", "tweets"))
self.assertEquals(response.status_code, 200)
def test_success_twitter_favorites(self):
response = self.client.get(self.make_url("twitter", "likes"))
self.assertEquals(response.status_code, 200)
def test_day_templates(self):
"Day archive page uses the correct templates"
response = self.client.get(self.make_url("pinboard", "bookmarks"))
self.assertTemplateUsed(response, "ditto/archive_day.html")
self.assertTemplateUsed(response, "ditto/base.html")
self.assertTemplateUsed(response, "ditto/includes/item_lists.html")
def test_day_context(self):
"General items that are in context for all Day pages."
response = self.client.get(self.make_url("pinboard", "bookmarks"))
self.assertTrue("day" in response.context)
self.assertEqual(response.context["day"], self.today.date())
self.assertTrue("previous_day" in response.context)
self.assertEqual(response.context["previous_day"], self.yesterday.date())
self.assertTrue("next_day" in response.context)
self.assertEqual(response.context["next_day"], self.tomorrow.date())
self.assertTrue("variety_counts" in response.context)
def test_day_context_flickr_photos_uploaded(self):
response = self.client.get(self.make_url("flickr", "photos"))
self.assertTrue("date_field" in response.context)
self.assertEqual(response.context["date_field"], "post_time")
self.assertTrue("flickr_photo_list" in response.context)
self.assertEqual(1, len(response.context["flickr_photo_list"]))
self.assertEqual(response.context["flickr_photo_list"][0].pk, self.photo_1.pk)
def test_day_context_flickr_photos_taken(self):
self.photo_2.taken_time = self.today
self.photo_2.save()
response = self.client.get(self.make_url("flickr", "photos/taken"))
self.assertTrue("date_field" in response.context)
self.assertEqual(response.context["date_field"], "taken_time")
self.assertTrue("flickr_photo_list" in response.context)
self.assertEqual(1, len(response.context["flickr_photo_list"]))
self.assertEqual(response.context["flickr_photo_list"][0].pk, self.photo_2.pk)
def test_day_context_lastfm_scrobbles(self):
response = self.client.get(self.make_url("lastfm", "listens"))
self.assertTrue("lastfm_scrobble_list" in response.context)
self.assertEqual(1, len(response.context["lastfm_scrobble_list"]))
self.assertEqual(
response.context["lastfm_scrobble_list"][0].pk, self.scrobble_1.pk
)
def test_day_context_pinboard_bookmarks(self):
response = self.client.get(self.make_url("pinboard", "bookmarks"))
self.assertTrue("pinboard_bookmark_list" in response.context)
self.assertEqual(1, len(response.context["pinboard_bookmark_list"]))
self.assertEqual(
response.context["pinboard_bookmark_list"][0].pk, self.bookmark_1.pk
)
def test_day_context_twitter_tweets(self):
"Only shows items from the specified day."
response = self.client.get(self.make_url("twitter", "tweets"))
self.assertTrue("twitter_tweet_list" in response.context)
self.assertEqual(1, len(response.context["twitter_tweet_list"]))
self.assertEqual(response.context["twitter_tweet_list"][0].pk, self.tweet_1.pk)
def test_day_context_twitter_favorites(self):
response = self.client.get(self.make_url("twitter", "likes"))
self.assertTrue("twitter_favorite_list" in response.context)
self.assertEqual(1, len(response.context["twitter_favorite_list"]))
self.assertEqual(
response.context["twitter_favorite_list"][0].pk, self.favorite_1.pk
)
def test_day_privacy_flickr_photos(self):
"Doesn't show private items."
self.photo_1.is_private = True
self.photo_1.save()
response = self.client.get(self.make_url("flickr", "photos"))
self.assertEqual(0, len(response.context["flickr_photo_list"]))
def test_day_privacy_pinboard_bookmarks(self):
"Doesn't show private items."
self.bookmark_1.is_private = True
self.bookmark_1.save()
response = self.client.get(self.make_url("pinboard", "bookmarks"))
self.assertEqual(0, len(response.context["pinboard_bookmark_list"]))
def test_day_privacy_twitter_tweets(self):
"Doesn't show private items."
self.tweet_1.user.is_private = True
self.tweet_1.user.save()
response = self.client.get(self.make_url("twitter", "tweets"))
self.assertEqual(0, len(response.context["twitter_tweet_list"]))
def test_day_privacy_twitter_favorites(self):
"Doesn't show private items."
self.favorite_1.user.is_private = True
self.favorite_1.user.save()
response = self.client.get(self.make_url("twitter", "likes"))
self.assertEqual(0, len(response.context["twitter_favorite_list"]))
|
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import (
oauth_idp_config_pb2,
)
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import (
oauth_idp_config_pb2_grpc,
)
from typing import List
class OAuthIdpConfig(object):
def __init__(
self,
name: str = None,
client_id: str = None,
issuer: str = None,
display_name: str = None,
enabled: bool = None,
client_secret: str = None,
response_type: dict = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.client_id = client_id
self.issuer = issuer
self.display_name = display_name
self.enabled = enabled
self.client_secret = client_secret
self.response_type = response_type
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = oauth_idp_config_pb2_grpc.IdentitytoolkitOAuthIdpConfigServiceStub(
channel.Channel()
)
request = oauth_idp_config_pb2.ApplyIdentitytoolkitOAuthIdpConfigRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.client_id):
request.resource.client_id = Primitive.to_proto(self.client_id)
if Primitive.to_proto(self.issuer):
request.resource.issuer = Primitive.to_proto(self.issuer)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.enabled):
request.resource.enabled = Primitive.to_proto(self.enabled)
if Primitive.to_proto(self.client_secret):
request.resource.client_secret = Primitive.to_proto(self.client_secret)
if OAuthIdpConfigResponseType.to_proto(self.response_type):
request.resource.response_type.CopyFrom(
OAuthIdpConfigResponseType.to_proto(self.response_type)
)
else:
request.resource.ClearField("response_type")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyIdentitytoolkitOAuthIdpConfig(request)
self.name = Primitive.from_proto(response.name)
self.client_id = Primitive.from_proto(response.client_id)
self.issuer = Primitive.from_proto(response.issuer)
self.display_name = Primitive.from_proto(response.display_name)
self.enabled = Primitive.from_proto(response.enabled)
self.client_secret = Primitive.from_proto(response.client_secret)
self.response_type = OAuthIdpConfigResponseType.from_proto(
response.response_type
)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = oauth_idp_config_pb2_grpc.IdentitytoolkitOAuthIdpConfigServiceStub(
channel.Channel()
)
request = oauth_idp_config_pb2.DeleteIdentitytoolkitOAuthIdpConfigRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.client_id):
request.resource.client_id = Primitive.to_proto(self.client_id)
if Primitive.to_proto(self.issuer):
request.resource.issuer = Primitive.to_proto(self.issuer)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.enabled):
request.resource.enabled = Primitive.to_proto(self.enabled)
if Primitive.to_proto(self.client_secret):
request.resource.client_secret = Primitive.to_proto(self.client_secret)
if OAuthIdpConfigResponseType.to_proto(self.response_type):
request.resource.response_type.CopyFrom(
OAuthIdpConfigResponseType.to_proto(self.response_type)
)
else:
request.resource.ClearField("response_type")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteIdentitytoolkitOAuthIdpConfig(request)
@classmethod
def list(self, project, service_account_file=""):
stub = oauth_idp_config_pb2_grpc.IdentitytoolkitOAuthIdpConfigServiceStub(
channel.Channel()
)
request = oauth_idp_config_pb2.ListIdentitytoolkitOAuthIdpConfigRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListIdentitytoolkitOAuthIdpConfig(request).items
def to_proto(self):
resource = oauth_idp_config_pb2.IdentitytoolkitOAuthIdpConfig()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.client_id):
resource.client_id = Primitive.to_proto(self.client_id)
if Primitive.to_proto(self.issuer):
resource.issuer = Primitive.to_proto(self.issuer)
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.enabled):
resource.enabled = Primitive.to_proto(self.enabled)
if Primitive.to_proto(self.client_secret):
resource.client_secret = Primitive.to_proto(self.client_secret)
if OAuthIdpConfigResponseType.to_proto(self.response_type):
resource.response_type.CopyFrom(
OAuthIdpConfigResponseType.to_proto(self.response_type)
)
else:
resource.ClearField("response_type")
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class OAuthIdpConfigResponseType(object):
def __init__(self, id_token: bool = None, code: bool = None, token: bool = None):
self.id_token = id_token
self.code = code
self.token = token
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = oauth_idp_config_pb2.IdentitytoolkitOAuthIdpConfigResponseType()
if Primitive.to_proto(resource.id_token):
res.id_token = Primitive.to_proto(resource.id_token)
if Primitive.to_proto(resource.code):
res.code = Primitive.to_proto(resource.code)
if Primitive.to_proto(resource.token):
res.token = Primitive.to_proto(resource.token)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return OAuthIdpConfigResponseType(
id_token=Primitive.from_proto(resource.id_token),
code=Primitive.from_proto(resource.code),
token=Primitive.from_proto(resource.token),
)
class OAuthIdpConfigResponseTypeArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [OAuthIdpConfigResponseType.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [OAuthIdpConfigResponseType.from_proto(i) for i in resources]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
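# Hedged usage sketch (not part of the generated module): construct a config
# and apply it via the gRPC stub. All values below are placeholders.
#
#     config = OAuthIdpConfig(
#         name="oidc.example.com",
#         client_id="example-client-id",
#         issuer="https://accounts.example.com",
#         display_name="Example IdP",
#         enabled=True,
#         response_type=OAuthIdpConfigResponseType(id_token=True),
#         project="example-project",
#         service_account_file="/path/to/service_account.json",
#     )
#     config.apply()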
|
|
import functools
import logging
from pathlib import Path
from typing import Callable, List, Set, Union # noqa: F401
from robot import __VERSION__
from robot.board import BoardList, TBoard
from robot.camera import Camera
from robot.game import GameMode, GameState, Zone, kill_after_delay
from robot.game_specific import GAME_DURATION_SECONDS
from robot.motor import MotorBoard
from robot.power import PowerBoard
from robot.servo import ServoBoard
_PathLike = Union[str, Path]
LOGGER = logging.getLogger(__name__)
def configure_logging() -> None:
"""
Configure basic logging.
This has us outputting ``logging.INFO`` and higher messages. This function
is called within ``Robot.__init__`` for convenience, though as it uses
``logging.basicConfig`` it is a no-op if logging has already been configured.
"""
logging.basicConfig(level=logging.INFO)
class Robot:
"""
Core class of the Robot API.
This class provides access to the various boards which comprise the API.
Internally it:
- Speaks to robotd over unix socket
- Always grabs from sockets, avoids caching
"""
ROBOTD_ADDRESS = "/var/robotd"
def __init__(
self,
robotd_path: _PathLike = ROBOTD_ADDRESS,
wait_for_start_button: bool = True,
) -> None:
self.robotd_path = Path(robotd_path)
self.known_power_boards = [] # type: List[PowerBoard]
self.known_motor_boards = [] # type: List[MotorBoard]
self.known_servo_boards = [] # type: List[ServoBoard]
self.known_cameras = [] # type: List[Camera]
self.known_gamestates = [] # type: List[GameState]
configure_logging()
LOGGER.info("Robot (v{}) Initialising...".format(__VERSION__))
self._assert_has_power_board()
self.power_board.power_on()
if wait_for_start_button:
self.power_board.wait_start()
def _assert_has_power_board(self):
power_boards = self.power_boards
if not power_boards:
raise RuntimeError('Cannot find Power Board!')
def _update_boards(
self,
known_boards: List[TBoard],
board_type: Callable[[_PathLike], TBoard],
directory_name: _PathLike,
) -> BoardList[TBoard]:
"""
Update the number of boards against the known boards.
:param known_boards: The list of all currently known boards; this list
will be updated with any newly found boards.
:param board_type: The type of board to create.
:param directory_name: The relative directory to look in for new boards.
:return: A ``BoardList[TBoard]`` of all the known boards (both
previously known and newly found).
"""
known_paths = {x.socket_path for x in known_boards} # type: Set[Path]
boards_dir = self.robotd_path / directory_name # type: Path
new_paths = set(boards_dir.glob('*')) # type: Set[Path]
# Add all boards that weren't previously there
for board_path in new_paths - known_paths:
LOGGER.info("New board found: '%s'", board_path)
try:
new_board = board_type(board_path)
known_boards.append(new_board)
except (FileNotFoundError, ConnectionRefusedError):
LOGGER.warning(
"Could not connect to the board: '%s'",
board_path,
exc_info=True,
)
return BoardList(known_boards)
@property
def motor_boards(self) -> BoardList[MotorBoard]:
"""
:return: A ``BoardList`` of available ``MotorBoard``s.
"""
return self._update_boards(self.known_motor_boards, MotorBoard, 'motor')
def _on_start_signal(self):
game_state = self._game
LOGGER.info(
"Received start signal in %s mode, zone %d",
game_state.mode.value,
game_state.zone,
)
if game_state.mode == GameMode.COMPETITION:
kill_after_delay(GAME_DURATION_SECONDS)
@property
def power_boards(self) -> BoardList[PowerBoard]:
"""
:return: A ``BoardList`` of available ``PowerBoard``s.
"""
return self._update_boards(
self.known_power_boards,
functools.partial(PowerBoard, on_start_signal=self._on_start_signal),
'power',
)
@property
def servo_boards(self) -> BoardList[ServoBoard]:
"""
:return: A ``BoardList`` of available ``ServoBoard``s.
"""
return self._update_boards(
self.known_servo_boards,
ServoBoard,
'servo_assembly',
)
@property
def cameras(self) -> BoardList[Camera]:
"""
:return: A ``BoardList`` of available cameras.
"""
return self._update_boards(self.known_cameras, Camera, 'camera')
@property
def _games(self) -> BoardList[GameState]:
"""
        :return: A ``BoardList`` of available ``GameState`` objects.
"""
return self._update_boards(self.known_gamestates, GameState, 'game')
@staticmethod
def _single_index(name, list_of_boards: BoardList[TBoard]) -> TBoard:
if list_of_boards:
return list_of_boards[0]
else:
raise AttributeError("No {} connected".format(name))
@property
def power_board(self) -> PowerBoard:
"""
:return: The first ``PowerBoard``, if attached.
Raises an ``AttributeError`` if there are no power boards attached.
"""
return self._single_index("power boards", self.power_boards)
@property
def motor_board(self) -> MotorBoard:
"""
:return: The first ``MotorBoard``, if attached.
Raises an ``AttributeError`` if there are no motor boards attached.
"""
return self._single_index("motor boards", self.motor_boards)
@property
def servo_board(self) -> ServoBoard:
"""
:return: The first ``ServoBoard``, if attached.
Raises an ``AttributeError`` if there are no servo boards attached.
"""
return self._single_index("servo boards", self.servo_boards)
@property
def camera(self) -> Camera:
"""
:return: The first ``Camera``, if attached.
Raises an ``AttributeError`` if there are no cameras attached.
"""
return self._single_index("cameras", self.cameras)
@property
def _game(self) -> GameState:
"""
        :return: The first ``GameState``, if any.
        Raises an ``AttributeError`` if there is no game state configured.
"""
return self._single_index("game states", self._games)
@property
def zone(self) -> Zone:
"""
The zone the robot is in.
        This is changed by inserting a competition zone USB stick into the
        robot; the value defaults to 0 if no stick is plugged in.
:return: ID of the zone the robot started in (0-3)
"""
return self._game.zone
@property
def mode(self) -> GameMode:
"""
The ``GameMode`` the robot is in.
:return: one of ``GameMode.COMPETITION`` or ``GameMode.DEVELOPMENT``.
"""
return self._game.mode
def close(self):
"""
Cleanup robot instance.
"""
for board_group in (
self.known_power_boards,
self.known_motor_boards,
self.known_servo_boards,
self.known_cameras,
self.known_gamestates,
):
for board in board_group:
board.close()
# Clear the group so that any further access doesn't accidentally
# reanimate the boards (which isn't supported).
del board_group[:]
def __del__(self):
self.close()
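# Hedged usage sketch (not part of the original module; assumes robotd is
# running and at least one power board is connected; the import path is an
# assumption):
#
#     from robot import Robot
#     r = Robot(wait_for_start_button=False)
#     print(r.zone, r.mode)   # properties defined above
#     r.close()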
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
return start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
#Solarcoin: Disabled RBF
#self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
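# Hedged illustration (not part of the original test) of the BIP125 signalling
# check that is_opt_in() applies above: any input sequence below 0xfffffffe
# opts the transaction in to replacement.
#
#     def signals_rbf(decoded_tx):
#         return any(vin["sequence"] < 0xfffffffe for vin in decoded_tx["vin"])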
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: Update.py
#
# This class provides mechanisms to update, reply to, retweet status
# messages and send direct messages
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
import sys, time, json, logging
from sochi.twitter.Login import Login
from sochi.twitter.TwitterBase import TwitterBase
from sochi.twitter.auth_settings import *
class Update(TwitterBase):
def __init__(self,
name="Update",
logger=None,
args=(),
kwargs={}):
TwitterBase.__init__(self, name=name, logger=logger,
args=args, kwargs=kwargs)
self.update_url ="https://api.twitter.com/1.1/statuses/update.json"
self.retweet_url ="https://api.twitter.com/1.1/statuses/retweet/"
self.direct_url ="https://api.twitter.com/1.1/direct_messages/new.json"
self.status_update = False
self.status_retweet = False
self.direct_message = False
self.max_status_len = 140
self.set_request_type_as_status_update()
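    # Hedged usage sketch (not part of the original class): after login (see
    # main() below for the Login/set_auth_obj sequence), a plain status post is
    #
    #     twit = Update()
    #     twit.set_request_type_as_status_update()
    #     twit.set_status(status="Hello from the API", doit=True)
    #
    # where doit=True makes set_status() issue the request immediately.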
##
# Sets the type of request to a status update
#
def set_request_type_as_status_update(self):
if( not self.querying ):
self.status_update = True
self.status_retweet = False
self.direct_message = False
self.clear_request_params()
self.set_request_domain(self.update_url)
##
# Sets the type of request to a retweet request
#
def set_request_type_as_retweet(self):
if( not self.querying ):
self.status_update = False
self.status_retweet = True
self.direct_message = False
self.clear_request_params()
self.set_request_domain(self.retweet_url)
##
# Sets the type of request to direct message
#
def set_request_type_as_direct_message(self):
if( not self.querying ):
self.status_update = False
self.status_retweet = False
self.direct_message = True
self.clear_request_params()
self.set_request_domain(self.direct_url)
##
# Sets the status to be set when the request is made
#
def set_status(self, status=None, doit=False):
if( not self.querying ):
if( status and self.status_update):
status = self._trim_status(status)
self.set_request_param(kw="status", val=status)
if( doit ):
if( self.running ):
self.start_request()
else:
self.make_request()
else:
self.clear_request_params()
##
# Sets whether or not this status message is in reply to another message
#
def set_in_reply_to(self, status_id=None):
if( not self.querying ):
if( status_id and self.status_update):
self.set_request_param(kw="in_reply_to_status_id", val=str(status_id))
else:
self.clear_request_params()
##
# Sets the latitude and longitude
#
def set_location(self, lat=None, lon=None):
if( not self.querying ):
if( lat and lon and self.status_update ):
self.set_request_param(kw="lat", val=str(lat))
self.set_request_param(kw="long", val=str(lon))
else:
self.clear_request_params()
##
# Sets the status to be an @ reply to the specified user
#
def set_at_reply_message(self, username=None, status=None, doit=False):
if( not self.querying ):
            if( username and status and self.status_update ):
status = "@"+str(username)+" "+str(status)
status = self._trim_status(status)
self.set_request_param(kw="status", val=status)
if( doit ):
if( self.running ):
self.start_request()
else:
self.make_request()
elif( status ):
self.set_status(status=status,doit=doit)
else:
self.clear_request_params()
##
# Sets a direct message to be sent to a specific user either using
# username or user_id
#
def set_direct_message(self, username=None, user_id=None, status=None, doit=False):
if( not self.querying ):
if( (username or user_id) and status and self.direct_message ):
status = self._trim_status(status)
self.set_request_param(kw="text", val=status)
if( username ):
self.set_request_param(kw="screen_name", val=username)
if( user_id ):
self.set_request_param(kw="user_id", val=user_id)
if( doit ):
if( self.running ):
self.start_request()
else:
self.make_request()
else:
self.clear_request_params()
##
# Will retweet the specified tweet ID
#
def set_retweet(self, tweet_id=None, doit=False):
if( not self.querying ):
if( tweet_id and self.status_retweet ):
url = self.retweet_url+str(tweet_id)+".json"
self.clear_request_params()
self.set_request_domain(url)
if( doit ):
if( self.running ):
self.start_request()
else:
self.make_request()
else:
self.clear_request_params()
##
    # Trim the status message to fit the 140-character limit of Twitter
#
def _trim_status(self, status=None):
if( status ):
status = unicode(status)
if( len(status) > self.max_status_len ):
mesg = "Status too long, truncated."
self.logger.info(mesg)
mesg = "Old status: \"%s\""%(status)
self.logger.info(mesg)
status = status[:self.max_status_len]
mesg = "New status: \"%s\""%(status)
self.logger.info(mesg)
return status
##
# Basically a cheap version of make_request for a status update
#
def update_status(self):
if( self.running ):
self.start_request()
else:
self.make_request()
##
#
#
def make_request(self):
# this code is not reentrant, don't make the request twice
if( self.querying ):
return
self.querying = True
try:
# this must be a POST request as defined by the "Update" spec
#print "domain",self.get_request_domain()
#print "payload",self.get_request_params()
self.set_request(domain=self.get_request_domain(),
method="POST",
payload=self.get_request_params())
request_results = self._make_request(request=self._request_data)
js = None
if( request_results or request_results.text ):
try:
#print request_results.text
js = request_results.json()
except ValueError, e:
mesg = "JSON ValueError: "+str(e)
self.logger.info(mesg)
js = None
if( js ):
#print json.dumps(js, sort_keys=True, indent=4)
self.put_message(m=js)
self.querying = False
except:
self.querying = False
raise
return
def parse_params(argv):
auth = None
user = None
status = None
direct = None
retweet = None
favorite = None
json = False
limits = False
debug = False
logging = False
pc = 1
while( pc < len(argv) ):
param = argv[pc]
if( param == "-auth"):
pc += 1
auth = argv[pc]
if( param == "-user"):
pc += 1
user = argv[pc]
if( param == "-status"):
pc += 1
status = argv[pc]
if( param == "-s"):
pc += 1
status = argv[pc]
if( param == "-direct"):
pc += 1
direct = argv[pc]
if( param == "-d"):
pc += 1
direct = argv[pc]
if( param == "-retweet"):
pc += 1
retweet = argv[pc]
if( param == "-r"):
pc += 1
retweet = argv[pc]
if( param == "-favorite"):
pc += 1
favorite = argv[pc]
if( param == "-f"):
pc += 1
favorite = argv[pc]
if( param == "-log"):
logging = True
if( param == "-debug"):
debug = True
if( param == "-json"):
json = True
if( param == "-limits"):
limits = True
pc += 1
return {'auth':auth, 'user':user,
'status':status, 'direct':direct, 'retweet':retweet, 'favorite':favorite,
'logging':logging, 'debug':debug, 'json':json, 'limits':limits }
def usage(argv):
print "USAGE: python %s -auth <appname> -user <auth_user> -status \"<message>\" [-direct <username>] [-retweet <tweet_id>] [-log] [-json] "%(argv[0])
sys.exit(0)
def main(argv):
if len(argv) < 4:
usage(argv)
p = parse_params(argv)
print p
twit = Update()
twit.set_user_agent(agent="random")
if( p['logging'] ):
log_fname = twit.get_preferred_logname()
fmt='[%(asctime)s][%(module)s:%(funcName)s():%(lineno)d] %(levelname)s:%(message)s'
logging.basicConfig(filename=log_fname,format=fmt,level=logging.INFO)
log = logging.getLogger("twit_tools")
lg = None
if( not p['auth'] and not p['user'] ):
print "Must have authenticating User and Application!"
usage(argv)
return
if( p['auth'] ):
app = p['auth']
app_keys = TWITTER_APP_OAUTH_PAIR(app=p['auth'])
app_token_fname = TWITTER_APP_TOKEN_FNAME(app=p['auth'])
lg = Login( name="StatusUpdateLogin",
app_name=p['auth'],
app_user=p['user'],
token_fname=app_token_fname)
if( p['debug'] ):
lg.set_debug(True)
## Key and secret for specified application
lg.set_consumer_key(consumer_key=app_keys['consumer_key'])
lg.set_consumer_secret(consumer_secret=app_keys['consumer_secret'])
lg.login()
twit.set_auth_obj(obj=lg)
if( p['retweet'] ):
twit.set_request_type_as_retweet()
twit.set_retweet(tweet_id=p['retweet'])
elif( p['direct'] and p['status']):
twit.set_request_type_as_direct_message()
twit.set_direct_message(status=p['status'],username=p['direct'])
elif( p['status'] ):
twit.set_request_type_as_status_update()
twit.set_status(status=p['status'])
else:
print "Must supply a status message to post!"
return
twit.update_status()
twit.wait_request()
if( twit.messages()>0 ):
m = twit.get_message()
if( m ):
if( p['json'] ):
print json.dumps(m,indent=4,sort_keys=True)
else:
if( "created_at" in m and "user" in m ):
print "At %s, user %s posted:"%(m['created_at'],m['user']['name'])
print m['text'].encode('utf-8')
elif( "error" in m or "errors" in m ):
print "Error response."
else:
print "Not clear what this response was!"
print json.dumps(m,indent=4,sort_keys=True)
else:
print "Nothing returned!"
if( twit.had_warning() ):
print "WARNING:",twit.get_last_warning()
if( twit.had_error() ):
print "ERROR:",twit.get_last_error()
return
if __name__ == '__main__':
main(sys.argv)
|
|
from django import forms
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db.models.fields import FieldDoesNotExist
from django.forms.models import ModelChoiceIterator
from django.utils.encoding import force_unicode
from django.db.models.fields.related import add_lazy_relation
from .models.descriptors import SortableReverseManyRelatedObjectsDescriptor
from .widgets import Select, SelectMultiple
__all__ = (
'Select2FieldMixin', 'Select2ModelFieldMixin', 'ChoiceField',
'MultipleChoiceField', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ForeignKey', 'ManyToManyField',)
class Select2FieldMixin(object):
def __init__(self, *args, **kwargs):
widget_kwargs = {}
widget_kwarg_keys = ['overlay', 'js_options', 'sortable', 'ajax']
for k in widget_kwarg_keys:
if k in kwargs:
widget_kwargs[k] = kwargs.pop(k)
widget = kwargs.pop('widget', None)
if isinstance(widget, type):
if not issubclass(widget, Select):
widget = self.widget
elif not isinstance(widget, Select):
widget = self.widget
if isinstance(widget, type):
kwargs['widget'] = widget(**widget_kwargs)
else:
kwargs['widget'] = widget
super(Select2FieldMixin, self).__init__(*args, **kwargs)
# Django 1.2 backwards-compatibility
if not hasattr(self.widget, 'is_required'):
self.widget.is_required = self.required
class ChoiceField(Select2FieldMixin, forms.ChoiceField):
widget = Select
class MultipleChoiceField(Select2FieldMixin, forms.MultipleChoiceField):
widget = SelectMultiple
class Select2ModelFieldMixin(Select2FieldMixin):
search_field = None
case_sensitive = False
choice_iterator_cls = ModelChoiceIterator
def __init__(self, search_field=None, case_sensitive=False, *args, **kwargs):
self.name = kwargs.pop('name')
self.model = kwargs.pop('model')
if search_field is None and kwargs.get('ajax'):
raise TypeError(
("keyword argument 'search_field' is required for field "
"%s <%s>") % (self.name, self.__class__.__name__))
self.search_field = search_field
self.case_sensitive = case_sensitive
self.choice_iterator_cls = kwargs.pop('choice_iterator_cls', self.choice_iterator_cls)
super(Select2ModelFieldMixin, self).__init__(*args, **kwargs)
def _get_choices(self):
if hasattr(self, '_choices'):
return self._choices
return self.choice_iterator_cls(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
class ModelChoiceField(Select2ModelFieldMixin, forms.ModelChoiceField):
widget = Select
def __init__(self, *args, **kwargs):
super(ModelChoiceField, self).__init__(*args, **kwargs)
self.widget.field = self
class ModelMultipleChoiceField(Select2ModelFieldMixin, forms.ModelMultipleChoiceField):
widget = SelectMultiple
#: Instance of the field on the through table used for storing sort position
sort_field = None
def __init__(self, *args, **kwargs):
self.sort_field = kwargs.pop('sort_field', self.sort_field)
if self.sort_field is not None:
kwargs['sortable'] = True
super(ModelMultipleChoiceField, self).__init__(*args, **kwargs)
self.widget.field = self
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'])
elif not self.required and not value:
return []
if isinstance(value, basestring):
value = value.split(',')
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'])
key = self.to_field_name or 'pk'
for pk in value:
try:
self.queryset.filter(**{key: pk})
except ValueError:
raise ValidationError(self.error_messages['invalid_pk_value'] % pk)
qs = self.queryset.filter(**{
('%s__in' % key): value,
})
pks = set([force_unicode(getattr(o, key)) for o in qs])
# Create a dictionary for storing the original order of the items
# passed from the form
pk_positions = {}
for i, val in enumerate(value):
pk = force_unicode(val)
if pk not in pks:
raise ValidationError(self.error_messages['invalid_choice'] % val)
pk_positions[pk] = i
if not self.sort_field:
return qs
else:
# Iterate through the objects and set the sort field to its
# position in the comma-separated request data. Then return
# a list of objects sorted on the sort field.
sort_field_name = self.sort_field.name
objs = []
for i, obj in enumerate(qs):
pk = force_unicode(getattr(obj, key))
setattr(obj, sort_field_name, pk_positions[pk])
objs.append(obj)
objs.sort(key=lambda obj: getattr(obj, sort_field_name))
return objs
def prepare_value(self, value):
return super(ModelMultipleChoiceField, self).prepare_value(value)
class RelatedFieldMixin(object):
search_field = None
js_options = None
overlay = None
case_sensitive = False
ajax = False
def __init__(self, *args, **kwargs):
self.search_field = kwargs.pop('search_field', None)
self.js_options = kwargs.pop('js_options', None)
self.overlay = kwargs.pop('overlay', self.overlay)
self.case_sensitive = kwargs.pop('case_sensitive', self.case_sensitive)
self.ajax = kwargs.pop('ajax', self.ajax)
super(RelatedFieldMixin, self).__init__(*args, **kwargs)
def _get_queryset(self, db=None):
return self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
@property
def queryset(self):
return self._get_queryset()
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': ModelChoiceField,
'queryset': self._get_queryset(db),
'js_options': self.js_options,
'search_field': self.search_field,
'ajax': self.ajax,
'name': self.name,
'model': self.model,
}
defaults.update(kwargs)
if self.overlay is not None:
defaults.update({'overlay': self.overlay})
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return models.Field.formfield(self, **defaults)
def contribute_to_related_class(self, cls, related):
if not self.ajax:
return super(RelatedFieldMixin, self).contribute_to_related_class(cls, related)
if self.search_field is None:
raise TypeError(
("keyword argument 'search_field' is required for field "
"'%(field_name)s' of model %(app_label)s.%(object_name)s") % {
'field_name': self.name,
'app_label': self.model._meta.app_label,
'object_name': self.model._meta.object_name})
if not callable(self.search_field) and not isinstance(self.search_field, basestring):
raise TypeError(
("keyword argument 'search_field' must be either callable or "
"string on field '%(field_name)s' of model "
"%(app_label)s.%(object_name)s") % {
'field_name': self.name,
'app_label': self.model._meta.app_label,
'object_name': self.model._meta.object_name})
if isinstance(self.search_field, basestring):
opts = related.parent_model._meta
try:
opts.get_field(self.search_field)
except FieldDoesNotExist:
raise ImproperlyConfigured(
("keyword argument 'search_field' references non-existent "
"field '%(search_field)s' in %(field_name)s of model "
"<%(app_label)s.%(object_name)s>") % {
'search_field': self.search_field,
'field_name': self.name,
'app_label': opts.app_label,
'object_name': opts.object_name})
super(RelatedFieldMixin, self).contribute_to_related_class(cls, related)
class ForeignKey(RelatedFieldMixin, models.ForeignKey):
def formfield(self, **kwargs):
defaults = {
'to_field_name': self.rel.field_name,
}
defaults.update(**kwargs)
return super(ForeignKey, self).formfield(**defaults)
class OneToOneField(RelatedFieldMixin, models.OneToOneField):
def formfield(self, **kwargs):
defaults = {
'to_field_name': self.rel.field_name,
}
defaults.update(**kwargs)
return super(OneToOneField, self).formfield(**defaults)
class ManyToManyField(RelatedFieldMixin, models.ManyToManyField):
#: Name of the field on the through table used for storing sort position
sort_field_name = None
#: Instance of the field on the through table used for storing sort position
sort_field = None
def __init__(self, *args, **kwargs):
self.sort_field_name = kwargs.pop('sort_field', self.sort_field_name)
help_text = kwargs.get('help_text', u'')
super(ManyToManyField, self).__init__(*args, **kwargs)
self.help_text = help_text
def formfield(self, **kwargs):
defaults = {
'form_class': ModelMultipleChoiceField,
'sort_field': self.sort_field,
}
defaults.update(**kwargs)
return super(ManyToManyField, self).formfield(**defaults)
def contribute_to_class(self, cls, name):
"""
Replace the descriptor with our custom descriptor, so that the
position field (which is saved in the formfield clean()) gets saved
"""
if self.sort_field_name is not None:
def resolve_sort_field(field, model, cls):
field.sort_field = model._meta.get_field(field.sort_field_name)
if isinstance(self.rel.through, basestring):
add_lazy_relation(cls, self, self.rel.through, resolve_sort_field)
else:
resolve_sort_field(self, self.rel.through, cls)
super(ManyToManyField, self).contribute_to_class(cls, name)
if self.sort_field_name is not None:
setattr(cls, self.name, SortableReverseManyRelatedObjectsDescriptor(self))
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules(rules=[
((ManyToManyField,), [], {"search_field": ["search_field", {}]}),
], patterns=["^select2\.fields\.ManyToManyField"])
add_introspection_rules(rules=[
((ForeignKey,), [], {"search_field": ["search_field", {}]}),
], patterns=["^select2\.fields\.ForeignKey"])
add_introspection_rules(rules=[
((OneToOneField,), [], {"search_field": ["search_field", {}]}),
], patterns=["^select2\.fields\.OneToOneField"])
|
|
"""Support for AVM FRITZ!Box classes."""
from __future__ import annotations
from collections.abc import Callable, ValuesView
from dataclasses import dataclass, field
from datetime import datetime, timedelta
import logging
from types import MappingProxyType
from typing import Any, TypedDict
from fritzconnection import FritzConnection
from fritzconnection.core.exceptions import (
FritzActionError,
FritzConnectionException,
FritzServiceError,
)
from fritzconnection.lib.fritzhosts import FritzHosts
from fritzconnection.lib.fritzstatus import FritzStatus
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import dt as dt_util
from .const import (
DEFAULT_DEVICE_NAME,
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_USERNAME,
DOMAIN,
SERVICE_REBOOT,
SERVICE_RECONNECT,
TRACKER_SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
def _is_tracked(mac: str, current_devices: ValuesView) -> bool:
"""Check if device is already tracked."""
for tracked in current_devices:
if mac in tracked:
return True
return False
def device_filter_out_from_trackers(
mac: str,
device: FritzDevice,
current_devices: ValuesView,
) -> bool:
"""Check if device should be filtered out from trackers."""
reason: str | None = None
if device.ip_address == "":
reason = "Missing IP"
elif _is_tracked(mac, current_devices):
reason = "Already tracked"
if reason:
_LOGGER.debug(
"Skip adding device %s [%s], reason: %s", device.hostname, mac, reason
)
return bool(reason)
class ClassSetupMissing(Exception):
"""Raised when a Class func is called before setup."""
def __init__(self) -> None:
"""Init custom exception."""
super().__init__("Function called before Class setup")
@dataclass
class Device:
"""FRITZ!Box device class."""
mac: str
ip_address: str
name: str
class HostInfo(TypedDict):
"""FRITZ!Box host info class."""
mac: str
name: str
ip: str
status: bool
class FritzBoxTools:
"""FrtizBoxTools class."""
def __init__(
self,
hass: HomeAssistant,
password: str,
username: str = DEFAULT_USERNAME,
host: str = DEFAULT_HOST,
port: int = DEFAULT_PORT,
) -> None:
"""Initialize FritzboxTools class."""
self._cancel_scan: CALLBACK_TYPE | None = None
self._devices: dict[str, FritzDevice] = {}
self._options: MappingProxyType[str, Any] | None = None
self._unique_id: str | None = None
self.connection: FritzConnection = None
self.fritz_hosts: FritzHosts = None
self.fritz_status: FritzStatus = None
self.hass = hass
self.host = host
self.password = password
self.port = port
self.username = username
self._mac: str | None = None
self._model: str | None = None
self._current_firmware: str | None = None
self._latest_firmware: str | None = None
self._update_available: bool = False
async def async_setup(self) -> None:
"""Wrap up FritzboxTools class setup."""
await self.hass.async_add_executor_job(self.setup)
def setup(self) -> None:
"""Set up FritzboxTools class."""
self.connection = FritzConnection(
address=self.host,
port=self.port,
user=self.username,
password=self.password,
timeout=60.0,
pool_maxsize=30,
)
if not self.connection:
_LOGGER.error("Unable to establish a connection with %s", self.host)
return
self.fritz_status = FritzStatus(fc=self.connection)
info = self.connection.call_action("DeviceInfo:1", "GetInfo")
if not self._unique_id:
self._unique_id = info["NewSerialNumber"]
self._model = info.get("NewModelName")
self._current_firmware = info.get("NewSoftwareVersion")
self._update_available, self._latest_firmware = self._update_device_info()
async def async_start(self, options: MappingProxyType[str, Any]) -> None:
"""Start FritzHosts connection."""
self.fritz_hosts = FritzHosts(fc=self.connection)
self._options = options
await self.hass.async_add_executor_job(self.scan_devices)
self._cancel_scan = async_track_time_interval(
self.hass, self.scan_devices, timedelta(seconds=TRACKER_SCAN_INTERVAL)
)
@callback
def async_unload(self) -> None:
"""Unload FritzboxTools class."""
_LOGGER.debug("Unloading FRITZ!Box router integration")
if self._cancel_scan is not None:
self._cancel_scan()
self._cancel_scan = None
@property
def unique_id(self) -> str:
"""Return unique id."""
if not self._unique_id:
raise ClassSetupMissing()
return self._unique_id
@property
def model(self) -> str:
"""Return device model."""
if not self._model:
raise ClassSetupMissing()
return self._model
@property
def current_firmware(self) -> str:
"""Return current SW version."""
if not self._current_firmware:
raise ClassSetupMissing()
return self._current_firmware
@property
def latest_firmware(self) -> str | None:
"""Return latest SW version."""
return self._latest_firmware
@property
def update_available(self) -> bool:
"""Return if new SW version is available."""
return self._update_available
@property
def mac(self) -> str:
"""Return device Mac address."""
if not self._unique_id:
raise ClassSetupMissing()
return self._unique_id
@property
def devices(self) -> dict[str, FritzDevice]:
"""Return devices."""
return self._devices
@property
def signal_device_new(self) -> str:
"""Event specific per FRITZ!Box entry to signal new device."""
return f"{DOMAIN}-device-new-{self._unique_id}"
@property
def signal_device_update(self) -> str:
"""Event specific per FRITZ!Box entry to signal updates in devices."""
return f"{DOMAIN}-device-update-{self._unique_id}"
def _update_hosts_info(self) -> list[HostInfo]:
"""Retrieve latest hosts information from the FRITZ!Box."""
try:
return self.fritz_hosts.get_hosts_info() # type: ignore [no-any-return]
except Exception as ex: # pylint: disable=[broad-except]
if not self.hass.is_stopping:
raise HomeAssistantError("Error refreshing hosts info") from ex
return []
def _update_device_info(self) -> tuple[bool, str | None]:
"""Retrieve latest device information from the FRITZ!Box."""
userinterface = self.connection.call_action("UserInterface1", "GetInfo")
return userinterface.get("NewUpgradeAvailable"), userinterface.get(
"NewX_AVM-DE_Version"
)
def scan_devices(self, now: datetime | None = None) -> None:
"""Scan for new devices and return a list of found device ids."""
_LOGGER.debug("Checking devices for FRITZ!Box router %s", self.host)
_default_consider_home = DEFAULT_CONSIDER_HOME.total_seconds()
if self._options:
consider_home = self._options.get(
CONF_CONSIDER_HOME, _default_consider_home
)
else:
consider_home = _default_consider_home
new_device = False
for known_host in self._update_hosts_info():
if not known_host.get("mac"):
continue
dev_mac = known_host["mac"]
dev_name = known_host["name"]
dev_ip = known_host["ip"]
dev_home = known_host["status"]
dev_info = Device(dev_mac, dev_ip, dev_name)
if dev_mac in self._devices:
self._devices[dev_mac].update(dev_info, dev_home, consider_home)
else:
device = FritzDevice(dev_mac, dev_name)
device.update(dev_info, dev_home, consider_home)
self._devices[dev_mac] = device
new_device = True
dispatcher_send(self.hass, self.signal_device_update)
if new_device:
dispatcher_send(self.hass, self.signal_device_new)
_LOGGER.debug("Checking host info for FRITZ!Box router %s", self.host)
self._update_available, self._latest_firmware = self._update_device_info()
async def service_fritzbox(self, service: str) -> None:
"""Define FRITZ!Box services."""
_LOGGER.debug("FRITZ!Box router: %s", service)
if not self.connection:
raise HomeAssistantError("Unable to establish a connection")
try:
if service == SERVICE_REBOOT:
await self.hass.async_add_executor_job(
self.connection.call_action, "DeviceConfig1", "Reboot"
)
elif service == SERVICE_RECONNECT:
await self.hass.async_add_executor_job(
self.connection.call_action,
"WANIPConn1",
"ForceTermination",
)
except (FritzServiceError, FritzActionError) as ex:
raise HomeAssistantError("Service or parameter unknown") from ex
except FritzConnectionException as ex:
raise HomeAssistantError("Service not supported") from ex
@dataclass
class FritzData:
"""Storage class for platform global data."""
tracked: dict = field(default_factory=dict)
profile_switches: dict = field(default_factory=dict)
class FritzDeviceBase(Entity):
"""Entity base class for a device connected to a FRITZ!Box router."""
def __init__(self, router: FritzBoxTools, device: FritzDevice) -> None:
"""Initialize a FRITZ!Box device."""
self._router = router
self._mac: str = device.mac_address
self._name: str = device.hostname or DEFAULT_DEVICE_NAME
@property
def name(self) -> str:
"""Return device name."""
return self._name
@property
def ip_address(self) -> str | None:
"""Return the primary ip address of the device."""
if self._mac:
device: FritzDevice = self._router.devices[self._mac]
return device.ip_address
return None
@property
def mac_address(self) -> str:
"""Return the mac address of the device."""
return self._mac
@property
def hostname(self) -> str | None:
"""Return hostname of the device."""
if self._mac:
device: FritzDevice = self._router.devices[self._mac]
return device.hostname
return None
@property
def device_info(self) -> DeviceInfo:
"""Return the device information."""
return DeviceInfo(
connections={(CONNECTION_NETWORK_MAC, self._mac)},
default_manufacturer="AVM",
default_model="FRITZ!Box Tracked device",
default_name=self.name,
identifiers={(DOMAIN, self._mac)},
via_device=(
DOMAIN,
self._router.unique_id,
),
)
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return False
async def async_process_update(self) -> None:
"""Update device."""
raise NotImplementedError()
async def async_on_demand_update(self) -> None:
"""Update state."""
await self.async_process_update()
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Register state update callback."""
await self.async_process_update()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
self._router.signal_device_update,
self.async_on_demand_update,
)
)
class FritzDevice:
"""Representation of a device connected to the FRITZ!Box."""
def __init__(self, mac: str, name: str) -> None:
"""Initialize device info."""
self._mac = mac
self._name = name
self._ip_address: str | None = None
self._last_activity: datetime | None = None
self._connected = False
def update(self, dev_info: Device, dev_home: bool, consider_home: float) -> None:
"""Update device info."""
utc_point_in_time = dt_util.utcnow()
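# Treat the device as connected if the router reports it home, or if it
# was last seen within the configurable consider_home window (seconds).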
if self._last_activity:
consider_home_evaluated = (
utc_point_in_time - self._last_activity
).total_seconds() < consider_home
else:
consider_home_evaluated = dev_home
if not self._name:
self._name = dev_info.name or self._mac.replace(":", "_")
self._connected = dev_home or consider_home_evaluated
if dev_home:
self._last_activity = utc_point_in_time
self._ip_address = dev_info.ip_address
@property
def is_connected(self) -> bool:
"""Return connected status."""
return self._connected
@property
def mac_address(self) -> str:
"""Get MAC address."""
return self._mac
@property
def hostname(self) -> str:
"""Get Name."""
return self._name
@property
def ip_address(self) -> str | None:
"""Get IP address."""
return self._ip_address
@property
def last_activity(self) -> datetime | None:
"""Return device last activity."""
return self._last_activity
class SwitchInfo(TypedDict):
"""FRITZ!Box switch info class."""
description: str
friendly_name: str
icon: str
type: str
callback_update: Callable
callback_switch: Callable
class FritzBoxBaseEntity:
"""Fritz host entity base class."""
def __init__(self, fritzbox_tools: FritzBoxTools, device_name: str) -> None:
"""Init device info class."""
self._fritzbox_tools = fritzbox_tools
self._device_name = device_name
@property
def mac_address(self) -> str:
"""Return the mac address of the main device."""
return self._fritzbox_tools.mac
@property
def device_info(self) -> DeviceInfo:
"""Return the device information."""
return DeviceInfo(
configuration_url=f"http://{self._fritzbox_tools.host}",
connections={(CONNECTION_NETWORK_MAC, self.mac_address)},
identifiers={(DOMAIN, self._fritzbox_tools.unique_id)},
manufacturer="AVM",
model=self._fritzbox_tools.model,
name=self._device_name,
sw_version=self._fritzbox_tools.current_firmware,
)
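# Illustrative setup sketch (assumed wiring, not an official API contract):
#
#   tools = FritzBoxTools(hass, password="secret", host="192.168.178.1")
#   await tools.async_setup()
#   await tools.async_start(entry.options)
#
# where `hass` and `entry` come from the surrounding config-entry setup.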
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
from resource_management.libraries.functions import version
from resource_management.libraries.script.script import Script
from resource_management.libraries import functions
origin_exists = os.path.exists
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch.object(os.path, "exists", new=MagicMock(
side_effect=lambda *args: origin_exists(args[0])
if args[0][-2:] == "j2" else True))
@patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
@patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.0.0.0-1234"))
class TestResourceManager(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
STACK_VERSION = "2.0.6"
CONFIG_OVERRIDES = {"serviceName":"YARN", "role":"RESOURCEMANAGER"}
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_configure_default_with_include_file_dont_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="configure",
config_file="default_yarn_include_file_dont_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default(is_include_file_configured=True, manage_include_files=False)
self.assertNoMoreResources()
def test_configure_default_with_include_file_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="configure",
config_file="default_yarn_include_file_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default(is_include_file_configured=True, manage_include_files=True)
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertResourceCalled('File', '/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
action = ['delete'],
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
)
self.assertResourceCalled('Execute', 'ulimit -c unlimited; export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf start resourcemanager',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
user = 'yarn',
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
tries = 5,
try_sleep = 1,
)
self.assertNoMoreResources()
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop resourcemanager',
user='yarn')
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
def test_configure_secured_with_include_file_dont_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="configure",
config_file="secured_yarn_include_file_dont_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured(is_include_file_configured=True, manage_include_files=False)
def test_configure_secured_with_include_file_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="configure",
config_file="secured_yarn_include_file_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured(is_include_file_configured=True, manage_include_files=True)
def test_start_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="start",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertResourceCalled('File', '/var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
action = ['delete'],
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
)
self.assertResourceCalled('Execute', 'ulimit -c unlimited; export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf start resourcemanager',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
user = 'yarn',
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid',
tries = 5,
try_sleep = 1,
)
self.assertNoMoreResources()
def test_stop_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname="Resourcemanager",
command="stop",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop resourcemanager',
user='yarn')
self.assertNoMoreResources()
def test_decommission_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname = "Resourcemanager",
command = "decommission",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
owner = 'yarn',
content = Template('exclude_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('Execute', ' yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
user = 'yarn',
)
self.assertNoMoreResources()
def test_decommission_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname = "Resourcemanager",
command = "decommission",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
owner = 'yarn',
content = Template('exclude_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/rm.service.keytab rm/[email protected]; yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
user = 'yarn',
)
self.assertNoMoreResources()
def test_decommission_default_with_include_file_dont_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname = "Resourcemanager",
command = "decommission",
config_file="default_yarn_include_file_dont_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
owner = 'yarn',
content = Template('exclude_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('Execute', ' yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
user = 'yarn',
)
self.assertNoMoreResources()
def test_decommission_default_with_include_file_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname = "Resourcemanager",
command = "decommission",
config_file="default_yarn_include_file_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
owner = 'yarn',
content = Template('exclude_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
owner = 'yarn',
content = Template('include_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('Execute', ' yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
user = 'yarn',
)
self.assertNoMoreResources()
def test_decommission_secured_with_include_file_dont_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname = "Resourcemanager",
command = "decommission",
config_file="secured_yarn_include_file_dont_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
owner = 'yarn',
content = Template('exclude_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/rm.service.keytab rm/[email protected]; yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
user = 'yarn',
)
self.assertNoMoreResources()
def test_decommission_secured_with_include_file_manage(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname = "Resourcemanager",
command = "decommission",
config_file="secured_yarn_include_file_manage.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
owner = 'yarn',
content = Template('exclude_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
owner = 'yarn',
content = Template('include_hosts_list.j2'),
group = 'hadoop',
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/rm.service.keytab rm/[email protected]; yarn --config /etc/hadoop/conf rmadmin -refreshNodes',
environment = {'PATH': "/bin:/usr/bin:/usr/lib/hadoop-yarn/bin"},
user = 'yarn',
)
self.assertNoMoreResources()
def assert_configure_default(self, is_include_file_configured = False, manage_include_files=False):
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
ignore_failures = True,
cd_access = 'a',
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['yarn-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('Directory', '/etc/hadoop/conf',
mode = 0755,
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
content = Template('exclude_hosts_list.j2'),
owner = 'yarn',
group = 'hadoop',
)
if is_include_file_configured and manage_include_files:
self.assertResourceCalled('Directory', '/etc/hadoop/conf_for_include',
mode = 0755,
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
content = Template('include_hosts_list.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
content = Template('yarn.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
content = Template('mapreduce.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
owner = 'yarn',
group = 'hadoop',
mode = 0755,
)
self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
group = 'hadoop',
mode = 02050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
content = Template('container-executor.cfg.j2'),
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('Directory', '/cgroups_test/cpu',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access="a"
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
mode = 0755,
owner = 'hdfs',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
content = Template('taskcontroller.cfg.j2'),
owner = 'hdfs',
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'mapred',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'hdfs',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
owner = 'mapred',
group = 'hadoop',
)
def assert_configure_secured(self, is_include_file_configured = False, manage_include_files=False):
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
ignore_failures = True,
cd_access = 'a',
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['yarn-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('Directory', '/etc/hadoop/conf',
mode = 0755,
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
content = Template('exclude_hosts_list.j2'),
owner = 'yarn',
group = 'hadoop',
)
if is_include_file_configured and manage_include_files:
self.assertResourceCalled('Directory', '/etc/hadoop/conf_for_include',
mode = 0755,
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('File', '/etc/hadoop/conf_for_include/yarn.include',
content = Template('include_hosts_list.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/var/log/hadoop-yarn/yarn/hadoop-mapreduce.jobsummary.log',
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
content = Template('yarn.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
content = Template('mapreduce.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
owner = 'yarn',
group = 'hadoop',
mode = 0755,
)
self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
group = 'hadoop',
mode = 06050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
content = Template('container-executor.cfg.j2'),
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('Directory', '/cgroups_test/cpu',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access="a"
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
mode = 0755,
owner = 'root',
)
self.assertResourceCalled('File', '/usr/lib/hadoop/sbin/task-controller',
owner = 'root',
group = 'hadoop',
mode = 06050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
content = Template('taskcontroller.cfg.j2'),
owner = 'root',
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_jaas.conf',
content = Template('yarn_jaas.conf.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_nm_jaas.conf',
content = Template('yarn_nm_jaas.conf.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred_jaas.conf',
content = Template('mapred_jaas.conf.j2'),
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'mapred',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'hdfs',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
owner = 'mapred',
group = 'hadoop',
)
def test_pre_upgrade_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/resourcemanager.py",
classname = "Resourcemanager",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-yarn-resourcemanager', version), sudo=True)
self.assertNoMoreResources()
|
|
""" Basic functions for manipulating 2d arrays
"""
import functools
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, intp, empty, promote_types,
diagonal, nonzero, indices
)
from numpy.core.overrides import set_array_function_like_doc, set_module
from numpy.core import overrides
from numpy.core import iinfo
from numpy.lib.stride_tricks import broadcast_to
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
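# For example, _min_int(0, 100) returns int8, while _min_int(0, 1000)
# needs int16 because 1000 exceeds the int8 maximum of 127.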
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Reverse the order of elements along axis 1 (left/right).
For a 2-D array, this flips the entries in each row in the left/right
direction. Columns are preserved, but appear in a different order than
before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
flip : Flip array in one or more dimensions.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.
Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.fliplr(A)
array([[0., 0., 1.],
[0., 2., 0.],
[3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Reverse the order of elements along axis 0 (up/down).
For a 2-D array, this flips the entries in each column in the up/down
direction. Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
flip : Flip array in one or more dimensions.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
Requires the array to be at least 1-D.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.flipud(A)
array([[0., 0., 3.],
[0., 2., 0.],
[1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def _eye_dispatcher(N, M=None, k=None, dtype=None, order=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
if like is not None:
return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
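# Writing into the flattened view with a stride of M+1 walks down the
# k-th diagonal; limiting the rows to :M-k stops the ones from wrapping
# past the last column.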
m[:M-k].flat[i::M+1] = 1
return m
_eye_with_like = array_function_dispatch(
_eye_dispatcher
)(eye)
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k, dtype=intp)
fi = i+k+i*n
else:
i = arange(0, n+k, dtype=intp)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def _tri_dispatcher(N, M=None, k=None, dtype=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float, *, like=None):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.]])
"""
if like is not None:
return _tri_with_like(N, M=M, k=k, dtype=dtype, like=like)
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
_tri_with_like = array_function_dispatch(
_tri_dispatcher
)(tri)
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two
axes.
Parameters
----------
m : array_like, shape (..., M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (..., M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
>>> np.tril(np.arange(3*4*5).reshape(3, 4, 5))
array([[[ 0, 0, 0, 0, 0],
[ 5, 6, 0, 0, 0],
[10, 11, 12, 0, 0],
[15, 16, 17, 18, 0]],
[[20, 0, 0, 0, 0],
[25, 26, 0, 0, 0],
[30, 31, 32, 0, 0],
[35, 36, 37, 38, 0]],
[[40, 0, 0, 0, 0],
[45, 46, 0, 0, 0],
[50, 51, 52, 0, 0],
[55, 56, 57, 58, 0]]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of an array with the elements below the `k`-th diagonal
zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the final
two axes.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
>>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
array([[[ 0, 1, 2, 3, 4],
[ 0, 6, 7, 8, 9],
[ 0, 0, 12, 13, 14],
[ 0, 0, 0, 18, 19]],
[[20, 21, 22, 23, 24],
[ 0, 26, 27, 28, 29],
[ 0, 0, 32, 33, 34],
[ 0, 0, 0, 38, 39]],
[[40, 41, 42, 43, 44],
[ 0, 46, 47, 48, 49],
[ 0, 0, 52, 53, 54],
[ 0, 0, 0, 58, 59]]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
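# Illustrative sketch (not part of the original source): `tril` and `triu`
# above are complementary `where` selections driven by boolean masks from
# `tri`, and the 2-D mask broadcasts over any leading axes. Plain NumPy only;
# the helper name is ours.
def _demo_trilu_masks():
    import numpy as np
    m = np.arange(2 * 3 * 3).reshape(2, 3, 3)
    lower_mask = np.tri(3, 3, k=0, dtype=bool)      # keep j <= i
    upper_mask = ~np.tri(3, 3, k=-1, dtype=bool)    # keep j >= i
    assert np.array_equal(np.where(lower_mask, m, 0), np.tril(m))
    assert np.array_equal(np.where(upper_mask, m, 0), np.triu(m))
    return lower_mask, upper_mask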
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043 # may vary
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
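# Illustrative sketch (not part of the original source): the cumulative-product
# trick used by `vander` above. Every column after the first starts out equal
# to x, and `multiply.accumulate` along axis 1 turns column j into x**j. Plain
# NumPy only; the helper name is ours.
def _demo_vander_accumulate():
    import numpy as np
    x, N = np.array([1, 2, 3, 5]), 4
    v = np.empty((len(x), N), dtype=np.promote_types(x.dtype, int))
    v[:, 0] = 1
    v[:, 1:] = x[:, None]
    np.multiply.accumulate(v[:, 1:], out=v[:, 1:], axis=1)
    assert np.array_equal(v, np.vander(x, N, increasing=True))
    return v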
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
yield x
yield y
# This terrible logic is adapted from the checks in histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N == 2:
yield from bins # bins=[x, y]
else:
yield bins
yield weights
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> from matplotlib.image import NonUniformImage
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> # Histogram does not follow Cartesian convention (see Notes),
>>> # therefore transpose H for visualization purposes.
>>> H = H.T
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
<matplotlib.image.AxesImage object at 0x...>
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
<matplotlib.collections.QuadMesh object at 0x...>
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
It is also possible to construct a 2-D histogram without specifying bin
edges:
>>> # Generate non-symmetric test data
>>> n = 10000
>>> x = np.linspace(1, 100, n)
>>> y = 2*np.log(x) + np.random.rand(n) - 0.5
>>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
>>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
Now we can plot the histogram using
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
:func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.
>>> # Plot histogram using pcolormesh
>>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
>>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')
>>> ax1.plot(x, 2*np.log(x), 'k-')
>>> ax1.set_xlim(x.min(), x.max())
>>> ax1.set_ylim(y.min(), y.max())
>>> ax1.set_xlabel('x')
>>> ax1.set_ylabel('y')
>>> ax1.set_title('histogram2d')
>>> ax1.grid()
>>> # Create hexbin plot for comparison
>>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')
>>> ax2.plot(x, 2*np.log(x), 'k-')
>>> ax2.set_title('hexbin')
>>> ax2.set_xlim(x.min(), x.max())
>>> ax2.set_xlabel('x')
>>> ax2.grid()
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
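# Illustrative sketch (not part of the original source): `histogram2d` is a
# thin wrapper over `histogramdd`, as the implementation above shows, so the
# two calls below should agree bin for bin. Plain NumPy only; the helper name
# and the random sample are ours.
def _demo_histogram2d_vs_histogramdd():
    import numpy as np
    rng = np.random.default_rng(0)
    x, y = rng.normal(size=100), rng.normal(size=100)
    H2, xe, ye = np.histogram2d(x, y, bins=5)
    Hd, (xe_d, ye_d) = np.histogramdd([x, y], bins=5)
    assert np.array_equal(H2, Hd)
    assert np.allclose(xe, xe_d) and np.allclose(ye, ye_d)
    return H2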
@set_module('numpy')
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
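# Illustrative sketch (not part of the original source): with `triu` as the
# masking function, `mask_indices` reduces to the dedicated index helper, since
# both simply report where the mask leaves non-zero entries. Plain NumPy only;
# the helper name is ours.
def _demo_mask_indices_equivalence():
    import numpy as np
    iu = np.mask_indices(4, np.triu, 1)
    iu_direct = np.triu_indices(4, 1)
    assert all(np.array_equal(a, b) for a, b in zip(iu, iu_direct))
    return iu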
@set_module('numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, ..., 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
tri_ = tri(n, m, k=k, dtype=bool)
return tuple(broadcast_to(inds, tri_.shape)[tri_]
for inds in indices(tri_.shape, sparse=True))
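# Illustrative sketch (not part of the original source): the broadcast trick in
# `tril_indices` above. `indices(..., sparse=True)` yields one open grid per
# axis; broadcasting each grid to the mask's shape and indexing with the mask
# produces the row and column index arrays. Plain NumPy only; the helper name
# is ours.
def _demo_tril_indices_broadcast():
    import numpy as np
    n, m, k = 4, 4, 0
    mask = np.tri(n, m, k=k, dtype=bool)
    rows, cols = (np.broadcast_to(g, mask.shape)[mask]
                  for g in np.indices(mask.shape, sparse=True))
    ref_rows, ref_cols = np.tril_indices(n, k, m)
    assert np.array_equal(rows, ref_rows) and np.array_equal(cols, ref_cols)
    return rows, cols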
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@set_module('numpy')
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, ..., 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
tri_ = ~tri(n, m, k=k - 1, dtype=bool)
return tuple(broadcast_to(inds, tri_.shape)[tri_]
for inds in indices(tri_.shape, sparse=True))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
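# Illustrative sketch (not part of the original source): a common use of
# `triu_indices_from`, mirroring the strict upper triangle onto the lower one
# to symmetrize a square matrix. Plain NumPy only; the helper name is ours.
def _demo_symmetrize_with_triu_indices_from(a=None):
    import numpy as np
    if a is None:
        a = np.arange(9, dtype=float).reshape(3, 3)
    rows, cols = np.triu_indices_from(a, k=1)
    a[cols, rows] = a[rows, cols]   # copy the upper triangle below the diagonal
    assert np.allclose(a, a.T)
    return a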
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class GroupByWindowTest(test.TestCase):
def testSimple(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components).map(lambda x: x * x)
.apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4),
4))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
result = sess.run(get_next)
          self.assertTrue(
              all(x % 2 == 0 for x in result) or
              all(x % 2 == 1 for x in result))
counts.append(result.shape[0])
self.assertEqual(len(components), sum(counts))
num_full_batches = len([c for c in counts if c == 4])
self.assertGreaterEqual(num_full_batches, 23)
self.assertTrue(all(c == 4 for c in counts[:num_full_batches]))
def testImmediateOutput(self):
components = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(-1).apply(
grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4),
4)).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
# The input is infinite, so this test demonstrates that:
# 1. We produce output without having to consume the entire input,
# 2. Different buckets can produce output at different rates, and
# 3. For deterministic input, the output is deterministic.
for _ in range(3):
self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))
self.assertAllEqual([1, 1, 1, 1], sess.run(get_next))
self.assertAllEqual([2, 2, 2, 2], sess.run(get_next))
self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))
def testSmallGroups(self):
components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4),
4)).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))
self.assertAllEqual([1, 1, 1, 1], sess.run(get_next))
# The small outputs at the end are deterministically produced in key
# order.
self.assertAllEqual([0, 0, 0], sess.run(get_next))
self.assertAllEqual([1], sess.run(get_next))
def testReduceFuncError(self):
components = np.random.randint(100, size=(200,)).astype(np.int64)
def reduce_func(_, xs):
# Introduce an incorrect padded shape that cannot (currently) be
# detected at graph construction time.
return xs.padded_batch(
4,
padded_shapes=(tensor_shape.TensorShape([]),
constant_op.constant([5], dtype=dtypes.int64) * -1))
iterator = (
dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: (x, ops.convert_to_tensor([x * x]))).apply(
grouping.group_by_window(lambda x, _: x % 2, reduce_func, 32))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
def testConsumeWindowDatasetMoreThanOnce(self):
components = np.random.randint(50, size=(200,)).astype(np.int64)
def reduce_func(key, window):
# Apply two different kinds of padding to the input: tight
# padding, and quantized (to a multiple of 10) padding.
return dataset_ops.Dataset.zip((
window.padded_batch(
4, padded_shapes=tensor_shape.TensorShape([None])),
window.padded_batch(
4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])),))
iterator = (
dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x))
.apply(grouping.group_by_window(
lambda x: math_ops.cast(array_ops.shape(x)[0] // 10, dtypes.int64),
reduce_func, 4))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
counts = []
with self.assertRaises(errors.OutOfRangeError):
while True:
tight_result, multiple_of_10_result = sess.run(get_next)
self.assertEqual(0, multiple_of_10_result.shape[1] % 10)
self.assertAllEqual(tight_result,
multiple_of_10_result[:, :tight_result.shape[1]])
counts.append(tight_result.shape[0])
self.assertEqual(len(components), sum(counts))
class GroupByWindowSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, components):
return dataset_ops.Dataset.from_tensor_slices(components).repeat(-1).apply(
grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4), 4))
def testCoreGroupByWindow(self):
components = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
self.verify_unused_iterator(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_init_before_restore(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_multiple_breaks(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_reset_restored_iterator(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_restore_in_empty_graph(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
diff_components = np.array([0, 0, 0, 1, 1, 1], dtype=np.int64)
self.verify_restore_in_modified_graph(
lambda: self._build_dataset(components),
lambda: self._build_dataset(diff_components),
12,
verify_exhausted=False)
# NOTE(mrry): These tests are based on the tests in bucket_ops_test.py.
# Currently, they use a constant batch size, though they should be made to use
# a different batch size per key.
class BucketTest(test.TestCase):
def _dynamicPad(self, bucket, window, window_size):
# TODO(mrry): To match `tf.contrib.training.bucket()`, implement a
# generic form of padded_batch that pads every component
# dynamically and does not rely on static shape information about
# the arguments.
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket), window.padded_batch(
32, (tensor_shape.TensorShape([]), tensor_shape.TensorShape([None]),
tensor_shape.TensorShape([3])))))
def testSingleBucket(self):
def _map_fn(v):
return (v, array_ops.fill([v], v),
array_ops.fill([3], string_ops.as_string(v)))
input_dataset = (
dataset_ops.Dataset.from_tensor_slices(math_ops.range(32)).map(_map_fn))
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda x, y, z: 0,
lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))
iterator = bucketed_dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
which_bucket, bucketed_values = sess.run(get_next)
self.assertEqual(0, which_bucket)
expected_scalar_int = np.arange(32, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
for i in range(32):
expected_unk_int64[i, :i] = i
expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values[0])
self.assertAllEqual(expected_unk_int64, bucketed_values[1])
self.assertAllEqual(expected_vec3_str, bucketed_values[2])
def testEvenOddBuckets(self):
def _map_fn(v):
return (v, array_ops.fill([v], v),
array_ops.fill([3], string_ops.as_string(v)))
input_dataset = (
dataset_ops.Dataset.from_tensor_slices(math_ops.range(64)).map(_map_fn))
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))
iterator = bucketed_dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
# Get two minibatches (one containing even values, one containing odds)
which_bucket_even, bucketed_values_even = sess.run(get_next)
which_bucket_odd, bucketed_values_odd = sess.run(get_next)
# Count number of bucket_tensors.
self.assertEqual(3, len(bucketed_values_even))
self.assertEqual(3, len(bucketed_values_odd))
# Ensure bucket 0 was used for all minibatch entries.
self.assertAllEqual(0, which_bucket_even)
self.assertAllEqual(1, which_bucket_odd)
      # Test the first bucket outputted, the evens starting at 0
expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i] = 2 * i
expected_vec3_str = np.vstack(
3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_even[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_even[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_even[2])
# Test the second bucket outputted, the odds starting at 1
expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64)
expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
for i in range(0, 32):
expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
expected_vec3_str = np.vstack(
3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T
self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0])
self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1])
self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2])
def testEvenOddBucketsFilterOutAllOdd(self):
def _map_fn(v):
return {
"x": v,
"y": array_ops.fill([v], v),
"z": array_ops.fill([3], string_ops.as_string(v))
}
def _dynamic_pad_fn(bucket, window, _):
return dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensors(bucket), window.padded_batch(
32, {
"x": tensor_shape.TensorShape([]),
"y": tensor_shape.TensorShape([None]),
"z": tensor_shape.TensorShape([3])
})))
input_dataset = (
dataset_ops.Dataset.from_tensor_slices(math_ops.range(128)).map(_map_fn)
.filter(lambda d: math_ops.equal(d["x"] % 2, 0)))
bucketed_dataset = input_dataset.apply(
grouping.group_by_window(
lambda d: math_ops.cast(d["x"] % 2, dtypes.int64),
lambda k, bucket: _dynamic_pad_fn(k, bucket, 32), 32))
iterator = bucketed_dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
# Get two minibatches ([0, 2, ...] and [64, 66, ...])
which_bucket0, bucketed_values_even0 = sess.run(get_next)
which_bucket1, bucketed_values_even1 = sess.run(get_next)
# Ensure that bucket 1 was completely filtered out
self.assertAllEqual(0, which_bucket0)
self.assertAllEqual(0, which_bucket1)
self.assertAllEqual(
np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0["x"])
self.assertAllEqual(
np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1["x"])
def testDynamicWindowSize(self):
components = np.arange(100).astype(np.int64)
# Key fn: even/odd
# Reduce fn: batches of 5
# Window size fn: even=5, odd=10
def window_size_func(key):
window_sizes = constant_op.constant([5, 10], dtype=dtypes.int64)
return window_sizes[key]
dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(20),
None, window_size_func))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.OutOfRangeError):
batches = 0
while True:
result = sess.run(get_next)
is_even = all(x % 2 == 0 for x in result)
is_odd = all(x % 2 == 1 for x in result)
self.assertTrue(is_even or is_odd)
expected_batch_size = 5 if is_even else 10
self.assertEqual(expected_batch_size, result.shape[0])
batches += 1
self.assertEqual(batches, 15)
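# Illustrative sketch (not part of the original tests): the batch count checked
# in testDynamicWindowSize above. The 100 elements split evenly by parity; the
# even key fills windows of 5 and the odd key windows of 10, so 10 + 5 = 15
# batches are produced. Pure Python; the helper name is ours.
def _expected_dynamic_window_batches(num_elements=100, window_sizes=(5, 10)):
  per_key = num_elements // 2                           # 50 evens, 50 odds
  return sum(per_key // size for size in window_sizes)  # 10 + 5 == 15
assert _expected_dynamic_window_batches() == 15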
if __name__ == "__main__":
test.main()
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GroupedMessage.score'
db.add_column(
'sentry_groupedmessage',
'score',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False
)
def backwards(self, orm):
# Deleting field 'GroupedMessage.score'
db.delete_column('sentry_groupedmessage', 'score')
models = {
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.groupedmessage': {
'Meta': {
'unique_together': "(('logger', 'view', 'checksum'),)",
'object_name': 'GroupedMessage'
},
'checksum': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'view': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.message': {
'Meta': {
'object_name': 'Message'
},
'checksum': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'message_set'",
'null': 'True',
'to': "orm['sentry.GroupedMessage']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'server_name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'db_index': 'True'
}),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'view': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
}
}
complete_apps = ['sentry']
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
import warnings
from shutil import which
import scipy
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer
from pymatgen.electronic_structure.cohp import CompleteCohp
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.plotter import (
BoltztrapPlotter,
BSDOSPlotter,
BSPlotter,
BSPlotterProjected,
CohpPlotter,
DosPlotter,
fold_point,
plot_brillouin_zone,
plot_ellipsoid,
)
from pymatgen.io.vasp import Vasprun
from pymatgen.util.testing import PymatgenTest
class DosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "complete_dos.json"), encoding="utf-8") as f:
self.dos = CompleteDos.from_dict(json.load(f))
self.plotter = DosPlotter(sigma=0.2, stack=True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 4)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Li", "Fe", "P", "O"]:
self.assertIn(el, d)
    # Minimal baseline testing for get_plot. Not a true test; it just checks
    # that the call can actually execute.
def test_get_plot(self):
# Disabling latex is needed for this test to work.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
plt = self.plotter.get_plot()
self.plotter.save_plot("dosplot.png")
self.assertTrue(os.path.isfile("dosplot.png"))
os.remove("dosplot.png")
plt.close("all")
class BSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "CaO_2605_bandstructure.json"), encoding="utf-8") as f:
d = json.loads(f.read())
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotter(self.bs)
self.assertEqual(len(self.plotter._bs), 1, "wrong number of band objects")
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "N2_12103_bandstructure.json"), encoding="utf-8") as f:
d = json.loads(f.read())
self.sbs_sc = BandStructureSymmLine.from_dict(d)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "C_48_bandstructure.json"), encoding="utf-8") as f:
d = json.loads(f.read())
self.sbs_met = BandStructureSymmLine.from_dict(d)
self.plotter_multi = BSPlotter([self.sbs_sc, self.sbs_met])
self.assertEqual(len(self.plotter_multi._bs), 2, "wrong number of band objects")
self.assertEqual(self.plotter_multi._nb_bands, [96, 96], "wrong number of bands")
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_add_bs(self):
self.plotter_multi.add_bs(self.sbs_sc)
self.assertEqual(len(self.plotter_multi._bs), 3, "wrong number of band objects")
self.assertEqual(self.plotter_multi._nb_bands, [96, 96, 96], "wrong number of bands")
def test_get_branch_steps(self):
steps_idx = BSPlotter._get_branch_steps(self.sbs_sc.branches)
self.assertEqual(steps_idx, [0, 121, 132, 143], "wrong list of steps idx")
def test_rescale_distances(self):
rescaled_distances = self.plotter_multi._rescale_distances(self.sbs_sc, self.sbs_met)
self.assertEqual(
len(rescaled_distances),
len(self.sbs_met.distance),
"wrong length of distances list",
)
self.assertEqual(rescaled_distances[-1], 6.5191398067252875, "wrong last distance value")
self.assertEqual(
rescaled_distances[148],
self.sbs_sc.distance[19],
"wrong distance at high symm k-point",
)
def test_interpolate_bands(self):
data = self.plotter.bs_plot_data()
d = data["distances"]
en = data["energy"]["1"]
int_distances, int_energies = self.plotter._interpolate_bands(d, en)
self.assertEqual(len(int_distances), 10, "wrong length of distances list")
self.assertEqual(len(int_distances[0]), 100, "wrong length of distances in a branch")
self.assertEqual(len(int_energies), 10, "wrong length of distances list")
self.assertEqual(int_energies[0].shape, (16, 100), "wrong length of distances list")
def test_bs_plot_data(self):
self.assertEqual(
len(self.plotter.bs_plot_data()["distances"]),
10,
"wrong number of sequences of branches",
)
self.assertEqual(
len(self.plotter.bs_plot_data()["distances"][0]),
16,
"wrong number of distances in the first sequence of branches",
)
self.assertEqual(
sum(len(e) for e in self.plotter.bs_plot_data()["distances"]),
160,
"wrong number of distances",
)
length = len(self.plotter.bs_plot_data(split_branches=False)["distances"][0])
self.assertEqual(length, 144, "wrong number of distances in the first sequence of branches")
length = len(self.plotter.bs_plot_data(split_branches=False)["distances"])
self.assertEqual(length, 2, "wrong number of distances in the first sequence of branches")
self.assertEqual(self.plotter.bs_plot_data()["ticks"]["label"][5], "K", "wrong tick label")
self.assertEqual(
len(self.plotter.bs_plot_data()["ticks"]["label"]),
19,
"wrong number of tick labels",
)
def test_get_ticks(self):
self.assertEqual(self.plotter.get_ticks()["label"][5], "K", "wrong tick label")
self.assertEqual(
self.plotter.get_ticks()["distance"][5],
2.406607625322699,
"wrong tick distance",
)
    # Minimal baseline testing for get_plot. Not a true test; it just checks
    # that the call can actually execute.
def test_get_plot(self):
# zero_to_efermi = True, ylim = None, smooth = False,
# vbm_cbm_marker = False, smooth_tol = None
# Disabling latex is needed for this test to work.
from matplotlib import rc
rc("text", usetex=False)
plt = self.plotter.get_plot()
self.assertEqual(plt.ylim(), (-4.0, 7.6348), "wrong ylim")
plt = self.plotter.get_plot(smooth=True)
plt = self.plotter.get_plot(vbm_cbm_marker=True)
self.plotter.save_plot("bsplot.png")
self.assertTrue(os.path.isfile("bsplot.png"))
os.remove("bsplot.png")
plt.close("all")
# test plotter with 2 bandstructures
plt = self.plotter_multi.get_plot()
self.assertEqual(len(plt.gca().get_lines()), 874, "wrong number of lines")
self.assertEqual(plt.ylim(), (-10.0, 10.0), "wrong ylim")
plt = self.plotter_multi.get_plot(zero_to_efermi=False)
self.assertEqual(plt.ylim(), (-15.2379, 12.67141266), "wrong ylim")
plt = self.plotter_multi.get_plot(smooth=True)
self.plotter_multi.save_plot("bsplot.png")
self.assertTrue(os.path.isfile("bsplot.png"))
os.remove("bsplot.png")
plt.close("all")
class BSPlotterProjectedTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "Cu2O_361_bandstructure.json"), encoding="utf-8") as f:
d = json.load(f)
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotterProjected(self.bs)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
    # Minimal baseline testing for the plotting methods. Not a true test; it
    # just checks that the calls can actually execute.
def test_methods(self):
self.plotter.get_elt_projected_plots().close()
self.plotter.get_elt_projected_plots_color().close()
self.plotter.get_projected_plots_dots({"Cu": ["d", "s"], "O": ["p"]}).close()
self.plotter.get_projected_plots_dots_patom_pmorb(
{"Cu": ["dxy", "s", "px"], "O": ["px", "py", "pz"]},
{"Cu": [3, 5], "O": [1]},
).close()
class BSDOSPlotterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
    # Minimal baseline testing for the plotting methods. Not a true test; it
    # just checks that the calls can actually execute.
def test_methods(self):
v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, "vasprun_Si_bands.xml"))
p = BSDOSPlotter()
plt = p.get_plot(
v.get_band_structure(kpoints_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "KPOINTS_Si_bands"))
)
plt.close()
plt = p.get_plot(
v.get_band_structure(kpoints_filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "KPOINTS_Si_bands")),
v.complete_dos,
)
plt.close("all")
class PlotBZTest(unittest.TestCase):
def setUp(self):
self.rec_latt = Structure.from_file(
os.path.join(PymatgenTest.TEST_FILES_DIR, "Si.cssr")
).lattice.reciprocal_lattice
self.kpath = [[[0.0, 0.0, 0.0], [0.5, 0.0, 0.5], [0.5, 0.25, 0.75], [0.375, 0.375, 0.75]]]
self.labels = {
"\\Gamma": [0.0, 0.0, 0.0],
"K": [0.375, 0.375, 0.75],
"L": [0.5, 0.5, 0.5],
"U": [0.625, 0.25, 0.625],
"W": [0.5, 0.25, 0.75],
"X": [0.5, 0.0, 0.5],
}
self.hessian = [
[17.64757034, 3.90159625, -4.77845607],
[3.90159625, 14.88874142, 6.75776076],
[-4.77845607, 6.75776076, 12.12987493],
]
self.center = [0.41, 0.0, 0.41]
self.points = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_bz_plot(self):
_, ax = plot_ellipsoid(self.hessian, self.center, lattice=self.rec_latt)
plot_brillouin_zone(
self.rec_latt,
lines=self.kpath,
labels=self.labels,
kpoints=self.points,
ax=ax,
show=False,
)
def test_fold_point(self):
self.assertTrue(
scipy.allclose(
fold_point([0.0, -0.5, 0.5], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0.0, 0.5, 0.5]),
)
)
self.assertTrue(
scipy.allclose(
fold_point([0.1, -0.6, 0.2], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0.1, 0.4, 0.2]),
)
)
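# Illustrative sketch (not part of the original tests): the arithmetic the
# fold_point assertions above rely on, assuming folding amounts to taking the
# fractional coordinates modulo 1 before converting to Cartesian (edge cases in
# the real pymatgen implementation may differ). The helper name is ours.
def _folded_cartesian(frac_coords, lattice):
    import numpy as np
    folded = np.asarray(frac_coords, dtype=float) % 1.0   # e.g. -0.6 -> 0.4
    return lattice.get_cartesian_coords(folded)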
x_trans = which("x_trans")
@unittest.skipIf(not x_trans, "No x_trans.")
class BoltztrapPlotterTest(unittest.TestCase):
def setUp(self):
bz = BoltztrapAnalyzer.from_files(os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap/transp/"))
self.plotter = BoltztrapPlotter(bz)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_plot_carriers(self):
plt = self.plotter.plot_carriers()
self.assertEqual(len(plt.gca().get_lines()), 7, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
6.525490122298364e22,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_complexity_factor_mu(self):
plt = self.plotter.plot_complexity_factor_mu()
self.assertEqual(len(plt.gca().get_lines()), 2, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.004708835456903449,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_conductivity_dop(self):
plt = self.plotter.plot_conductivity_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.3801957596666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_conductivity_mu(self):
plt = self.plotter.plot_conductivity_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1965.1306,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_conductivity_temp(self):
plt = self.plotter.plot_conductivity_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.3801957596666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_dos(self):
plt = self.plotter.plot_dos()
self.assertEqual(len(plt.gca().get_lines()), 3, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.4197044934588674,
"wrong 0 data in line 0",
)
self.assertEqual(plt.gca().get_lines()[0].get_data()[1][0], 0.0, "wrong 1 data in line 0")
plt.close()
def test_plot_eff_mass_dop(self):
plt = self.plotter.plot_eff_mass_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1.4231240011719886,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_eff_mass_temp(self):
plt = self.plotter.plot_eff_mass_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1.4231240011719886,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_hall_carriers(self):
plt = self.plotter.plot_hall_carriers()
self.assertEqual(len(plt.gca().get_lines()), 7, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
9.538187273102463e17,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_power_factor_dop(self):
plt = self.plotter.plot_power_factor_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.40606868935796925,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_power_factor_mu(self):
plt = self.plotter.plot_power_factor_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
365.5514594136157,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_power_factor_temp(self):
plt = self.plotter.plot_power_factor_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.40606868935796925,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_dop(self):
plt = self.plotter.plot_seebeck_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1050.8197666666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_eff_mass_mu(self):
plt = self.plotter.plot_seebeck_eff_mass_mu()
self.assertEqual(len(plt.gca().get_lines()), 2, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
6412.881888198197,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_mu(self):
plt = self.plotter.plot_seebeck_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
-433.11096000000003,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_seebeck_temp(self):
plt = self.plotter.plot_seebeck_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
1050.8197666666667,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_zt_dop(self):
plt = self.plotter.plot_zt_dop()
self.assertEqual(len(plt.gca().get_lines()), 8, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
1000000000000000.0,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
4.060682863129955e-05,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_zt_mu(self):
plt = self.plotter.plot_zt_mu()
self.assertEqual(len(plt.gca().get_lines()), 9, "wrong number of lines")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[0][0],
-2.0702422655947665,
"wrong 0 data in line 0",
)
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
0.2153839699235254,
"wrong 1 data in line 0",
)
plt.close()
def test_plot_zt_temp(self):
plt = self.plotter.plot_zt_temp()
self.assertEqual(len(plt.gca().get_lines()), 6, "wrong number of lines")
self.assertEqual(plt.gca().get_lines()[0].get_data()[0][0], 100, "wrong 0 data in line 0")
self.assertEqual(
plt.gca().get_lines()[0].get_data()[1][0],
4.060682863129955e-05,
"wrong 1 data in line 0",
)
plt.close()
class CohpPlotterTest(PymatgenTest):
def setUp(self):
path = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "complete_cohp_lobster.json")
        with open(path) as f:
self.cohp = CompleteCohp.from_dict(json.load(f))
path = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp", "complete_coop_lobster.json")
        with open(path) as f:
self.coop = CompleteCohp.from_dict(json.load(f))
self.cohp_plot = CohpPlotter(zero_at_efermi=False)
self.coop_plot = CohpPlotter(are_coops=True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_attributes(self):
self.assertFalse(self.cohp_plot.are_coops)
self.assertTrue(self.coop_plot.are_coops)
self.assertFalse(self.cohp_plot.zero_at_efermi)
self.assertTrue(self.coop_plot.zero_at_efermi)
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
cohp_energies = self.cohp_plot._cohps["1"]["energies"]
self.assertEqual(len(cohp_energies), 301)
self.assertAlmostEqual(cohp_energies[0], -0.27768)
self.assertAlmostEqual(cohp_energies[-1], 14.77248)
self.coop_plot.add_cohp_dict(self.coop.all_cohps)
coop_energies = self.coop_plot._cohps["10"]["energies"]
self.assertEqual(len(coop_energies), 241)
self.assertAlmostEqual(coop_energies[0], -6.02510)
self.assertAlmostEqual(coop_energies[-1], 6.02510)
def test_add_cohp_dict(self):
# Sorts the populations by z-coordinates of the sites
def sortkeys(sites):
return sites[0].z, sites[1].z
sorted_keys = ["3", "4", "7", "8", "9", "10", "11", "6", "5", "2", "1"]
d_coop = self.coop_plot.get_cohp_dict()
self.assertEqual(len(d_coop), 0)
bonds = self.coop.bonds
self.coop_plot.add_cohp_dict(self.coop.all_cohps, key_sort_func=lambda x: sortkeys(bonds[x]["sites"]))
d_coop = self.coop_plot.get_cohp_dict()
self.assertEqual(len(d_coop), 11)
self.assertEqual(list(self.coop_plot._cohps.keys()), sorted_keys)
def test_get_cohp_dict(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
d_cohp = self.cohp_plot.get_cohp_dict()
for bond in ["1", "2"]:
self.assertIn(bond, d_cohp)
def test_get_plot(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
plt_cohp = self.cohp_plot.get_plot()
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "-COHP")
self.assertEqual(ax_cohp.get_ylabel(), "$E$ (eV)")
legend_labels = ax_cohp.get_legend_handles_labels()[1]
self.assertEqual(len(self.cohp_plot._cohps), len(legend_labels))
self.assertEqual(ax_cohp.lines[0].get_linestyle(), "-")
self.assertEqual(ax_cohp.lines[1].get_linestyle(), "--")
for label in legend_labels:
self.assertIn(label, self.cohp_plot._cohps)
linesindex = legend_labels.index("1")
linestyles = {Spin.up: "-", Spin.down: "--"}
cohp_fe_fe = self.cohp.all_cohps["1"]
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(), -cohp_fe_fe.cohp[spin])
self.assertArrayAlmostEqual(lines.get_ydata(), self.cohp.energies)
self.assertEqual(lines.get_linestyle(), linestyles[spin])
plt_cohp.close()
plt_cohp = self.cohp_plot.get_plot(invert_axes=False, plot_negative=False)
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "$E$ (eV)")
self.assertEqual(ax_cohp.get_ylabel(), "COHP")
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(), self.cohp.energies)
self.assertArrayAlmostEqual(lines.get_ydata(), cohp_fe_fe.cohp[spin])
plt_cohp.close()
plt_cohp = self.cohp_plot.get_plot(integrated=True)
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "-ICOHP (eV)")
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(), -cohp_fe_fe.icohp[spin])
coop_dict = {"Bi5-Bi6": self.coop.all_cohps["10"]}
self.coop_plot.add_cohp_dict(coop_dict)
plt_coop = self.coop_plot.get_plot()
ax_coop = plt_coop.gca()
self.assertEqual(ax_coop.get_xlabel(), "COOP")
self.assertEqual(ax_coop.get_ylabel(), "$E - E_f$ (eV)")
lines_coop = ax_coop.get_lines()[0]
self.assertArrayAlmostEqual(lines_coop.get_ydata(), self.coop.energies - self.coop.efermi)
coop_bi_bi = self.coop.all_cohps["10"].cohp[Spin.up]
self.assertArrayAlmostEqual(lines_coop.get_xdata(), coop_bi_bi)
# Cleanup.
plt_cohp.close()
plt_coop.close("all")
def test_save_plot(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
plt_cohp = self.cohp_plot.get_plot()
self.cohp_plot.save_plot("cohpplot.png")
self.assertTrue(os.path.isfile("cohpplot.png"))
os.remove("cohpplot.png")
plt_cohp.close("all")
if __name__ == "__main__":
unittest.main()
|
|
import re
from flask import jsonify
from flask_jwt_extended import JWTManager, jwt_required, create_access_token, get_jwt_identity
from flask_restplus import Resource, fields, reqparse
from .headers import app, api, databases
from .models import User, Bucket, Item
ns = api.namespace('api/v1', description='Bucketlist operations')
jwt = JWTManager(app)
databases.create_all()
parser = reqparse.RequestParser()
parser.add_argument('q', type=str, help='Search word is a string')
parser.add_argument('limit', type=int, help='Limit can only be a number')
parser.add_argument('page', type=int, help='page can only be a number')
parser.add_argument('username')
parser.add_argument('password')
add_item = api.model('Item', {
'name': fields.String(required=True, description='The Item name'),
})
item = api.model('Item', {
'id': fields.Integer(readOnly=True, description='The Item unique identifier'),
'name': fields.String(required=True, description='The Item name'),
'date_created': fields.DateTime(dt_format='rfc822'),
'date_modified': fields.DateTime(dt_format='rfc822'),
'done': fields.Boolean
})
add_bucket = api.model('Bucket', {
'name': fields.String(required=True, description='The bucket name'),
})
bucket = api.model('Bucket', {
'id': fields.Integer(readOnly=True, description='The bucket unique identifier'),
'name': fields.String(required=True, description='The bucket name'),
'items': fields.List(fields.Nested(item)),
'created_by': fields.Integer(readOnly=True, description='The bucket owner', attribute='user_id'),
'date_created': fields.DateTime(dt_format='rfc822'),
'date_modified': fields.DateTime(dt_format='rfc822'),
})
user = api.model('User', {
'id': fields.Integer(readOnly=True, description='The user unique identifier'),
'username': fields.String(required=True, description='The username'),
'buckets': fields.List(fields.Nested(bucket)),
'date_created': fields.DateTime(dt_format='rfc822'),
'date_modified': fields.DateTime(dt_format='rfc822'),
})
auth = api.model('User', {
'username': fields.String(required=True, description='The username'),
'password': fields.String(required=True, description='The Password')
})
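# For illustration only (values are hypothetical): the register and login
# endpoints below expect a JSON body matching the auth model above, e.g.
# {"username": "[email protected]", "password": "s3cret"}; the username must be
# an email address to pass the regex check in the handlers.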
@jwt.expired_token_loader
def expired_token_handler(e):
data = {
"code":e.status_code,
"message":e.error+" "+e.description
}
return jsonify(data), 400
@jwt.invalid_token_loader
def invalid_token_handler(e):
data = {
"code":e.status_code,
"message":e.error+" "+e.description
}
return jsonify(data), 400
@ns.route('/auth/register')
class Users(Resource):
"""Shows a list of users for the authenticated user, and lets you POST to add new users"""
@ns.doc('register_user')
@ns.expect(auth)
@ns.response(201, 'User Created')
@ns.response(400, 'Invalid Request')
@ns.response(406, 'Registration request not accepted')
def post(self):
"""Register a new user"""
if not api.payload:
return {"message": "Payload missing"}, 400 #this is a bad request
data = api.payload.keys()
if ('username' in data) and ('password' in data):
username = api.payload['username'].strip()
password = api.payload['password'].strip()
if username == "" or password == "":
return {"message": "Both username and password are required"}, 400 # Bad request
email = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
if re.match(email, username):
user_exists = User.where(username=username).first()
if user_exists:
return {"message": "Username already used. Use a different name to register"}, 406 # This is not acceptable
else:
usr = User(api.payload)
usr.store()
return {"message": "User created"}, 201 # Resource is created and saved
else:
return {"message": "Username must be a valid email address"}, 400 # Bad request
else:
return {"message": "Both username and password are required"}, 400 # Bad request
@ns.route('/auth/login')
class Auth(Resource):
"""Shows a list of users for the authenticated user, and lets you POST to add new users"""
@ns.doc('login_user')
@ns.response(200, 'User Logged in')
@ns.response(400, 'Invalid Request')
@ns.expect(auth)
def post(self):
"""Login existing user"""
if not api.payload:
return {"message": "Payload missing"}, 400 # Bad request
data = api.payload.keys()
if ('username' in data) and ('password' in data):
email = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
username = api.payload['username']
password = api.payload['password']
if re.match(email, username):
usr = User.login(username, password)
if usr:
access_token = create_access_token(identity=usr.id)
return {'access_token': access_token, 'login': True}, 200 # OK
return {"message": "User not found"}, 404 #Reource not found
else:
return {"message": "Username must be a valid email address"}, 400 # Bad request
else:
return {"message": "Both username and password are required"}, 400 # Bad request
@ns.route('/bucketlists')
@ns.header('Authorization', 'Access token', required=True)
class BucketList(Resource):
"""Shows a list of buckets for the authenticated user, and lets you POST to add new buckets"""
@ns.doc('list_buckets')
@ns.marshal_list_with(bucket)
@ns.response(200, 'OK')
@jwt_required
def get(self):
"""List bucketlists"""
args = parser.parse_args()
lmt = 20
qword = None
page = 1
if args['limit']:
lmt = int(args['limit'])
if lmt < 1:
lmt = 20
if lmt > 100:
lmt = 100
if args['q']:
qword = args['q']
if qword is None or qword == "":
qword = None
if args['page']:
page = int(args['page'])
if page < 1:
page = 1
user_id = get_jwt_identity()
buckets = Bucket.all(lmt=lmt, page=page, q=qword, uid=user_id)
return buckets, 200 # OK
@ns.doc('create_bucket')
@ns.expect(add_bucket)
@ns.response(201, 'Bucketlist Created')
@ns.response(400, 'Invalid Request')
@ns.response(406, 'Registration request not accepted')
    @ns.response(500, 'Internal error, failed to create a bucketlist')
@jwt_required
def post(self):
"""Create a new bucketlist"""
try:
if not api.payload:
return {"message": "Payload missing"}, 400 # Bad request
data = api.payload.keys()
if ('name' in data) and (not api.payload["name"].strip() == ""):
bucket_exists = Bucket.where(name=api.payload['name'].strip()).first()
if bucket_exists:
return {'message': 'Bucket name already exists. Choose a different name to create a bucket'}, 406
user_id = get_jwt_identity()
buck = Bucket(api.payload, uid=user_id)
if buck.store():
return {'message': 'Bucketlist created'}, 201
else:
return {'message': 'Bucketlist could not be created'}, 500 # Error on server
else:
return {'message': 'Bucketlist name is missing'}, 400 # Bad request
except:
            return {'message': 'An error has occurred, could not create a bucketlist'}, 500  # Error on server
@ns.route('/bucketlists/<int:id>')
@ns.header('Authorization', 'Access token', required=True)
@ns.param('id', 'The bucket identifier')
class Buckets(Resource):
"""Show a single bucket and lets you update or delete them"""
@ns.doc('get_bucket')
@ns.marshal_with(bucket)
@ns.response(200, 'OK')
@ns.response(400, 'Invalid Request')
@ns.response(404, 'Resource not found')
@jwt_required
def get(self, id):
"""Fetch a given bucket"""
user_id = get_jwt_identity()
buck = Bucket.find(id, user_id)
if buck:
return buck, 200
else:
return None, 404
    @ns.doc('delete_bucket')
@ns.response(200, 'Bucketlist deleted')
@ns.response(404, 'Resource not found')
@jwt_required
def delete(self, id):
"""Delete a bucketlist given its identifier"""
user_id = get_jwt_identity()
buck = Bucket.find(id, user_id)
if buck:
buck.delete()
return {'message': 'Bucketlist deleted'}, 200
else:
return {'message': 'Bucketlist not found'}, 404
@ns.expect(add_bucket)
@ns.response(200, 'OK')
@ns.response(400, 'Invalid Request')
@ns.response(404, 'Resource not found')
@ns.response(406, 'Update request not accepted')
@jwt_required
def put(self, id):
"""Update a bucketlist given its identifier"""
if not api.payload:
return {"message": "Payload missing"}, 400 # Bad request
user_id = get_jwt_identity()
buck = Bucket.find(id, user_id)
if buck:
data = api.payload.keys()
if 'name' in data:
name = api.payload["name"].strip()
# Check if there are changes
if buck.name == name:
return {"message": "Bucketlist name has not changed, update not allowed"}, 406 # Not allowed
if name != "":
if name != buck.name:
buck.put(api.payload)
return {"message": "Bucketlist updated"}, 200
return {'message': 'Bucketlist name is required'}, 400 # Bad request
else:
return {'message': 'Bucketlist not found in your collection'}, 404
@ns.route('/bucketlists/<int:id>/items')
@ns.param('id', 'The bucket identifier')
@ns.header('Authorization', 'Access token', required=True)
class ItemsList(Resource):
"""Shows a list of items in a given bucketlist, and lets you POST to add new items"""
@ns.doc('list_items')
@ns.marshal_list_with(item)
@ns.response(200, 'OK')
@ns.response(404, 'Resource not found')
@jwt_required
def get(self, id):
"""List all items in a bucketlist"""
user_id = get_jwt_identity()
bucket = Bucket.find(id, user_id)
if bucket:
return bucket.items, 200
else:
return {"message": "You do not own a bucketlistAPI with id {0}".format(id)}, 404
@ns.doc('create_items')
@ns.expect(add_item)
@ns.response(200, 'OK')
@ns.response(400, 'Invalid Request')
@ns.response(403, 'Access is forbidden')
@ns.response(406, 'Create request not accepted')
@jwt_required
def post(self, id):
"""Add a new item to a bucketlist"""
if not api.payload:
return {"message": "Payload missing"}, 400 # Bad request
data = api.payload.keys()
if 'name' in data:
user_id = get_jwt_identity()
buck = Bucket.find(id, user_id)
if buck:
api.payload.update({'bucket_id': id})
item_exists = Item.where(name=api.payload['name'].strip(), bucket_id=id).first()
if item_exists:
return {'message': 'Item name already used in this bucket. Choose a different name to add an item'}, 406
itm = Item(api.payload)
itm.store()
return {'message': 'Added item to bucket {0}'.format(id)}, 201
else:
return {'message': 'You do not own a bucket with id {0}'.format(id)}, 403 # Forbidden
else:
return {'message': 'Item name is required'}, 400 # Bad request
@ns.route('/bucketlists/<int:id>/items/<int:item_id>')
@ns.header('Authorization', 'Access token', required=True)
@ns.param('id', 'The Bucket identifier')
@ns.param('item_id', 'The Item identifier')
class Items(Resource):
"""Show a single bucketlist item and lets you update or delete them"""
@ns.doc('get_bucket_item')
@ns.marshal_with(item)
@ns.response(200, 'OK')
@ns.response(404, 'Resource not found')
@jwt_required
def get(self, id, item_id):
"""Fetch a given bucketlist item"""
user_id = get_jwt_identity()
buck = Bucket.find(id, user_id)
if buck:
itm = Item.where(id=item_id, bucket_id=id).first()
return itm, 200
else:
return {"message": "You do not own a bucketlistAPI with id {0}".format(id)}, 404
    @ns.doc('delete_bucket_item')
@ns.response(200, 'Successful Delete')
@ns.response(404, 'Resource not found')
@jwt_required
def delete(self, id, item_id):
"""Delete a bucketlist item given its identifier"""
user_id = get_jwt_identity()
buck = Bucket.find(id, user_id)
if buck:
itm = Item.where(id=item_id, bucket_id=id).first()
if itm:
itm.delete()
return {'message': 'Item deleted'}, 200
else:
return {'message': 'Item not found'}, 404
else:
return {"message": "You do not own a bucketlist with id {0}".format(id)}, 404
@ns.expect(add_item)
@ns.marshal_with(add_item)
@ns.response(200, 'OK')
@ns.response(400, 'Invalid Request')
@ns.response(404, 'Resource not found')
@ns.response(406, 'Update request not accepted')
@jwt_required
def put(self, id, item_id):
"""Update an Item given a bucketlist identifier and Item identifier"""
if not api.payload:
return {"message": "Payload missing"}, 400 # Bad request
data = api.payload.keys()
if 'name' in data:
user_id = get_jwt_identity()
buck = Bucket.find(id, user_id)
if not buck:
return {"message": "You do not own a bucketlistAPI with id {0}".format(id)}, 404
            itm = Item.where(id=item_id, bucket_id=id).first()
            if itm is None:
                return {'message': 'Item not found'}, 404
            # Check if there are changes
            if itm.name == api.payload['name']:
                return {'message': 'Item name has not changed, update not allowed'}, 406  # Not allowed
            itm.put(api.payload)
            return itm, 200
else:
return {'message': 'Item name is required'}, 400 # Bad request
|
|
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from neutron_lib import constants as n_consts
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import config
from neutron import context
import neutron.extensions
from neutron.extensions import metering
from neutron.plugins.common import constants
from neutron.services.metering import metering_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2
DB_METERING_PLUGIN_KLASS = (
"neutron.services.metering."
"metering_plugin.MeteringPlugin"
)
extensions_path = ':'.join(neutron.extensions.__path__)
_long_description_ok = 'x' * (attr.LONG_DESCRIPTION_MAX_LEN)
_long_description_ng = 'x' * (attr.LONG_DESCRIPTION_MAX_LEN + 1)
class MeteringPluginDbTestCaseMixin(object):
def _create_metering_label(self, fmt, name, description, **kwargs):
data = {'metering_label': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test-tenant'),
'shared': kwargs.get('shared', False),
'description': description}}
req = self.new_create_request('metering-labels', data,
fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id'],
is_admin=kwargs.get('is_admin', True)))
return req.get_response(self.ext_api)
def _make_metering_label(self, fmt, name, description, **kwargs):
res = self._create_metering_label(fmt, name, description, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _create_metering_label_rule(self, fmt, metering_label_id, direction,
remote_ip_prefix, excluded, **kwargs):
data = {'metering_label_rule':
{'metering_label_id': metering_label_id,
'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
'direction': direction,
'excluded': excluded,
'remote_ip_prefix': remote_ip_prefix}}
req = self.new_create_request('metering-label-rules',
data, fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return req.get_response(self.ext_api)
def _make_metering_label_rule(self, fmt, metering_label_id, direction,
remote_ip_prefix, excluded, **kwargs):
res = self._create_metering_label_rule(fmt, metering_label_id,
direction, remote_ip_prefix,
excluded, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def metering_label(self, name='label', description='desc',
fmt=None, **kwargs):
if not fmt:
fmt = self.fmt
metering_label = self._make_metering_label(fmt, name,
description, **kwargs)
yield metering_label
@contextlib.contextmanager
def metering_label_rule(self, metering_label_id=None, direction='ingress',
remote_ip_prefix='10.0.0.0/24',
excluded='false', fmt=None):
if not fmt:
fmt = self.fmt
metering_label_rule = self._make_metering_label_rule(fmt,
metering_label_id,
direction,
remote_ip_prefix,
excluded)
yield metering_label_rule
class MeteringPluginDbTestCase(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
MeteringPluginDbTestCaseMixin):
fmt = 'json'
resource_prefix_map = dict(
(k.replace('_', '-'), "/metering")
for k in metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
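    # For reference (keys shown for illustration): with the metering extension's
    # resource map this comes out as something like
    # {'metering-labels': '/metering', 'metering-label-rules': '/metering'}.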
def setUp(self, plugin=None):
service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
super(MeteringPluginDbTestCase, self).setUp(
plugin=plugin,
service_plugins=service_plugins
)
self.plugin = metering_plugin.MeteringPlugin()
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.METERING: self.plugin}
)
app = config.load_paste_app('extensions_test_app')
self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
class TestMetering(MeteringPluginDbTestCase):
def test_create_metering_label(self):
name = 'my label'
description = 'my metering label'
keys = [('name', name,), ('description', description)]
with self.metering_label(name, description) as metering_label:
for k, v, in keys:
self.assertEqual(metering_label['metering_label'][k], v)
def test_create_metering_label_shared(self):
name = 'my label'
description = 'my metering label'
shared = True
keys = [('name', name,), ('description', description),
('shared', shared)]
with self.metering_label(name, description,
shared=shared) as metering_label:
for k, v, in keys:
self.assertEqual(metering_label['metering_label'][k], v)
def test_create_metering_label_with_max_description_length(self):
res = self._create_metering_label(self.fmt, 'my label',
_long_description_ok)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_metering_label_with_too_long_description(self):
res = self._create_metering_label(self.fmt, 'my label',
_long_description_ng)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_update_metering_label(self):
name = 'my label'
description = 'my metering label'
data = {'metering_label': {}}
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
self._update('metering-labels', metering_label_id, data,
webob.exc.HTTPNotImplemented.code)
def test_delete_metering_label(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
def test_list_metering_label(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as v1,\
self.metering_label(name, description) as v2:
metering_label = (v1, v2)
self._test_list_resources('metering-label', metering_label)
def test_create_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
keys = [('metering_label_id', metering_label_id),
('direction', direction),
('excluded', excluded),
('remote_ip_prefix', remote_ip_prefix)]
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded) as label_rule:
for k, v, in keys:
self.assertEqual(label_rule['metering_label_rule'][k], v)
def test_update_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
data = {'metering_label_rule': {}}
with self.metering_label(name, description) as metering_label, \
self.metering_label_rule(
metering_label['metering_label']['id'],
direction, remote_ip_prefix) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._update('metering-label-rules', rule_id, data,
webob.exc.HTTPNotImplemented.code)
def test_delete_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._delete('metering-label-rules', rule_id, 204)
def test_list_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded) as v1,\
self.metering_label_rule(metering_label_id,
'ingress',
remote_ip_prefix,
excluded) as v2:
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
metering_label_rule)
def test_create_metering_label_rules(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded) as v1,\
self.metering_label_rule(metering_label_id,
direction,
n_consts.IPv4_ANY,
False) as v2:
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
metering_label_rule)
def test_create_overlap_metering_label_rules(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix1 = '192.168.0.0/24'
remote_ip_prefix2 = '192.168.0.0/16'
excluded = True
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix1,
excluded):
res = self._create_metering_label_rule(self.fmt,
metering_label_id,
direction,
remote_ip_prefix2,
excluded)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_metering_label_rule_two_labels(self):
name1 = 'my label 1'
name2 = 'my label 2'
description = 'my metering label'
with self.metering_label(name1, description) as metering_label1:
metering_label_id1 = metering_label1['metering_label']['id']
with self.metering_label(name2, description) as metering_label2:
metering_label_id2 = metering_label2['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with self.metering_label_rule(metering_label_id1,
direction,
remote_ip_prefix,
excluded) as v1,\
self.metering_label_rule(metering_label_id2,
direction,
remote_ip_prefix,
excluded) as v2:
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
metering_label_rule)
|
|
# Copyright 2016, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
import analyze
def test_analyze_entities():
result = analyze.analyze_entities(
"Tom Sawyer is a book written by a guy known as Mark Twain."
)
assert result["language"] == "en"
entities = result["entities"]
assert len(entities)
subject = entities[0]
assert subject["type"] == "PERSON"
assert subject["name"].startswith("Tom")
def test_analyze_sentiment(capsys):
result = analyze.analyze_sentiment("your face is really ugly and i hate it.")
sentiment = result["documentSentiment"]
assert sentiment["score"] < 0
assert sentiment["magnitude"] < 1
result = analyze.analyze_sentiment(
"cheerio, mate - I greatly admire the pallor of your visage, and your angle of repose leaves little room for improvement."
)
sentiment = result["documentSentiment"]
assert sentiment["score"] > 0
assert sentiment["magnitude"] < 1
def test_analyze_syntax(capsys):
result = analyze.analyze_syntax(
textwrap.dedent(
u"""\
Keep away from people who try to belittle your ambitions. Small people
always do that, but the really great make you feel that you, too, can
become great.
- Mark Twain"""
)
)
assert len(result["tokens"])
first_token = result["tokens"][0]
assert first_token["text"]["content"] == "Keep"
assert first_token["partOfSpeech"]["tag"] == "VERB"
assert len(result["sentences"]) > 1
assert result["language"] == "en"
def test_analyze_syntax_utf8():
"""Demonstrate the interpretation of the offsets when encoding=utf8.
UTF8 is a variable-length encoding, where each character is at least 8
bits. The offsets we get should be the index of the first byte of the
character.
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
byte_array = test_string.encode("utf8")
result = analyze.analyze_syntax(test_string, encoding="UTF8")
tokens = result["tokens"]
assert tokens[0]["text"]["content"] == "a"
offset = tokens[0]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 1].decode("utf8") == tokens[0]["text"]["content"]
)
assert tokens[1]["text"]["content"] == u"\u00e3"
offset = tokens[1]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 2].decode("utf8") == tokens[1]["text"]["content"]
)
assert tokens[2]["text"]["content"] == u"\u0201"
offset = tokens[2]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 2].decode("utf8") == tokens[2]["text"]["content"]
)
assert tokens[3]["text"]["content"] == u"\U0001f636"
offset = tokens[3]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 4].decode("utf8") == tokens[3]["text"]["content"]
)
# This demonstrates that the offset takes into account the variable-length
# characters before the target token.
assert tokens[4]["text"]["content"] == u"b"
offset = tokens[4]["text"].get("beginOffset", 0)
# 'b' is only one byte long
assert (
byte_array[offset : offset + 1].decode("utf8") == tokens[4]["text"]["content"]
)
def test_analyze_syntax_utf16():
"""Demonstrate the interpretation of the offsets when encoding=utf16.
UTF16 is a variable-length encoding, where each character is at least 16
bits. The returned offsets will be the index of the first 2-byte character
of the token.
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
byte_array = test_string.encode("utf16")
# Remove the byte order marker, which the offsets don't account for
byte_array = byte_array[2:]
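    # Worked out by hand for reference: in UTF-16, 'a', u'\u00e3' and u'\u0201'
    # occupy one code unit each while u'\U0001f636' is a surrogate pair (two
    # code units), so the expected beginOffsets are 0, 2, 4, 6 and 9; they are
    # measured in 16-bit units, which is why they are doubled below to index
    # the 8-bit byte array.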
result = analyze.analyze_syntax(test_string, encoding="UTF16")
tokens = result["tokens"]
assert tokens[0]["text"]["content"] == "a"
# The offset is an offset into an array where each entry is 16 bits. Since
# we have an 8-bit array, the offsets should be doubled to index into our
# array.
offset = 2 * tokens[0]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 2].decode("utf16") == tokens[0]["text"]["content"]
)
assert tokens[1]["text"]["content"] == u"\u00e3"
offset = 2 * tokens[1]["text"].get("beginOffset", 0)
# A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so
# slice out 2 bytes starting from the offset. Then interpret the bytes as
# utf16 for comparison.
assert (
byte_array[offset : offset + 2].decode("utf16") == tokens[1]["text"]["content"]
)
assert tokens[2]["text"]["content"] == u"\u0201"
offset = 2 * tokens[2]["text"].get("beginOffset", 0)
# A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so
# slice out 2 bytes starting from the offset. Then interpret the bytes as
# utf16 for comparison.
assert (
byte_array[offset : offset + 2].decode("utf16") == tokens[2]["text"]["content"]
)
assert tokens[3]["text"]["content"] == u"\U0001f636"
offset = 2 * tokens[3]["text"].get("beginOffset", 0)
# A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret those bytes as
# utf16 for comparison.
assert (
byte_array[offset : offset + 4].decode("utf16") == tokens[3]["text"]["content"]
)
# This demonstrates that the offset takes into account the variable-length
# characters before the target token.
assert tokens[4]["text"]["content"] == u"b"
offset = 2 * tokens[4]["text"].get("beginOffset", 0)
# Even though 'b' is only one byte long, utf16 still encodes it using 16
# bits
assert (
byte_array[offset : offset + 2].decode("utf16") == tokens[4]["text"]["content"]
)
def test_annotate_text_utf32():
"""Demonstrate the interpretation of the offsets when encoding=utf32.
UTF32 is a fixed-length encoding, where each character is exactly 32 bits.
The returned offsets will be the index of the first 4-byte character
of the token.
Python unicode objects index by the interpreted unicode character. This
means a given unicode character only ever takes up one slot in a unicode
string. This is equivalent to indexing into a UTF32 string, where all
characters are a fixed length and thus will only ever take up one slot.
Thus, if you're indexing into a python unicode object, you can set
encoding to UTF32 to index directly into the unicode object (as opposed to
the byte arrays, as these examples do).
Nonetheless, this test still demonstrates indexing into the byte array, for
consistency. Note that you could just index into the origin test_string
unicode object with the raw offset returned by the api (ie without
multiplying it by 4, as it is below).
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
byte_array = test_string.encode("utf32")
# Remove the byte order marker, which the offsets don't account for
byte_array = byte_array[4:]
result = analyze.analyze_syntax(test_string, encoding="UTF32")
tokens = result["tokens"]
assert tokens[0]["text"]["content"] == "a"
# The offset is an offset into an array where each entry is 32 bits. Since
# we have an 8-bit array, the offsets should be quadrupled to index into
# our array.
offset = 4 * tokens[0]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 4].decode("utf32") == tokens[0]["text"]["content"]
)
assert tokens[1]["text"]["content"] == u"\u00e3"
offset = 4 * tokens[1]["text"].get("beginOffset", 0)
# A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret the bytes as
# utf32 for comparison.
assert (
byte_array[offset : offset + 4].decode("utf32") == tokens[1]["text"]["content"]
)
assert tokens[2]["text"]["content"] == u"\u0201"
offset = 4 * tokens[2]["text"].get("beginOffset", 0)
# A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret the bytes as
# utf32 for comparison.
assert (
byte_array[offset : offset + 4].decode("utf32") == tokens[2]["text"]["content"]
)
assert tokens[3]["text"]["content"] == u"\U0001f636"
offset = 4 * tokens[3]["text"].get("beginOffset", 0)
# A UTF32 character with a high codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret those bytes as
# utf32 for comparison.
assert (
byte_array[offset : offset + 4].decode("utf32") == tokens[3]["text"]["content"]
)
# This demonstrates that the offset takes into account the variable-length
# characters before the target token.
assert tokens[4]["text"]["content"] == u"b"
offset = 4 * tokens[4]["text"].get("beginOffset", 0)
# Even though 'b' is only one byte long, utf32 still encodes it using 32
# bits
assert (
byte_array[offset : offset + 4].decode("utf32") == tokens[4]["text"]["content"]
)
def test_annotate_text_utf32_directly_index_into_unicode():
"""Demonstrate using offsets directly, using encoding=utf32.
See the explanation for test_annotate_text_utf32. Essentially, indexing
into a utf32 array is equivalent to indexing into a python unicode object.
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
result = analyze.analyze_syntax(test_string, encoding="UTF32")
tokens = result["tokens"]
assert tokens[0]["text"]["content"] == "a"
offset = tokens[0]["text"].get("beginOffset", 0)
assert test_string[offset] == tokens[0]["text"]["content"]
assert tokens[1]["text"]["content"] == u"\u00e3"
offset = tokens[1]["text"].get("beginOffset", 0)
assert test_string[offset] == tokens[1]["text"]["content"]
assert tokens[2]["text"]["content"] == u"\u0201"
offset = tokens[2]["text"].get("beginOffset", 0)
assert test_string[offset] == tokens[2]["text"]["content"]
# Temporarily disabled
# assert tokens[3]['text']['content'] == u'\U0001f636'
# offset = tokens[3]['text'].get('beginOffset', 0)
# assert test_string[offset] == tokens[3]['text']['content']
# assert tokens[4]['text']['content'] == u'b'
# offset = tokens[4]['text'].get('beginOffset', 0)
# assert test_string[offset] == tokens[4]['text']['content']
|
|
'''Stubs for patching HTTP and HTTPS requests'''
try:
import http.client
except ImportError:
pass
import logging
import six
from six.moves.http_client import (
HTTPConnection,
HTTPSConnection,
HTTPMessage,
HTTPResponse,
)
from six import BytesIO
from vcr.request import Request
from vcr.errors import CannotOverwriteExistingCassetteException
from . import compat
log = logging.getLogger(__name__)
class VCRFakeSocket(object):
"""
A socket that doesn't do anything!
    Used when playing back cassettes, when there
is no actual open socket.
"""
def close(self):
pass
def settimeout(self, *args, **kwargs):
pass
def fileno(self):
"""
This is kinda crappy. requests will watch
this descriptor and make sure it's not closed.
Return file descriptor 0 since that's stdin.
"""
return 0 # wonder how bad this is....
def parse_headers(header_list):
"""
Convert headers from our serialized dict with lists for keys to a
HTTPMessage
"""
header_string = b""
for key, values in header_list.items():
for v in values:
header_string += \
key.encode('utf-8') + b":" + v.encode('utf-8') + b"\r\n"
return compat.get_httpmessage(header_string)
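# Illustrative example (hypothetical header values): given
# {'Content-Type': ['text/html'], 'Set-Cookie': ['a=1', 'b=2']}, the loop above
# builds something like
# b"Content-Type:text/html\r\nSet-Cookie:a=1\r\nSet-Cookie:b=2\r\n" and
# compat.get_httpmessage() wraps it in an HTTPMessage.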
def serialize_headers(response):
out = {}
for key, values in compat.get_headers(response.msg):
out.setdefault(key, [])
out[key].extend(values)
return out
class VCRHTTPResponse(HTTPResponse):
"""
    Stub response class that gets returned instead of an HTTPResponse
"""
def __init__(self, recorded_response):
self.recorded_response = recorded_response
self.reason = recorded_response['status']['message']
self.status = self.code = recorded_response['status']['code']
self.version = None
self._content = BytesIO(self.recorded_response['body']['string'])
self._closed = False
headers = self.recorded_response['headers']
# Since we are loading a response that has already been serialized, our
# response is no longer chunked. That means we don't want any
# libraries trying to process a chunked response. By removing the
# transfer-encoding: chunked header, this should cause the downstream
# libraries to process this as a non-chunked response.
te_key = [h for h in headers.keys() if h.upper() == 'TRANSFER-ENCODING']
if te_key:
del headers[te_key[0]]
self.headers = self.msg = parse_headers(headers)
self.length = compat.get_header(self.msg, 'content-length') or None
@property
def closed(self):
        # in python3, I can't change the value of self.closed. So I'm
        # twiddling self._closed and using this property to shadow the real
        # self.closed from the superclass
return self._closed
def read(self, *args, **kwargs):
return self._content.read(*args, **kwargs)
def readline(self, *args, **kwargs):
return self._content.readline(*args, **kwargs)
def close(self):
self._closed = True
return True
def getcode(self):
return self.status
def isclosed(self):
return self.closed
def info(self):
return parse_headers(self.recorded_response['headers'])
def getheaders(self):
message = parse_headers(self.recorded_response['headers'])
return list(compat.get_header_items(message))
def getheader(self, header, default=None):
values = [v for (k, v) in self.getheaders() if k.lower() == header.lower()]
if values:
return ', '.join(values)
else:
return default
class VCRConnection(object):
# A reference to the cassette that's currently being patched in
cassette = None
def _port_postfix(self):
"""
Returns empty string for the default port and ':port' otherwise
"""
port = self.real_connection.port
default_port = {'https': 443, 'http': 80}[self._protocol]
return ':{0}'.format(port) if port != default_port else ''
def _uri(self, url):
"""Returns request absolute URI"""
uri = "{0}://{1}{2}{3}".format(
self._protocol,
self.real_connection.host,
self._port_postfix(),
url,
)
return uri
def _url(self, uri):
"""Returns request selector url from absolute URI"""
prefix = "{0}://{1}{2}".format(
self._protocol,
self.real_connection.host,
self._port_postfix(),
)
return uri.replace(prefix, '', 1)
def request(self, method, url, body=None, headers=None):
'''Persist the request metadata in self._vcr_request'''
self._vcr_request = Request(
method=method,
uri=self._uri(url),
body=body,
headers=headers or {}
)
log.debug('Got {0}'.format(self._vcr_request))
# Note: The request may not actually be finished at this point, so
# I'm not sending the actual request until getresponse(). This
# allows me to compare the entire length of the response to see if it
# exists in the cassette.
def putrequest(self, method, url, *args, **kwargs):
"""
httplib gives you more than one way to do it. This is a way
to start building up a request. Usually followed by a bunch
of putheader() calls.
"""
self._vcr_request = Request(
method=method,
uri=self._uri(url),
body="",
headers={}
)
log.debug('Got {0}'.format(self._vcr_request))
def putheader(self, header, *values):
for value in values:
self._vcr_request.add_header(header, value)
def send(self, data):
'''
This method is called after request(), to add additional data to the
body of the request. So if that happens, let's just append the data
onto the most recent request in the cassette.
'''
self._vcr_request.body = (self._vcr_request.body or '') + data
def close(self):
# Note: the real connection will only close if it's open, so
# no need to check that here.
self.real_connection.close()
def endheaders(self, message_body=None):
"""
Normally, this would actually send the request to the server.
We are not sending the request until getting the response,
so bypass this part and just append the message body, if any.
"""
if message_body is not None:
self._vcr_request.body = message_body
def getresponse(self, _=False, **kwargs):
'''Retrieve the response'''
# Check to see if the cassette has a response for this request. If so,
# then return it
if self.cassette.can_play_response_for(self._vcr_request):
log.info(
"Playing response for {0} from cassette".format(
self._vcr_request
)
)
response = self.cassette.play_response(self._vcr_request)
return VCRHTTPResponse(response)
else:
if self.cassette.write_protected and self.cassette.filter_request(
self._vcr_request
):
raise CannotOverwriteExistingCassetteException(
"No match for the request (%r) was found. "
"Can't overwrite existing cassette (%r) in "
"your current record mode (%r)."
% (self._vcr_request, self.cassette._path,
self.cassette.record_mode)
)
# Otherwise, we should send the request, then get the response
# and return it.
log.info(
"{0} not in cassette, sending to real server".format(
self._vcr_request
)
)
# This is imported here to avoid circular import.
# TODO(@IvanMalison): Refactor to allow normal import.
from vcr.patch import force_reset
with force_reset():
self.real_connection.request(
method=self._vcr_request.method,
url=self._url(self._vcr_request.uri),
body=self._vcr_request.body,
headers=self._vcr_request.headers,
)
# get the response
response = self.real_connection.getresponse()
# put the response into the cassette
response = {
'status': {
'code': response.status,
'message': response.reason
},
'headers': serialize_headers(response),
'body': {'string': response.read()},
}
self.cassette.append(self._vcr_request, response)
return VCRHTTPResponse(response)
def set_debuglevel(self, *args, **kwargs):
self.real_connection.set_debuglevel(*args, **kwargs)
def connect(self, *args, **kwargs):
"""
httplib2 uses this. Connects to the server I'm assuming.
Only pass to the baseclass if we don't have a recorded response
and are not write-protected.
"""
if hasattr(self, '_vcr_request') and \
self.cassette.can_play_response_for(self._vcr_request):
# We already have a response we are going to play, don't
# actually connect
return
if self.cassette.write_protected:
# Cassette is write-protected, don't actually connect
return
return self.real_connection.connect(*args, **kwargs)
@property
def sock(self):
if self.real_connection.sock:
return self.real_connection.sock
return VCRFakeSocket()
@sock.setter
def sock(self, value):
if self.real_connection.sock:
self.real_connection.sock = value
def __init__(self, *args, **kwargs):
if six.PY3:
kwargs.pop('strict', None) # apparently this is gone in py3
# need to temporarily reset here because the real connection
# inherits from the thing that we are mocking out. Take out
# the reset if you want to see what I mean :)
from vcr.patch import force_reset
with force_reset():
self.real_connection = self._baseclass(*args, **kwargs)
class VCRHTTPConnection(VCRConnection):
'''A Mocked class for HTTP requests'''
_baseclass = HTTPConnection
_protocol = 'http'
class VCRHTTPSConnection(VCRConnection):
'''A Mocked class for HTTPS requests'''
_baseclass = HTTPSConnection
_protocol = 'https'
is_verified = True
|
|
# the module for the qt color_mixer plugin
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import (QWidget, QStackedWidget, QSlider, QGridLayout, QLabel)
from util import ColorMixer
class IntelligentSlider(QWidget):
    ''' A slider that adds a 'name' attribute and calls a callback
    with 'name' as an argument to the registered callback.
    This allows you to create large groups of sliders in a loop,
    but still keep track of the individual events.
    It also prints a label below the slider.
    The range of the slider is hardcoded from 0 to 1000,
    but it supports a conversion factor so you can scale the results.'''
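    # Worked example (coefficients taken from the RGB sliders defined further
    # below): with a=0.51 and b=-255 the 0..1000 slider range maps to roughly
    # -255..+255, since 0 * 0.51 - 255 = -255 and 1000 * 0.51 - 255 = 255.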
def __init__(self, name, a, b, callback):
QWidget.__init__(self)
self.name = name
self.callback = callback
self.a = a
self.b = b
self.manually_triggered = False
self.slider = QSlider()
self.slider.setRange(0, 1000)
self.slider.setValue(500)
self.slider.valueChanged.connect(self.slider_changed)
self.name_label = QLabel()
self.name_label.setText(self.name)
self.name_label.setAlignment(QtCore.Qt.AlignCenter)
self.value_label = QLabel()
self.value_label.setText('%2.2f' % (self.slider.value() * self.a
+ self.b))
self.value_label.setAlignment(QtCore.Qt.AlignCenter)
self.layout = QGridLayout(self)
self.layout.addWidget(self.name_label, 0, 0)
self.layout.addWidget(self.slider, 1, 0, QtCore.Qt.AlignHCenter)
self.layout.addWidget(self.value_label, 2, 0)
# bind this to the valueChanged signal of the slider
def slider_changed(self, val):
val = self.val()
self.value_label.setText(str(val)[:4])
if not self.manually_triggered:
self.callback(self.name, val)
def set_conv_fac(self, a, b):
self.a = a
self.b = b
def set_value(self, val):
self.manually_triggered = True
self.slider.setValue(int((val - self.b) / self.a))
self.value_label.setText('%2.2f' % val)
self.manually_triggered = False
def val(self):
return self.slider.value() * self.a + self.b
class MixerPanel(QtGui.QFrame):
'''A color mixer to hook up to an image.
    You pass the image you want the panel to operate on
and it operates on that image in place. You also
pass a callback to be called to trigger a refresh.
This callback is called every time the mixer modifies
your image.'''
def __init__(self, img):
QtGui.QFrame.__init__(self)
#self.setFrameStyle(QtGui.QFrame.Box|QtGui.QFrame.Sunken)
self.img = img
self.mixer = ColorMixer(self.img)
self.callback = None
#---------------------------------------------------------------
# ComboBox
#---------------------------------------------------------------
self.combo_box_entries = ['RGB Color', 'HSV Color',
'Brightness/Contrast',
'Gamma',
'Gamma (Sigmoidal)']
self.combo_box = QtGui.QComboBox()
for entry in self.combo_box_entries:
self.combo_box.addItem(entry)
self.combo_box.currentIndexChanged.connect(self.combo_box_changed)
#---------------------------------------------------------------
# RGB color sliders
#---------------------------------------------------------------
# radio buttons
self.rgb_add = QtGui.QRadioButton('Additive')
self.rgb_mul = QtGui.QRadioButton('Multiplicative')
self.rgb_mul.toggled.connect(self.rgb_radio_changed)
self.rgb_add.toggled.connect(self.rgb_radio_changed)
# sliders
rs = IntelligentSlider('R', 0.51, -255, self.rgb_changed)
gs = IntelligentSlider('G', 0.51, -255, self.rgb_changed)
bs = IntelligentSlider('B', 0.51, -255, self.rgb_changed)
self.rs = rs
self.gs = gs
self.bs = bs
self.rgb_widget = QWidget()
self.rgb_widget.layout = QGridLayout(self.rgb_widget)
self.rgb_widget.layout.addWidget(self.rgb_add, 0, 0, 1, 3)
self.rgb_widget.layout.addWidget(self.rgb_mul, 1, 0, 1, 3)
self.rgb_widget.layout.addWidget(self.rs, 2, 0)
self.rgb_widget.layout.addWidget(self.gs, 2, 1)
self.rgb_widget.layout.addWidget(self.bs, 2, 2)
#---------------------------------------------------------------
# HSV sliders
#---------------------------------------------------------------
# radio buttons
self.hsv_add = QtGui.QRadioButton('Additive')
self.hsv_mul = QtGui.QRadioButton('Multiplicative')
self.hsv_mul.toggled.connect(self.hsv_radio_changed)
        self.hsv_add.toggled.connect(self.hsv_radio_changed)
# sliders
hs = IntelligentSlider('H', 0.36, -180, self.hsv_changed)
ss = IntelligentSlider('S', 0.002, 0, self.hsv_changed)
vs = IntelligentSlider('V', 0.002, 0, self.hsv_changed)
self.hs = hs
self.ss = ss
self.vs = vs
self.hsv_widget = QWidget()
self.hsv_widget.layout = QGridLayout(self.hsv_widget)
self.hsv_widget.layout.addWidget(self.hsv_add, 0, 0, 1, 3)
self.hsv_widget.layout.addWidget(self.hsv_mul, 1, 0, 1, 3)
self.hsv_widget.layout.addWidget(self.hs, 2, 0)
self.hsv_widget.layout.addWidget(self.ss, 2, 1)
self.hsv_widget.layout.addWidget(self.vs, 2, 2)
#---------------------------------------------------------------
# Brightness/Contrast sliders
#---------------------------------------------------------------
# sliders
cont = IntelligentSlider('x', 0.002, 0, self.bright_changed)
bright = IntelligentSlider('+', 0.51, -255, self.bright_changed)
self.cont = cont
self.bright = bright
# layout
self.bright_widget = QWidget()
self.bright_widget.layout = QtGui.QGridLayout(self.bright_widget)
self.bright_widget.layout.addWidget(self.cont, 0, 0)
self.bright_widget.layout.addWidget(self.bright, 0, 1)
#----------------------------------------------------------------------
# Gamma Slider
#----------------------------------------------------------------------
gamma = IntelligentSlider('gamma', 0.005, 0, self.gamma_changed)
self.gamma = gamma
# layout
self.gamma_widget = QWidget()
self.gamma_widget.layout = QtGui.QGridLayout(self.gamma_widget)
self.gamma_widget.layout.addWidget(self.gamma, 0, 0)
#---------------------------------------------------------------
# Sigmoid Gamma sliders
#---------------------------------------------------------------
# sliders
alpha = IntelligentSlider('alpha', 0.011, 1, self.sig_gamma_changed)
beta = IntelligentSlider('beta', 0.012, 0, self.sig_gamma_changed)
self.a_gamma = alpha
self.b_gamma = beta
# layout
self.sig_gamma_widget = QWidget()
self.sig_gamma_widget.layout = QtGui.QGridLayout(self.sig_gamma_widget)
self.sig_gamma_widget.layout.addWidget(self.a_gamma, 0, 0)
self.sig_gamma_widget.layout.addWidget(self.b_gamma, 0, 1)
#---------------------------------------------------------------
# Buttons
#---------------------------------------------------------------
self.commit_button = QtGui.QPushButton('Commit')
self.commit_button.clicked.connect(self.commit_changes)
self.revert_button = QtGui.QPushButton('Revert')
self.revert_button.clicked.connect(self.revert_changes)
#---------------------------------------------------------------
# Mixer Layout
#---------------------------------------------------------------
self.sliders = QStackedWidget()
self.sliders.addWidget(self.rgb_widget)
self.sliders.addWidget(self.hsv_widget)
self.sliders.addWidget(self.bright_widget)
self.sliders.addWidget(self.gamma_widget)
self.sliders.addWidget(self.sig_gamma_widget)
self.layout = QtGui.QGridLayout(self)
self.layout.addWidget(self.combo_box, 0, 0)
self.layout.addWidget(self.sliders, 1, 0)
self.layout.addWidget(self.commit_button, 2, 0)
self.layout.addWidget(self.revert_button, 3, 0)
#---------------------------------------------------------------
# State Initialization
#---------------------------------------------------------------
self.combo_box.setCurrentIndex(0)
self.rgb_mul.setChecked(True)
self.hsv_mul.setChecked(True)
def set_callback(self, callback):
self.callback = callback
def combo_box_changed(self, index):
self.sliders.setCurrentIndex(index)
self.reset()
def rgb_radio_changed(self):
self.reset()
def hsv_radio_changed(self):
self.reset()
def reset(self):
self.reset_sliders()
self.mixer.set_to_stateimg()
if self.callback:
self.callback()
def reset_sliders(self):
# handle changing the conversion factors necessary
if self.rgb_add.isChecked():
self.rs.set_conv_fac(0.51, -255)
self.rs.set_value(0)
self.gs.set_conv_fac(0.51, -255)
self.gs.set_value(0)
self.bs.set_conv_fac(0.51, -255)
self.bs.set_value(0)
else:
self.rs.set_conv_fac(0.002, 0)
self.rs.set_value(1.)
self.gs.set_conv_fac(0.002, 0)
self.gs.set_value(1.)
self.bs.set_conv_fac(0.002, 0)
self.bs.set_value(1.)
self.hs.set_value(0)
if self.hsv_add.isChecked():
self.ss.set_conv_fac(0.002, -1)
self.ss.set_value(0)
self.vs.set_conv_fac(0.002, -1)
self.vs.set_value(0)
else:
self.ss.set_conv_fac(0.002, 0)
self.ss.set_value(1.)
self.vs.set_conv_fac(0.002, 0)
self.vs.set_value(1.)
self.bright.set_value(0)
self.cont.set_value(1.)
self.gamma.set_value(1)
self.a_gamma.set_value(1)
self.b_gamma.set_value(0.5)
def rgb_changed(self, name, val):
if name == 'R':
channel = self.mixer.RED
elif name == 'G':
channel = self.mixer.GREEN
else:
channel = self.mixer.BLUE
if self.rgb_mul.isChecked():
self.mixer.multiply(channel, val)
elif self.rgb_add.isChecked():
self.mixer.add(channel, val)
else:
pass
if self.callback:
self.callback()
def hsv_changed(self, name, val):
h = self.hs.val()
s = self.ss.val()
v = self.vs.val()
if self.hsv_mul.isChecked():
self.mixer.hsv_multiply(h, s, v)
elif self.hsv_add.isChecked():
self.mixer.hsv_add(h, s, v)
else:
pass
if self.callback:
self.callback()
def bright_changed(self, name, val):
b = self.bright.val()
c = self.cont.val()
self.mixer.brightness(c, b)
if self.callback:
self.callback()
def gamma_changed(self, name, val):
self.mixer.gamma(val)
if self.callback:
self.callback()
def sig_gamma_changed(self, name, val):
ag = self.a_gamma.val()
bg = self.b_gamma.val()
self.mixer.sigmoid_gamma(ag, bg)
if self.callback:
self.callback()
def commit_changes(self):
self.mixer.commit_changes()
self.reset_sliders()
def revert_changes(self):
self.mixer.revert()
self.reset_sliders()
if self.callback:
self.callback()
|
|
import base64
import functools
import os
from django.conf import settings
from django.contrib.auth import login, logout
from django.core import signing
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.utils.encoding import force_bytes
from django.utils.html import format_html
from django.utils.http import is_safe_url
from django.utils.translation import ugettext, ugettext_lazy as _
import waffle
from rest_framework import serializers
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import detail_route
from rest_framework.mixins import (
DestroyModelMixin, ListModelMixin, RetrieveModelMixin, UpdateModelMixin)
from rest_framework.permissions import (
AllowAny, BasePermission, IsAuthenticated)
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from waffle.decorators import waffle_switch
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.access.models import GroupUser
from olympia.amo import messages
from olympia.amo.decorators import write
from olympia.amo.utils import fetch_subscribed_newsletters
from olympia.api.authentication import (
JWTKeyAuthentication, WebTokenAuthentication)
from olympia.api.permissions import AnyOf, ByHttpMethod, GroupPermission
from olympia.users import tasks
from olympia.users.models import UserNotification, UserProfile
from olympia.users.notifications import (
NOTIFICATIONS_COMBINED, REMOTE_NOTIFICATIONS_BY_BASKET_ID)
from . import verify
from .serializers import (
AccountSuperCreateSerializer, PublicUserProfileSerializer,
UserNotificationSerializer, UserProfileSerializer)
from .utils import fxa_login_url, generate_fxa_state
log = olympia.core.logger.getLogger('accounts')
ERROR_AUTHENTICATED = 'authenticated'
ERROR_NO_CODE = 'no-code'
ERROR_NO_PROFILE = 'no-profile'
ERROR_NO_USER = 'no-user'
ERROR_STATE_MISMATCH = 'state-mismatch'
ERROR_STATUSES = {
ERROR_AUTHENTICATED: 400,
ERROR_NO_CODE: 422,
ERROR_NO_PROFILE: 401,
ERROR_STATE_MISMATCH: 400,
}
LOGIN_ERROR_MESSAGES = {
ERROR_AUTHENTICATED: _(u'You are already logged in.'),
ERROR_NO_CODE:
_(u'Your login attempt could not be parsed. Please try again.'),
ERROR_NO_PROFILE:
_(u'Your Firefox Account could not be found. Please try again.'),
ERROR_STATE_MISMATCH: _(u'You could not be logged in. Please try again.'),
}
# Name of the cookie that contains the auth token for the API. It used to be
# "api_auth_token" but we had to change it because it wasn't set on the right
# domain, and we couldn't clear both the old and new versions at the same time,
# since sending multiple Set-Cookie headers with the same name is not allowed
# by the spec, even if they have a distinct domain attribute.
API_TOKEN_COOKIE = 'frontend_auth_token'
def safe_redirect(url, action):
if not is_safe_url(url):
url = reverse('home')
log.info(u'Redirecting after {} to: {}'.format(action, url))
return HttpResponseRedirect(url)
def find_user(identity):
"""Try to find a user for a Firefox Accounts profile. If the account
hasn't been migrated we'll need to do the lookup by email but we should
use the ID after that so check both. If we get multiple users we're in
some weird state where the accounts need to be merged but that behaviour
hasn't been defined so let it raise.
"""
try:
return UserProfile.objects.get(
Q(fxa_id=identity['uid']) | Q(email=identity['email']))
except UserProfile.DoesNotExist:
return None
except UserProfile.MultipleObjectsReturned:
# This shouldn't happen, so let it raise.
log.error(
'Found multiple users for {email} and {uid}'.format(**identity))
raise
def register_user(request, identity):
user = UserProfile.objects.create_user(
email=identity['email'], username=None, fxa_id=identity['uid'])
log.info('Created user {} from FxA'.format(user))
login(request, user)
return user
def update_user(user, identity):
"""Update a user's info from FxA if needed, as well as generating the id
that is used as part of the session/api token generation."""
if (user.fxa_id != identity['uid'] or
user.email != identity['email']):
log.info(
'Updating user info from FxA for {pk}. Old {old_email} {old_uid} '
'New {new_email} {new_uid}'.format(
pk=user.pk, old_email=user.email, old_uid=user.fxa_id,
new_email=identity['email'], new_uid=identity['uid']))
user.update(fxa_id=identity['uid'], email=identity['email'])
if user.auth_id is None:
# If the user didn't have an auth id (old user account created before
# we added the field), generate one for them.
user.update(auth_id=UserProfile._meta.get_field('auth_id').default())
def login_user(request, user, identity):
update_user(user, identity)
log.info('Logging in user {} from FxA'.format(user))
user.log_login_attempt(True)
login(request, user)
def fxa_error_message(message):
login_help_url = (
'https://support.mozilla.org/kb/access-your-add-ons-firefox-accounts')
return format_html(
u'{error} <a href="{url}">{help_text}</a>',
url=login_help_url, help_text=_(u'Need help?'),
error=message)
def render_error(request, error, next_path=None, format=None):
if format == 'json':
status = ERROR_STATUSES.get(error, 422)
response = Response({'error': error}, status=status)
else:
if not is_safe_url(next_path):
next_path = None
messages.error(
request, fxa_error_message(LOGIN_ERROR_MESSAGES[error]),
extra_tags='fxa')
if next_path is None:
response = HttpResponseRedirect(reverse('users.login'))
else:
response = HttpResponseRedirect(next_path)
return response
def parse_next_path(state_parts):
next_path = None
if len(state_parts) == 2:
        # The '=' padding will have been stripped off, so we need to add it
        # back; b64decode only complains when there is too little padding,
        # so just append 4 of them.
encoded_path = state_parts[1] + '===='
try:
next_path = base64.urlsafe_b64decode(
force_bytes(encoded_path)).decode('utf-8')
except (TypeError, ValueError):
log.info('Error decoding next_path {}'.format(
encoded_path))
pass
if not is_safe_url(next_path):
next_path = None
return next_path
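# Illustrative state value (hypothetical): for 'mystatetoken:L2VuLVVTL2FkZG9ucy8'
# the second part is the urlsafe-base64 form of '/en-US/addons/' with its '='
# padding already stripped, which parse_next_path restores before decoding.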
def with_user(format, config=None):
def outer(fn):
@functools.wraps(fn)
@write
def inner(self, request):
if config is None:
if hasattr(self, 'get_fxa_config'):
fxa_config = self.get_fxa_config(request)
else:
fxa_config = (
settings.FXA_CONFIG[settings.DEFAULT_FXA_CONFIG_NAME])
else:
fxa_config = config
if request.method == 'GET':
data = request.query_params
else:
data = request.data
state_parts = data.get('state', '').split(':', 1)
state = state_parts[0]
next_path = parse_next_path(state_parts)
if not data.get('code'):
log.info('No code provided.')
return render_error(
request, ERROR_NO_CODE, next_path=next_path, format=format)
elif (not request.session.get('fxa_state') or
request.session['fxa_state'] != state):
log.info(
'State mismatch. URL: {url} Session: {session}'.format(
url=data.get('state'),
session=request.session.get('fxa_state'),
))
return render_error(
request, ERROR_STATE_MISMATCH, next_path=next_path,
format=format)
elif request.user.is_authenticated():
response = render_error(
request, ERROR_AUTHENTICATED, next_path=next_path,
format=format)
# If the api token cookie is missing but we're still
# authenticated using the session, add it back.
if API_TOKEN_COOKIE not in request.COOKIES:
log.info('User %s was already authenticated but did not '
'have an API token cookie, adding one.',
request.user.pk)
response = add_api_token_to_response(
response, request.user)
return response
try:
identity = verify.fxa_identify(data['code'], config=fxa_config)
except verify.IdentificationError:
log.info('Profile not found. Code: {}'.format(data['code']))
return render_error(
request, ERROR_NO_PROFILE, next_path=next_path,
format=format)
else:
return fn(
self, request, user=find_user(identity), identity=identity,
next_path=next_path)
return inner
return outer
def generate_api_token(user):
"""Generate a new API token for a given user."""
data = {
'auth_hash': user.get_session_auth_hash(),
'user_id': user.pk,
}
return signing.dumps(data, salt=WebTokenAuthentication.salt)
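    # Illustrative sketch (assumption, not shown in this module): a token
    # produced here can later be unpacked with the same salt, e.g.
    #
    #   data = signing.loads(token, salt=WebTokenAuthentication.salt)
    #   data['user_id']    # == user.pk
    #   data['auth_hash']  # == user.get_session_auth_hash() at signing time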
def add_api_token_to_response(response, user):
"""Generate API token and add it to the response (both as a `token` key in
the response if it was json and by setting a cookie named API_TOKEN_COOKIE.
"""
token = generate_api_token(user)
if hasattr(response, 'data'):
response.data['token'] = token
# Also include the API token in a session cookie, so that it is
# available for universal frontend apps.
response.set_cookie(
API_TOKEN_COOKIE,
token,
domain=settings.SESSION_COOKIE_DOMAIN,
max_age=settings.SESSION_COOKIE_AGE,
secure=settings.SESSION_COOKIE_SECURE,
httponly=settings.SESSION_COOKIE_HTTPONLY)
return response
class FxAConfigMixin(object):
def get_config_name(self, request):
return request.GET.get('config', self.DEFAULT_FXA_CONFIG_NAME)
def get_allowed_configs(self):
return getattr(
self, 'ALLOWED_FXA_CONFIGS', [self.DEFAULT_FXA_CONFIG_NAME])
def get_fxa_config(self, request):
config_name = self.get_config_name(request)
if config_name in self.get_allowed_configs():
return settings.FXA_CONFIG[config_name]
log.info('Using default FxA config instead of {}'.format(config_name))
return settings.FXA_CONFIG[self.DEFAULT_FXA_CONFIG_NAME]
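        # Illustrative example of the lookup above (assumed settings shape):
        # with FXA_CONFIG = {'default': {...}, 'internal': {...}},
        # DEFAULT_FXA_CONFIG_NAME = 'default' and
        # ALLOWED_FXA_CONFIGS = ['default', 'internal'], a request with
        # ?config=internal gets FXA_CONFIG['internal'], while an unknown
        # ?config=nope is logged and falls back to FXA_CONFIG['default'].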
class LoginStartBaseView(FxAConfigMixin, APIView):
def get(self, request):
request.session.setdefault('fxa_state', generate_fxa_state())
return HttpResponseRedirect(
fxa_login_url(
config=self.get_fxa_config(request),
state=request.session['fxa_state'],
next_path=request.GET.get('to'),
action=request.GET.get('action', 'signin')))
class LoginStartView(LoginStartBaseView):
DEFAULT_FXA_CONFIG_NAME = settings.DEFAULT_FXA_CONFIG_NAME
ALLOWED_FXA_CONFIGS = settings.ALLOWED_FXA_CONFIGS
class AuthenticateView(FxAConfigMixin, APIView):
DEFAULT_FXA_CONFIG_NAME = settings.DEFAULT_FXA_CONFIG_NAME
ALLOWED_FXA_CONFIGS = settings.ALLOWED_FXA_CONFIGS
authentication_classes = (SessionAuthentication,)
@with_user(format='html')
def get(self, request, user, identity, next_path):
if user is None:
user = register_user(request, identity)
fxa_config = self.get_fxa_config(request)
if fxa_config.get('skip_register_redirect'):
response = safe_redirect(next_path, 'register')
else:
response = safe_redirect(reverse('users.edit'), 'register')
else:
login_user(request, user, identity)
response = safe_redirect(next_path, 'login')
add_api_token_to_response(response, user)
return response
def logout_user(request, response):
logout(request)
response.delete_cookie(
API_TOKEN_COOKIE, domain=settings.SESSION_COOKIE_DOMAIN)
class SessionView(APIView):
permission_classes = [IsAuthenticated]
def delete(self, request, *args, **kwargs):
response = Response({'ok': True})
logout_user(request, response)
return response
class AllowSelf(BasePermission):
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return request.user.is_authenticated() and obj == request.user
class AccountViewSet(RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin,
GenericViewSet):
permission_classes = [
ByHttpMethod({
'get': AllowAny,
'head': AllowAny,
'options': AllowAny, # Needed for CORS.
            # To edit a profile it has to be yours, or you have to be an admin.
'patch': AnyOf(AllowSelf, GroupPermission(
amo.permissions.USERS_EDIT)),
'delete': AnyOf(AllowSelf, GroupPermission(
amo.permissions.USERS_EDIT)),
}),
]
def get_queryset(self):
return UserProfile.objects.all()
def get_object(self):
if hasattr(self, 'instance'):
return self.instance
identifier = self.kwargs.get('pk')
self.lookup_field = self.get_lookup_field(identifier)
self.kwargs[self.lookup_field] = identifier
self.instance = super(AccountViewSet, self).get_object()
# action won't exist for other classes that are using this ViewSet.
can_view_instance = (
not getattr(self, 'action', None) or
self.self_view or
self.admin_viewing or
self.instance.is_public)
if can_view_instance:
return self.instance
else:
raise Http404
def get_lookup_field(self, identifier):
lookup_field = 'pk'
if identifier and not identifier.isdigit():
# If the identifier contains anything other than a digit, it's
# the username.
lookup_field = 'username'
return lookup_field
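        # For example (illustrative): '1234' -> 'pk', 'some-user' -> 'username'.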
@property
def self_view(self):
return (
self.request.user.is_authenticated() and
self.get_object() == self.request.user)
@property
def admin_viewing(self):
return acl.action_allowed_user(
self.request.user, amo.permissions.USERS_EDIT)
def get_serializer_class(self):
if self.self_view or self.admin_viewing:
return UserProfileSerializer
else:
return PublicUserProfileSerializer
def perform_destroy(self, instance):
if instance.is_developer:
raise serializers.ValidationError(ugettext(
u'Developers of add-ons or themes cannot delete their '
u'account. You must delete all add-ons and themes linked to '
u'this account, or transfer them to other users.'))
return super(AccountViewSet, self).perform_destroy(instance)
@detail_route(
methods=['delete'], permission_classes=[
AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT))])
def picture(self, request, pk=None):
user = self.get_object()
user.update(picture_type=None)
log.debug(u'User (%s) deleted photo' % user)
tasks.delete_photo.delay(user.picture_path)
return self.retrieve(request)
class ProfileView(APIView):
authentication_classes = [JWTKeyAuthentication, WebTokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request):
account_viewset = AccountViewSet(
request=request,
permission_classes=self.permission_classes,
kwargs={'pk': unicode(self.request.user.pk)})
account_viewset.format_kwarg = self.format_kwarg
return account_viewset.retrieve(request)
class AccountSuperCreate(APIView):
authentication_classes = [JWTKeyAuthentication]
permission_classes = [
IsAuthenticated,
GroupPermission(amo.permissions.ACCOUNTS_SUPER_CREATE)]
@waffle_switch('super-create-accounts')
def post(self, request):
serializer = AccountSuperCreateSerializer(data=request.data)
if not serializer.is_valid():
return Response({'errors': serializer.errors},
status=422)
data = serializer.data
group = serializer.validated_data.get('group', None)
user_token = os.urandom(4).encode('hex')
username = data.get('username', 'super-created-{}'.format(user_token))
fxa_id = data.get('fxa_id', None)
email = data.get('email', '{}@addons.mozilla.org'.format(username))
user = UserProfile.objects.create(
username=username,
email=email,
fxa_id=fxa_id,
display_name='Super Created {}'.format(user_token),
notes='auto-generated from API')
user.save()
if group:
GroupUser.objects.create(user=user, group=group)
login(request, user)
request.session.save()
log.info(u'API user {api_user} created and logged in a user from '
u'the super-create API: user_id: {user.pk}; '
u'user_name: {user.username}; fxa_id: {user.fxa_id}; '
u'group: {group}'
.format(user=user, api_user=request.user, group=group))
cookie = {
'name': settings.SESSION_COOKIE_NAME,
'value': request.session.session_key,
}
cookie['encoded'] = '{name}={value}'.format(**cookie)
return Response({
'user_id': user.pk,
'username': user.username,
'email': user.email,
'display_name': user.display_name,
'groups': list((g.pk, g.name, g.rules) for g in user.groups.all()),
'fxa_id': user.fxa_id,
'session_cookie': cookie,
}, status=201)
class AccountNotificationViewSet(ListModelMixin, GenericViewSet):
"""Returns account notifications.
If not already set by the user, defaults will be returned.
"""
permission_classes = [IsAuthenticated]
# We're pushing the primary permission checking to AccountViewSet for ease.
account_permission_classes = [
AnyOf(AllowSelf, GroupPermission(amo.permissions.USERS_EDIT))]
serializer_class = UserNotificationSerializer
paginator = None
def get_account_viewset(self):
if not hasattr(self, 'account_viewset'):
self.account_viewset = AccountViewSet(
request=self.request,
permission_classes=self.account_permission_classes,
kwargs={'pk': self.kwargs['user_pk']})
return self.account_viewset
def _get_default_object(self, notification):
return UserNotification(
user=self.get_account_viewset().get_object(),
notification_id=notification.id,
enabled=notification.default_checked)
def get_queryset(self):
user = self.get_account_viewset().get_object()
queryset = UserNotification.objects.filter(user=user)
        # Fetch all `UserNotification` instances and then, if the
        # waffle-switch is active, overwrite their values with the data from
        # basket. Once the integration has been switched "on" in production,
        # all `UserNotification` instances that are now handled by basket can
        # be deleted.
        # Put them into a dict so we can easily check for existence.
set_notifications = {
user_nfn.notification.short: user_nfn for user_nfn in queryset}
out = []
for notification in NOTIFICATIONS_COMBINED:
out.append(set_notifications.get(
notification.short, # It's been set by the user.
self._get_default_object(notification))) # Otherwise, default.
if waffle.switch_is_active('activate-basket-sync'):
newsletters = fetch_subscribed_newsletters(user)
by_basket_id = REMOTE_NOTIFICATIONS_BY_BASKET_ID
for basket_id, notification in by_basket_id.items():
notification = self._get_default_object(notification)
notification.enabled = notification.id in newsletters
out.append(notification)
return out
def create(self, request, *args, **kwargs):
# Loop through possible notifications.
queryset = self.get_queryset()
for notification in queryset:
# Careful with ifs. Enabled will be None|True|False.
enabled = request.data.get(notification.notification.short)
if enabled is not None:
serializer = self.get_serializer(
notification, partial=True, data={'enabled': enabled})
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(self.get_serializer(queryset, many=True).data)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import itertools
import numpy as np
import pytest
from astropy import units as u
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import WCS
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS
from numpy.testing import assert_allclose
from ..high_level import reproject_interp
# TODO: add reference comparisons
DATA = os.path.join(os.path.dirname(__file__), '..', '..', 'tests', 'data')
def as_high_level_wcs(wcs):
return HighLevelWCSWrapper(SlicedLowLevelWCS(wcs, Ellipsis))
def array_footprint_to_hdulist(array, footprint, header):
hdulist = fits.HDUList()
hdulist.append(fits.PrimaryHDU(array, header))
hdulist.append(fits.ImageHDU(footprint, header, name='footprint'))
return hdulist
@pytest.mark.array_compare(single_reference=True)
@pytest.mark.parametrize('wcsapi', (False, True))
def test_reproject_celestial_2d_gal2equ(wcsapi):
"""
Test reprojection of a 2D celestial image, which includes a coordinate
system conversion.
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE1'] = 'RA---TAN'
header_out['CTYPE2'] = 'DEC--TAN'
header_out['CRVAL1'] = 266.39311
header_out['CRVAL2'] = -28.939779
if wcsapi: # Enforce a pure wcsapi API
wcs_in, data_in = as_high_level_wcs(WCS(hdu_in.header)), hdu_in.data
wcs_out = as_high_level_wcs(WCS(header_out))
shape_out = header_out['NAXIS2'], header_out['NAXIS1']
array_out, footprint_out = reproject_interp((data_in, wcs_in),
wcs_out, shape_out=shape_out)
else:
array_out, footprint_out = reproject_interp(hdu_in, header_out)
return array_footprint_to_hdulist(array_out, footprint_out, header_out)
# Note that we can't use independent_celestial_slices=True and reorder the
# axes, which is why we need to prepare the combinations in this way.
AXIS_ORDER = list(itertools.permutations((0, 1, 2)))
COMBINATIONS = []
for wcsapi in (False, True):
for axis_order in AXIS_ORDER:
COMBINATIONS.append((wcsapi, axis_order))
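# Equivalently (itertools is already imported above), the same combinations
# could be built in one line:
#   COMBINATIONS = list(itertools.product((False, True), AXIS_ORDER))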
@pytest.mark.array_compare(single_reference=True)
@pytest.mark.parametrize(('wcsapi', 'axis_order'), tuple(COMBINATIONS))
def test_reproject_celestial_3d_equ2gal(wcsapi, axis_order):
"""
Test reprojection of a 3D cube with celestial components, which includes a
coordinate system conversion (the original header is in equatorial
coordinates). We test using both the 'fast' method which assumes celestial
slices are independent, and the 'full' method. We also scramble the input
dimensions of the data and header to make sure that the reprojection can
deal with this.
"""
# Read in the input cube
hdu_in = fits.open(os.path.join(DATA, 'equatorial_3d.fits'))[0]
# Define the output header - this should be the same for all versions of
# this test to make sure we can use a single reference file.
header_out = hdu_in.header.copy()
header_out['NAXIS1'] = 10
header_out['NAXIS2'] = 9
header_out['CTYPE1'] = 'GLON-SIN'
header_out['CTYPE2'] = 'GLAT-SIN'
header_out['CRVAL1'] = 163.16724
header_out['CRVAL2'] = -15.777405
header_out['CRPIX1'] = 6
header_out['CRPIX2'] = 5
# We now scramble the input axes
if axis_order != (0, 1, 2):
wcs_in = WCS(hdu_in.header)
wcs_in = wcs_in.sub((3 - np.array(axis_order)[::-1]).tolist())
hdu_in.header = wcs_in.to_header()
hdu_in.data = np.transpose(hdu_in.data, axis_order)
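        # Note (illustrative): wcs.sub() takes 1-based FITS axis numbers,
        # which run in the opposite order to the numpy array axes, hence the
        # (3 - np.array(axis_order)[::-1]) mapping above.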
if wcsapi: # Enforce a pure wcsapi API
wcs_in, data_in = as_high_level_wcs(WCS(hdu_in.header)), hdu_in.data
wcs_out = as_high_level_wcs(WCS(header_out))
shape_out = header_out['NAXIS3'], header_out['NAXIS2'], header_out['NAXIS1']
array_out, footprint_out = reproject_interp((data_in, wcs_in),
wcs_out, shape_out=shape_out)
else:
array_out, footprint_out = reproject_interp(hdu_in, header_out)
return array_footprint_to_hdulist(array_out, footprint_out, header_out)
@pytest.mark.array_compare(single_reference=True)
@pytest.mark.parametrize('wcsapi', (False, True))
def test_small_cutout(wcsapi):
"""
Test reprojection of a cutout from a larger image (makes sure that the
pre-reprojection cropping works)
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['NAXIS1'] = 10
header_out['NAXIS2'] = 9
header_out['CTYPE1'] = 'RA---TAN'
header_out['CTYPE2'] = 'DEC--TAN'
header_out['CRVAL1'] = 266.39311
header_out['CRVAL2'] = -28.939779
header_out['CRPIX1'] = 5.1
header_out['CRPIX2'] = 4.7
if wcsapi: # Enforce a pure wcsapi API
wcs_in, data_in = as_high_level_wcs(WCS(hdu_in.header)), hdu_in.data
wcs_out = as_high_level_wcs(WCS(header_out))
shape_out = header_out['NAXIS2'], header_out['NAXIS1']
array_out, footprint_out = reproject_interp((data_in, wcs_in),
wcs_out, shape_out=shape_out)
else:
array_out, footprint_out = reproject_interp(hdu_in, header_out)
return array_footprint_to_hdulist(array_out, footprint_out, header_out)
def test_mwpan_car_to_mol():
"""
Test reprojection of the Mellinger Milky Way Panorama from CAR to MOL,
which was returning all NaNs due to a regression that was introduced in
reproject 0.3 (https://github.com/astrofrog/reproject/pull/124).
"""
hdu_in = fits.Header.fromtextfile(os.path.join(DATA, 'mwpan2_RGB_3600.hdr'))
wcs_in = WCS(hdu_in, naxis=2)
data_in = np.ones((hdu_in['NAXIS2'], hdu_in['NAXIS1']), dtype=np.float)
header_out = fits.Header()
header_out['NAXIS'] = 2
header_out['NAXIS1'] = 360
header_out['NAXIS2'] = 180
header_out['CRPIX1'] = 180
header_out['CRPIX2'] = 90
header_out['CRVAL1'] = 0
header_out['CRVAL2'] = 0
header_out['CDELT1'] = -2 * np.sqrt(2) / np.pi
header_out['CDELT2'] = 2 * np.sqrt(2) / np.pi
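    # (A pixel scale of 2*sqrt(2)/pi deg makes the 360x180 grid span the full
    # Mollweide ellipse: its native x extent is 4*sqrt(2)*180/pi degrees,
    # i.e. exactly 360 * 2*sqrt(2)/pi.)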
header_out['CTYPE1'] = 'GLON-MOL'
header_out['CTYPE2'] = 'GLAT-MOL'
header_out['RADESYS'] = 'ICRS'
array_out, footprint_out = reproject_interp((data_in, wcs_in), header_out)
assert np.isfinite(array_out).any()
def test_small_cutout_outside():
"""
Test reprojection of a cutout from a larger image - in this case the
cutout is completely outside the region of the input image so we should
take a shortcut that returns arrays of NaNs.
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['NAXIS1'] = 10
header_out['NAXIS2'] = 9
header_out['CTYPE1'] = 'RA---TAN'
header_out['CTYPE2'] = 'DEC--TAN'
header_out['CRVAL1'] = 216.39311
header_out['CRVAL2'] = -21.939779
header_out['CRPIX1'] = 5.1
header_out['CRPIX2'] = 4.7
array_out, footprint_out = reproject_interp(hdu_in, header_out)
assert np.all(np.isnan(array_out))
assert np.all(footprint_out == 0)
def test_celestial_mismatch_2d():
"""
Make sure an error is raised if the input image has celestial WCS
information and the output does not (and vice-versa). This example will
use the _reproject_celestial route.
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE1'] = 'APPLES'
header_out['CTYPE2'] = 'ORANGES'
data = hdu_in.data
wcs1 = WCS(hdu_in.header)
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(2, 2))
assert exc.value.args[0] == "Input WCS has celestial components but output WCS does not"
def test_celestial_mismatch_3d():
"""
Make sure an error is raised if the input image has celestial WCS
information and the output does not (and vice-versa). This example will
use the _reproject_full route.
"""
hdu_in = fits.open(os.path.join(DATA, 'equatorial_3d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE1'] = 'APPLES'
header_out['CTYPE2'] = 'ORANGES'
header_out['CTYPE3'] = 'BANANAS'
data = hdu_in.data
wcs1 = WCS(hdu_in.header)
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Input WCS has celestial components but output WCS does not"
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs2), wcs1, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Output WCS has celestial components but input WCS does not"
def test_spectral_mismatch_3d():
"""
Make sure an error is raised if there are mismatches between the presence
or type of spectral axis.
"""
hdu_in = fits.open(os.path.join(DATA, 'equatorial_3d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE3'] = 'FREQ'
header_out['CUNIT3'] = 'Hz'
data = hdu_in.data
wcs1 = WCS(hdu_in.header)
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(1, 2, 3))
assert exc.value.args[0] == "The input (VOPT) and output (FREQ) spectral coordinate types are not equivalent."
header_out['CTYPE3'] = 'BANANAS'
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Input WCS has a spectral component but output WCS does not"
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs2), wcs1, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Output WCS has a spectral component but input WCS does not"
def test_naxis_mismatch():
"""
Make sure an error is raised if the input and output WCS have a different
number of dimensions.
"""
data = np.ones((3, 2, 2))
wcs_in = WCS(naxis=3)
wcs_out = WCS(naxis=2)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs_in), wcs_out, shape_out=(1, 2))
assert exc.value.args[0] == "Number of dimensions between input and output WCS should match"
def test_slice_reprojection():
"""
Test case where only the slices change and the celestial projection doesn't
"""
inp_cube = np.arange(3, dtype='float').repeat(4 * 5).reshape(3, 4, 5)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_in['NAXIS1'] = 5
header_in['NAXIS2'] = 4
header_in['NAXIS3'] = 3
header_out = header_in.copy()
header_out['NAXIS3'] = 2
header_out['CRPIX3'] -= 0.5
wcs_in = WCS(header_in)
wcs_out = WCS(header_out)
out_cube, out_cube_valid = reproject_interp((inp_cube, wcs_in), wcs_out, shape_out=(2, 4, 5))
# we expect to be projecting from
# inp_cube = np.arange(3, dtype='float').repeat(4*5).reshape(3,4,5)
# to
# inp_cube_interp = (inp_cube[:-1]+inp_cube[1:])/2.
# which is confirmed by
# map_coordinates(inp_cube.astype('float'), new_coords, order=1, cval=np.nan, mode='constant')
# np.testing.assert_allclose(inp_cube_interp, map_coordinates(inp_cube.astype('float'), new_coords, order=1, cval=np.nan, mode='constant'))
assert out_cube.shape == (2, 4, 5)
assert out_cube_valid.sum() == 40.
# We only check that the *valid* pixels are equal
# but it's still nice to check that the "valid" array works as a mask
np.testing.assert_allclose(out_cube[out_cube_valid.astype('bool')],
((inp_cube[:-1] + inp_cube[1:]) / 2.)[out_cube_valid.astype('bool')])
    # The underlying issue has since been fixed, so we can now compare the
    # full cube.
np.testing.assert_allclose(out_cube, ((inp_cube[:-1] + inp_cube[1:]) / 2.))
def test_4d_fails():
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_in['NAXIS'] = 4
header_out = header_in.copy()
w_in = WCS(header_in)
w_out = WCS(header_out)
array_in = np.zeros((2, 3, 4, 5))
with pytest.raises(ValueError) as ex:
x_out, y_out, z_out = reproject_interp((array_in, w_in), w_out, shape_out=[2, 4, 5, 6])
assert str(ex.value) == "Length of shape_out should match number of dimensions in wcs_out"
def test_inequal_wcs_dims():
inp_cube = np.arange(3, dtype='float').repeat(4 * 5).reshape(3, 4, 5)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_out = header_in.copy()
header_out['CTYPE3'] = 'VRAD'
header_out['CUNIT3'] = 'm/s'
header_in['CTYPE3'] = 'STOKES'
header_in['CUNIT3'] = ''
wcs_out = WCS(header_out)
with pytest.raises(ValueError) as ex:
out_cube, out_cube_valid = reproject_interp((inp_cube, header_in), wcs_out, shape_out=(2, 4, 5))
assert str(ex.value) == "Output WCS has a spectral component but input WCS does not"
def test_different_wcs_types():
inp_cube = np.arange(3, dtype='float').repeat(4 * 5).reshape(3, 4, 5)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_out = header_in.copy()
header_out['CTYPE3'] = 'VRAD'
header_out['CUNIT3'] = 'm/s'
header_in['CTYPE3'] = 'VELO'
header_in['CUNIT3'] = 'm/s'
wcs_in = WCS(header_in)
wcs_out = WCS(header_out)
with pytest.raises(ValueError) as ex:
out_cube, out_cube_valid = reproject_interp((inp_cube, header_in), wcs_out, shape_out=(2, 4, 5))
assert str(ex.value) == ("The input (VELO) and output (VRAD) spectral "
"coordinate types are not equivalent.")
# TODO: add a test to check the units are the same.
def test_reproject_3d_celestial_correctness_ra2gal():
inp_cube = np.arange(3, dtype='float').repeat(7 * 8).reshape(3, 7, 8)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_in['NAXIS1'] = 8
header_in['NAXIS2'] = 7
header_in['NAXIS3'] = 3
header_out = header_in.copy()
header_out['CTYPE1'] = 'GLON-TAN'
header_out['CTYPE2'] = 'GLAT-TAN'
header_out['CRVAL1'] = 158.5644791
header_out['CRVAL2'] = -21.59589875
# make the cube a cutout approximately in the center of the other one, but smaller
header_out['NAXIS1'] = 4
header_out['CRPIX1'] = 2
header_out['NAXIS2'] = 3
header_out['CRPIX2'] = 1.5
header_out['NAXIS3'] = 2
header_out['CRPIX3'] -= 0.5
wcs_in = WCS(header_in)
wcs_out = WCS(header_out)
out_cube, out_cube_valid = reproject_interp((inp_cube, wcs_in), wcs_out, shape_out=(2, 3, 4))
assert out_cube.shape == (2, 3, 4)
assert out_cube_valid.sum() == out_cube.size
# only compare the spectral axis
np.testing.assert_allclose(out_cube[:, 0, 0], ((inp_cube[:-1] + inp_cube[1:]) / 2.)[:, 0, 0])
def test_reproject_with_output_array():
"""
Test both full_reproject and slicewise reprojection. We use a case where the
non-celestial slices are the same and therefore where both algorithms can
work.
"""
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
array_in = np.ones((3, 200, 180))
shape_out = (3, 160, 170)
out_full = np.empty(shape_out)
wcs_in = WCS(header_in)
wcs_out = wcs_in.deepcopy()
wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', wcs_in.wcs.ctype[2]]
wcs_out.wcs.crval = [158.0501, -21.530282, wcs_in.wcs.crval[2]]
wcs_out.wcs.crpix = [50., 50., wcs_in.wcs.crpix[2] + 0.4]
# TODO when someone learns how to do it: make sure the memory isn't duplicated...
returned_array = reproject_interp((array_in, wcs_in), wcs_out,
output_array=out_full, return_footprint=False)
assert out_full is returned_array
@pytest.mark.array_compare(single_reference=True)
@pytest.mark.parametrize('file_format', ['fits', 'asdf'])
def test_reproject_roundtrip(file_format):
# Test the reprojection with solar data, which ensures that the masking of
# pixels based on round-tripping works correctly. Using asdf is not just
# about testing a different format but making sure that GWCS works.
pytest.importorskip('sunpy')
from sunpy.map import Map
from sunpy.coordinates.ephemeris import get_body_heliographic_stonyhurst
if file_format == 'fits':
map_aia = Map(os.path.join(DATA, 'aia_171_level1.fits'))
data = map_aia.data
wcs = map_aia.wcs
date = map_aia.date
target_wcs = wcs.deepcopy()
elif file_format == 'asdf':
asdf = pytest.importorskip('asdf')
aia = asdf.open(os.path.join(DATA, 'aia_171_level1.asdf'))
data = aia['data'][...]
wcs = aia['wcs']
date = wcs.output_frame.reference_frame.obstime
target_wcs = Map(os.path.join(DATA, 'aia_171_level1.fits')).wcs.deepcopy()
else:
raise ValueError('file_format should be fits or asdf')
# Reproject to an observer on Venus
target_wcs.wcs.cdelt = ([24, 24]*u.arcsec).to(u.deg)
target_wcs.wcs.crpix = [64, 64]
venus = get_body_heliographic_stonyhurst('venus', date)
target_wcs.heliographic_observer = venus
output, footprint = reproject_interp((data, wcs), target_wcs, (128, 128))
return array_footprint_to_hdulist(output, footprint, target_wcs.to_header())
def test_identity_with_offset():
# Reproject an array and WCS to itself but with a margin, which should
# end up empty. This is a regression test for a bug that caused some
# values to extend beyond the original footprint.
wcs = WCS(naxis=2)
wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN'
wcs.wcs.crpix = 322, 151
wcs.wcs.crval = 43, 23
wcs.wcs.cdelt = -0.1, 0.1
wcs.wcs.equinox = 2000.
array_in = np.random.random((233, 123))
wcs_out = wcs.deepcopy()
wcs_out.wcs.crpix += 1
shape_out = (array_in.shape[0] + 2, array_in.shape[1] + 2)
array_out, footprint = reproject_interp((array_in, wcs), wcs_out, shape_out=shape_out)
expected = np.pad(array_in, 1, 'constant', constant_values=np.nan)
assert_allclose(expected, array_out, atol=1e-10)
|
|
from bs4 import BeautifulSoup
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.http import Http404, HttpResponse, JsonResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.views.generic import DeleteView, FormView, ListView, View
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
from reportlab.platypus import Paragraph, Frame
from . import forms
from .models import PROGRESS_CHOICES, MockExam, Section, UserSectionRelation
def get_progress_info(index, info):
"""
Returns the requested info for user's progress. Index is an integer from
0 to 3. Info is one of the following strings: text, css_glyphicon,
css_progress_bar.
"""
progress_choices = dict(PROGRESS_CHOICES)
progress_info = {
0: {'text': progress_choices[0],
'css_glyphicon': '',
'css_progress_bar': ''},
1: {'text': progress_choices[1],
'css_glyphicon': 'eye-open',
'css_progress_bar': 'warning'},
2: {'text': progress_choices[2],
'css_glyphicon': 'wrench',
'css_progress_bar': 'info'},
3: {'text': progress_choices[3],
'css_glyphicon': 'ok',
'css_progress_bar': 'success'}}
return progress_info[index][info]
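    # Illustrative examples: get_progress_info(2, 'css_glyphicon') -> 'wrench';
    # get_progress_info(0, 'text') -> the display label mapped to 0 in
    # PROGRESS_CHOICES.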
class SectionListView(ListView):
"""
    View for all sections, structured via the mptt template tag.
"""
queryset = Section.objects.all()
self_pattern_name = 'section_list'
def get_context_data(self, **context):
"""
        Inserts the user's learning progress values.
"""
context = super().get_context_data(**context)
# Evaluate section queryset
sections = dict()
for section in context['section_list']:
sections[section.pk] = section
# Total scores
total_scores = sum(section.scores for section in sections.values() if section.is_leaf_node())
if total_scores == 0:
total_scores = 1
# User scores
section_progresses = dict()
user_learning_progress = []
# # Initiate list user_learning_progress
for index in range(4):
user_learning_progress.append(dict(
css_glyphicon=get_progress_info(index, 'css_glyphicon'),
css_progress_bar=get_progress_info(index, 'css_progress_bar'),
text=get_progress_info(index, 'text'),
value=0))
# # Parse user's data into list and dict
for usersectionrelation in UserSectionRelation.objects.filter(user=self.request.user, progress__gt=0):
section = sections[usersectionrelation.section_id] # TODO: Check whether to use section.pk.
# section = sections[usersectionrelation.section.pk]
if not section.is_leaf_node():
continue
progress_dict = user_learning_progress[usersectionrelation.progress]
progress_dict['value'] += section.scores
section_progresses[section.pk] = usersectionrelation.progress
# # Calculate "Nothing done" case
for progress_dict in user_learning_progress:
user_learning_progress[0]['value'] += -progress_dict['value']
user_learning_progress[0]['value'] += total_scores
# # Calculate percentage value
for progress_dict in user_learning_progress:
progress_dict['value'] = round(progress_dict['value'] * 100 / total_scores)
context['section_progresses'] = section_progresses
context['user_learning_progress'] = user_learning_progress
return context
class UserSectionRelationUpdateView(FormView):
"""
    View for a single relation between a user and a section.
"""
template_name = 'progress/usersectionrelation_form.html'
form_class = forms.UserSectionRelationUpdateForm
success_url = reverse_lazy('section_list')
self_pattern_name = 'usersectionrelation_update'
def get(self, *args, **kwargs):
"""
Handles GET requests. Special response for ajax requests.
"""
if self.request.is_ajax():
form_class = self.get_form_class()
form = self.get_form(form_class)
context = self.get_context_data(form=form)
context.update(csrf(self.request))
html = render_to_string('progress/usersectionrelation_form_snippet.html', context)
html = '<p><button type="button" class="close" aria-hidden="true">×</button></p>' + html
response = self.render_to_json_response({'html': html})
else:
response = super().get(*args, **kwargs)
return response
def get_initial(self):
"""
Returns the initial value for the form. Creates a relation from the user to the section if necessary.
"""
try:
self.section = Section.objects.get(pk=self.kwargs['pk'])
if self.section.get_children().exists():
raise Section.DoesNotExist
except Section.DoesNotExist:
raise Http404
self.user_section_relation, __ = UserSectionRelation.objects.get_or_create(
user=self.request.user, section=self.section)
return {'progress': self.user_section_relation.progress, 'comment': self.user_section_relation.comment}
def get_context_data(self, **context):
"""
Inserts the section into the template context.
"""
return super().get_context_data(section=self.section, **context)
def form_valid(self, form):
"""
Processes the valid form. Saves the input into the database.
"""
self.user_section_relation.progress = form.cleaned_data['progress']
self.user_section_relation.comment = form.cleaned_data['comment']
self.user_section_relation.save()
if self.request.is_ajax():
response = self.render_to_json_response({})
else:
response = super().form_valid(form)
return response
def form_invalid(self, form):
"""
Returns a response with the form errors. Returns a special response
for ajax requests.
"""
if self.request.is_ajax():
response = self.render_to_json_response({'form_errors': form.errors})
else:
response = super().form_invalid(form)
return response
def render_to_json_response(self, context, **response_kwargs):
"""
Returns a JsonResponse object for ajax requests.
"""
return JsonResponse(context, **response_kwargs)
class UserSectionRelationExportView(View):
"""
    View to export all leaf node sections with the user's progress data and
comments as JSON.
"""
def get(self, request, *args, **kwargs):
sections = [
section.serialize(user=self.request.user)
for section in Section.objects.all()
if section.is_leaf_node()]
return JsonResponse(sections, safe=False)
class PrintNoteCardsView(View):
"""
    View to export all of the user's section comments in a printable format (PDF).
"""
def get_queryset(self):
"""
Returns the queryset with all UserSectionRelation objects that contain
a personal comment.
"""
queryset = UserSectionRelation.objects.filter(user=self.request.user)
queryset = queryset.exclude(comment='').select_related('section')
return queryset
def get(self, request, *args, **kwargs):
"""
Returns the response containing a reportlab generated PDF.
"""
# Create the HttpResponse object with the appropriate PDF headers.
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=learningprogress_note_cards.pdf'
# Create the PDF object, using the response object as its "file".
pdf = canvas.Canvas(response)
styles = getSampleStyleSheet()
# Get all cards with their stories.
cards = []
max_comment_length = 500
for usersectionrelation in self.get_queryset():
# Delete attributes from span tags.
comment = BeautifulSoup(usersectionrelation.comment, 'html.parser')
for tag in comment.find_all('span'):
del tag.attrs
# Parse the card.
story = []
story.append(Paragraph(
usersectionrelation.section.name,
styles['Heading2']))
story.append(Paragraph(
usersectionrelation.section.notes,
styles['Normal']))
if len(str(comment)) <= max_comment_length:
story.append(Paragraph(
str(comment),
styles['Normal']))
else:
story.append(Paragraph(
_('Sorry, your comment is too long.'),
styles['Normal']))
cards.append(story)
# Add cards to PDF object.
width, height = A4
if len(cards) % 2 != 0:
cards.append('')
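        # The loop below lays the cards out two per row (a left and a right
        # Frame, each width/2 by height/3) and three rows per A4 page, i.e.
        # six cards per page; the for/else starts a new page when three rows
        # are filled, and the outer while exits once the card list is
        # exhausted. Padding the list to an even length above keeps the two
        # pops per row safe.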
while True:
for i in range(3):
if len(cards) == 0:
break
h = height * 2/3 - height * 1/3 * i
f1 = Frame(0, h, width/2, height * 1/3, showBoundary=1)
f2 = Frame(width/2, h, width/2, height * 1/3, showBoundary=1)
f1.addFromList(cards.pop(0), pdf)
f2.addFromList(cards.pop(0), pdf)
else:
pdf.showPage()
continue
pdf.showPage()
break
# Close the PDF object cleanly and we're done.
pdf.save()
return response
class MockExamFormView(FormView):
"""
View to display and update user's mock exams.
"""
template_name = 'progress/mockexam_form.html'
form_class = forms.MockExamForm
success_url = reverse_lazy('mockexam_form')
self_pattern_name = 'mockexam_form'
def get_context_data(self, **context):
"""
Inserts the mock exams into the context.
"""
context = super().get_context_data(**context)
context['mockexam_list'] = MockExam.objects.filter(user=self.request.user)
return context
def form_valid(self, form):
"""
Processes a valid input of a new mock exam.
"""
mockexam = form.save(commit=False)
mockexam.user = self.request.user
mockexam.save()
return super().form_valid(form)
class MockExamDeleteView(DeleteView):
"""
View to delete a single mock exam.
"""
model = MockExam
success_url = reverse_lazy('mockexam_form')
self_pattern_name = 'mockexam_delete'
def dispatch(self, *args, **kwargs):
"""
Ensures that you can only delete your own mock exams.
"""
if not self.get_object().user == self.request.user:
raise PermissionDenied
return super().dispatch(*args, **kwargs)
|
|
import os
import os.path as op
from flask import Flask, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.event import listens_for
from jinja2 import Markup
from flask_admin import Admin, form
from flask_admin.form import rules
from flask_admin.contrib import sqla
# Create application
app = Flask(__name__, static_folder='files')
# Create a dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Configure the file-based SQLite database
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create directory for file fields to use
file_path = op.join(op.dirname(__file__), 'files')
try:
os.mkdir(file_path)
except OSError:
pass
# Create models
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64))
path = db.Column(db.Unicode(128))
def __unicode__(self):
return self.name
class Image(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64))
path = db.Column(db.Unicode(128))
def __unicode__(self):
return self.name
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Unicode(64))
last_name = db.Column(db.Unicode(64))
email = db.Column(db.Unicode(128))
phone = db.Column(db.Unicode(32))
city = db.Column(db.Unicode(128))
country = db.Column(db.Unicode(128))
notes = db.Column(db.UnicodeText)
# Delete hooks for models, delete files if models are getting deleted
@listens_for(File, 'after_delete')
def del_file(mapper, connection, target):
if target.path:
try:
os.remove(op.join(file_path, target.path))
except OSError:
            # Don't care if it was not deleted because it does not exist
pass
@listens_for(Image, 'after_delete')
def del_image(mapper, connection, target):
if target.path:
# Delete image
try:
os.remove(op.join(file_path, target.path))
except OSError:
pass
# Delete thumbnail
try:
os.remove(op.join(file_path,
form.thumbgen_filename(target.path)))
except OSError:
pass
# Administrative views
class FileView(sqla.ModelView):
# Override form field to use Flask-Admin FileUploadField
form_overrides = {
'path': form.FileUploadField
}
# Pass additional parameters to 'path' to FileUploadField constructor
form_args = {
'path': {
'label': 'File',
'base_path': file_path,
'allow_overwrite': False
}
}
class ImageView(sqla.ModelView):
def _list_thumbnail(view, context, model, name):
if not model.path:
return ''
return Markup('<img src="%s">' % url_for('static',
filename=form.thumbgen_filename(model.path)))
column_formatters = {
'path': _list_thumbnail
}
    # An alternative way to contribute a field is to override it completely.
# In this case, Flask-Admin won't attempt to merge various parameters for the field.
form_extra_fields = {
'path': form.ImageUploadField('Image',
base_path=file_path,
thumbnail_size=(100, 100, True))
}
class UserView(sqla.ModelView):
"""
This class demonstrates the use of 'rules' for controlling the rendering of forms.
"""
form_create_rules = [
# Header and four fields. Email field will go above phone field.
rules.FieldSet(('first_name', 'last_name', 'email', 'phone'), 'Personal'),
# Separate header and few fields
rules.Header('Location'),
rules.Field('city'),
# String is resolved to form field, so there's no need to explicitly use `rules.Field`
'country',
# Show macro from Flask-Admin lib.html (it is included with 'lib' prefix)
rules.Container('rule_demo.wrap', rules.Field('notes'))
]
# Use same rule set for edit page
form_edit_rules = form_create_rules
create_template = 'rule_create.html'
edit_template = 'rule_edit.html'
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
# Create admin
admin = Admin(app, 'Example: Forms')
# Add views
admin.add_view(FileView(File, db.session))
admin.add_view(ImageView(Image, db.session))
admin.add_view(UserView(User, db.session, name='User'))
def build_sample_db():
"""
Populate a small db with some example entries.
"""
import random
import string
db.drop_all()
db.create_all()
first_names = [
'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia',
'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
]
last_names = [
'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',
'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',
'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'
]
locations = [
("Shanghai", "China"),
("Istanbul", "Turkey"),
("Karachi", "Pakistan"),
("Mumbai", "India"),
("Moscow", "Russia"),
("Sao Paulo", "Brazil"),
("Beijing", "China"),
("Tianjin", "China"),
("Guangzhou", "China"),
("Delhi", "India"),
("Seoul", "South Korea"),
("Shenzhen", "China"),
("Jakarta", "Indonesia"),
("Tokyo", "Japan"),
("Mexico City", "Mexico"),
("Kinshasa", "Democratic Republic of the Congo"),
("Bangalore", "India"),
("New York City", "United States"),
("London", "United Kingdom"),
("Bangkok", "Thailand"),
("Tehran", "Iran"),
("Dongguan", "China"),
("Lagos", "Nigeria"),
("Lima", "Peru"),
("Ho Chi Minh City", "Vietnam"),
]
for i in range(len(first_names)):
user = User()
user.first_name = first_names[i]
user.last_name = last_names[i]
user.email = user.first_name.lower() + "@example.com"
tmp = ''.join(random.choice(string.digits) for i in range(10))
user.phone = "(" + tmp[0:3] + ") " + tmp[3:6] + " " + tmp[6::]
user.city = locations[i][0]
user.country = locations[i][1]
db.session.add(user)
images = ["Buffalo", "Elephant", "Leopard", "Lion", "Rhino"]
for name in images:
image = Image()
image.name = name
image.path = name.lower() + ".jpg"
db.session.add(image)
for i in [1, 2, 3]:
file = File()
file.name = "Example " + str(i)
file.path = "example_" + str(i) + ".pdf"
db.session.add(file)
db.session.commit()
return
if __name__ == '__main__':
# Build a sample db on the fly, if one does not exist yet.
app_dir = op.realpath(os.path.dirname(__file__))
database_path = op.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
build_sample_db()
# Start app
app.run(debug=True)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Dan Wendlandt, Nicira, Inc
#
import eventlet
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as l3_constants
from neutron.common import legacy
from neutron.common import topics
from neutron.common import utils as common_utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common.rpc import common as rpc_common
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import service
from neutron import service as neutron_service
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
RPC_LOOP_INTERVAL = 1
FLOATING_IP_CIDR_SUFFIX = '/32'
class L3PluginApi(proxy.RpcProxy):
"""Agent side of the l3 agent RPC API.
API version history:
1.0 - Initial version.
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic, host):
super(L3PluginApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.host = host
def get_routers(self, context, router_ids=None):
"""Make a remote process call to retrieve the sync data for routers."""
return self.call(context,
self.make_msg('sync_routers', host=self.host,
router_ids=router_ids),
topic=self.topic)
def get_external_network_id(self, context):
"""Make a remote process call to retrieve the external network id.
        @raise common.RemoteError: with TooManyExternalNetworks as exc_type
                                   if there is more than one external network
"""
return self.call(context,
self.make_msg('get_external_network_id',
host=self.host),
topic=self.topic)
class RouterInfo(object):
def __init__(self, router_id, root_helper, use_namespaces, router):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.root_helper = root_helper
self.use_namespaces = use_namespaces
# Invoke the setter for establishing initial SNAT action
self.router = router
self.iptables_manager = iptables_manager.IptablesManager(
root_helper=root_helper,
#FIXME(danwent): use_ipv6=True,
namespace=self.ns_name())
self.routes = []
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
        # Default enable_snat to True if it wasn't specified by the plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
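        # In short: gw_port present and SNAT enabled  -> 'add_rules';
        #           gw_port present and SNAT disabled -> 'remove_rules';
        #           gw_port gone but ex_gw_port still tracked -> 'remove_rules';
        #           otherwise no SNAT action is queued.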
def ns_name(self):
if self.use_namespaces:
return NS_PREFIX + self.router_id
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self, self._router.get('gw_port'),
*args, action=self._snat_action)
self._snat_action = None
class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, manager.Manager):
"""Manager for L3NatAgent
API version history:
1.0 initial Version
1.1 changed the type of the routers parameter
to the routers_updated method.
It was previously a list of routers in dict format.
It is now a list of router IDs only.
Per rpc versioning rules, it is backwards compatible.
"""
RPC_API_VERSION = '1.1'
OPTS = [
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual "
"interface.")),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port used by Neutron metadata namespace "
"proxy.")),
cfg.IntOpt('send_arp_for_ha',
default=0,
help=_("Send this many gratuitous ARPs for HA setup, if "
"less than or equal to 0, the feature is disabled")),
cfg.BoolOpt('use_namespaces', default=True,
help=_("Allow overlapping IP.")),
cfg.StrOpt('router_id', default='',
help=_("If namespaces is disabled, the l3 agent can only"
" configure a router that has the matching router "
"ID.")),
cfg.BoolOpt('handle_internal_only_routers',
default=True,
help=_("Agent should implement routers with no gateway")),
cfg.StrOpt('gateway_external_network_id', default='',
help=_("UUID of external network for routers implemented "
"by the agents.")),
cfg.BoolOpt('enable_metadata_proxy', default=True,
help=_("Allow running metadata proxy.")),
cfg.BoolOpt('router_delete_namespaces', default=False,
help=_("Delete namespace after removing a router.")),
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket')),
]
def __init__(self, host, conf=None):
if conf:
self.conf = conf
else:
self.conf = cfg.CONF
self.root_helper = config.get_root_helper(self.conf)
self.router_info = {}
self._check_config_params()
try:
self.driver = importutils.import_object(
self.conf.interface_driver,
self.conf
)
except Exception:
msg = _("Error importing interface driver "
"'%s'") % self.conf.interface_driver
LOG.error(msg)
raise SystemExit(msg)
self.context = context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
self.fullsync = True
self.updated_routers = set()
self.removed_routers = set()
self.sync_progress = False
if self.conf.use_namespaces:
self._destroy_router_namespaces(self.conf.router_id)
self.rpc_loop = loopingcall.FixedIntervalLoopingCall(
self._rpc_loop)
self.rpc_loop.start(interval=RPC_LOOP_INTERVAL)
super(L3NATAgent, self).__init__(conf=self.conf)
def _check_config_params(self):
"""Check items in configuration files.
Check for required and invalid configuration items.
The actual values are not verified for correctness.
"""
if not self.conf.interface_driver:
msg = _('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(msg)
if not self.conf.use_namespaces and not self.conf.router_id:
msg = _('Router id is required if not using namespaces.')
LOG.error(msg)
raise SystemExit(msg)
def _destroy_router_namespaces(self, only_router_id=None):
"""Destroy router namespaces on the host to eliminate all stale
linux devices, iptables rules, and namespaces.
        If only_router_id is passed, destroy only that single namespace. This
        allows multiple l3 agents to run on the same host without stepping on
        each other's toes on init, and only makes sense when a specific
        router_id is configured.
"""
root_ip = ip_lib.IPWrapper(self.root_helper)
for ns in root_ip.get_namespaces(self.root_helper):
if ns.startswith(NS_PREFIX):
router_id = ns[len(NS_PREFIX):]
if only_router_id and not only_router_id == router_id:
continue
if self.conf.enable_metadata_proxy:
self._destroy_metadata_proxy(router_id, ns)
try:
self._destroy_router_namespace(ns)
except Exception:
LOG.exception(_("Failed deleting namespace '%s'"), ns)
def _destroy_router_namespace(self, namespace):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=namespace)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(INTERNAL_DEV_PREFIX):
# device is on default bridge
self.driver.unplug(d.name, namespace=namespace,
prefix=INTERNAL_DEV_PREFIX)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
self.driver.unplug(d.name,
bridge=self.conf.external_network_bridge,
namespace=namespace,
prefix=EXTERNAL_DEV_PREFIX)
if self.conf.router_delete_namespaces:
try:
ns_ip.netns.delete(namespace)
except RuntimeError:
msg = _('Failed trying to delete namespace: %s')
LOG.exception(msg % namespace)
def _create_router_namespace(self, ri):
ip_wrapper_root = ip_lib.IPWrapper(self.root_helper)
ip_wrapper = ip_wrapper_root.ensure_namespace(ri.ns_name())
ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
def _fetch_external_net_id(self):
"""Find UUID of single external network for this agent."""
if self.conf.gateway_external_network_id:
return self.conf.gateway_external_network_id
# L3 agent doesn't use external_network_bridge to handle external
# networks, so bridge_mappings with provider networks will be used
# and the L3 agent is able to handle any external networks.
if not self.conf.external_network_bridge:
return
try:
return self.plugin_rpc.get_external_network_id(self.context)
except rpc_common.RemoteError as e:
if e.exc_type == 'TooManyExternalNetworks':
msg = _(
"The 'gateway_external_network_id' option must be "
"configured for this agent as Neutron has more than "
"one external network.")
raise Exception(msg)
else:
raise
def _router_added(self, router_id, router):
ri = RouterInfo(router_id, self.root_helper,
self.conf.use_namespaces, router)
self.router_info[router_id] = ri
if self.conf.use_namespaces:
self._create_router_namespace(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].add_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].add_rule(c, r)
ri.iptables_manager.apply()
super(L3NATAgent, self).process_router_add(ri)
if self.conf.enable_metadata_proxy:
self._spawn_metadata_proxy(ri.router_id, ri.ns_name())
def _router_removed(self, router_id):
ri = self.router_info.get(router_id)
if ri is None:
LOG.warn(_("Info for router %s were not found. "
"Skipping router removal"), router_id)
return
ri.router['gw_port'] = None
ri.router[l3_constants.INTERFACE_KEY] = []
ri.router[l3_constants.FLOATINGIP_KEY] = []
self.process_router(ri)
for c, r in self.metadata_filter_rules():
ri.iptables_manager.ipv4['filter'].remove_rule(c, r)
for c, r in self.metadata_nat_rules():
ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
ri.iptables_manager.apply()
if self.conf.enable_metadata_proxy:
self._destroy_metadata_proxy(ri.router_id, ri.ns_name())
del self.router_info[router_id]
self._destroy_router_namespace(ri.ns_name())
def _spawn_metadata_proxy(self, router_id, ns_name):
def callback(pid_file):
metadata_proxy_socket = cfg.CONF.metadata_proxy_socket
proxy_cmd = ['neutron-ns-metadata-proxy',
'--pid_file=%s' % pid_file,
'--metadata_proxy_socket=%s' % metadata_proxy_socket,
'--router_id=%s' % router_id,
'--state_path=%s' % self.conf.state_path,
'--metadata_port=%s' % self.conf.metadata_port]
proxy_cmd.extend(config.get_log_args(
cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' %
router_id))
return proxy_cmd
pm = external_process.ProcessManager(
self.conf,
router_id,
self.root_helper,
ns_name)
pm.enable(callback)
def _destroy_metadata_proxy(self, router_id, ns_name):
pm = external_process.ProcessManager(
self.conf,
router_id,
self.root_helper,
ns_name)
pm.disable()
def _set_subnet_info(self, port):
ips = port['fixed_ips']
if not ips:
raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"),
port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
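        # Illustrative example: a port with fixed IP 10.0.0.4 on subnet
        # 10.0.0.0/24 ends up with port['ip_cidr'] == '10.0.0.4/24', since
        # netaddr.IPNetwork('10.0.0.0/24').prefixlen == 24.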
def process_router(self, ri):
ri.iptables_manager.defer_apply_on()
ex_gw_port = self._get_ex_gw_port(ri)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports if
p['id'] in current_port_ids and
p['id'] not in existing_port_ids]
old_ports = [p for p in ri.internal_ports if
p['id'] not in current_port_ids]
for p in new_ports:
self._set_subnet_info(p)
ri.internal_ports.append(p)
self.internal_network_added(ri, p['network_id'], p['id'],
p['ip_cidr'], p['mac_address'])
for p in old_ports:
ri.internal_ports.remove(p)
self.internal_network_removed(ri, p['id'], p['ip_cidr'])
# Get IPv4 only internal CIDRs
internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports
if netaddr.IPNetwork(p['ip_cidr']).version == 4]
# TODO(salv-orlando): RouterInfo would be a better place for
# this logic too
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
ri.ex_gw_port and ri.ex_gw_port['id'])
interface_name = None
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
if ex_gw_port and not ri.ex_gw_port:
self._set_subnet_info(ex_gw_port)
self.external_gateway_added(ri, ex_gw_port,
interface_name, internal_cidrs)
elif not ex_gw_port and ri.ex_gw_port:
self.external_gateway_removed(ri, ri.ex_gw_port,
interface_name, internal_cidrs)
# Process SNAT rules for external gateway
ri.perform_snat_action(self._handle_router_snat_rules,
internal_cidrs, interface_name)
# Process DNAT rules for floating IPs
if ex_gw_port:
self.process_router_floating_ips(ri, ex_gw_port)
ri.ex_gw_port = ex_gw_port
ri.enable_snat = ri.router.get('enable_snat')
self.routes_updated(ri)
ri.iptables_manager.defer_apply_off()
def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs,
interface_name, action):
        # Remove all the rules.
        # This is safe because if use_namespaces is set to False
        # then the agent can only configure one router; otherwise
        # each router's SNAT rules live in their own namespace.
ri.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
ri.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
ri.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
        # And add them back if the action is 'add_rules'
if action == 'add_rules' and ex_gw_port:
# ex_gw_port should not be None in this case
# NAT rules are added only if ex_gw_port has an IPv4 address
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
internal_cidrs,
interface_name)
for rule in rules:
ri.iptables_manager.ipv4['nat'].add_rule(*rule)
break
ri.iptables_manager.apply()
def process_router_floating_ips(self, ri, ex_gw_port):
"""Configure the router's floating IPs
Configures floating ips in iptables and on the router's gateway device.
        Cleans up floating ips that should no longer be configured.
"""
interface_name = self.get_external_device_name(ex_gw_port['id'])
device = ip_lib.IPDevice(interface_name, self.root_helper,
namespace=ri.ns_name())
# Clear out all iptables rules for floating ips
ri.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
existing_cidrs = set([addr['cidr'] for addr in device.addr.list()])
new_cidrs = set()
# Loop once to ensure that floating ips are configured.
for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_ip = fip['floating_ip_address']
ip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
new_cidrs.add(ip_cidr)
if ip_cidr not in existing_cidrs:
net = netaddr.IPNetwork(ip_cidr)
device.addr.add(net.version, ip_cidr, str(net.broadcast))
self._send_gratuitous_arp_packet(ri, interface_name, fip_ip)
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
ri.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
ri.iptables_manager.apply()
# Clean up addresses that no longer belong on the gateway interface.
for ip_cidr in existing_cidrs - new_cidrs:
if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
net = netaddr.IPNetwork(ip_cidr)
device.addr.delete(net.version, ip_cidr)
def _get_ex_gw_port(self, ri):
return ri.router.get('gw_port')
def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address):
if self.conf.send_arp_for_ha > 0:
arping_cmd = ['arping', '-A', '-U',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
ip_address]
try:
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
else:
utils.execute(arping_cmd, check_exit_code=True,
root_helper=self.root_helper)
except Exception as e:
LOG.error(_("Failed sending gratuitous ARP: %s"), str(e))
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
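    # Example (hedged): with the usual 'qr-'/'qg-' prefixes and a driver
    # DEV_NAME_LEN of 14, a port id such as 'a1b2c3d4-e5f6-...' would map to
    # device names like 'qr-a1b2c3d4-e5' and 'qg-a1b2c3d4-e5'; the prefix
    # strings and length are assumptions about the surrounding constants.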
def external_gateway_added(self, ri, ex_gw_port,
interface_name, internal_cidrs):
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'], interface_name,
ex_gw_port['mac_address'],
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name(),
prefix=EXTERNAL_DEV_PREFIX)
self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
namespace=ri.ns_name())
ip_address = ex_gw_port['ip_cidr'].split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
gw_ip = ex_gw_port['subnet']['gateway_ip']
if ex_gw_port['subnet']['gateway_ip']:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(cmd, check_exit_code=False)
else:
utils.execute(cmd, check_exit_code=False,
root_helper=self.root_helper)
def external_gateway_removed(self, ri, ex_gw_port,
interface_name, internal_cidrs):
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.unplug(interface_name,
bridge=self.conf.external_network_bridge,
namespace=ri.ns_name(),
prefix=EXTERNAL_DEV_PREFIX)
def metadata_filter_rules(self):
rules = []
if self.conf.enable_metadata_proxy:
rules.append(('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' % self.conf.metadata_port))
return rules
def metadata_nat_rules(self):
rules = []
if self.conf.enable_metadata_proxy:
rules.append(('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT '
'--to-port %s' % self.conf.metadata_port))
return rules
def external_gateway_nat_rules(self, ex_gw_ip, internal_cidrs,
interface_name):
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})]
for cidr in internal_cidrs:
rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr))
return rules
def internal_network_added(self, ri, network_id, port_id,
internal_cidr, mac_address):
interface_name = self.get_internal_device_name(port_id)
if not ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ri.ns_name(),
prefix=INTERNAL_DEV_PREFIX)
self.driver.init_l3(interface_name, [internal_cidr],
namespace=ri.ns_name())
ip_address = internal_cidr.split('/')[0]
self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
def internal_network_removed(self, ri, port_id, internal_cidr):
interface_name = self.get_internal_device_name(port_id)
if ip_lib.device_exists(interface_name,
root_helper=self.root_helper,
namespace=ri.ns_name()):
self.driver.unplug(interface_name, namespace=ri.ns_name(),
prefix=INTERNAL_DEV_PREFIX)
def internal_network_nat_rules(self, ex_gw_ip, internal_cidr):
rules = [('snat', '-s %s -j SNAT --to-source %s' %
(internal_cidr, ex_gw_ip))]
return rules
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
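    # Worked example (illustrative addresses):
    # floating_forward_rules('172.24.4.3', '10.0.0.5') returns
    #   [('PREROUTING', '-d 172.24.4.3 -j DNAT --to 10.0.0.5'),
    #    ('OUTPUT', '-d 172.24.4.3 -j DNAT --to 10.0.0.5'),
    #    ('float-snat', '-s 10.0.0.5 -j SNAT --to 172.24.4.3')]
    # i.e. inbound traffic to the floating IP is DNATed to the fixed IP and
    # outbound traffic from the fixed IP is SNATed back to the floating IP.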
def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message."""
LOG.debug(_('Got router deleted notification for %s'), router_id)
self.removed_routers.add(router_id)
def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message."""
LOG.debug(_('Got routers updated notification :%s'), routers)
if routers:
            # This is needed for backward compatibility
if isinstance(routers[0], dict):
routers = [router['id'] for router in routers]
self.updated_routers.update(routers)
def router_removed_from_agent(self, context, payload):
LOG.debug(_('Got router removed from agent :%r'), payload)
self.removed_routers.add(payload['router_id'])
def router_added_to_agent(self, context, payload):
LOG.debug(_('Got router added to agent :%r'), payload)
self.routers_updated(context, payload)
def _process_routers(self, routers, all_routers=False):
pool = eventlet.GreenPool()
if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge)
return
target_ex_net_id = self._fetch_external_net_id()
        # If 'routers' contains all the routers we have (i.e. they come from
        # a full sync at startup or after an error), look for routers to
        # remove among everything we currently track.
        # If 'routers' comes from a server-side notification, only look for
        # removals among the intersection of the incoming routers and the
        # ones we have now.
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
if not r['admin_state_up']:
continue
# If namespaces are disabled, only process the router associated
# with the configured agent id.
if (not self.conf.use_namespaces and
r['id'] != self.conf.router_id):
continue
ex_net_id = (r['external_gateway_info'] or {}).get('network_id')
if not ex_net_id and not self.conf.handle_internal_only_routers:
continue
if (target_ex_net_id and ex_net_id and
ex_net_id != target_ex_net_id):
continue
cur_router_ids.add(r['id'])
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
pool.spawn_n(self.process_router, ri)
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
pool.spawn_n(self._router_removed, router_id)
pool.waitall()
@lockutils.synchronized('l3-agent', 'neutron-')
def _rpc_loop(self):
        # _rpc_loop and _sync_routers_task will not be executed at the
        # same time because of the lock, so we can safely clear
        # updated_routers and removed_routers here.
try:
LOG.debug(_("Starting RPC loop for %d updated routers"),
len(self.updated_routers))
if self.updated_routers:
router_ids = list(self.updated_routers)
self.updated_routers.clear()
routers = self.plugin_rpc.get_routers(
self.context, router_ids)
self._process_routers(routers)
self._process_router_delete()
LOG.debug(_("RPC loop successfully completed"))
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
def _process_router_delete(self):
current_removed_routers = list(self.removed_routers)
for router_id in current_removed_routers:
self._router_removed(router_id)
self.removed_routers.remove(router_id)
def _router_ids(self):
if not self.conf.use_namespaces:
return [self.conf.router_id]
@periodic_task.periodic_task
@lockutils.synchronized('l3-agent', 'neutron-')
def _sync_routers_task(self, context):
if self.services_sync:
super(L3NATAgent, self).process_services_sync(context)
LOG.debug(_("Starting _sync_routers_task - fullsync:%s"),
self.fullsync)
if not self.fullsync:
return
try:
router_ids = self._router_ids()
self.updated_routers.clear()
self.removed_routers.clear()
routers = self.plugin_rpc.get_routers(
context, router_ids)
LOG.debug(_('Processing :%r'), routers)
self._process_routers(routers, all_routers=True)
self.fullsync = False
LOG.debug(_("_sync_routers_task successfully completed"))
except Exception:
LOG.exception(_("Failed synchronizing routers"))
self.fullsync = True
def after_start(self):
LOG.info(_("L3 agent started"))
def _update_routing_table(self, ri, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
        # TODO(nati): move this code to ip_lib
if self.conf.use_namespaces:
ip_wrapper = ip_lib.IPWrapper(self.conf.root_helper,
namespace=ri.ns_name())
ip_wrapper.netns.execute(cmd, check_exit_code=False)
else:
utils.execute(cmd, check_exit_code=False,
root_helper=self.conf.root_helper)
def routes_updated(self, ri):
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug(_("Added route entry is '%s'"), route)
            # remove the replaced route from the list of routes to delete
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
            # 'ip route replace' succeeds even if there is no existing route
self._update_routing_table(ri, 'replace', route)
for route in removes:
LOG.debug(_("Removed route entry is '%s'"), route)
self._update_routing_table(ri, 'delete', route)
ri.routes = new_routes
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {
'use_namespaces': self.conf.use_namespaces,
'router_id': self.conf.router_id,
'handle_internal_only_routers':
self.conf.handle_internal_only_routers,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
LOG.debug(_("Report state task started"))
num_ex_gw_ports = 0
num_interfaces = 0
num_floating_ips = 0
router_infos = self.router_info.values()
num_routers = len(router_infos)
for ri in router_infos:
ex_gw_port = self._get_ex_gw_port(ri)
if ex_gw_port:
num_ex_gw_ports += 1
num_interfaces += len(ri.router.get(l3_constants.INTERFACE_KEY,
[]))
num_floating_ips += len(ri.router.get(l3_constants.FLOATINGIP_KEY,
[]))
configurations = self.agent_state['configurations']
configurations['routers'] = num_routers
configurations['ex_gw_ports'] = num_ex_gw_ports
configurations['interfaces'] = num_interfaces
configurations['floating_ips'] = num_floating_ips
try:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
LOG.debug(_("Report state task successfully completed"))
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.fullsync = True
LOG.info(_("agent_updated by server side %s!"), payload)
def main(manager='neutron.agent.l3_agent.L3NATAgentWithStateReport'):
eventlet.monkey_patch()
conf = cfg.CONF
conf.register_opts(L3NATAgent.OPTS)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf.register_opts(interface.OPTS)
conf.register_opts(external_process.OPTS)
conf(project='neutron')
config.setup_logging(conf)
legacy.modernize_quantum_config(conf)
server = neutron_service.Service.create(
binary='neutron-l3-agent',
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager=manager)
service.launch(server).wait()
|
|
#!/usr/bin/env python
import sys
from datetime import datetime
import time
import array
import struct
import os
try:
import serial
except ImportError:
print "This tool needs PySerial, but it was not found"
sys.exit(1)
class FT900Bootloader:
def __init__(self, ser):
ser.setDTR(1)
ser.setRTS(1)
ser.setDTR(0)
ser.setRTS(0)
self.ser = ser
self.verbose = False
self.cumcrc = 0
def rd1(self):
""" Return the last incoming character, if any """
n = self.ser.inWaiting()
if n:
r = self.ser.read(n)
return r[-1]
else:
return None
def waitprompt(self):
# Might already be at the bootloader prompt
if self.rd1() == '>':
return
# Might be at prompt already, or halted. Send ' '
self.ser.write(' ')
self.ser.flush()
time.sleep(0.001)
if self.rd1() == '>':
return
# Is somewhere else, request manual reset
print "Please press RESET on target board"
while True:
s = self.ser.read(1)
# print repr(s)
if s == ">":
break
def confirm(self):
self.ser.write("C")
return struct.unpack("I", self.ser.read(4))[0]
def version(self):
self.ser.write("V")
return struct.unpack("I", self.ser.read(4))[0]
def pmcrc32(self, a, sz):
t0 = time.time()
self.ser.write("Q" + struct.pack("II", a, sz))
(r, ) = struct.unpack("I", self.ser.read(4))
if self.verbose:
t = time.time() - t0
self.cumcrc += t
print 'crc', sz, t, self.cumcrc
return r
def flashcrc32(self, a, sz):
t0 = time.time()
self.ser.write("G" + struct.pack("II", a, sz))
(r, ) = struct.unpack("I", self.ser.read(4))
if self.verbose:
t = time.time() - t0
self.cumcrc += t
print 'crc', sz, t, self.cumcrc
return r
def ex(self, ):
self.ser.write("R")
self.ser.flush()
def setspeed(self, s):
if hasattr(self.ser, 'setBaudrate'):
self.ser.write("S" + struct.pack("I", s))
self.ser.flush()
time.sleep(.001)
self.ser.setBaudrate(s)
self.ser.flushInput()
self.ser.flushOutput()
def loadprogram(self, program):
pstr = program.tostring()
self.ser.write("P" + struct.pack("II", 0, len(pstr)))
self.ser.write(pstr)
def flash(self, addr, s):
self.ser.write('F' + struct.pack("II", addr, len(s)) + s)
(answer, ) = struct.unpack("I", self.ser.read(4))
assert answer == 0xf1a54ed
def hardboot(self, ):
self.ser.write("H")
self.ser.flush()
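# Summary of the single-letter bootloader commands used above (derived from
# the FT900Bootloader methods; packed fields are struct native-order uint32,
# format character "I"):
#   'C'               -> read a 4-byte confirmation word
#   'V'               -> read a 4-byte version word
#   'Q' + (addr, sz)  -> CRC32 over program memory, 4-byte reply
#   'G' + (addr, sz)  -> CRC32 over flash, 4-byte reply
#   'S' + (baud,)     -> switch serial speed
#   'P' + (0, length) -> load the program bytes that follow
#   'F' + (addr, len) -> write bytes to flash, 4-byte acknowledge
#   'R'               -> execute the loaded program, 'H' -> hard reboot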
class Bye(Exception):
pass
def collect_screenshot(dest, ser):
import Image
t0 = time.time()
match = "!screenshot"
have = "X" * len(match)
while have != match:
have = (have + ser.read(1))[-len(match):]
(w, h) = struct.unpack("II", ser.read(8))
print '%dx%d image' % (w, h),
sys.stdout.flush()
if 0:
imd = ser.read(4 * w * h)
im = Image.fromstring("RGBA", (w, h), imd)
else:
# print [ord(c) for c in ser.read(20)]
def getn():
b = ord(ser.read(1))
n = b
while b == 255:
b = ord(ser.read(1))
n += b
# print ' length', n
return n
imd = ""
for y in range(h):
# print 'line', y
prev = 4 * chr(0)
d = ""
while len(d) < 4 * w:
# print ' have', len(d) / 4
d += prev * getn()
d += ser.read(4 * getn())
prev = d[-4:]
assert len(d) == 4 * w, 'corrupted screen dump stream'
imd += d
im = Image.fromstring("RGBA", (w, h), imd)
(b,g,r,a) = im.split()
im = Image.merge("RGBA", (r, g, b, a))
im.convert("RGB").save(dest)
took = time.time() - t0
print 'took %.1fs. Wrote RGB image to %s' % (took, dest)
ser.write('k')
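# Note on the stream format decoded above (a reading of the code, not an
# official spec): each pixel row is run-length encoded. getn() accumulates a
# length by summing bytes while the byte equals 255, so the sequence
# 255, 255, 3 decodes to 513. For every row the decoder alternates between
# repeating the previous 4-byte BGRA pixel getn() times and copying
# 4 * getn() literal bytes, until 4 * w bytes have been produced.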
class TetheredFT900:
def __init__(self, port):
ser = serial.Serial(port, 115200, timeout=None, rtscts=0)
self.ser = ser
self.searchpath = ['.']
self.log = open("log", "w")
def boot(self, bootfile = None):
ser = self.ser
speed = 921600
bl = FT900Bootloader(ser)
ser.setDTR(1)
ser.setDTR(0)
bl.waitprompt()
time.sleep(.001)
ser.flushInput()
if bl.confirm() != 0xf70a0d13:
print 'CONFIRM command failed'
sys.exit(1)
bl.setspeed(speed)
if bl.confirm() != 0xf70a0d13:
print 'High-speed CONFIRM command failed'
sys.exit(1)
if bootfile is not None:
program = array.array('I', open(bootfile).read())
bl.loadprogram(program)
bl.ex()
time.sleep(.05)
while True:
n = ser.inWaiting()
if not n:
break
ser.read(n)
ser.write("true tethered !\r\n")
while ser.read(1) != chr(30):
pass
def listen(self):
print 'listen'
ser = self.ser
while 1:
c = ser.read(max(1, ser.inWaiting()))
sys.stdout.write(repr(c))
sys.stdout.flush()
def command_response(self, cmd):
ser = self.ser
# print
# print 'cmd', repr(cmd)
ser.write(cmd + '\r')
r = []
while True:
c = ser.read(max(1, ser.inWaiting()))
# print 'got', repr(c)
r.append(c.replace(chr(30), ''))
if chr(30) in c:
                # print 'full response', repr("".join(r))
return "".join(r)
def interactive_command(self, cmd = None):
ser = self.ser
if cmd is not None:
ser.write(cmd + '\r')
while True:
if ser.inWaiting() == 0:
sys.stdout.flush()
c = ser.read(max(1, ser.inWaiting()))
sys.stdout.write(c.replace(chr(30), ''))
self.log.write(c.replace(chr(30), ''))
if chr(30) in c:
return
def include(self, filename, write = sys.stdout.write):
for p in self.searchpath:
try:
incf = open(p + "/" + filename, "rt")
except IOError:
continue
for l in incf:
# time.sleep(.001)
# sys.stdout.write(l)
if l.endswith('\n'):
l = l[:-1]
print l
if l == "#bye":
raise Bye
l = l.expandtabs(4)
rs = l.split()
if rs and rs[0] == 'include':
self.include(rs[1])
else:
r = self.command_response(l)
if r.startswith(' '):
r = r[1:]
if r.endswith(' ok\r\n'):
r = r[:-5]
if 'error: ' in r:
print '--- ERROR --'
sys.stdout.write(l + '\n')
sys.stdout.write(r)
raise Bye
else:
write(r)
# print repr(r)
return
print "Cannot find file %s in %r" % (filename, self.searchpath)
raise Bye
def shellcmd(self, cmd):
ser = self.ser
if cmd.startswith('#include'):
cmd = cmd.split()
if len(cmd) != 2:
print 'Usage: #include <source-file>'
else:
try:
self.include(cmd[1])
except Bye:
pass
elif cmd.startswith('#flash'):
cmd = cmd.split()
if len(cmd) != 2:
print 'Usage: #flash <dest-file>'
ser.write('\r')
else:
print 'please wait...'
dest = cmd[1]
l = self.command_response('serialize')
print l[:100]
print l[-100:]
d = [int(x, 36) for x in l.split()[:-1]]
open(dest, "w").write(array.array("i", d).tostring())
print 4*len(d), 'bytes dumped to', dest
elif cmd.startswith('#setclock'):
n = datetime.utcnow()
cmd = "decimal %d %d %d %d %d %d >time&date" % (n.second, n.minute, n.hour, n.day, n.month, n.year)
ser.write(cmd + "\r\n")
ser.readline()
elif cmd.startswith('#bye'):
sys.exit(0)
elif cmd.startswith('#measure'):
ser = self.ser
# measure the board's clock
cmd = ":noname begin $21 emit 100000000 0 do loop again ; execute\r\n"
time.time() # warmup
ser.write(cmd)
while ser.read(1) != '!':
pass
t0 = time.time()
n = 0
while True:
ser.read(1)
t = time.time()
n += 1
print "%.6f MHz" % ((2 * 100.000000 * n) / (t - t0))
elif cmd.startswith('#screenshot'):
cmd = cmd.split()
if len(cmd) != 2:
print 'Usage: #screenshot <dest-image-file>'
ser.write('\r')
else:
dest = cmd[1]
ser.write('GD.screenshot\r\n')
collect_screenshot(dest, ser)
ser.write('\r\n')
elif cmd.startswith('#movie'):
cmd = cmd.split()
if len(cmd) != 2:
print 'Usage: #movie <command>'
ser.write('\r')
else:
dest = cmd[1]
ser.write('%s\r' % cmd[1])
for i in xrange(10000):
collect_screenshot("%04d.png" % i, ser)
ser.write('\r\n')
else:
# texlog.write(r"\underline{\textbf{%s}}" % cmd)
self.interactive_command(cmd)
def shell(self, autocomplete = True):
import readline
import os
histfile = os.path.join(os.path.expanduser("~"), ".swapforthhist")
try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
if autocomplete:
words = sorted((self.command_response('words')).split())
print 'Loaded', len(words), 'words'
def completer(text, state):
text = text.lower()
candidates = [w for w in words if w.startswith(text)]
if state < len(candidates):
return candidates[state]
else:
return None
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
readline.set_completer(completer)
readline.set_completer_delims(' ')
ser = self.ser
while True:
try:
cmd = raw_input('>').strip()
self.shellcmd(cmd)
except KeyboardInterrupt:
ser.write(chr(3))
ser.flush()
self.interactive_command()
except EOFError:
# texlog.write(r"\end{Verbatim}" + '\n')
# texlog.write(r"\end{framed}" + '\n')
break
if __name__ == '__main__':
port = '/dev/ttyUSB0'
image = None
r = None
searchpath = []
args = sys.argv[1:]
while args:
a = args[0]
if a.startswith('-i'):
image = args[1]
args = args[2:]
elif a.startswith('-h'):
port = args[1]
args = args[2:]
elif a.startswith('-p'):
searchpath.append(args[1])
args = args[2:]
else:
if not r:
r = TetheredFT900(port)
r.boot(image)
r.searchpath += searchpath
if a.startswith('-e'):
print r.shellcmd(args[1])
args = args[2:]
else:
r.include(a)
args = args[1:]
if not r:
r = TetheredFT900(port)
r.boot(image)
r.searchpath += searchpath
r.shell()
|
|
##
## Visualize samples produced by MISO.
##
## TODO: In future interface with spliceplot to produce densities along a gene model
##
from scipy import *
from numpy import *
import matplotlib
#from plotting import colors, show_spines, axes_square
import matplotlib.pyplot as plt
from matplotlib import rc
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
import time
import misopy
from misopy.parse_csv import csv2array
from misopy.sashimi_plot.plot_utils.plotting import *
import misopy.Gene as Gene
import misopy.hypothesis_test as ht
class SamplesPlotter:
"""
Visualize a set of samples from a run of MISO.
"""
def __init__(self, samples, params, log_scores=None,
percent_acceptance=None,
true_psi=None):
"""
Given a sampler instance, store its properties.
"""
# sampler parameters
self.samples = samples
self.params = params
self.log_scores = log_scores
self.percent_acceptance = percent_acceptance
self.true_psi = true_psi
assert(len(samples) > 1)
def plot(self, fig=None, output_dir=None, num_rows=1, num_cols=1, subplot_start=1,
title=None, plot_intervals=None, value_to_label=None, label=None, bins=10,
bbox_coords=None, vanilla=False,
plot_mean=False, fig_dims=(5, 5)):
"""
Plot a set of samples.
        - plot_intervals: if set, plot Bayesian credible intervals
"""
plot_handle = None
num_samples, num_isoforms = shape(self.samples)
if num_isoforms == 2:
plot_handle = self.plot_two_iso_samples(fig=fig, plots_dir=output_dir, num_cols=num_cols,
num_rows=num_rows, subplot_start=subplot_start,
plot_intervals=plot_intervals,
value_to_label=value_to_label,
label=label, bbox_coords=bbox_coords,
title=title, vanilla=vanilla,
plot_mean=plot_mean, fig_dims=fig_dims)
elif num_isoforms > 2:
num_isoforms = self.samples.shape[1]
num_rows = 1
num_cols = num_isoforms
for c in range(num_cols):
plot_handle = self.plot_two_iso_samples(fig, isoform_index=c,
subplot_start=c + 1, num_cols=num_cols,
plot_intervals=plot_intervals,
title=title, bins=bins, vanilla=vanilla,
plot_mean=plot_mean, fig_dims=fig_dims)
plt.ylabel('Frequency (Isoform %d)' %(c + 1))
plt.subplots_adjust(wspace=0.5)
else:
raise Exception, "Invalid number of isoforms %d" %(num_isoforms)
return plot_handle
def plot_two_iso_samples(self, fig=None, isoform_index=0, num_rows=1, num_cols=1, subplot_start=1,
plots_dir=None, map_estimate=None, simulation_num=1,
plot_intervals=False, value_to_label=None, label=None, plot_filename=None,
bins=None, bbox_coords=None, with_legend=True, title=None, vanilla=False,
plot_mean=False, normed=False, fig_dims=(5, 5)):
"""
Plot a set of samples for Psi of a two isoform gene.
"""
if not fig:
sampled_psi_fig = plt.figure(figsize=fig_dims, dpi=300)
else:
sampled_psi_fig = fig
ax = sampled_psi_fig.add_subplot(num_rows, num_cols, subplot_start)
num_iters = int(self.params['iters'])
burn_in = int(self.params['burn_in'])
lag = int(self.params['lag'])
percent_acceptance = float(self.params['percent_accept'])
proposal_type = self.params['proposal_type']
plt.rcParams['font.size'] = 10
show_spines(ax, ['left', 'bottom'])
bins = bins
assert((value_to_label == None and label == None) or \
(value_to_label != None and label != None))
# retrieve samples
samples_to_plot = self.samples[:, isoform_index]
# picasso blue #0276FD
        if not vanilla:
            if bins != None:
                plt.hist(samples_to_plot, bins=bins, align='mid', lw=0.5,
                         facecolor='#0276FD', edgecolor='#ffffff')
            else:
                plt.hist(samples_to_plot, align='mid', lw=0.5, facecolor='#0276FD',
                         edgecolor='#ffffff')
else:
plt.hist(samples_to_plot, align='mid', facecolor='#0276FD', edgecolor='#0276FD')
plt.xlabel(r'${\hat{\Psi}}_{\mathregular{MISO}}$')
plt.ylabel('Frequency')
plt.xlim([0, 1])
# Normalize samples
if normed:
yticks = list(plt.gca().get_yticks())
print "yticks: ", yticks
ytick_labels = ["%.2f" %(float(ytick) / float(normed)) for ytick in yticks]
ax.set_yticklabels(ytick_labels)
# samples_to_plot = samples_to_plot / float(len(samples_to_plot))
# curr_tick_labels = [label.get_label() for label in ax.get_yticklabels()]
# print "Current tick labels: ", curr_tick_labels
# new_tick_labels = []
# for label in curr_tick_labels:
# if len(label) > 0:
# new_label = "%.1f" %(float(label) / normed)
# else:
# new_label = ""
# new_tick_labels.append(new_label)
# #ax.set_yticklabels(new_tick_labels)
curr_axes = plt.gca()
# Plot MAP estimate for same data
if map_estimate:
l = plt.axvline(x=map_estimate, color='b', linewidth=1.2, ls='-', label=r'${\hat{\Psi}}_{MAP}\ =\ %.2f$' %(map_estimate))
# Plot true Psi
if self.true_psi:
plot_id = "%dsimul_%diters_%dburnin_%dlag_%s_truepsi_%.2f.pdf" \
%(simulation_num, num_iters, burn_in, lag, proposal_type, self.true_psi)
l = plt.axvline(x=self.true_psi, color='r', linewidth=1.2, ls='-', label=r'True $\Psi$')
else:
# Unknown true Psi
plot_id = "%dsimul_%diters_%dburnin_%dlag_%s_%s_truepsi.pdf" \
%(simulation_num, num_iters, burn_in, lag, proposal_type, 'unknown')
if value_to_label:
l = plt.axvline(x=value_to_label, color='r', linewidth=1.2, ls='-', label=label)
# plot credible intervals if given
if plot_intervals:
# print "Plotting %.2f confidence intervals" %(plot_intervals * 100)
interval_c1, interval_c2 = ht.compute_credible_intervals(samples_to_plot, plot_intervals)
plt.axvline(x=interval_c1, color='#999999', linewidth=0.7, ls='--',
label=r'%d' %(plot_intervals*100) + '% CI')
plt.axvline(x=interval_c2, color='#999999', linewidth=0.7, ls='--')
if plot_mean:
sample_mean = mean(samples_to_plot)
plt.axvline(x=sample_mean, color='r', linewidth=0.8, label='Mean')
if with_legend and (plot_intervals or self.true_psi):
if not bbox_coords:
lg = plt.legend(handletextpad=0.172, borderpad=0.01, labelspacing=.008,
handlelength=1.4, loc='best', numpoints=1)
else:
lg = plt.legend(handletextpad=0.172, borderpad=0.01, labelspacing=.008,
handlelength=1.4, loc='best', numpoints=1,
bbox_to_anchor=bbox_coords)
lg.get_frame().set_linewidth(0)
for t in lg.get_texts():
t.set_fontsize(8)
if title:
plt.title(title)
if plots_dir:
if not plot_filename:
plt.savefig(plots_dir + "sampled_psi_hist_%s" %(plot_id))
else:
plt.savefig(plots_dir + plot_filename + '.pdf')
return curr_axes
# Plot joint scores as function of number of samples
#log_joint_fig = plt.figure(figsize=(7,4.5), dpi=300)
#skip = 15
#print "Skip of %d when plotting log joint scores" %(skip)
#plt.plot(arange(0, len(total_log_scores), skip),
# total_log_scores[arange(0, len(total_log_scores), skip)])
#print "Total log scores plotted: ", len(total_log_scores)
#plt.xlabel('Number of iterations (lag not shown)')
#plt.ylabel('Log joint score')
#plt.savefig(plots_dir + "log_joint_scores_skip%d_%s" %(skip, plot_id))
# def load_samples(samples_filename):
# """
# Load a set of samples from a file and build an associated gene.
# """
# samples_data, h = csv2array(samples_filename, skiprows=1,
# raw_header=True)
# samples = []
# log_scores = []
# for line in samples_data:
# psi_vals = [float(v) for v in line['sampled_psi'].split(',')]
# samples.append(psi_vals)
# log_scores.append(float(line['log_score']))
# params, gene = parse_sampler_params(h[0])
# return (array(samples), array(log_scores), params, gene)
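# Minimal usage sketch (illustrative only; the sampler parameters below are
# made-up placeholders, not values produced by MISO itself):
#   samples = random.dirichlet([2, 5], size=1000)      # (1000, 2) Psi samples
#   params = {'iters': 1000, 'burn_in': 100, 'lag': 2,
#             'percent_accept': 70.0, 'proposal_type': 'drift'}
#   plotter = SamplesPlotter(samples, params)
#   plotter.plot(plot_intervals=0.95, plot_mean=True)
#   plt.show()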
|
|
"""Alpenhorn client interface."""
import datetime
import os
import sys
import click
import peewee as pw
from ch_util import data_index as di
from ch_util import ephemeris
@click.group()
def cli():
"""Client interface for alpenhorn. Use to request transfers, mount drives,
check status etc."""
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.argument("group_name", metavar="GROUP")
@click.option(
"--acq", help="Sync only this acquisition.", metavar="ACQ", type=str, default=None
)
@click.option("--force", "-f", help="proceed without confirmation", is_flag=True)
@click.option("--nice", "-n", help="nice level for transfer", default=0)
@click.option(
"--target",
metavar="TARGET_GROUP",
default=None,
type=str,
help="Only transfer files not available on this group.",
)
@click.option(
"--transport",
"-t",
is_flag=True,
help="[DEPRECATED] transport mode: only copy if fewer than two archived copies exist.",
)
@click.option("--show_acq", help="Summarise acquisitions to be copied.", is_flag=True)
@click.option("--show_files", help="Show files to be copied.", is_flag=True)
def sync(
node_name, group_name, acq, force, nice, target, transport, show_acq, show_files
):
"""Copy all files from NODE to GROUP that are not already present.
    We can also use the --target option to transfer only those files that are
    not already available in either the destination GROUP or TARGET_GROUP. This is
useful for transferring data to a staging location before going to a final
archive (e.g. HPSS, transport disks).
"""
# Make sure we connect RW
di.connect_database(read_write=True)
try:
from_node = di.StorageNode.get(name=node_name)
except pw.DoesNotExist:
raise Exception('Node "%s" does not exist in the DB.' % node_name)
try:
to_group = di.StorageGroup.get(name=group_name)
except pw.DoesNotExist:
raise Exception('Group "%s" does not exist in the DB.' % group_name)
# Construct list of file copies that are available on the source node, and
# not available on any nodes at the destination. This query is quite complex
# so I've broken it up...
# First get the nodes at the destination...
nodes_at_dest = di.StorageNode.select().where(di.StorageNode.group == to_group)
# Then use this to get a list of all files at the destination...
files_at_dest = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << nodes_at_dest, di.ArchiveFileCopy.has_file == "Y"
)
)
# Then combine to get all file(copies) that are available at the source but
# not at the destination...
copy = di.ArchiveFileCopy.select().where(
di.ArchiveFileCopy.node == from_node,
di.ArchiveFileCopy.has_file == "Y",
~(di.ArchiveFileCopy.file << files_at_dest),
)
# If the target option has been specified, only copy nodes also not
# available there...
if target is not None:
# Fetch a reference to the target group
try:
target_group = di.StorageGroup.get(name=target)
except pw.DoesNotExist:
raise RuntimeError('Target group "%s" does not exist in the DB.' % target)
# First get the nodes at the destination...
nodes_at_target = di.StorageNode.select().where(
di.StorageNode.group == target_group
)
# Then use this to get a list of all files at the destination...
files_at_target = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << nodes_at_target,
di.ArchiveFileCopy.has_file == "Y",
)
)
# Only match files that are also not available at the target
copy = copy.where(~(di.ArchiveFileCopy.file << files_at_target))
# In transport mode (DEPRECATED) we only move files that don't have an
# archive copy elsewhere...
if transport:
import warnings
warnings.warn("Transport mode is deprecated. Try to use --target instead.")
# Get list of other archive nodes
other_archive_nodes = di.StorageNode.select().where(
di.StorageNode.storage_type == "A", di.StorageNode.id != from_node
)
files_in_archive = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << other_archive_nodes,
di.ArchiveFileCopy.has_file == "Y",
)
)
copy = copy.where(~(di.ArchiveFileCopy.file << files_in_archive))
# Join onto ArchiveFile for later query parts
copy = copy.join(di.ArchiveFile)
# If requested, limit query to a specific acquisition...
if acq is not None:
# Fetch acq if specified
try:
acq = di.ArchiveAcq.get(name=acq)
except pw.DoesNotExist:
raise Exception('Acquisition "%s" does not exist in the DB.' % acq)
# Restrict files to be in the acquisition
copy = copy.where(di.ArchiveFile.acq == acq)
if not copy.count():
print("No files to copy from node %s." % (node_name))
return
# Show acquisitions based summary of files to be copied
if show_acq:
acqs = [c.file.acq.name for c in copy]
import collections
for acq, count in collections.Counter(acqs).items():
print("%s [%i files]" % (acq, count))
# Show all files to be copied
if show_files:
for c in copy:
print("%s/%s" % (c.file.acq.name, c.file.name))
size_bytes = copy.aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
size_gb = int(size_bytes) / 1073741824.0
print(
"Will request that %d files (%.1f GB) be copied from node %s to group %s."
% (copy.count(), size_gb, node_name, group_name)
)
if not (force or click.confirm("Do you want to proceed?")):
print("Aborted.")
return
dtnow = datetime.datetime.now()
# Perform update in a transaction to avoid any clobbering from concurrent updates
with di.ArchiveFileCopyRequest._meta.database.atomic():
# Get a list of all the file ids for the copies we should perform
files_ids = [c.file_id for c in copy]
        # Get a list of all the file ids for existing requests
requests = di.ArchiveFileCopyRequest.select().where(
di.ArchiveFileCopyRequest.group_to == to_group,
di.ArchiveFileCopyRequest.node_from == from_node,
)
req_file_ids = [req.file_id for req in requests]
# Separate the files into ones that already have requests and ones that don't
        files_in = [x for x in files_ids if x in req_file_ids]
        files_out = [x for x in files_ids if x not in req_file_ids]
sys.stdout.write(
"Updating %i existing requests and inserting %i new ones.\n"
% (len(files_in), len(files_out))
)
# Perform an update of all the existing copy requests
if len(files_in) > 0:
update = di.ArchiveFileCopyRequest.update(
nice=nice,
completed=False,
cancelled=False,
timestamp=dtnow,
n_requests=di.ArchiveFileCopyRequest.n_requests + 1,
)
update = update.where(
di.ArchiveFileCopyRequest.file << files_in,
di.ArchiveFileCopyRequest.group_to == to_group,
di.ArchiveFileCopyRequest.node_from == from_node,
)
update.execute()
# Insert any new requests
if len(files_out) > 0:
# Construct a list of all the rows to insert
insert = [
{
"file": fid,
"node_from": from_node,
"nice": 0,
"group_to": to_group,
"completed": False,
"n_requests": 1,
"timestamp": dtnow,
}
for fid in files_out
]
# Do a bulk insert of these new rows
di.ArchiveFileCopyRequest.insert_many(insert).execute()
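# Example invocation (node/group names are hypothetical, and this assumes the
# click group above is exposed as an `alpenhorn` console script):
#   alpenhorn sync mynode archive_group --target=offsite_group --show_files
# which queues copy requests for every file on 'mynode' that is absent from
# both 'archive_group' and 'offsite_group'.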
@cli.command()
@click.option(
"--all", help="Show the status of all nodes, not just mounted ones.", is_flag=True
)
def status(all):
"""Summarise the status of alpenhorn storage nodes."""
import tabulate
# Data to fetch from the database (node name, total files, total size)
query_info = (
di.StorageNode.name,
pw.fn.Count(di.ArchiveFileCopy.id).alias("count"),
pw.fn.Sum(di.ArchiveFile.size_b).alias("total_size"),
di.StorageNode.host,
di.StorageNode.root,
)
# Per node totals
nodes = (
di.StorageNode.select(*query_info)
.join(di.ArchiveFileCopy)
.where(di.ArchiveFileCopy.has_file == "Y")
.join(di.ArchiveFile)
.group_by(di.StorageNode)
.order_by(di.StorageNode.name)
)
if not all:
nodes = nodes.where(di.StorageNode.mounted)
# Totals for the whole archive
tot = di.ArchiveFile.select(
pw.fn.Count(di.ArchiveFile.id).alias("count"),
pw.fn.Sum(di.ArchiveFile.size_b).alias("total_size"),
).scalar(as_tuple=True)
data = [
[
node[0],
int(node[1]),
int(node[2]) / 2 ** 40.0,
100.0 * int(node[1]) / int(tot[0]),
100.0 * int(node[2]) / int(tot[1]),
"%s:%s" % (node[3], node[4]),
]
for node in nodes.tuples()
]
headers = ["Node", "Files", "Size [TB]", "Files [%]", "Size [%]", "Path"]
print(tabulate.tabulate(data, headers=headers, floatfmt=".1f"))
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("--md5", help="perform full check against md5sum", is_flag=True)
@click.option(
"--fixdb", help="fix up the database to be consistent with reality", is_flag=True
)
@click.option(
"--acq",
metavar="ACQ",
multiple=True,
help="Limit verification to specified acquisitions. Use repeated --acq flags to specify multiple acquisitions.",
)
def verify(node_name, md5, fixdb, acq):
"""Verify the archive on NODE against the database."""
import os
try:
this_node = di.StorageNode.get(di.StorageNode.name == node_name)
except pw.DoesNotExist:
print("Specified node does not exist.")
return
## Use a complicated query with a tuples construct to fetch everything we
## need in a single query. This massively speeds up the whole process versus
## fetching all the FileCopy's then querying for Files and Acqs.
lfiles = (
di.ArchiveFile.select(
di.ArchiveFile.name,
di.ArchiveAcq.name,
di.ArchiveFile.size_b,
di.ArchiveFile.md5sum,
di.ArchiveFileCopy.id,
)
.join(di.ArchiveAcq)
.switch(di.ArchiveFile)
.join(di.ArchiveFileCopy)
.where(di.ArchiveFileCopy.node == this_node, di.ArchiveFileCopy.has_file == "Y")
.tuples()
)
missing_files = []
corrupt_files = []
missing_ids = []
corrupt_ids = []
nfiles = 0
with click.progressbar(lfiles, label="Scanning files") as lfiles_iter:
for filename, acqname, filesize, md5sum, fc_id in lfiles_iter:
# Skip if not in specified acquisitions
if len(acq) > 0 and acqname not in acq:
continue
nfiles += 1
filepath = this_node.root + "/" + acqname + "/" + filename
# Check if file is plain missing
if not os.path.exists(filepath):
missing_files.append(filepath)
missing_ids.append(fc_id)
continue
if md5:
file_md5 = di.md5sum_file(filepath)
corrupt = file_md5 != md5sum
else:
corrupt = os.path.getsize(filepath) != filesize
if corrupt:
corrupt_files.append(filepath)
corrupt_ids.append(fc_id)
continue
if len(missing_files) > 0:
print()
print("=== Missing files ===")
for fname in missing_files:
print(fname)
if len(corrupt_files) > 0:
print()
print("=== Corrupt files ===")
for fname in corrupt_files:
print(fname)
print()
print("=== Summary ===")
print(" %i total files" % nfiles)
print(" %i missing files" % len(missing_files))
print(" %i corrupt files" % len(corrupt_files))
print()
# Fix up the database by marking files as missing, and marking
# corrupt files for verification by alpenhornd.
if fixdb:
# Make sure we connect RW
di.connect_database(read_write=True)
if (len(missing_files) > 0) and click.confirm("Fix missing files"):
missing_count = (
di.ArchiveFileCopy.update(has_file="N")
.where(di.ArchiveFileCopy.id << missing_ids)
.execute()
)
print(" %i marked as missing" % missing_count)
if (len(corrupt_files) > 0) and click.confirm("Fix corrupt files"):
corrupt_count = (
di.ArchiveFileCopy.update(has_file="M")
.where(di.ArchiveFileCopy.id << corrupt_ids)
.execute()
)
print(" %i corrupt files marked for verification" % corrupt_count)
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option(
"--days", "-d", help="clean files older than <days>", type=int, default=None
)
@click.option("--force", "-f", help="force cleaning on an archive node", is_flag=True)
@click.option("--now", "-n", help="force immediate removal", is_flag=True)
@click.option(
"--target",
metavar="TARGET_GROUP",
default=None,
type=str,
help="Only clean files already available in this group.",
)
@click.option(
"--acq", metavar="ACQ", default=None, type=str, help="Limit removal to acquisition"
)
def clean(node_name, days, force, now, target, acq):
"""Clean up NODE by marking older files as potentially removable.
If --target is specified we will only remove files already available in the
TARGET_GROUP. This is useful for cleaning out intermediate locations such as
transport disks.
Using the --days flag will only clean correlator and housekeeping
files which have a timestamp associated with them. It will not
touch other types. If no --days flag is given, all files will be
considered for removal.
"""
import peewee as pw
di.connect_database(read_write=True)
try:
this_node = di.StorageNode.get(di.StorageNode.name == node_name)
except pw.DoesNotExist:
print("Specified node does not exist.")
# Check to see if we are on an archive node
if this_node.storage_type == "A":
if force or click.confirm("DANGER: run clean on archive node?"):
print("%s is an archive node. Forcing clean." % node_name)
else:
print("Cannot clean archive node %s without forcing." % node_name)
return
# Select FileCopys on this node.
files = di.ArchiveFileCopy.select(di.ArchiveFileCopy.id).where(
di.ArchiveFileCopy.node == this_node, di.ArchiveFileCopy.wants_file == "Y"
)
# Limit to acquisition
if acq is not None:
try:
acq = di.ArchiveAcq.get(name=acq)
        except pw.DoesNotExist:
raise RuntimeError("Specified acquisition %s does not exist" % acq)
files_in_acq = di.ArchiveFile.select().where(di.ArchiveFile.acq == acq)
files = files.where(di.ArchiveFileCopy.file << files_in_acq)
# If the target option has been specified, only clean files also available there...
if target is not None:
# Fetch a reference to the target group
try:
target_group = di.StorageGroup.get(name=target)
except pw.DoesNotExist:
raise RuntimeError('Target group "%s" does not exist in the DB.' % target)
# First get the nodes at the destination...
nodes_at_target = di.StorageNode.select().where(
di.StorageNode.group == target_group
)
# Then use this to get a list of all files at the destination...
files_at_target = (
di.ArchiveFile.select()
.join(di.ArchiveFileCopy)
.where(
di.ArchiveFileCopy.node << nodes_at_target,
di.ArchiveFileCopy.has_file == "Y",
)
)
# Only match files that are also available at the target
files = files.where(di.ArchiveFileCopy.file << files_at_target)
# If --days has been set we need to restrict to files older than the given
# time. This only works for a few particular file types
if days is not None and days > 0:
# Get the time for the oldest files to keep
oldest = datetime.datetime.now() - datetime.timedelta(days)
oldest_unix = ephemeris.ensure_unix(oldest)
# List of filetypes we want to update, needs a human readable name and a
# FileInfo table.
filetypes = [["correlation", di.CorrFileInfo], ["housekeeping", di.HKFileInfo]]
file_ids = []
# Iterate over file types for cleaning
for name, infotable in filetypes:
# Filter to fetch only ones with a start time older than `oldest`
oldfiles = (
files.join(di.ArchiveFile)
.join(infotable)
.where(infotable.start_time < oldest_unix)
)
local_file_ids = list(oldfiles)
            # Get the number of files of this type to clean
count = oldfiles.count()
if count > 0:
size_bytes = (
di.ArchiveFileCopy.select()
.where(di.ArchiveFileCopy.id << local_file_ids)
.join(di.ArchiveFile)
.aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
)
size_gb = int(size_bytes) / 2 ** 30.0
print(
"Cleaning up %i %s files (%.1f GB) from %s "
% (count, name, size_gb, node_name)
)
file_ids += local_file_ids
# If days is not set, then just select all files that meet the requirements so far
else:
file_ids = list(files)
count = files.count()
if count > 0:
size_bytes = (
di.ArchiveFileCopy.select()
.where(di.ArchiveFileCopy.id << file_ids)
.join(di.ArchiveFile)
.aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
)
size_gb = int(size_bytes) / 1073741824.0
print(
"Cleaning up %i files (%.1f GB) from %s " % (count, size_gb, node_name)
)
# If there are any files to clean, ask for confirmation and the mark them in
# the database for removal
if len(file_ids) > 0:
if force or click.confirm(" Are you sure?"):
print(" Marking files for cleaning.")
state = "N" if now else "M"
update = di.ArchiveFileCopy.update(wants_file=state).where(
di.ArchiveFileCopy.id << file_ids
)
n = update.execute()
print("Marked %i files for cleaning" % n)
else:
print(" Cancelled")
else:
print("No files selected for cleaning on %s." % node_name)
@cli.command()
@click.option(
"--host",
"-H",
help="use specified host rather than local machine",
type=str,
default=None,
)
def mounted(host):
"""list the nodes mounted on this, or another specified, machine"""
import socket
if host is None:
host = socket.gethostname().split(".")[0]
zero = True
for node in di.StorageNode.select().where(
di.StorageNode.host == host, di.StorageNode.mounted == True
):
n_file = (
di.ArchiveFileCopy.select().where(di.ArchiveFileCopy.node == node).count()
)
print("%-25s %-30s %5d files" % (node.name, node.root, n_file))
zero = False
if zero:
print("No nodes are mounted on host %s." % host)
@cli.command()
@click.argument("serial_num")
def format_transport(serial_num):
"""Interactive routine for formatting a transport disc as a storage
node; formats and labels the disc as necessary, the adds to the
database. The disk is specified using the manufacturers
SERIAL_NUM, which is printed on the disk.
"""
import glob
import os
if os.getuid() != 0:
print("You must be root to run mount on a transport disc. I quit.")
return
# Find the disc.
dev = glob.glob("/dev/disk/by-id/*%s" % serial_num)
if len(dev) == 0:
print("No disc with that serial number is attached.")
return
elif len(dev) > 1:
print("Confused: found more than one device matching that serial number:")
for d in dev:
print(" %s" % dev)
print("Aborting.")
return
dev = dev[0]
dev_part = "%s-part1" % dev
# Figure out if it is formatted.
print("Checking to see if disc is formatted. Please wait.")
fp = os.popen("parted -s %s print" % dev)
formatted = False
part_start = False
while True:
l = fp.readline()
if not l:
break
if l.find("Number") == 0 and l.find("Start") > 0 and l.find("File system") > 0:
part_start = True
elif l.strip() != "" and part_start:
formatted = True
fp.close()
if not formatted:
if not click.confirm("Disc is not formatted. Should I format it?"):
return
print("Creating partition. Please wait.")
os.system(
"parted -s -a optimal %s mklabel gpt -- mkpart primary 0%% 100%%" % dev
)
print("Formatting disc. Please wait.")
os.system("mkfs.ext4 %s -m 0 -L CH-%s" % (dev_part, serial_num))
else:
print("Disc is already formatted.")
e2label = get_e2label(dev_part)
name = "CH-%s" % serial_num
    if e2label and e2label != name:
        print(
            "Disc label %s does not conform to labelling standard, "
            "which is CH-<serialnum>." % e2label
        )
        exit()
elif not e2label:
print('Labelling the disc as "%s" (using e2label) ...' % (name))
assert dev_part is not None
assert len(name) <= MAX_E2LABEL_LEN
stat = os.system("/sbin/e2label %s %s" % (dev_part, name))
if stat:
print("Failed to e2label! Stat = %s. I quit." % (stat))
exit()
# Ensure the mount path exists.
root = "/mnt/%s" % name
if not os.path.isdir(root):
print("Creating mount point %s." % root)
os.mkdir(root)
# Check to see if the disc is mounted.
fp = os.popen("df")
mounted = False
dev_part_abs = os.path.realpath(dev_part)
while 1:
l = fp.readline()
if not l:
break
if l.find(root) > 0:
            if l[: len(dev_part)] == dev_part or l[: len(dev_part_abs)] == dev_part_abs:
mounted = True
else:
print(
"%s is a mount point, but %s is already mounted there."(
root, l.split()[0]
)
)
fp.close()
try:
node = di.StorageNode.get(name=name)
except pw.DoesNotExist:
print(
"This disc has not been registered yet as a storage node. "
"Registering now."
)
try:
group = di.StorageGroup.get(name="transport")
except pw.DoesNotExist:
print('Hmmm. Storage group "transport" does not exist. I quit.')
exit()
# We need to write to the database.
di.connect_database(read_write=True)
node = di.StorageNode.create(
name=name, root=root, group=group, storage_type="T", min_avail_gb=1
)
print("Successfully created storage node.")
print("Node created but not mounted. Run alpenhorn mount_transport for that.")
@cli.command()
@click.pass_context
@click.argument("node")
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
"--address", help="address for remote access to this node.", type=str, default=None
)
def mount_transport(ctx, node, user, address):
"""Mount a transport disk into the system and then make it available to alpenhorn."""
mnt_point = "/mnt/%s" % node
print("Mounting disc at %s" % mnt_point)
os.system("mount %s" % mnt_point)
ctx.invoke(mount, name=node, path=mnt_point, user=user, address=address)
@cli.command()
@click.pass_context
@click.argument("node")
def unmount_transport(ctx, node):
"""Mount a transport disk into the system and then make it available to alpenhorn."""
mnt_point = "/mnt/%s" % node
print("Unmounting disc at %s" % mnt_point)
os.system("umount %s" % mnt_point)
ctx.invoke(unmount, root_or_name=node)
@cli.command()
@click.argument("name")
@click.option("--path", help="Root path for this node", type=str, default=None)
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
"--address", help="address for remote access to this node.", type=str, default=None
)
@click.option(
"--hostname",
help="hostname running the alpenhornd instance for this node (set to this hostname by default).",
type=str,
default=None,
)
def mount(name, path, user, address, hostname):
"""Interactive routine for mounting a storage node located at ROOT."""
import socket
# We need to write to the database.
di.connect_database(read_write=True)
try:
node = di.StorageNode.get(name=name)
except pw.DoesNotExist:
        print('Storage node "%s" does not exist. I quit.' % name)
        return
if node.mounted:
print('Node "%s" is already mounted.' % name)
return
# Set the default hostname if required
if hostname is None:
hostname = socket.gethostname()
print('I will set the host to "%s".' % hostname)
# Set the parameters of this node
node.username = user
node.address = address
node.mounted = True
node.host = hostname
if path is not None:
node.root = path
node.save()
print('Successfully mounted "%s".' % name)
@cli.command()
@click.argument("root_or_name")
def unmount(root_or_name):
"""Unmount a storage node with location or named ROOT_OR_NAME."""
import os
import socket
# We need to write to the database.
di.connect_database(read_write=True)
try:
node = di.StorageNode.get(name=root_or_name)
except pw.DoesNotExist:
if root_or_name[-1] == "/":
root_or_name = root_or_name[: len(root_or_name) - 1]
if not os.path.exists(root_or_name):
print("That is neither a node name, nor a path on this host. " "I quit.")
exit()
try:
node = di.StorageNode.get(root=root_or_name, host=socket.gethostname())
except pw.DoesNotExist:
print(
"That is neither a node name nor a root name that is " "known. I quit."
)
exit()
if not node.mounted:
print("There is no node mounted there any more.")
else:
node.mounted = False
node.save()
print("Node successfully unmounted.")
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("-v", "--verbose", count=True)
@click.option(
"--acq",
help="Limit import to specified acquisition directories",
multiple=True,
default=None,
)
@click.option("--dry", "-d", help="Dry run. Do not modify database.", is_flag=True)
def import_files(node_name, verbose, acq, dry):
"""Scan the current directory for known acquisition files and add them into the database for NODE.
    This command is useful for manually maintaining an archive where we cannot
    run alpenhornd in the usual manner.
"""
import glob
from ch_util import data_index as di
di.connect_database(read_write=True)
import peewee as pw
# Construct list of acqs to scan
    if not acq:
acqs = glob.glob("*")
else:
acqs = acq
# Keep track of state as we process the files
added_files = [] # Files we have added to the database
corrupt_files = [] # Known files which are corrupt
registered_files = [] # Files already registered in the database
unknown_files = [] # Files not known in the database
not_acqs = [] # Directories which were not known acquisitions
# Fetch a reference to the node
try:
node = di.StorageNode.select().where(di.StorageNode.name == node_name).get()
except pw.DoesNotExist:
print("Unknown node.")
return
with click.progressbar(acqs, label="Scanning acquisitions") as acq_iter:
for acq_name in acq_iter:
try:
di.parse_acq_name(acq_name)
except di.Validation:
not_acqs.append(acq_name)
continue
try:
acq = di.ArchiveAcq.select().where(di.ArchiveAcq.name == acq_name).get()
except pw.DoesNotExist:
not_acqs.append(acq_name)
continue
files = glob.glob(acq_name + "/*")
# Fetch lists of all files in this acquisition, and all
# files in this acq with local copies
file_names = [f.name for f in acq.files]
local_file_names = [
f.name
for f in acq.files.join(di.ArchiveFileCopy).where(
di.ArchiveFileCopy.node == node
)
]
for fn in files:
f_name = os.path.split(fn)[1]
# Check if file exists in database
if f_name not in file_names:
unknown_files.append(fn)
continue
# Check if file is already registered on this node
if f_name in local_file_names:
registered_files.append(fn)
else:
archive_file = (
di.ArchiveFile.select()
.where(di.ArchiveFile.name == f_name, di.ArchiveFile.acq == acq)
.get()
)
if os.path.getsize(fn) != archive_file.size_b:
corrupt_files.append(fn)
continue
added_files.append(fn)
if not dry:
di.ArchiveFileCopy.create(
file=archive_file, node=node, has_file="Y", wants_file="Y"
)
print("\n==== Summary ====")
print()
print("Added %i files" % len(added_files))
print()
print("%i corrupt files." % len(corrupt_files))
print("%i files already registered." % len(registered_files))
print("%i files not known" % len(unknown_files))
print("%i directories were not acquisitions." % len(not_acqs))
if verbose > 0:
print()
print("Added files:")
print()
for fn in added_files:
print(fn)
if verbose > 1:
print("Corrupt:")
for fn in corrupt_files:
print(fn)
print()
print("Unknown files:")
for fn in unknown_files:
print(fn)
print()
print("Unknown acquisitions:")
for fn in not_acqs:
print(fn)
print()
# A few utility routines for dealing with filesystems
MAX_E2LABEL_LEN = 16
def get_e2label(dev):
import os
pin, pout, perr = os.popen3("/sbin/e2label %s" % dev, "r")
pin.close()
res = pout.read().strip()
err = perr.read()
pout.close()
perr.close()
if not len(err) and len(res) < MAX_E2LABEL_LEN:
return res
return None
def get_mount_device(path):
import os
p = os.popen("mount", "r")
res = p.read()
p.close()
dev = None
for l in res.split("\n"):
if not len(l):
continue
s = l.split()
assert s[1] == "on"
if s[2] == os.path.abspath(path):
dev = s[0]
return dev
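# Usage sketch for the helpers above (device paths and labels are
# illustrative, not taken from the source):
#   get_e2label("/dev/sdb1")            -> "CH-XYZ123" or None
#   get_mount_device("/mnt/CH-XYZ123")  -> "/dev/sdb1" or None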
|
|
"""The tests for the MQTT switch platform."""
import unittest
from homeassistant.setup import setup_component
from homeassistant.const import STATE_ON, STATE_OFF, STATE_UNAVAILABLE,\
ATTR_ASSUMED_STATE
import homeassistant.components.switch as switch
from tests.common import (
mock_mqtt_component, fire_mqtt_message, get_test_home_assistant)
class TestSensorMQTT(unittest.TestCase):
"""Test the MQTT switch."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_publish = mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
""""Stop everything that was started."""
self.hass.stop()
def test_controlling_state_via_topic(self):
"""Test the controlling state via topic."""
assert setup_component(self.hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_on': 1,
'payload_off': 0
}
})
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
self.assertFalse(state.attributes.get(ATTR_ASSUMED_STATE))
fire_mqtt_message(self.hass, 'state-topic', '1')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
fire_mqtt_message(self.hass, 'state-topic', '0')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_sending_mqtt_commands_and_optimistic(self):
"""Test the sending MQTT commands in optimistic mode."""
assert setup_component(self.hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'command-topic',
'payload_on': 'beer on',
'payload_off': 'beer off',
'qos': '2'
}
})
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE))
switch.turn_on(self.hass, 'switch.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'beer on', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.block_till_done()
self.assertEqual(('command-topic', 'beer off', 2, False),
self.mock_publish.mock_calls[-2][1])
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_controlling_state_via_topic_and_json_message(self):
"""Test the controlling state via topic and JSON message."""
assert setup_component(self.hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'payload_on': 'beer on',
'payload_off': 'beer off',
'value_template': '{{ value_json.val }}'
}
})
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
fire_mqtt_message(self.hass, 'state-topic', '{"val":"beer on"}')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
fire_mqtt_message(self.hass, 'state-topic', '{"val":"beer off"}')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_controlling_availability(self):
"""Test the controlling state via topic."""
assert setup_component(self.hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'availability_topic': 'availability_topic',
'payload_on': 1,
'payload_off': 0,
'payload_available': 1,
'payload_not_available': 0
}
})
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_UNAVAILABLE, state.state)
fire_mqtt_message(self.hass, 'availability_topic', '1')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
self.assertFalse(state.attributes.get(ATTR_ASSUMED_STATE))
fire_mqtt_message(self.hass, 'availability_topic', '0')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_UNAVAILABLE, state.state)
fire_mqtt_message(self.hass, 'state-topic', '1')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_UNAVAILABLE, state.state)
fire_mqtt_message(self.hass, 'availability_topic', '1')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
def test_custom_availability_payload(self):
"""Test the availability payload."""
assert setup_component(self.hass, switch.DOMAIN, {
switch.DOMAIN: {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'state-topic',
'command_topic': 'command-topic',
'availability_topic': 'availability_topic',
'payload_on': 1,
'payload_off': 0,
'payload_available': 'online',
'payload_not_available': 'offline'
}
})
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_UNAVAILABLE, state.state)
fire_mqtt_message(self.hass, 'availability_topic', 'online')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
self.assertFalse(state.attributes.get(ATTR_ASSUMED_STATE))
fire_mqtt_message(self.hass, 'availability_topic', 'offline')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_UNAVAILABLE, state.state)
fire_mqtt_message(self.hass, 'state-topic', '1')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_UNAVAILABLE, state.state)
fire_mqtt_message(self.hass, 'availability_topic', 'online')
self.hass.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
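
    # --- Illustrative sketch (not in the original suite) ---
    # A further case one might add, assuming the component's defaults of
    # payload 'ON'/'OFF', qos 0 and retain False when these keys are omitted
    # from the config:
    #
    #     def test_default_payload_and_qos(self):
    #         """Optimistic switch with no explicit payload/qos settings."""
    #         assert setup_component(self.hass, switch.DOMAIN, {
    #             switch.DOMAIN: {
    #                 'platform': 'mqtt',
    #                 'name': 'test',
    #                 'command_topic': 'command-topic',
    #             }
    #         })
    #         switch.turn_on(self.hass, 'switch.test')
    #         self.hass.block_till_done()
    #         self.assertEqual(('command-topic', 'ON', 0, False),
    #                          self.mock_publish.mock_calls[-2][1])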
|
|
import os
import logging
import struct
import claripy
from cle import MetaELF
from cle.backends.elf.symbol import ELFSymbol, ELFSymbolType
from cle.backends.elf.elfcore import ELFCore
from cle.address_translator import AT
from archinfo import ArchX86, ArchAMD64, ArchARM, ArchAArch64, ArchMIPS32, ArchMIPS64, ArchPPC32, ArchPPC64
from ..tablespecs import StringTableSpec
from ..procedures import SIM_PROCEDURES as P, SIM_LIBRARIES as L
from ..state_plugins import SimFilesystem, SimHostFilesystem
from ..storage.file import SimFile, SimFileBase
from ..errors import AngrSyscallError
from .userland import SimUserland
_l = logging.getLogger(name=__name__)
class SimLinux(SimUserland):
"""
OS-specific configuration for \\*nix-y OSes.
"""
def __init__(self, project, **kwargs):
super(SimLinux, self).__init__(project,
syscall_library=L['linux'],
syscall_addr_alignment=project.arch.instruction_alignment,
name="Linux",
**kwargs)
self._loader_addr = None
self._loader_lock_addr = None
self._loader_unlock_addr = None
self._error_catch_tsd_addr = None
self._is_core = None
self.vsyscall_addr = None
def configure_project(self): # pylint: disable=arguments-differ
self._is_core = isinstance(self.project.loader.main_object, ELFCore)
if not self._is_core:
self._loader_addr = self.project.loader.extern_object.allocate()
self._loader_lock_addr = self.project.loader.extern_object.allocate()
self._loader_unlock_addr = self.project.loader.extern_object.allocate()
self._error_catch_tsd_addr = self.project.loader.extern_object.allocate()
self.vsyscall_addr = self.project.loader.extern_object.allocate()
self.project.hook(self._loader_addr, P['linux_loader']['LinuxLoader']())
self.project.hook(self._loader_lock_addr, P['linux_loader']['_dl_rtld_lock_recursive']())
self.project.hook(self._loader_unlock_addr, P['linux_loader']['_dl_rtld_unlock_recursive']())
self.project.hook(self._error_catch_tsd_addr,
P['linux_loader']['_dl_initial_error_catch_tsd'](
static_addr=self.project.loader.extern_object.allocate()
)
)
self.project.hook(self.vsyscall_addr, P['linux_kernel']['_vsyscall']())
# there are some functions we MUST use the simprocedures for, regardless of what the user wants
self._weak_hook_symbol('__tls_get_addr', L['ld.so'].get('__tls_get_addr', self.arch)) # ld
self._weak_hook_symbol('___tls_get_addr', L['ld.so'].get('___tls_get_addr', self.arch)) # ld
self._weak_hook_symbol('_dl_vdso_vsym', L['libc.so.6'].get('_dl_vdso_vsym', self.arch)) # libc
# set up some static data in the loader object...
_rtld_global = self.project.loader.find_symbol('_rtld_global')
if _rtld_global is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0x990, self._error_catch_tsd_addr)
# TODO: what the hell is this
_rtld_global_ro = self.project.loader.find_symbol('_rtld_global_ro')
if _rtld_global_ro is not None:
pass
tls_obj = self.project.loader.tls.new_thread()
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x28, 0x5f43414e41525900) # _CANARY\x00
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
elif isinstance(self.project.arch, ArchX86):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x10, self.vsyscall_addr)
        # Only set up ifunc resolution if we are using the ELF backend on AMD64 or X86
if isinstance(self.project.loader.main_object, MetaELF):
if isinstance(self.project.arch, (ArchAMD64, ArchX86)):
for binary in self.project.loader.all_objects:
if not isinstance(binary, MetaELF):
continue
for reloc in binary.relocs:
if reloc.symbol is None or reloc.resolvedby is None:
continue
try:
if reloc.resolvedby.subtype != ELFSymbolType.STT_GNU_IFUNC:
continue
except ValueError: # base class Symbol throws this, meaning we don't have an ELFSymbol, etc
continue
gotaddr = reloc.rebased_addr
gotvalue = self.project.loader.memory.unpack_word(gotaddr)
if self.project.is_hooked(gotvalue):
continue
# Replace it with a ifunc-resolve simprocedure!
kwargs = {
'funcaddr': gotvalue,
'gotaddr': gotaddr,
'funcname': reloc.symbol.name
}
# TODO: should this be replaced with hook_symbol?
randaddr = self.project.loader.extern_object.allocate()
self.project.hook(randaddr, P['linux_loader']['IFuncResolver'](**kwargs))
self.project.loader.memory.pack_word(gotaddr, randaddr)
if isinstance(self.project.arch, ArchARM):
self.project.hook(0xffff0fe0, P['linux_kernel']['_kernel_user_helper_get_tls']())
# maybe move this into archinfo?
if self.arch.name == 'X86':
syscall_abis = ['i386']
elif self.arch.name == 'AMD64':
syscall_abis = ['i386', 'amd64']
elif self.arch.name.startswith('ARM'):
syscall_abis = ['arm']
if self.arch.name == 'ARMHF':
syscall_abis.append('armhf')
elif self.arch.name == 'AARCH64':
syscall_abis = ['aarch64']
# https://www.linux-mips.org/wiki/WhatsWrongWithO32N32N64
elif self.arch.name == 'MIPS32':
syscall_abis = ['mips-o32']
elif self.arch.name == 'MIPS64':
syscall_abis = ['mips-n32', 'mips-n64']
elif self.arch.name == 'PPC32':
syscall_abis = ['ppc']
elif self.arch.name == 'PPC64':
syscall_abis = ['ppc64']
else:
syscall_abis = [] # ?
super(SimLinux, self).configure_project(syscall_abis)
def syscall_abi(self, state):
if state.arch.name != 'AMD64':
return None
jk = state.history.jumpkind
if jk is None:
# we are being invoked in the middle of a step
jk = state.history.parent.jumpkind
if jk == 'Ijk_Sys_int128':
return 'i386'
elif jk == 'Ijk_Sys_syscall':
return 'amd64'
else:
raise AngrSyscallError("Unknown syscall jumpkind %s" % jk)
# pylint: disable=arguments-differ
def state_blank(self, fs=None, concrete_fs=False, chroot=None,
cwd=None, pathsep=b'/', thread_idx=None, **kwargs):
state = super(SimLinux, self).state_blank(thread_idx=thread_idx, **kwargs)
tls_obj = self.project.loader.tls.threads[thread_idx if thread_idx is not None else 0]
if isinstance(state.arch, ArchAMD64):
state.regs.fs = tls_obj.user_thread_pointer
elif isinstance(state.arch, ArchX86):
state.regs.gs = tls_obj.user_thread_pointer >> 16
elif isinstance(state.arch, (ArchMIPS32, ArchMIPS64)):
state.regs.ulr = tls_obj.user_thread_pointer
elif isinstance(state.arch, ArchPPC32):
state.regs.r2 = tls_obj.user_thread_pointer
elif isinstance(state.arch, ArchPPC64):
state.regs.r13 = tls_obj.user_thread_pointer
elif isinstance(state.arch, ArchAArch64):
state.regs.tpidr_el0 = tls_obj.user_thread_pointer
if fs is None:
fs = {}
for name in fs:
if type(fs[name]) is str:
fs[name] = fs[name].encode('utf-8')
if type(fs[name]) is bytes:
fs[name] = claripy.BVV(fs[name])
if isinstance(fs[name], claripy.Bits):
fs[name] = SimFile(name, content=fs[name])
if not isinstance(fs[name], SimFileBase):
raise TypeError("Provided fs initializer with unusable type %r" % type(fs[name]))
mounts = {}
if concrete_fs:
if fs:
raise TypeError("Providing both fs and concrete_fs doesn't make sense")
if chroot is not None:
chroot = os.path.abspath(chroot)
else:
chroot = os.path.sep
mounts[pathsep] = SimHostFilesystem(chroot)
if cwd is None:
cwd = os.getcwd()
if chroot != os.path.sep:
# try to translate the cwd into the chroot
if cwd.startswith(chroot):
cwd = cwd[len(chroot):]
else:
cwd = os.path.sep
cwd = cwd.encode()
else:
if cwd is None:
cwd = b'/home/user'
state.register_plugin('fs', SimFilesystem(files=fs, pathsep=pathsep, cwd=cwd, mountpoints=mounts))
if isinstance(self.project.loader.main_object, MetaELF) and self.project.loader.main_object.is_ppc64_abiv1:
state.libc.ppc64_abiv = 'ppc64_1'
return state
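
    # --- Illustrative sketch (not part of the original module) ---
    # The fs= coercion above lets callers seed the simulated filesystem with
    # plain Python values, e.g. (hypothetical caller using the public API):
    #
    #     state = proj.factory.blank_state(fs={'/etc/hostname': 'simhost\n'})
    #     # 'simhost\n' -> bytes -> claripy.BVV -> SimFile, served by SimFilesystem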
def state_entry(self, args=None, env=None, argc=None, **kwargs):
state = super(SimLinux, self).state_entry(**kwargs)
# Handle default values
filename = self.project.filename or 'dummy_filename'
if args is None:
args = [filename]
if env is None:
env = {}
# Prepare argc
if argc is None:
argc = claripy.BVV(len(args), state.arch.bits)
elif type(argc) is int: # pylint: disable=unidiomatic-typecheck
argc = claripy.BVV(argc, state.arch.bits)
# Make string table for args/env/auxv
table = StringTableSpec()
# Add args to string table
table.append_args(args)
# Add environment to string table
table.append_env(env)
# Prepare the auxiliary vector and add it to the end of the string table
# TODO: Actually construct a real auxiliary vector
# current vector is an AT_RANDOM entry where the "random" value is 0xaec0aec0aec0...
aux = [(25, b"\xAE\xC0" * 8)]
for a, b in aux:
table.add_pointer(a)
if isinstance(b, bytes):
table.add_string(b)
else:
table.add_pointer(b)
table.add_null()
table.add_null()
# Dump the table onto the stack, calculate pointers to args, env, and auxv
state.memory.store(state.regs.sp - 16, claripy.BVV(0, 8 * 16))
argv = table.dump(state, state.regs.sp - 16)
envp = argv + ((len(args) + 1) * state.arch.bytes)
auxv = argv + ((len(args) + len(env) + 2) * state.arch.bytes)
# Put argc on stack and fix the stack pointer
newsp = argv - state.arch.bytes
state.memory.store(newsp, argc, endness=state.arch.memory_endness)
state.regs.sp = newsp
if state.arch.name in ('PPC32',):
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
# store argc argv envp auxv in the posix plugin
state.posix.argv = argv
state.posix.argc = argc
state.posix.environ = envp
state.posix.auxv = auxv
self.set_entry_register_values(state)
# set __progname
progname_full = 0
progname = 0
if args:
progname_full = state.mem[argv].long.concrete
progname_cur = progname_full
progname = progname_full
while True:
byte = state.mem[progname_cur].byte.resolved
if byte.symbolic:
break
else:
if state.solver.eval(byte) == ord('/'):
progname = progname_cur + 1
elif state.solver.eval(byte) == 0:
break
progname_cur += 1
        # there will be multiple copies of these symbols, but the canonical ones (in the main binary,
        # or elsewhere if the main binary didn't have one) should get picked up here
for name, val in [
('__progname_full', progname_full),
('__progname', progname),
('__environ', envp),
('environ', envp),
('__libc_stack_end', state.regs.sp)]:
sym = self.project.loader.find_symbol(name)
if sym is not None:
if sym.size != self.arch.bytes:
_l.warning("Something is wrong with %s - bad size", name)
else:
state.mem[sym.rebased_addr].long = val
return state
def set_entry_register_values(self, state):
for reg, val in state.arch.entry_register_values.items():
if isinstance(val, int):
state.registers.store(reg, val)
elif isinstance(val, (str,)):
if val == 'argc':
state.registers.store(reg, state.posix.argc, size=state.arch.bytes)
elif val == 'argv':
state.registers.store(reg, state.posix.argv)
elif val == 'envp':
state.registers.store(reg, state.posix.environ)
elif val == 'auxv':
state.registers.store(reg, state.posix.auxv)
elif val == 'ld_destructor':
# a pointer to the dynamic linker's destructor routine, to be called at exit
# or NULL. We like NULL. It makes things easier.
state.registers.store(reg, 0)
elif val == 'toc':
if self.project.loader.main_object.is_ppc64_abiv1:
state.registers.store(reg, self.project.loader.main_object.ppc64_initial_rtoc)
elif val == 'entry':
state.registers.store(reg, state.registers.load('pc'))
elif val == 'thread_pointer':
state.registers.store(reg, self.project.loader.tls.threads[0].user_thread_pointer)
else:
_l.warning('Unknown entry point register value indicator "%s"', val)
else:
                _l.error('Unsupported default value %s for an entry-point register', val)
def state_full_init(self, **kwargs):
kwargs['addr'] = self._loader_addr
return super(SimLinux, self).state_full_init(**kwargs)
def prepare_function_symbol(self, symbol_name, basic_addr=None):
"""
Prepare the address space with the data necessary to perform relocations pointing to the given symbol.
Returns a 2-tuple. The first item is the address of the function code, the second is the address of the
relocation target.
"""
if self.project.loader.main_object.is_ppc64_abiv1:
if basic_addr is not None:
pointer = self.project.loader.memory.unpack_word(basic_addr)
return pointer, basic_addr
pseudo_hookaddr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
pseudo_toc = self.project.loader.extern_object.allocate(size=0x18)
self.project.loader.extern_object.memory.pack_word(
AT.from_mva(pseudo_toc, self.project.loader.extern_object).to_rva(), pseudo_hookaddr)
return pseudo_hookaddr, pseudo_toc
else:
if basic_addr is None:
basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
return basic_addr, basic_addr
def initialize_segment_register_x64(self, state, concrete_target):
"""
        Set the fs register in the angr state to the value of the fs register in the concrete process
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return: None
"""
_l.debug("Synchronizing fs segment register")
state.regs.fs = self._read_fs_register_x64(concrete_target)
def initialize_gdt_x86(self,state,concrete_target):
"""
Create a GDT in the state memory and populate the segment registers.
Rehook the vsyscall address using the real value in the concrete process memory
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return:
"""
_l.debug("Creating fake Global Descriptor Table and synchronizing gs segment register")
gs = self._read_gs_register_x86(concrete_target)
gdt = self.generate_gdt(0x0, gs)
self.setup_gdt(state, gdt)
# Synchronize the address of vsyscall in simprocedures dictionary with the concrete value
        _vsyscall_address = concrete_target.read_memory(gs + 0x10, state.project.arch.bits // 8)
_vsyscall_address = struct.unpack(state.project.arch.struct_fmt(), _vsyscall_address)[0]
state.project.rehook_symbol(_vsyscall_address, '_vsyscall', True)
return gdt
@staticmethod
def _read_fs_register_x64(concrete_target):
'''
        Injects a small shellcode snippet to leak the base address of the fs segment register.
        On Linux x86-64 this address is stored at fs:[0].
        :param concrete_target: ConcreteTarget which will be used to get the fs register address
        :return: fs register address
        :rtype: str
'''
# register used to read the value of the segment register
exfiltration_reg = "rax"
        # instruction to inject to read the word at offset 0 in the segment
read_fs0_x64 = b"\x64\x48\x8B\x04\x25\x00\x00\x00\x00\x90\x90\x90\x90" # mov rax, fs:[0]
return concrete_target.execute_shellcode(read_fs0_x64, exfiltration_reg)
@staticmethod
def _read_gs_register_x86(concrete_target):
'''
        Injects a small shellcode snippet to leak the base address of the gs segment register.
        On Linux x86 this address is stored at gs:[0].
        :param concrete_target: ConcreteTarget which will be used to get the gs register address
        :return: gs register address
        :rtype: str
'''
# register used to read the value of the segment register
exfiltration_reg = "eax"
        # instruction to inject to read the word at offset 0 in the segment
        read_gs0_x86 = b"\x65\xA1\x00\x00\x00\x00\x90\x90\x90\x90"  # mov eax, gs:[0]
        return concrete_target.execute_shellcode(read_gs0_x86, exfiltration_reg)
def get_segment_register_name(self):
if isinstance(self.arch, ArchAMD64):
for register in self.arch.register_list:
if register.name == 'fs':
return register.vex_offset
elif isinstance(self.arch, ArchX86):
for register in self.arch.register_list:
if register.name == 'gs':
return register.vex_offset
return None
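

# --- Illustrative sketch (not part of the original module) ---
# SimLinux is normally selected automatically when angr loads a Linux ELF; a
# typical end-to-end use of the machinery above (hypothetical script, run from
# outside this package) would look like:
#
#     import angr
#     proj = angr.Project('/bin/true', auto_load_libs=False)   # simos -> SimLinux
#     state = proj.factory.entry_state(args=['/bin/true'])     # -> state_entry()
#     assert state.solver.eval(state.posix.argc) == 1
#
# entry_state() forwards args/env to state_entry(), which lays out argv/envp/
# auxv on the stack and initialises __progname, __environ and friends.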
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import condition
from .fhirdate import FHIRDate
class ConditionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Condition", js["resourceType"])
return condition.Condition(js)
def testCondition1(self):
inst = self.instantiate_from("condition-example-f003-abscess.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition1(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition1(inst2)
def implCondition1(self, inst):
self.assertEqual(inst.bodySite[0].coding[0].code, "280193007")
self.assertEqual(inst.bodySite[0].coding[0].display, "Entire retropharyngeal area")
self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[0].code, "439401001")
self.assertEqual(inst.category[0].coding[0].display, "diagnosis")
self.assertEqual(inst.category[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.clinicalStatus.coding[0].code, "active")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "18099001")
self.assertEqual(inst.code.coding[0].display, "Retropharyngeal abscess")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.evidence[0].code[0].coding[0].code, "169068008")
self.assertEqual(inst.evidence[0].code[0].coding[0].display, "CT of neck")
self.assertEqual(inst.evidence[0].code[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "f003")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.onsetDateTime.date, FHIRDate("2012-02-27").date)
self.assertEqual(inst.onsetDateTime.as_json(), "2012-02-27")
self.assertEqual(inst.recordedDate.date, FHIRDate("2012-02-20").date)
self.assertEqual(inst.recordedDate.as_json(), "2012-02-20")
self.assertEqual(inst.severity.coding[0].code, "371923003")
self.assertEqual(inst.severity.coding[0].display, "Mild to moderate")
self.assertEqual(inst.severity.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "confirmed")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition2(self):
inst = self.instantiate_from("condition-example-f203-sepsis.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition2(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition2(inst2)
def implCondition2(self, inst):
self.assertEqual(inst.bodySite[0].coding[0].code, "281158006")
self.assertEqual(inst.bodySite[0].coding[0].display, "Pulmonary vascular structure")
self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[0].code, "55607006")
self.assertEqual(inst.category[0].coding[0].display, "Problem")
self.assertEqual(inst.category[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[1].code, "problem-list-item")
self.assertEqual(inst.category[0].coding[1].system, "http://terminology.hl7.org/CodeSystem/condition-category")
self.assertEqual(inst.clinicalStatus.coding[0].code, "active")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "10001005")
self.assertEqual(inst.code.coding[0].display, "Bacterial sepsis")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "f203")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.onsetDateTime.date, FHIRDate("2013-03-08").date)
self.assertEqual(inst.onsetDateTime.as_json(), "2013-03-08")
self.assertEqual(inst.recordedDate.date, FHIRDate("2013-03-11").date)
self.assertEqual(inst.recordedDate.as_json(), "2013-03-11")
self.assertEqual(inst.severity.coding[0].code, "371924009")
self.assertEqual(inst.severity.coding[0].display, "Moderate to severe")
self.assertEqual(inst.severity.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "confirmed")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition3(self):
inst = self.instantiate_from("condition-example-stroke.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition3(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition3(inst2)
def implCondition3(self, inst):
self.assertEqual(inst.category[0].coding[0].code, "encounter-diagnosis")
self.assertEqual(inst.category[0].coding[0].display, "Encounter Diagnosis")
self.assertEqual(inst.category[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-category")
self.assertEqual(inst.clinicalStatus.coding[0].code, "active")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "422504002")
self.assertEqual(inst.code.coding[0].display, "Ischemic stroke (disorder)")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.code.text, "Stroke")
self.assertEqual(inst.id, "stroke")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.onsetDateTime.date, FHIRDate("2010-07-18").date)
self.assertEqual(inst.onsetDateTime.as_json(), "2010-07-18")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Ischemic stroke, July 18, 2010</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "confirmed")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition4(self):
inst = self.instantiate_from("condition-example-family-history.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition4(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition4(inst2)
def implCondition4(self, inst):
self.assertEqual(inst.category[0].coding[0].code, "problem-list-item")
self.assertEqual(inst.category[0].coding[0].display, "Problem List Item")
self.assertEqual(inst.category[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-category")
self.assertEqual(inst.clinicalStatus.coding[0].code, "active")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "312824007")
self.assertEqual(inst.code.coding[0].display, "Family history of cancer of colon")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "family-history")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Family history of cancer of colon</div>")
self.assertEqual(inst.text.status, "generated")
def testCondition5(self):
inst = self.instantiate_from("condition-example-f002-lung.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition5(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition5(inst2)
def implCondition5(self, inst):
self.assertEqual(inst.bodySite[0].coding[0].code, "51185008")
self.assertEqual(inst.bodySite[0].coding[0].display, "Thorax")
self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[0].code, "439401001")
self.assertEqual(inst.category[0].coding[0].display, "diagnosis")
self.assertEqual(inst.category[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.clinicalStatus.coding[0].code, "active")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "254637007")
self.assertEqual(inst.code.coding[0].display, "NSCLC - Non-small cell lung cancer")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.evidence[0].code[0].coding[0].code, "169069000")
self.assertEqual(inst.evidence[0].code[0].coding[0].display, "CT of thorax")
self.assertEqual(inst.evidence[0].code[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "f002")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.onsetDateTime.date, FHIRDate("2011-05-05").date)
self.assertEqual(inst.onsetDateTime.as_json(), "2011-05-05")
self.assertEqual(inst.recordedDate.date, FHIRDate("2012-06-03").date)
self.assertEqual(inst.recordedDate.as_json(), "2012-06-03")
self.assertEqual(inst.severity.coding[0].code, "24484000")
self.assertEqual(inst.severity.coding[0].display, "Severe")
self.assertEqual(inst.severity.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.stage[0].summary.coding[0].code, "258219007")
self.assertEqual(inst.stage[0].summary.coding[0].display, "stage II")
self.assertEqual(inst.stage[0].summary.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.stage[0].type.coding[0].code, "260998006")
self.assertEqual(inst.stage[0].type.coding[0].display, "Clinical staging (qualifier value)")
self.assertEqual(inst.stage[0].type.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "confirmed")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition6(self):
inst = self.instantiate_from("condition-example-f205-infection.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition6(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition6(inst2)
def implCondition6(self, inst):
self.assertEqual(inst.clinicalStatus.coding[0].code, "active")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "87628006")
self.assertEqual(inst.code.coding[0].display, "Bacterial infectious disease")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "f205")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.recordedDate.date, FHIRDate("2013-04-04").date)
self.assertEqual(inst.recordedDate.as_json(), "2013-04-04")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "differential")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition7(self):
inst = self.instantiate_from("condition-example-f204-renal.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition7(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition7(inst2)
def implCondition7(self, inst):
self.assertEqual(inst.abatementDateTime.date, FHIRDate("2013-03-20").date)
self.assertEqual(inst.abatementDateTime.as_json(), "2013-03-20")
self.assertEqual(inst.bodySite[0].coding[0].code, "181414000")
self.assertEqual(inst.bodySite[0].coding[0].display, "Kidney")
self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[0].code, "55607006")
self.assertEqual(inst.category[0].coding[0].display, "Problem")
self.assertEqual(inst.category[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[1].code, "problem-list-item")
self.assertEqual(inst.category[0].coding[1].system, "http://terminology.hl7.org/CodeSystem/condition-category")
self.assertEqual(inst.clinicalStatus.coding[0].code, "inactive")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "36225005")
self.assertEqual(inst.code.coding[0].display, "Acute renal insufficiency specified as due to procedure")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "f204")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.note[0].text, "The patient is anuric.")
self.assertEqual(inst.onsetDateTime.date, FHIRDate("2013-03-11").date)
self.assertEqual(inst.onsetDateTime.as_json(), "2013-03-11")
self.assertEqual(inst.recordedDate.date, FHIRDate("2013-03-11").date)
self.assertEqual(inst.recordedDate.as_json(), "2013-03-11")
self.assertEqual(inst.severity.coding[0].code, "24484000")
self.assertEqual(inst.severity.coding[0].display, "Severe")
self.assertEqual(inst.severity.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.stage[0].summary.coding[0].code, "14803004")
self.assertEqual(inst.stage[0].summary.coding[0].display, "Temporary")
self.assertEqual(inst.stage[0].summary.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "differential")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition8(self):
inst = self.instantiate_from("condition-example2.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition8(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition8(inst2)
def implCondition8(self, inst):
self.assertEqual(inst.category[0].coding[0].code, "problem-list-item")
self.assertEqual(inst.category[0].coding[0].display, "Problem List Item")
self.assertEqual(inst.category[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-category")
self.assertEqual(inst.clinicalStatus.coding[0].code, "active")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.text, "Asthma")
self.assertEqual(inst.id, "example2")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.onsetString, "approximately November 2012")
self.assertEqual(inst.severity.coding[0].code, "255604002")
self.assertEqual(inst.severity.coding[0].display, "Mild")
self.assertEqual(inst.severity.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Mild Asthma (Date: 12-Nov 2012)</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "confirmed")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition9(self):
inst = self.instantiate_from("condition-example-f202-malignancy.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition9(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition9(inst2)
def implCondition9(self, inst):
self.assertEqual(inst.abatementAge.code, "a")
self.assertEqual(inst.abatementAge.system, "http://unitsofmeasure.org")
self.assertEqual(inst.abatementAge.unit, "years")
self.assertEqual(inst.abatementAge.value, 54)
self.assertEqual(inst.bodySite[0].coding[0].code, "361355005")
self.assertEqual(inst.bodySite[0].coding[0].display, "Entire head and neck")
self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[0].code, "encounter-diagnosis")
self.assertEqual(inst.category[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-category")
self.assertEqual(inst.clinicalStatus.coding[0].code, "resolved")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "363346000")
self.assertEqual(inst.code.coding[0].display, "Malignant neoplastic disease")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "f202")
self.assertEqual(inst.meta.security[0].code, "TBOO")
self.assertEqual(inst.meta.security[0].display, "taboo")
self.assertEqual(inst.meta.security[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.onsetAge.code, "a")
self.assertEqual(inst.onsetAge.system, "http://unitsofmeasure.org")
self.assertEqual(inst.onsetAge.unit, "years")
self.assertEqual(inst.onsetAge.value, 52)
self.assertEqual(inst.recordedDate.date, FHIRDate("2012-12-01").date)
self.assertEqual(inst.recordedDate.as_json(), "2012-12-01")
self.assertEqual(inst.severity.coding[0].code, "24484000")
self.assertEqual(inst.severity.coding[0].display, "Severe")
self.assertEqual(inst.severity.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "confirmed")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
def testCondition10(self):
inst = self.instantiate_from("condition-example-f201-fever.json")
self.assertIsNotNone(inst, "Must have instantiated a Condition instance")
self.implCondition10(inst)
js = inst.as_json()
self.assertEqual("Condition", js["resourceType"])
inst2 = condition.Condition(js)
self.implCondition10(inst2)
def implCondition10(self, inst):
self.assertEqual(inst.abatementString, "around April 9, 2013")
self.assertEqual(inst.bodySite[0].coding[0].code, "38266002")
self.assertEqual(inst.bodySite[0].coding[0].display, "Entire body as a whole")
self.assertEqual(inst.bodySite[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[0].code, "55607006")
self.assertEqual(inst.category[0].coding[0].display, "Problem")
self.assertEqual(inst.category[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.category[0].coding[1].code, "problem-list-item")
self.assertEqual(inst.category[0].coding[1].system, "http://terminology.hl7.org/CodeSystem/condition-category")
self.assertEqual(inst.clinicalStatus.coding[0].code, "resolved")
self.assertEqual(inst.clinicalStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-clinical")
self.assertEqual(inst.code.coding[0].code, "386661006")
self.assertEqual(inst.code.coding[0].display, "Fever")
self.assertEqual(inst.code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.evidence[0].code[0].coding[0].code, "258710007")
self.assertEqual(inst.evidence[0].code[0].coding[0].display, "degrees C")
self.assertEqual(inst.evidence[0].code[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.id, "f201")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.onsetDateTime.date, FHIRDate("2013-04-02").date)
self.assertEqual(inst.onsetDateTime.as_json(), "2013-04-02")
self.assertEqual(inst.recordedDate.date, FHIRDate("2013-04-04").date)
self.assertEqual(inst.recordedDate.as_json(), "2013-04-04")
self.assertEqual(inst.severity.coding[0].code, "255604002")
self.assertEqual(inst.severity.coding[0].display, "Mild")
self.assertEqual(inst.severity.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.verificationStatus.coding[0].code, "confirmed")
self.assertEqual(inst.verificationStatus.coding[0].system, "http://terminology.hl7.org/CodeSystem/condition-ver-status")
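
# --- Illustrative note (not part of the generated file) ---
# The tests above resolve their example JSON relative to FHIR_UNITTEST_DATADIR,
# so a typical invocation against a local copy of the FHIR example resources
# might look like (hypothetical path):
#
#     FHIR_UNITTEST_DATADIR=/path/to/fhir-examples python -m unittest discover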
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, or linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import copy
import datetime
import glob
import optparse
import os
import re
import sys
if sys.version_info < (2, 6, 0):
sys.stderr.write("python 2.6 or later is required run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_make
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import NACLPORTS_DIR, GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
CYGTAR = os.path.join(NACL_DIR, 'build', 'cygtar.py')
NACLPORTS_URL = 'https://naclports.googlecode.com/svn/trunk/src'
NACLPORTS_REV = 954
GYPBUILD_DIR = 'gypbuild'
options = None
def GetGlibcToolchain():
tcdir = os.path.join(NACL_DIR, 'toolchain', '.tars')
tcname = 'toolchain_%s_x86.tar.bz2' % getos.GetPlatform()
return os.path.join(tcdir, tcname)
def GetNewlibToolchain():
tcdir = os.path.join(NACL_DIR, 'toolchain', '.tars')
tcname = 'naclsdk_%s_x86.tgz' % getos.GetPlatform()
return os.path.join(tcdir, tcname)
def GetPNaClToolchain():
tcdir = os.path.join(NACL_DIR, 'toolchain', '.tars')
tcname = 'naclsdk_pnacl_%s_x86.tgz' % getos.GetPlatform()
return os.path.join(tcdir, tcname)
def GetToolchainNaClInclude(tcname, tcpath, arch):
if arch == 'x86':
if tcname == 'pnacl':
return os.path.join(tcpath, 'sdk', 'include')
return os.path.join(tcpath, 'x86_64-nacl', 'include')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'include')
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetGypGenDir(xarch):
if xarch == 'arm':
build_dir = GYPBUILD_DIR + '-arm'
else:
build_dir = GYPBUILD_DIR
return os.path.join(OUT_DIR, build_dir, 'Release', 'gen')
def GetGypBuiltLib(tcname, xarch=None):
if tcname == 'pnacl':
tcname = 'pnacl_newlib'
if not xarch:
xarch = ''
return os.path.join(GetGypGenDir(xarch), 'tc_' + tcname, 'lib' + xarch)
def GetToolchainNaClLib(tcname, tcpath, xarch):
if tcname == 'pnacl':
return os.path.join(tcpath, 'sdk', 'lib')
elif xarch == '32':
return os.path.join(tcpath, 'x86_64-nacl', 'lib32')
elif xarch == '64':
return os.path.join(tcpath, 'x86_64-nacl', 'lib')
elif xarch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'lib')
def GetToolchainDirName(tcname, xarch):
if tcname == 'pnacl':
return '%s_%s' % (getos.GetPlatform(), tcname)
elif xarch == 'arm':
return '%s_arm_%s' % (getos.GetPlatform(), tcname)
else:
return '%s_x86_%s' % (getos.GetPlatform(), tcname)
def GetGypToolchainLib(tcname, xarch):
tcpath = os.path.join(GetGypGenDir(xarch), 'sdk', 'toolchain',
GetToolchainDirName(tcname, xarch))
return GetToolchainNaClLib(tcname, tcpath, xarch)
def GetOutputToolchainLib(pepperdir, tcname, xarch):
tcpath = os.path.join(pepperdir, 'toolchain',
GetToolchainDirName(tcname, xarch))
return GetToolchainNaClLib(tcname, tcpath, xarch)
def GetPNaClNativeLib(tcpath, arch):
if arch not in ['arm', 'x86-32', 'x86-64']:
buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
return os.path.join(tcpath, 'lib-' + arch)
def BuildStepDownloadToolchains():
buildbot_common.BuildStep('Running download_toolchains.py')
download_script = os.path.join('build', 'download_toolchains.py')
buildbot_common.Run([sys.executable, download_script,
'--no-arm-trusted', '--arm-untrusted', '--keep'],
cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
buildbot_common.BuildStep('Clean Pepper Dirs')
buildbot_common.RemoveDir(pepperdir_old)
buildbot_common.RemoveDir(pepperdir)
buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
for subdir in subdirs:
buildbot_common.MakeDir(os.path.join(pepperdir, subdir))
TEXT_FILES = [
'AUTHORS',
'COPYING',
'LICENSE',
'README.Makefiles',
'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
nacl_revision):
buildbot_common.BuildStep('Add Text Files')
InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
# Replace a few placeholders in README
readme_text = open(os.path.join(SDK_SRC_DIR, 'README')).read()
readme_text = readme_text.replace('${VERSION}', pepper_ver)
readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
# Year/Month/Day Hour:Minute:Second
time_format = '%Y/%m/%d %H:%M:%S'
readme_text = readme_text.replace('${DATE}',
datetime.datetime.now().strftime(time_format))
open(os.path.join(pepperdir, 'README'), 'w').write(readme_text)
def PrunePNaClToolchain(root):
dirs_to_prune = [
'lib-bc-x86-64',
'usr-bc-x86-64'
    # TODO(sbc): remove this once it's really not needed.
# Currently we seem to rely on it at least for <bits/stat.h>
#'sysroot',
]
for dirname in dirs_to_prune:
buildbot_common.RemoveDir(os.path.join(root, dirname))
def BuildStepUntarToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('Untar Toolchains')
platform = getos.GetPlatform()
tmpdir = os.path.join(OUT_DIR, 'tc_temp')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
if 'newlib' in toolchains:
# Untar the newlib toolchains
tarfile = GetNewlibToolchain()
buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
cwd=NACL_DIR)
# Then rename/move it to the pepper toolchain directory
srcdir = os.path.join(tmpdir, 'sdk', 'nacl-sdk')
tcname = platform + '_x86_newlib'
newlibdir = os.path.join(pepperdir, 'toolchain', tcname)
buildbot_common.Move(srcdir, newlibdir)
if 'arm' in toolchains:
# Copy the existing arm toolchain from native_client tree
tcname = platform + '_arm_newlib'
arm_toolchain = os.path.join(NACL_DIR, 'toolchain', tcname)
arm_toolchain_sdk = os.path.join(pepperdir, 'toolchain',
os.path.basename(arm_toolchain))
buildbot_common.CopyDir(arm_toolchain, arm_toolchain_sdk)
if 'glibc' in toolchains:
# Untar the glibc toolchains
tarfile = GetGlibcToolchain()
tcname = platform + '_x86_glibc'
buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
cwd=NACL_DIR)
# Then rename/move it to the pepper toolchain directory
srcdir = os.path.join(tmpdir, 'toolchain', platform + '_x86')
glibcdir = os.path.join(pepperdir, 'toolchain', tcname)
buildbot_common.Move(srcdir, glibcdir)
# Untar the pnacl toolchains
if 'pnacl' in toolchains:
tmpdir = os.path.join(tmpdir, 'pnacl')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
tarfile = GetPNaClToolchain()
tcname = platform + '_pnacl'
buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
cwd=NACL_DIR)
# Then rename/move it to the pepper toolchain directory
pnacldir = os.path.join(pepperdir, 'toolchain', tcname)
buildbot_common.Move(tmpdir, pnacldir)
PrunePNaClToolchain(pnacldir)
buildbot_common.RemoveDir(tmpdir)
if options.gyp and platform != 'win':
    # If the gyp option is specified we install a toolchain
    # wrapper so that gyp can switch toolchains via a command-line
    # option.
bindir = os.path.join(pepperdir, 'toolchain', tcname, 'bin')
wrapper = os.path.join(SDK_SRC_DIR, 'tools', 'compiler-wrapper.py')
buildbot_common.MakeDir(bindir)
buildbot_common.CopyFile(wrapper, bindir)
# Module 'os' has no 'symlink' member (on Windows).
# pylint: disable=E1101
os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-g++'))
os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-gcc'))
os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-ar'))
# List of toolchain headers to install.
# Source is relative to native_client tree, destination is relative
# to the toolchain header directory.
NACL_HEADER_MAP = {
'newlib': [
('src/include/nacl/nacl_exception.h', 'nacl/'),
('src/include/nacl/nacl_minidump.h', 'nacl/'),
('src/untrusted/irt/irt.h', ''),
('src/untrusted/irt/irt_dev.h', ''),
('src/untrusted/irt/irt_ppapi.h', ''),
('src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('src/untrusted/nacl/nacl_thread.h', 'nacl/'),
('src/untrusted/pthread/pthread.h', ''),
('src/untrusted/pthread/semaphore.h', ''),
('src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
],
'glibc': [
('src/include/nacl/nacl_exception.h', 'nacl/'),
('src/include/nacl/nacl_minidump.h', 'nacl/'),
('src/untrusted/irt/irt.h', ''),
('src/untrusted/irt/irt_dev.h', ''),
('src/untrusted/irt/irt_ppapi.h', ''),
('src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('src/untrusted/nacl/nacl_thread.h', 'nacl/'),
('src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
],
'host': []
}
def InstallFiles(src_root, dest_root, file_list):
"""Copy a set of files from src_root to dest_root according
to the given mapping. This allows files to be copied from
to a location in the destination tree that is different to the
location in the source tree.
If the destination mapping ends with a '/' then the destination
basename is inherited from the the source file.
Wildcards can be used in the source list but it is not recommended
as this can end up adding things to the SDK unintentionally.
"""
for file_spec in file_list:
# The list of files to install can be a simple list of
# strings or a list of pairs, where each pair corresponds
# to a mapping from source to destination names.
if type(file_spec) == str:
src_file = dest_file = file_spec
else:
src_file, dest_file = file_spec
src_file = os.path.join(src_root, src_file)
    # Expand source files using glob.
sources = glob.glob(src_file)
if not sources:
sources = [src_file]
if len(sources) > 1 and not dest_file.endswith('/'):
buildbot_common.ErrorExit("Target file must end in '/' when "
"using globbing to install multiple files")
for source in sources:
if dest_file.endswith('/'):
dest = os.path.join(dest_file, os.path.basename(source))
else:
dest = dest_file
dest = os.path.join(dest_root, dest)
if not os.path.isdir(os.path.dirname(dest)):
buildbot_common.MakeDir(os.path.dirname(dest))
buildbot_common.CopyFile(source, dest)
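
# --- Illustrative sketch (hypothetical destination path, not called by the build) ---
# The two file_spec forms accepted by InstallFiles, per the docstring above:
#
#   InstallFiles(SDK_SRC_DIR, '/tmp/sdk-staging', [
#       'README',                             # plain name: copied as-is
#       ('getting_started/README', 'docs/'),  # pair: basename kept under docs/
#   ])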
def InstallNaClHeaders(tc_dst_inc, tc_name):
"""Copies NaCl headers to expected locations in the toolchain."""
if tc_name == 'arm':
    # arm toolchain headers should be the same as the x86 newlib
# ones
tc_name = 'newlib'
InstallFiles(NACL_DIR, tc_dst_inc, NACL_HEADER_MAP[tc_name])
def MakeNinjaRelPath(path):
return os.path.join(os.path.relpath(OUT_DIR, SRC_DIR), path)
TOOLCHAIN_LIBS = {
'newlib' : [
'crti.o',
'crtn.o',
'libminidump_generator.a',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_exception.a',
'libnacl_list_mappings.a',
'libnosys.a',
'libppapi.a',
'libppapi_stub.a',
'libpthread.a',
],
'glibc': [
'libminidump_generator.a',
'libminidump_generator.so',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_dyncode.so',
'libnacl_exception.a',
'libnacl_exception.so',
'libnacl_list_mappings.a',
'libnacl_list_mappings.so',
'libppapi.a',
'libppapi.so',
'libppapi_stub.a',
],
'pnacl': [
'libminidump_generator.a',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_exception.a',
'libnacl_list_mappings.a',
'libnosys.a',
'libppapi.a',
'libppapi_stub.a',
'libpthread.a',
]
}
def GypNinjaInstall(pepperdir, toolchains):
build_dir = GYPBUILD_DIR
ninja_out_dir = os.path.join(OUT_DIR, build_dir, 'Release')
tools_files = [
['sel_ldr', 'sel_ldr_x86_32'],
['ncval_new', 'ncval'],
['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
]
platform = getos.GetPlatform()
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if platform != 'win':
tools_files += [
['dump_syms', 'dump_syms'],
['minidump_dump', 'minidump_dump'],
['minidump_stackwalk', 'minidump_stackwalk']
]
tools_files.append(['sel_ldr64', 'sel_ldr_x86_64'])
if platform == 'linux':
tools_files.append(['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_32'])
tools_files.append(['nacl_helper_bootstrap64',
'nacl_helper_bootstrap_x86_64'])
buildbot_common.MakeDir(os.path.join(pepperdir, 'tools'))
# Add .exe extensions to all windows tools
for pair in tools_files:
if platform == 'win' and not pair[0].endswith('.nexe'):
pair[0] += '.exe'
pair[1] += '.exe'
InstallFiles(ninja_out_dir, os.path.join(pepperdir, 'tools'), tools_files)
for tc in set(toolchains) & set(['newlib', 'glibc', 'pnacl']):
if tc == 'pnacl':
xarches = (None,)
else:
xarches = ('arm', '32', '64')
for xarch in xarches:
if tc == 'glibc' and xarch == 'arm':
continue
src_dir = GetGypBuiltLib(tc, xarch)
dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[tc])
if tc != 'pnacl':
src_dir = GetGypToolchainLib(tc, xarch)
InstallFiles(src_dir, dst_dir, ['crt1.o'])
def GypNinjaBuild_NaCl(rel_out_dir):
gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')
out_dir = MakeNinjaRelPath(rel_out_dir)
out_dir_arm = MakeNinjaRelPath(rel_out_dir + '-arm')
GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir)
GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_arm)
GypNinjaBuild('ia32', gyp_py, all_gyp, 'ncval_new', out_dir)
platform = getos.GetPlatform()
if platform == 'win':
NinjaBuild('sel_ldr64', out_dir)
else:
out_dir_64 = MakeNinjaRelPath(rel_out_dir + '-64')
GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'sel_ldr', out_dir_64)
# We only need sel_ldr from the 64-bit out directory.
# sel_ldr needs to be renamed, so we'll call it sel_ldr64.
files_to_copy = [('sel_ldr', 'sel_ldr64')]
if platform == 'linux':
files_to_copy.append(('nacl_helper_bootstrap', 'nacl_helper_bootstrap64'))
for src, dst in files_to_copy:
buildbot_common.CopyFile(
os.path.join(SRC_DIR, out_dir_64, 'Release', src),
os.path.join(SRC_DIR, out_dir, 'Release', dst))
def GypNinjaBuild_Breakpad(rel_out_dir):
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if getos.GetPlatform() == 'win':
return
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp')
build_list = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
GypNinjaBuild('ia32', gyp_py, gyp_file, build_list, out_dir)
def GypNinjaBuild_PPAPI(arch, rel_out_dir):
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client',
'native_client.gyp')
GypNinjaBuild(arch, gyp_py, gyp_file, 'ppapi_lib', out_dir)
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
# TODO(binji): This will build the pnacl_irt_shim twice; once as part of the
# Chromium build, and once here. When we move more of the SDK build process
# to gyp, we can remove this.
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
targets = ['pnacl_irt_shim']
GypNinjaBuild(target_arch, gyp_py, gyp_file, targets, out_dir, False)
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
out_dir, force_arm_gcc=True):
gyp_env = copy.copy(os.environ)
gyp_env['GYP_GENERATORS'] = 'ninja'
gyp_defines = []
if options.mac_sdk:
gyp_defines.append('mac_sdk=%s' % options.mac_sdk)
if arch:
gyp_defines.append('target_arch=%s' % arch)
if arch == 'arm':
gyp_defines += ['armv7=1', 'arm_thumb=0', 'arm_neon=1']
if force_arm_gcc:
gyp_defines += ['nacl_enable_arm_gcc=1']
gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
for key in ['GYP_GENERATORS', 'GYP_DEFINES']:
value = gyp_env[key]
print '%s="%s"' % (key, value)
gyp_generator_flags = ['-G', 'output_dir=%s' % (out_dir,)]
gyp_depth = '--depth=.'
buildbot_common.Run(
[sys.executable, gyp_py_script, gyp_file, gyp_depth] + \
gyp_generator_flags,
cwd=SRC_DIR,
env=gyp_env)
NinjaBuild(targets, out_dir)
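# Example (illustrative values) of what GypNinjaBuild sets up for an ARM build
# with force_arm_gcc=True and no --mac-sdk:
#   GYP_GENERATORS="ninja"
#   GYP_DEFINES="target_arch=arm armv7=1 arm_thumb=0 arm_neon=1 nacl_enable_arm_gcc=1"
# followed by running
#   python <gyp_py_script> <gyp_file> --depth=. -G output_dir=<out_dir>
# and then ninja on the requested targets in <out_dir>/Release (see NinjaBuild
# below).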
def NinjaBuild(targets, out_dir):
if type(targets) is not list:
targets = [targets]
out_config_dir = os.path.join(out_dir, 'Release')
buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('SDK Items')
GypNinjaBuild_NaCl(GYPBUILD_DIR)
GypNinjaBuild_Breakpad(GYPBUILD_DIR)
platform = getos.GetPlatform()
newlibdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_newlib')
glibcdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_glibc')
armdir = os.path.join(pepperdir, 'toolchain', platform + '_arm_newlib')
pnacldir = os.path.join(pepperdir, 'toolchain', platform + '_pnacl')
if set(toolchains) & set(['glibc', 'newlib']):
GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR)
if 'arm' in toolchains:
GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm')
GypNinjaInstall(pepperdir, toolchains)
if 'newlib' in toolchains:
InstallNaClHeaders(GetToolchainNaClInclude('newlib', newlibdir, 'x86'),
'newlib')
if 'glibc' in toolchains:
InstallNaClHeaders(GetToolchainNaClInclude('glibc', glibcdir, 'x86'),
'glibc')
if 'arm' in toolchains:
InstallNaClHeaders(GetToolchainNaClInclude('newlib', armdir, 'arm'),
'arm')
if 'pnacl' in toolchains:
# NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
for arch in ('ia32', 'arm'):
# Fill in the latest native pnacl shim library from the chrome build.
build_dir = GYPBUILD_DIR + '-pnacl-' + arch
GypNinjaBuild_Pnacl(build_dir, arch)
if arch == 'ia32':
nacl_arches = ['x86-32', 'x86-64']
elif arch == 'arm':
nacl_arches = ['arm']
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
for nacl_arch in nacl_arches:
release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
'gen', 'tc_pnacl_translate',
'lib-' + nacl_arch)
buildbot_common.CopyFile(
os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
GetPNaClNativeLib(pnacldir, nacl_arch))
InstallNaClHeaders(GetToolchainNaClInclude('pnacl', pnacldir, 'x86'),
'newlib')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
dirpath = os.path.join(pepperdir, dirname)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
return dirpath
def BuildStepUpdateHelpers(pepperdir, clobber):
buildbot_common.BuildStep('Update project helpers')
build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
build_experimental, clobber):
buildbot_common.BuildStep('Update examples and libraries')
filters = {}
if not build_experimental:
filters['EXPERIMENTAL'] = False
if toolchains:
toolchains = toolchains[:]
# arm isn't a valid toolchain for build_projects
if 'arm' in toolchains:
toolchains.remove('arm')
if 'host' in toolchains:
toolchains.remove('host')
toolchains.append(getos.GetPlatform())
filters['TOOLS'] = toolchains
# Update examples and libraries
filters['DEST'] = [
'getting_started',
'examples/api',
'examples/demo',
'examples/tutorial',
'src'
]
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
toolchains=toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
deps=True, clean=False, config='Debug', args=None):
buildbot_common.BuildStep(step_name)
build_projects.BuildProjectsBranch(pepperdir, directory, clean,
deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Debug',
clean=True, config='Debug')
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Release',
clean=True, config='Release')
# Clean up the .pyc files generated while building the libraries. Without
# this we would end up shipping them in the SDK tarball.
buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
# Look for LICENSE files
license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
license_files = []
for root, _, files in os.walk(fileroot):
for filename in files:
if license_filenames_re.match(filename):
path = os.path.join(root, filename)
license_files.append(path)
if extra_files:
license_files += [os.path.join(fileroot, f) for f in extra_files]
print '\n'.join(license_files)
if not os.path.isabs(output_filename):
output_filename = os.path.join(fileroot, output_filename)
generate_notice.Generate(output_filename, fileroot, license_files)
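# Note on the LICENSE regex above: re.match() anchors at the start of the
# filename, so 'LICENSE', 'LICENSE.txt', 'COPYING.LIB' and 'COPYRIGHT' all
# match, while names like 'MIT-LICENSE' or lowercase 'license' do not.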
def BuildStepVerifyFilelist(pepperdir):
buildbot_common.BuildStep('Verify SDK Files')
file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
try:
verify_filelist.Verify(file_list_path, pepperdir)
print 'OK'
except verify_filelist.ParseException, e:
buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
except verify_filelist.VerifyException, e:
file_list_rel = os.path.relpath(file_list_path)
verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
verify_filelist_py = os.path.relpath(verify_filelist_py)
pepperdir_rel = os.path.relpath(pepperdir)
msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
pepperdir_rel)
buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
buildbot_common.BuildStep('Tar Pepper Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
buildbot_common.Run([sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
'pepper_' + pepper_ver], cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
archive_url):
with open(tarfile, 'rb') as tarfile_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
tarfile_stream)
archive = manifest_util.Archive(manifest_util.GetHostOS())
archive.url = archive_url
archive.size = archive_size
archive.checksum = archive_sha1
bundle = manifest_util.Bundle('pepper_' + pepper_ver)
bundle.revision = int(chrome_revision)
bundle.repath = 'pepper_' + pepper_ver
bundle.version = int(pepper_ver)
bundle.description = (
'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
pepper_ver, chrome_revision, nacl_revision))
bundle.stability = 'dev'
bundle.recommended = 'no'
bundle.archives = [archive]
return bundle
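# For reference (field names are illustrative, not authoritative): the bundle
# returned above is later serialized by manifest_util into a JSON "manifest
# snippet" containing roughly the bundle name (pepper_<ver>), version,
# revision, repath, description, stability, recommended flag and a single
# archive entry with its url, size and sha1 checksum.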
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
tarfile):
buildbot_common.BuildStep('Archive %s' % name)
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
build_version.ChromeVersion(),)
tarname = os.path.basename(tarfile)
tarfile_dir = os.path.dirname(tarfile)
buildbot_common.Archive(tarname, bucket_path, tarfile_dir)
# generate "manifest snippet" for this archive.
archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
build_version.ChromeVersion(), tarname)
bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
tarfile, archive_url)
manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
manifest_snippet_stream.write(bundle.GetDataAsString())
buildbot_common.Archive(tarname + '.json', bucket_path, OUT_DIR,
step_link=False)
def BuildStepArchiveSDKTools():
# Only push up sdk_tools.tgz and nacl_sdk.zip on the linux buildbot.
builder_name = os.getenv('BUILDBOT_BUILDERNAME', '')
if builder_name == 'linux-sdk-multi':
buildbot_common.BuildStep('Build SDK Tools')
build_updater.BuildUpdater(OUT_DIR)
buildbot_common.BuildStep('Archive SDK Tools')
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
build_version.ChromeVersion(),)
buildbot_common.Archive('sdk_tools.tgz', bucket_path, OUT_DIR,
step_link=False)
buildbot_common.Archive('nacl_sdk.zip', bucket_path, OUT_DIR,
step_link=False)
def BuildStepSyncNaClPorts():
"""Pull the pinned revision of naclports from SVN."""
buildbot_common.BuildStep('Sync naclports')
if not os.path.exists(NACLPORTS_DIR):
# Check out a new copy of naclports
cmd = ['svn', 'checkout', '-q', '-r', str(NACLPORTS_REV), NACLPORTS_URL,
'naclports']
buildbot_common.Run(cmd, cwd=os.path.dirname(NACLPORTS_DIR))
else:
# sync existing copy to pinned revision.
cmd = ['svn', 'update', '-r', str(NACLPORTS_REV)]
buildbot_common.Run(cmd, cwd=NACLPORTS_DIR)
def BuildStepBuildNaClPorts(pepper_ver, pepperdir):
"""Build selected naclports in all configurations."""
# TODO(sbc): currently naclports doesn't know anything about
# Debug builds so the Debug subfolders are all empty.
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['PEPPER_DIR'] = os.path.basename(pepperdir) # pepper_NN
env['NACLPORTS_NO_ANNOTATE'] = "1"
env['NACLPORTS_NO_UPLOAD'] = "1"
build_script = 'build_tools/bots/linux/naclports-linux-sdk-bundle.sh'
buildbot_common.BuildStep('Build naclports')
buildbot_common.Run([build_script], env=env, cwd=NACLPORTS_DIR)
bundle_dir = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
out_dir = os.path.join(bundle_dir, 'pepper_%s' % pepper_ver)
# Some naclports do not include a standalone LICENSE/COPYING file
# so we explicitly list those here for inclusion.
extra_licenses = ('tinyxml/readme.txt',
'jpeg-8d/README',
'zlib-1.2.3/README')
src_root = os.path.join(NACLPORTS_DIR, 'out', 'repository-i686')
output_license = os.path.join(out_dir, 'ports', 'LICENSE')
GenerateNotice(src_root, output_license, extra_licenses)
readme = os.path.join(out_dir, 'ports', 'README')
oshelpers.Copy(['-v', os.path.join(SDK_SRC_DIR, 'README.naclports'), readme])
def BuildStepTarNaClPorts(pepper_ver, tarfile):
"""Create tar archive containing headers and libs from naclports build."""
buildbot_common.BuildStep('Tar naclports Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
pepper_dir = 'pepper_%s' % pepper_ver
archive_dirs = [os.path.join(pepper_dir, 'ports')]
ports_out = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
cmd = [sys.executable, CYGTAR, '-C', ports_out, '-cjf', tarfile]
cmd += archive_dirs
buildbot_common.Run(cmd, cwd=NACL_DIR)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
"""Build the projects found in src/gonacl_appengine/src"""
buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
cmd = ['make', 'upload', 'REVISION=%s' % chrome_revision]
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['NACLPORTS_NO_ANNOTATE'] = "1"
buildbot_common.Run(cmd, env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
parser = optparse.OptionParser()
parser.add_option('--tar', help='Force the tar step.',
action='store_true')
parser.add_option('--archive', help='Force the archive step.',
action='store_true')
parser.add_option('--gyp',
help='Use gyp to build examples/libraries/Makefiles.',
action='store_true')
parser.add_option('--release', help='PPAPI release version.',
dest='release', default=None)
parser.add_option('--build-ports',
help='Build naclport bundle.', action='store_true')
parser.add_option('--build-app-engine',
help='Build AppEngine demos.', action='store_true')
parser.add_option('--experimental',
help='build experimental examples and libraries', action='store_true',
dest='build_experimental')
parser.add_option('--skip-toolchain', help='Skip toolchain untar',
action='store_true')
parser.add_option('--mac-sdk',
help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
# To set up bash completion for this command, first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_sdk.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
global options
options, args = parser.parse_args(args[1:])
if args:
parser.error("Unexpected arguments: %s" % str(args))
generate_make.use_gyp = options.gyp
if buildbot_common.IsSDKBuilder():
options.archive = True
options.build_ports = True
options.build_app_engine = True
options.tar = True
toolchains = ['newlib', 'glibc', 'arm', 'pnacl', 'host']
print 'Building: ' + ' '.join(toolchains)
if options.archive and not options.tar:
parser.error('Incompatible arguments with archive.')
chrome_version = int(build_version.ChromeMajorVersion())
chrome_revision = build_version.ChromeRevision()
nacl_revision = build_version.NaClRevision()
pepper_ver = str(chrome_version)
pepper_old = str(chrome_version - 1)
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
tarname = 'naclsdk_' + getos.GetPlatform() + '.tar.bz2'
tarfile = os.path.join(OUT_DIR, tarname)
if options.release:
pepper_ver = options.release
print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# on the build.
del os.environ['NACL_SDK_ROOT']
if not options.skip_toolchain:
BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
BuildStepDownloadToolchains()
BuildStepUntarToolchains(pepperdir, toolchains)
BuildStepBuildToolchains(pepperdir, toolchains)
BuildStepUpdateHelpers(pepperdir, True)
BuildStepUpdateUserProjects(pepperdir, toolchains,
options.build_experimental, True)
BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)
# Ship with libraries prebuilt, so run that first.
BuildStepBuildLibraries(pepperdir, 'src')
GenerateNotice(pepperdir)
# Verify the SDK contains what we expect.
BuildStepVerifyFilelist(pepperdir)
if options.tar:
BuildStepTarBundle(pepper_ver, tarfile)
if options.build_ports and getos.GetPlatform() == 'linux':
ports_tarfile = os.path.join(OUT_DIR, 'naclports.tar.bz2')
BuildStepSyncNaClPorts()
BuildStepBuildNaClPorts(pepper_ver, pepperdir)
if options.tar:
BuildStepTarNaClPorts(pepper_ver, ports_tarfile)
if options.build_app_engine and getos.GetPlatform() == 'linux':
BuildStepBuildAppEngine(pepperdir, chrome_revision)
# Archive on non-trybots.
if options.archive:
BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
tarfile)
if options.build_ports and getos.GetPlatform() == 'linux':
BuildStepArchiveBundle('naclports', pepper_ver, chrome_revision,
nacl_revision, ports_tarfile)
BuildStepArchiveSDKTools()
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
buildbot_common.ErrorExit('build_sdk: interrupted')
|
|
# Copyright (c) 2014 Alex Meade
# Copyright (c) 2015 Yogesh Kshirsagar
# Copyright (c) 2015 Michael Price
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import json
import mock
from simplejson import scanner
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \
eseries_fake
from cinder.volume.drivers.netapp.eseries import exception as es_exception
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetAppEseriesClientDriverTestCase(test.TestCase):
"""Test case for NetApp e-series client."""
def setUp(self):
super(NetAppEseriesClientDriverTestCase, self).setUp()
self.mock_log = mock.Mock()
self.mock_object(client, 'LOG', self.mock_log)
self.fake_password = 'mysecret'
self.my_client = client.RestClient('http', 'host', '80', '/test',
'user', self.fake_password,
system_id='fake_sys_id')
self.my_client._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP
fake_response = mock.Mock()
fake_response.status_code = 200
self.my_client.invoke_service = mock.Mock(return_value=fake_response)
self.my_client.api_version = '01.52.9000.1'
@ddt.data(200, 201, 203, 204)
def test_eval_response_success(self, status_code):
fake_resp = mock.Mock()
fake_resp.status_code = status_code
self.assertIsNone(self.my_client._eval_response(fake_resp))
@ddt.data(300, 400, 404, 500)
def test_eval_response_failure(self, status_code):
fake_resp = mock.Mock()
fake_resp.status_code = status_code
expected_msg = "Response error code - %s." % status_code
with self.assertRaisesRegex(es_exception.WebServiceException,
expected_msg) as exc:
self.my_client._eval_response(fake_resp)
self.assertEqual(status_code, exc.status_code)
@ddt.data(('30', 'storage array password.*?incorrect'),
('authFailPassword', 'storage array password.*?incorrect'),
('unknown', None))
@ddt.unpack
def test_eval_response_422(self, ret_code, exc_regex):
status_code = 422
fake_resp = mock.Mock()
fake_resp.text = "fakeError"
fake_resp.json = mock.Mock(return_value={'retcode': ret_code})
fake_resp.status_code = status_code
exc_regex = exc_regex if exc_regex is not None else fake_resp.text
with self.assertRaisesRegex(es_exception.WebServiceException,
exc_regex) as exc:
self.my_client._eval_response(fake_resp)
self.assertEqual(status_code, exc.status_code)
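# Descriptive note: per the ddt data above, retcodes '30' and
# 'authFailPassword' are the two codes mapped to the "storage array
# password ... incorrect" error, while any other retcode falls back to the
# raw response text.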
def test_eval_response_424(self):
status_code = 424
fake_resp = mock.Mock()
fake_resp.status_code = status_code
fake_resp.text = "Fake Error Message"
with self.assertRaisesRegex(es_exception.WebServiceException,
"The storage-system is offline") as exc:
self.my_client._eval_response(fake_resp)
self.assertEqual(status_code, exc.status_code)
def test_register_storage_system_does_not_log_password(self):
self.my_client._eval_response = mock.Mock()
self.my_client.register_storage_system([], password=self.fake_password)
for call in self.mock_log.debug.mock_calls:
__, args, __ = call
self.assertNotIn(self.fake_password, args[0])
def test_update_stored_system_password_does_not_log_password(self):
self.my_client._eval_response = mock.Mock()
self.my_client.update_stored_system_password(
password=self.fake_password)
for call in self.mock_log.debug.mock_calls:
__, args, __ = call
self.assertNotIn(self.fake_password, args[0])
def test_list_target_wwpns(self):
fake_hardware_inventory = copy.deepcopy(
eseries_fake.HARDWARE_INVENTORY)
mock_hardware_inventory = mock.Mock(
return_value=fake_hardware_inventory)
self.mock_object(self.my_client, 'list_hardware_inventory',
mock_hardware_inventory)
expected_wwpns = [eseries_fake.WWPN, eseries_fake.WWPN_2]
actual_wwpns = self.my_client.list_target_wwpns()
self.assertEqual(expected_wwpns, actual_wwpns)
def test_list_target_wwpns_single_wwpn(self):
fake_hardware_inventory = copy.deepcopy(
eseries_fake.HARDWARE_INVENTORY)
fake_hardware_inventory['fibrePorts'] = [
fake_hardware_inventory['fibrePorts'][0]
]
mock_hardware_inventory = mock.Mock(
return_value=fake_hardware_inventory)
self.mock_object(self.my_client, 'list_hardware_inventory',
mock_hardware_inventory)
expected_wwpns = [eseries_fake.WWPN]
actual_wwpns = self.my_client.list_target_wwpns()
self.assertEqual(expected_wwpns, actual_wwpns)
def test_list_target_wwpns_no_wwpn(self):
fake_hardware_inventory = copy.deepcopy(
eseries_fake.HARDWARE_INVENTORY)
fake_hardware_inventory['fibrePorts'] = []
mock_hardware_inventory = mock.Mock(
return_value=fake_hardware_inventory)
self.mock_object(self.my_client, 'list_hardware_inventory',
mock_hardware_inventory)
expected_wwpns = []
actual_wwpns = self.my_client.list_target_wwpns()
self.assertEqual(expected_wwpns, actual_wwpns)
def test_get_host_group_by_name(self):
groups = copy.deepcopy(eseries_fake.HOST_GROUPS)
group = groups[0]
self.mock_object(self.my_client, 'list_host_groups',
return_value=groups)
result = self.my_client.get_host_group_by_name(group['label'])
self.assertEqual(group, result)
def test_move_volume_mapping_via_symbol(self):
invoke = self.mock_object(self.my_client, '_invoke',
mock.Mock(return_value='ok'))
host_ref = 'host'
cluster_ref = 'cluster'
lun_id = 10
expected_data = {'lunMappingRef': host_ref, 'lun': lun_id,
'mapRef': cluster_ref}
result = self.my_client.move_volume_mapping_via_symbol(host_ref,
cluster_ref,
lun_id)
invoke.assert_called_once_with('POST', '/storage-systems/{system-id}/'
'symbol/moveLUNMapping',
expected_data)
self.assertEqual({'lun': lun_id}, result)
def test_move_volume_mapping_via_symbol_fail(self):
self.mock_object(self.my_client, '_invoke',
mock.Mock(return_value='failure'))
self.assertRaises(
exception.NetAppDriverException,
self.my_client.move_volume_mapping_via_symbol, '1', '2', 10)
def test_create_host_from_ports_fc(self):
label = 'fake_host'
host_type = 'linux'
port_type = 'fc'
port_ids = [eseries_fake.WWPN, eseries_fake.WWPN_2]
expected_ports = [
{'type': port_type, 'port': eseries_fake.WWPN, 'label': mock.ANY},
{'type': port_type, 'port': eseries_fake.WWPN_2,
'label': mock.ANY}]
mock_create_host = self.mock_object(self.my_client, 'create_host')
self.my_client.create_host_with_ports(label, host_type, port_ids,
port_type)
mock_create_host.assert_called_once_with(label, host_type,
expected_ports, None)
def test_host_from_ports_with_no_ports_provided_fc(self):
label = 'fake_host'
host_type = 'linux'
port_type = 'fc'
port_ids = []
expected_ports = []
mock_create_host = self.mock_object(self.my_client, 'create_host')
self.my_client.create_host_with_ports(label, host_type, port_ids,
port_type)
mock_create_host.assert_called_once_with(label, host_type,
expected_ports, None)
def test_create_host_from_ports_iscsi(self):
label = 'fake_host'
host_type = 'linux'
port_type = 'iscsi'
port_ids = [eseries_fake.INITIATOR_NAME,
eseries_fake.INITIATOR_NAME_2]
expected_ports = [
{'type': port_type, 'port': eseries_fake.INITIATOR_NAME,
'label': mock.ANY},
{'type': port_type, 'port': eseries_fake.INITIATOR_NAME_2,
'label': mock.ANY}]
mock_create_host = self.mock_object(self.my_client, 'create_host')
self.my_client.create_host_with_ports(label, host_type, port_ids,
port_type)
mock_create_host.assert_called_once_with(label, host_type,
expected_ports, None)
def test_get_volume_mappings_for_volume(self):
volume_mapping_1 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2['volumeRef'] = '2'
self.mock_object(self.my_client, 'get_volume_mappings',
mock.Mock(return_value=[volume_mapping_1,
volume_mapping_2]))
mappings = self.my_client.get_volume_mappings_for_volume(
eseries_fake.VOLUME)
self.assertEqual([volume_mapping_1], mappings)
def test_get_volume_mappings_for_host(self):
volume_mapping_1 = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2['volumeRef'] = '2'
volume_mapping_2['mapRef'] = 'hostRef'
self.mock_object(self.my_client, 'get_volume_mappings',
mock.Mock(return_value=[volume_mapping_1,
volume_mapping_2]))
mappings = self.my_client.get_volume_mappings_for_host(
'hostRef')
self.assertEqual([volume_mapping_2], mappings)
def test_get_volume_mappings_for_hostgroup(self):
volume_mapping_1 = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
volume_mapping_2 = copy.deepcopy(eseries_fake.VOLUME_MAPPING)
volume_mapping_2['volumeRef'] = '2'
volume_mapping_2['mapRef'] = 'hostGroupRef'
self.mock_object(self.my_client, 'get_volume_mappings',
mock.Mock(return_value=[volume_mapping_1,
volume_mapping_2]))
mappings = self.my_client.get_volume_mappings_for_host_group(
'hostGroupRef')
self.assertEqual([volume_mapping_2], mappings)
def test_to_pretty_dict_string(self):
dict = {
'foo': 'bar',
'fu': {
'nested': 'boo'
}
}
expected_dict_string = ("""{
"foo": "bar",
"fu": {
"nested": "boo"
}
}""")
dict_string = self.my_client._to_pretty_dict_string(dict)
self.assertEqual(expected_dict_string, dict_string)
def test_log_http_request(self):
mock_log = self.mock_object(client, 'LOG')
verb = "POST"
url = "/v2/test/me"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = {}
body_string = "{}"
self.my_client._log_http_request(verb, url, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(verb, final_msg)
self.assertIn(url, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_log_http_request_no_body(self):
mock_log = self.mock_object(client, 'LOG')
verb = "POST"
url = "/v2/test/me"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = None
body_string = ""
self.my_client._log_http_request(verb, url, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(verb, final_msg)
self.assertIn(url, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_log_http_response(self):
mock_log = self.mock_object(client, 'LOG')
status = "200"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = {}
body_string = "{}"
self.my_client._log_http_response(status, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(status, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_log_http_response_no_body(self):
mock_log = self.mock_object(client, 'LOG')
status = "200"
headers = {"Content-Type": "application/json"}
headers_string = """{
"Content-Type": "application/json"
}"""
body = None
body_string = ""
self.my_client._log_http_response(status, headers, body)
args = mock_log.debug.call_args
log_message, log_params = args[0]
final_msg = log_message % log_params
self.assertIn(status, final_msg)
self.assertIn(headers_string, final_msg)
self.assertIn(body_string, final_msg)
def test_add_autosupport_data(self):
self.mock_object(
client.RestClient, 'get_eseries_api_info',
mock.Mock(return_value=(
eseries_fake.FAKE_ASUP_DATA['operating-mode'],
eseries_fake.FAKE_ABOUT_RESPONSE['version'])))
self.mock_object(
self.my_client, 'get_asup_info',
mock.Mock(return_value=eseries_fake.GET_ASUP_RETURN))
self.mock_object(
self.my_client, 'set_counter',
mock.Mock(return_value={'value': 1}))
mock_invoke = self.mock_object(
self.my_client, '_invoke',
mock.Mock(return_value=eseries_fake.FAKE_ASUP_DATA))
client.RestClient.add_autosupport_data(
self.my_client,
eseries_fake.FAKE_KEY,
eseries_fake.FAKE_ASUP_DATA
)
mock_invoke.assert_called_with(*eseries_fake.FAKE_POST_INVOKE_DATA)
@ddt.data((eseries_fake.FAKE_SERIAL_NUMBERS,
eseries_fake.HARDWARE_INVENTORY),
(eseries_fake.FAKE_DEFAULT_SERIAL_NUMBER, {}),
(eseries_fake.FAKE_SERIAL_NUMBER,
eseries_fake.HARDWARE_INVENTORY_SINGLE_CONTROLLER))
@ddt.unpack
def test_get_asup_info_serial_numbers(self, expected_serial_numbers,
controllers):
self.mock_object(
client.RestClient, 'list_hardware_inventory',
mock.Mock(return_value=controllers))
self.mock_object(
client.RestClient, 'list_storage_system',
mock.Mock(return_value={}))
sn = client.RestClient.get_asup_info(self.my_client)['serial_numbers']
self.assertEqual(expected_serial_numbers, sn)
def test_get_asup_info_model_name(self):
self.mock_object(
client.RestClient, 'list_hardware_inventory',
mock.Mock(return_value=eseries_fake.HARDWARE_INVENTORY))
self.mock_object(
client.RestClient, 'list_storage_system',
mock.Mock(return_value=eseries_fake.STORAGE_SYSTEM))
model_name = client.RestClient.get_asup_info(self.my_client)['model']
self.assertEqual(eseries_fake.HARDWARE_INVENTORY['controllers'][0]
['modelName'], model_name)
def test_get_asup_info_model_name_empty_controllers_list(self):
self.mock_object(
client.RestClient, 'list_hardware_inventory',
mock.Mock(return_value={}))
self.mock_object(
client.RestClient, 'list_storage_system',
mock.Mock(return_value={}))
model_name = client.RestClient.get_asup_info(self.my_client)['model']
self.assertEqual(eseries_fake.FAKE_DEFAULT_MODEL, model_name)
def test_get_eseries_api_info(self):
fake_invoke_service = mock.Mock()
fake_invoke_service.json = mock.Mock(
return_value=eseries_fake.FAKE_ABOUT_RESPONSE)
self.mock_object(
client.RestClient, '_get_resource_url',
mock.Mock(return_value=eseries_fake.FAKE_RESOURCE_URL))
self.mock_object(
self.my_client, 'invoke_service',
mock.Mock(return_value=fake_invoke_service))
eseries_info = client.RestClient.get_eseries_api_info(
self.my_client, verify=False)
self.assertEqual((eseries_fake.FAKE_ASUP_DATA['operating-mode'],
eseries_fake.FAKE_ABOUT_RESPONSE['version']),
eseries_info)
def test_list_ssc_storage_pools(self):
self.my_client.features = mock.Mock()
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.SSC_POOLS)
pools = client.RestClient.list_ssc_storage_pools(self.my_client)
self.assertEqual(eseries_fake.SSC_POOLS, pools)
def test_get_ssc_storage_pool(self):
fake_pool = eseries_fake.SSC_POOLS[0]
self.my_client.features = mock.Mock()
self.my_client._invoke = mock.Mock(
return_value=fake_pool)
pool = client.RestClient.get_ssc_storage_pool(self.my_client,
fake_pool['poolId'])
self.assertEqual(fake_pool, pool)
@ddt.data(('volumes', True), ('volumes', False),
('volume', True), ('volume', False))
@ddt.unpack
def test_get_volume_api_path(self, path_key, ssc_available):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_available)
expected_key = 'ssc_' + path_key if ssc_available else path_key
expected = self.my_client.RESOURCE_PATHS.get(expected_key)
actual = self.my_client._get_volume_api_path(path_key)
self.assertEqual(expected, actual)
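# e.g. (descriptive): with SSC_API_V2 supported, path_key 'volumes' resolves
# to RESOURCE_PATHS['ssc_volumes']; without it, to RESOURCE_PATHS['volumes'].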
@ddt.data(True, False)
def test_get_volume_api_path_invalid(self, ssc_available):
key = 'invalidKey'
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_available)
self.assertRaises(KeyError, self.my_client._get_volume_api_path, key)
def test_list_volumes(self):
url = client.RestClient.RESOURCE_PATHS['ssc_volumes']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUMES)
volumes = client.RestClient.list_volumes(self.my_client)
self.assertEqual(eseries_fake.VOLUMES, volumes)
self.my_client._invoke.assert_called_once_with('GET', url)
@ddt.data(client.RestClient.ID, client.RestClient.WWN,
client.RestClient.NAME)
def test_list_volume_v1(self, uid_field_name):
url = client.RestClient.RESOURCE_PATHS['volumes']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUMES)
volume = client.RestClient.list_volume(self.my_client,
fake_volume[uid_field_name])
self.my_client._invoke.assert_called_once_with('GET', url)
self.assertEqual(fake_volume, volume)
def test_list_volume_v1_not_found(self):
url = client.RestClient.RESOURCE_PATHS['volumes']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUMES)
self.assertRaises(exception.VolumeNotFound,
client.RestClient.list_volume,
self.my_client, 'fakeId')
self.my_client._invoke.assert_called_once_with('GET', url)
def test_list_volume_v2(self):
url = client.RestClient.RESOURCE_PATHS['ssc_volume']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client._invoke = mock.Mock(return_value=fake_volume)
volume = client.RestClient.list_volume(self.my_client,
fake_volume['id'])
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
mock.ANY})
self.assertEqual(fake_volume, volume)
def test_list_volume_v2_not_found(self):
status_code = 404
url = client.RestClient.RESOURCE_PATHS['ssc_volume']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
msg = "Response error code - %s." % status_code
self.my_client._invoke = mock.Mock(
side_effect=es_exception.WebServiceException(message=msg,
status_code=
status_code))
self.assertRaises(exception.VolumeNotFound,
client.RestClient.list_volume,
self.my_client, 'fakeId')
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
mock.ANY})
def test_list_volume_v2_failure(self):
status_code = 422
url = client.RestClient.RESOURCE_PATHS['ssc_volume']
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
msg = "Response error code - %s." % status_code
self.my_client._invoke = mock.Mock(
side_effect=es_exception.WebServiceException(message=msg,
status_code=
status_code))
self.assertRaises(es_exception.WebServiceException,
client.RestClient.list_volume, self.my_client,
'fakeId')
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
mock.ANY})
def test_create_volume_V1(self):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
create_volume = self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUME)
volume = client.RestClient.create_volume(self.my_client,
'fakePool', '1', 1)
args, kwargs = create_volume.call_args
verb, url, body = args
# Ensure the correct API was used
self.assertEqual('/storage-systems/{system-id}/volumes', url)
self.assertEqual(eseries_fake.VOLUME, volume)
def test_create_volume_V2(self):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
create_volume = self.my_client._invoke = mock.Mock(
return_value=eseries_fake.VOLUME)
volume = client.RestClient.create_volume(self.my_client,
'fakePool', '1', 1)
args, kwargs = create_volume.call_args
verb, url, body = args
# Ensure the correct API was used
self.assertIn('/storage-systems/{system-id}/ssc/volumes', url,
'The legacy API was used!')
self.assertEqual(eseries_fake.VOLUME, volume)
def test_create_volume_unsupported_specs(self):
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=False)
self.my_client.api_version = '01.52.9000.1'
self.assertRaises(exception.NetAppDriverException,
client.RestClient.create_volume, self.my_client,
'1', 'label', 1, read_cache=True)
@ddt.data(True, False)
def test_update_volume(self, ssc_api_enabled):
label = 'updatedName'
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
expected_volume = copy.deepcopy(fake_volume)
expected_volume['name'] = label
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_api_enabled)
self.my_client._invoke = mock.Mock(return_value=expected_volume)
updated_volume = self.my_client.update_volume(fake_volume['id'],
label)
if ssc_api_enabled:
url = self.my_client.RESOURCE_PATHS.get('ssc_volume')
else:
url = self.my_client.RESOURCE_PATHS.get('volume')
self.my_client._invoke.assert_called_once_with('POST', url,
{'name': label},
**{'object-id':
fake_volume['id']}
)
self.assertDictMatch(expected_volume, updated_volume)
def test_get_pool_operation_progress(self):
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
fake_response = copy.deepcopy(eseries_fake.FAKE_POOL_ACTION_PROGRESS)
self.my_client._invoke = mock.Mock(return_value=fake_response)
response = self.my_client.get_pool_operation_progress(fake_pool['id'])
url = self.my_client.RESOURCE_PATHS.get('pool_operation_progress')
self.my_client._invoke.assert_called_once_with('GET', url,
**{'object-id':
fake_pool['id']})
self.assertEqual(fake_response, response)
def test_extend_volume(self):
new_capacity = 10
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
self.my_client._invoke = mock.Mock(return_value=fake_volume)
expanded_volume = self.my_client.expand_volume(fake_volume['id'],
new_capacity, False)
url = self.my_client.RESOURCE_PATHS.get('volume_expand')
body = {'expansionSize': new_capacity, 'sizeUnit': 'gb'}
self.my_client._invoke.assert_called_once_with('POST', url, body,
**{'object-id':
fake_volume['id']})
self.assertEqual(fake_volume, expanded_volume)
def test_extend_volume_thin(self):
new_capacity = 10
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=True)
self.my_client._invoke = mock.Mock(return_value=fake_volume)
expanded_volume = self.my_client.expand_volume(fake_volume['id'],
new_capacity, True)
url = self.my_client.RESOURCE_PATHS.get('thin_volume_expand')
body = {'newVirtualSize': new_capacity, 'sizeUnit': 'gb',
'newRepositorySize': new_capacity}
self.my_client._invoke.assert_called_once_with('POST', url, body,
**{'object-id':
fake_volume['id']})
self.assertEqual(fake_volume, expanded_volume)
@ddt.data(True, False)
def test_delete_volume(self, ssc_api_enabled):
fake_volume = copy.deepcopy(eseries_fake.VOLUME)
self.my_client.features = mock.Mock()
self.my_client.features.SSC_API_V2 = na_utils.FeatureState(
supported=ssc_api_enabled)
self.my_client._invoke = mock.Mock()
self.my_client.delete_volume(fake_volume['id'])
if ssc_api_enabled:
url = self.my_client.RESOURCE_PATHS.get('ssc_volume')
else:
url = self.my_client.RESOURCE_PATHS.get('volume')
self.my_client._invoke.assert_called_once_with('DELETE', url,
**{'object-id':
fake_volume['id']})
def test_list_snapshot_group(self):
grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=grp))
fake_ref = 'fake'
result = self.my_client.list_snapshot_group(fake_ref)
self.assertEqual(grp, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_group'],
**{'object-id': fake_ref})
def test_list_snapshot_groups(self):
grps = [copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)]
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=grps))
result = self.my_client.list_snapshot_groups()
self.assertEqual(grps, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_groups'])
def test_delete_snapshot_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
fake_ref = 'fake'
self.my_client.delete_snapshot_group(fake_ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['snapshot_group'],
**{'object-id': fake_ref})
@ddt.data((None, None, None, None, None), ('1', 50, 75, 32, 'purgepit'))
@ddt.unpack
def test_create_snapshot_group(self, pool_id, repo, warn, limit, policy):
vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vol))
snap_grp = copy.deepcopy(eseries_fake.SNAPSHOT_GROUP)
result = self.my_client.create_snapshot_group(
snap_grp['label'], snap_grp['id'], pool_id, repo, warn, limit,
policy)
self.assertEqual(vol, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_groups'],
{'baseMappableObjectId': snap_grp['id'], 'name': snap_grp['label'],
'storagePoolId': pool_id, 'repositoryPercentage': repo,
'warningThreshold': warn, 'autoDeleteLimit': limit,
'fullPolicy': policy})
def test_list_snapshot_volumes(self):
vols = [copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)]
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vols))
result = self.my_client.list_snapshot_volumes()
self.assertEqual(vols, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_volumes'])
def test_delete_snapshot_volume(self):
invoke = self.mock_object(self.my_client, '_invoke')
fake_ref = 'fake'
self.my_client.delete_snapshot_volume(fake_ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['snapshot_volume'],
**{'object-id': fake_ref})
@ddt.data((None, None, None, None), ('1', 50, 75, 'readWrite'))
@ddt.unpack
def test_create_snapshot_volume(self, pool_id, repo, warn, mode):
vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vol))
result = self.my_client.create_snapshot_volume(
vol['basePIT'], vol['label'], vol['id'], pool_id,
repo, warn, mode)
self.assertEqual(vol, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_volumes'],
mock.ANY)
def test_update_snapshot_volume(self):
snap_id = '1'
label = 'name'
pct = 99
vol = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=vol))
result = self.my_client.update_snapshot_volume(snap_id, label, pct)
self.assertEqual(vol, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_volume'],
{'name': label, 'fullThreshold': pct}, **{'object-id': snap_id})
def test_create_snapshot_image(self):
img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=img))
grp_id = '1'
result = self.my_client.create_snapshot_image(grp_id)
self.assertEqual(img, result)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['snapshot_images'],
{'groupId': grp_id})
def test_list_snapshot_image(self):
img = copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=img))
fake_ref = 'fake'
result = self.my_client.list_snapshot_image(fake_ref)
self.assertEqual(img, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_image'],
**{'object-id': fake_ref})
def test_list_snapshot_images(self):
imgs = [copy.deepcopy(eseries_fake.SNAPSHOT_IMAGE)]
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=imgs))
result = self.my_client.list_snapshot_images()
self.assertEqual(imgs, result)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['snapshot_images'])
def test_delete_snapshot_image(self):
invoke = self.mock_object(self.my_client, '_invoke')
fake_ref = 'fake'
self.my_client.delete_snapshot_image(fake_ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['snapshot_image'],
**{'object-id': fake_ref})
def test_create_consistency_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
name = 'fake'
self.my_client.create_consistency_group(name)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['cgroups'], mock.ANY)
def test_list_consistency_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
ref = 'fake'
self.my_client.get_consistency_group(ref)
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['cgroup'],
**{'object-id': ref})
def test_list_consistency_groups(self):
invoke = self.mock_object(self.my_client, '_invoke')
self.my_client.list_consistency_groups()
invoke.assert_called_once_with(
'GET', self.my_client.RESOURCE_PATHS['cgroups'])
def test_delete_consistency_group(self):
invoke = self.mock_object(self.my_client, '_invoke')
ref = 'fake'
self.my_client.delete_consistency_group(ref)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['cgroup'],
**{'object-id': ref})
def test_add_consistency_group_member(self):
invoke = self.mock_object(self.my_client, '_invoke')
vol_id = eseries_fake.VOLUME['id']
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.add_consistency_group_member(vol_id, cg_id)
invoke.assert_called_once_with(
'POST', self.my_client.RESOURCE_PATHS['cgroup_members'],
mock.ANY, **{'object-id': cg_id})
def test_remove_consistency_group_member(self):
invoke = self.mock_object(self.my_client, '_invoke')
vol_id = eseries_fake.VOLUME['id']
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.remove_consistency_group_member(vol_id, cg_id)
invoke.assert_called_once_with(
'DELETE', self.my_client.RESOURCE_PATHS['cgroup_member'],
**{'object-id': cg_id, 'vol-id': vol_id})
def test_create_consistency_group_snapshot(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.create_consistency_group_snapshot(cg_id)
invoke.assert_called_once_with('POST', path, **{'object-id': cg_id})
@ddt.data(0, 32)
def test_delete_consistency_group_snapshot(self, seq_num):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.delete_consistency_group_snapshot(cg_id, seq_num)
invoke.assert_called_once_with(
'DELETE', path, **{'object-id': cg_id, 'seq-num': seq_num})
def test_get_consistency_group_snapshots(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshots')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.get_consistency_group_snapshots(cg_id)
invoke.assert_called_once_with(
'GET', path, **{'object-id': cg_id})
def test_create_cg_snapshot_view(self):
cg_snap_view = copy.deepcopy(
eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME)
view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=cg_snap_view))
list_views = self.mock_object(
self.my_client, 'list_cg_snapshot_views',
mock.Mock(return_value=[view]))
name = view['name']
snap_id = view['basePIT']
path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.my_client.create_cg_snapshot_view(cg_id, name, snap_id)
invoke.assert_called_once_with(
'POST', path, mock.ANY, **{'object-id': cg_id})
list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef'])
def test_create_cg_snapshot_view_not_found(self):
cg_snap_view = copy.deepcopy(
eseries_fake.FAKE_CONSISTENCY_GROUP_SNAPSHOT_VOLUME)
view = copy.deepcopy(eseries_fake.SNAPSHOT_VOLUME)
invoke = self.mock_object(self.my_client, '_invoke', mock.Mock(
return_value=cg_snap_view))
list_views = self.mock_object(
self.my_client, 'list_cg_snapshot_views',
mock.Mock(return_value=[view]))
del_view = self.mock_object(self.my_client, 'delete_cg_snapshot_view')
name = view['name']
# Ensure we don't get a match on the retrieved views
snap_id = None
path = self.my_client.RESOURCE_PATHS.get('cgroup_cgsnap_views')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
self.assertRaises(
exception.NetAppDriverException,
self.my_client.create_cg_snapshot_view, cg_id, name, snap_id)
invoke.assert_called_once_with(
'POST', path, mock.ANY, **{'object-id': cg_id})
list_views.assert_called_once_with(cg_id, cg_snap_view['cgViewRef'])
del_view.assert_called_once_with(cg_id, cg_snap_view['id'])
def test_list_cg_snapshot_views(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snapshot_views')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
view_id = 'id'
self.my_client.list_cg_snapshot_views(cg_id, view_id)
invoke.assert_called_once_with(
'GET', path, **{'object-id': cg_id, 'view-id': view_id})
def test_delete_cg_snapshot_view(self):
invoke = self.mock_object(self.my_client, '_invoke')
path = self.my_client.RESOURCE_PATHS.get('cgroup_snap_view')
cg_id = eseries_fake.FAKE_CONSISTENCY_GROUP['id']
view_id = 'id'
self.my_client.delete_cg_snapshot_view(cg_id, view_id)
invoke.assert_called_once_with(
'DELETE', path, **{'object-id': cg_id, 'view-id': view_id})
@ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3',
'01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4')
def test_api_version_not_support_asup(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertFalse(self.my_client.features.AUTOSUPPORT.supported)
@ddt.data('01.52.9000.3', '01.52.9000.4', '01.52.8999.2',
'01.52.8999.3', '01.53.8999.3', '01.53.9000.2',
'02.51.9000.3', '02.52.8999.3', '02.51.8999.2')
def test_api_version_supports_asup(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertTrue(self.my_client.features.AUTOSUPPORT)
@ddt.data('00.00.00.00', '01.52.9000.2', '01.52.9001.2', '01.51.9000.3',
'01.51.9001.3', '01.51.9010.5', '0.53.9000.3', '0.53.9001.4')
def test_api_version_not_support_chap(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertFalse(self.my_client.features.CHAP_AUTHENTICATION)
@ddt.data('01.53.9000.15', '01.53.9000.16', '01.53.8999.15',
'01.54.8999.16', '01.54.9010.15', '01.54.9090.15',
'02.52.9000.15', '02.53.8999.15', '02.54.8999.14')
def test_api_version_supports_chap(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertTrue(self.my_client.features.CHAP_AUTHENTICATION)
@ddt.data('00.00.00.00', '01.52.9000.1', '01.52.9001.2', '00.53.9001.3',
'01.53.9090.1', '1.53.9010.14', '0.53.9011.15')
def test_api_version_not_support_ssc_api(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertFalse(self.my_client.features.SSC_API_V2.supported)
@ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1',
'01.53.9010.20', '01.53.9010.17', '01.54.9000.1',
'02.51.9000.3', '02.52.8999.3', '02.51.8999.2')
def test_api_version_supports_ssc_api(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertTrue(self.my_client.features.SSC_API_V2.supported)
@ddt.data('00.00.00.00', '01.52.9000.5', '01.52.9001.2', '00.53.9001.3',
'01.52.9090.1', '1.52.9010.7', '0.53.9011.7')
def test_api_version_not_support_1_3(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertFalse(self.my_client.features.REST_1_3_RELEASE.supported)
@ddt.data('01.53.9000.1', '01.53.9000.5', '01.53.8999.1',
'01.54.9010.20', '01.54.9000.1', '02.51.9000.3',
'02.52.8999.3', '02.51.8999.2')
def test_api_version_1_3(self, api_version):
self.mock_object(client.RestClient,
'get_eseries_api_info',
mock.Mock(return_value=('proxy', api_version)))
client.RestClient._init_features(self.my_client)
self.assertTrue(self.my_client.features.REST_1_3_RELEASE.supported)
def test_invoke_bad_content_type(self):
"""Tests the invoke behavior with a non-JSON response"""
fake_response = mock.Mock()
fake_response.json = mock.Mock(side_effect=scanner.JSONDecodeError(
'', '{}', 1))
fake_response.status_code = 424
fake_response.text = "Fake Response"
self.mock_object(self.my_client, 'invoke_service',
mock.Mock(return_value=fake_response))
self.assertRaises(es_exception.WebServiceException,
self.my_client._invoke, 'GET',
eseries_fake.FAKE_ENDPOINT_HTTP)
def test_list_backend_store(self):
path = self.my_client.RESOURCE_PATHS.get('persistent-store')
fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE)
invoke = self.mock_object(
self.my_client, '_invoke', mock.Mock(
return_value=fake_store))
expected = json.loads(fake_store.get('value'))
result = self.my_client.list_backend_store('key')
self.assertEqual(expected, result)
invoke.assert_called_once_with('GET', path, key='key')
def test_save_backend_store(self):
path = self.my_client.RESOURCE_PATHS.get('persistent-stores')
fake_store = copy.deepcopy(eseries_fake.FAKE_BACKEND_STORE)
key = 'key'
invoke = self.mock_object(
self.my_client, '_invoke',
mock.Mock())
self.my_client.save_backend_store(key, fake_store)
invoke.assert_called_once_with('POST', path, mock.ANY)
@ddt.ddt
class TestWebserviceClientTestCase(test.TestCase):
def setUp(self):
"""sets up the mock tests"""
super(TestWebserviceClientTestCase, self).setUp()
self.mock_log = mock.Mock()
self.mock_object(client, 'LOG', self.mock_log)
self.webclient = client.WebserviceClient('http', 'host', '80',
'/test', 'user', '****')
@ddt.data({'params': {'host': None, 'scheme': 'https', 'port': '80'}},
{'params': {'host': 'host', 'scheme': None, 'port': '80'}},
{'params': {'host': 'host', 'scheme': 'http', 'port': None}})
@ddt.unpack
def test__validate_params_value_error(self, params):
"""Tests various scenarios for ValueError in validate method"""
self.assertRaises(exception.InvalidInput,
self.webclient._validate_params, **params)
def test_invoke_service_no_endpoint_error(self):
"""Tests Exception and Log error if no endpoint is provided"""
self.webclient._endpoint = None
log_error = 'Unexpected error while invoking web service'
self.assertRaises(exception.NetAppDriverException,
self.webclient.invoke_service)
self.assertTrue(self.mock_log.exception.find(log_error))
def test_invoke_service(self):
"""Tests if invoke_service evaluates the right response"""
self.webclient._endpoint = eseries_fake.FAKE_ENDPOINT_HTTP
self.mock_object(self.webclient.conn, 'request',
mock.Mock(return_value=eseries_fake.FAKE_INVOC_MSG))
result = self.webclient.invoke_service()
self.assertIsNotNone(result)
|
|
# -*- coding: utf-8 -*-
from DeviceTemplate import Ui_Form
import time, os, sys, gc
from PyQt4 import QtCore, QtGui
#from acq4.pyqtgraph.graphicsItems import ImageItem
import acq4.Manager
from acq4.util.imageAnalysis import *
from acq4.util.debug import *
import numpy as np
import acq4.pyqtgraph as pg
#import acq4.pyqtgraph.WidgetGroup as WidgetGroup
#from acq4.pyqtgraph.ProgressDialog import ProgressDialog
from acq4.util.HelpfulException import HelpfulException
class ScannerDeviceGui(QtGui.QWidget):
def __init__(self, dev, win):
QtGui.QWidget.__init__(self)
self.dev = dev
self.win = win
self.ui = Ui_Form()
self.ui.setupUi(self)
self.stateGroup = pg.WidgetGroup({
'duration': self.ui.scanDurationSpin,
'xMin': self.ui.xMinSpin,
'xMax': self.ui.xMaxSpin,
'yMin': self.ui.yMinSpin,
'yMax': self.ui.yMaxSpin,
'splitter': self.ui.splitter,
})
spos = dev.getShutterVals()
if spos is None:
self.ui.shutterGroup.hide()
else:
self.shutterChanged()
self.ui.shutterXSpin.setValue(spos[0])
self.ui.shutterYSpin.setValue(spos[1])
## Populate Device lists
#defCam = None
#if 'defaultCamera' in self.dev.config:
#defCam = self.dev.config['defaultCamera']
defCam = self.dev.config.get('defaultCamera', None)
#defLaser = None
#if 'defaultLaser' in self.dev.config:
#defLaser = self.dev.config['defaultLaser']
defLaser = self.dev.config.get('defaultLaser', None)
#devs = self.dev.dm.listDevices()
#for d in devs:
#self.ui.cameraCombo.addItem(d)
#self.ui.laserCombo.addItem(d)
#if d == defCam:
#self.ui.cameraCombo.setCurrentIndex(self.ui.cameraCombo.count()-1)
#if d == defLaser:
#self.ui.laserCombo.setCurrentIndex(self.ui.laserCombo.count()-1)
self.ui.cameraCombo.setTypes('camera')
self.ui.laserCombo.setTypes('laser')
self.spots = []
## Populate list of calibrations
self.updateCalibrationList()
## load default config
state = self.dev.loadCalibrationDefaults()
if state is not None:
self.stateGroup.setState(state)
## create graphics scene
#self.image = ImageItem()
#self.scene = self.ui.view.scene
#self.ui.view.enableMouse()
#self.scene.addItem(self.image)
#self.ui.view.setAspectLocked(True)
#self.ui.view.invertY()
self.ui.calibrateBtn.clicked.connect(self.calibrateClicked)
self.ui.storeCamConfBtn.clicked.connect(self.storeCamConf)
self.ui.deleteBtn.clicked.connect(self.deleteClicked)
self.ui.shutterBtn.clicked.connect(self.shutterClicked)
self.dev.sigShutterChanged.connect(self.shutterChanged)
def shutterClicked(self):
self.dev.setShutterOpen(not self.lastShutterState)
def shutterChanged(self):
sh = self.dev.getShutterOpen()
self.lastShutterState = sh
if sh:
self.ui.shutterBtn.setText('Close Shutter')
else:
self.ui.shutterBtn.setText('Open Shutter')
def updateCalibrationList(self):
self.ui.calibrationList.clear()
## Populate calibration lists
index = self.dev.getCalibrationIndex()
for laser in index:
for obj in index[laser]:
cal = index[laser][obj]
spot = '%0.0f, %0.1f um' % (cal['spot'][0], cal['spot'][1]*1e6)
date = cal['date']
item = QtGui.QTreeWidgetItem([', '.join(obj), laser, str(spot), date])
item.opticState = obj
self.ui.calibrationList.addTopLevelItem(item)
def storeCamConf(self):
cam = str(self.ui.cameraCombo.currentText())
self.dev.storeCameraConfig(cam)
def calibrateClicked(self):
self.ui.calibrateBtn.setEnabled(False)
self.ui.calibrateBtn.setChecked(True)
self.ui.calibrateBtn.setText('Calibrating...')
try:
cam = str(self.ui.cameraCombo.currentText())
laser = str(self.ui.laserCombo.currentText())
#obj = self.dev.getObjective()
opticState = self.dev.getDeviceStateKey()
## Run calibration
(cal, spot) = self.runCalibration()
#gc.collect() ## a lot of memory is used in running calibration, make sure we collect all the leftovers now
#cal = MetaArray((512, 512, 2))
#spot = 100e-6
date = time.strftime('%Y.%m.%d %H:%M', time.localtime())
#fileName = cam + '_' + laser + '_' + obj + '.ma'
index = self.dev.getCalibrationIndex()
if laser not in index:
index[laser] = {}
index[laser][opticState] = {'spot': spot, 'date': date, 'params': cal}
self.dev.writeCalibrationIndex(index)
self.dev.writeCalibrationDefaults(self.stateGroup.state())
#cal.write(os.path.join(self.dev.config['calibrationDir'], fileName))
self.updateCalibrationList()
finally:
self.ui.calibrateBtn.setEnabled(True)
self.ui.calibrateBtn.setChecked(False)
self.ui.calibrateBtn.setText('Calibrate')
def deleteClicked(self):
cur = self.ui.calibrationList.currentItem()
optState = cur.opticState
laser = str(cur.text(1))
index = self.dev.getCalibrationIndex()
del index[laser][optState]
self.dev.writeCalibrationIndex(index)
self.updateCalibrationList()
def addSpot(self, pos, size):
"""Add a circle to the image"""
s2 = size/2.0
s = QtGui.QGraphicsEllipseItem(0, 0, 1, 1)
s.scale(size, size)
s.setPos(pos[0]-s2, pos[1]-s2)
s.setPen(QtGui.QPen(QtGui.QColor(100, 255, 100, 70)))
self.ui.view.addItem(s)
s.setZValue(100)
self.spots.append(s)
def clearSpots(self):
"""Clear all circles from the image"""
for s in self.spots:
self.ui.view.removeItem(s)
self.spots = []
def runCalibration(self):
"""The scanner calibration routine:
1) Measure background frame, then scan mirrors
while collecting frames as fast as possible (self.scan())
2) Locate spot in every frame using gaussian fit
3) Map image spot locations to coordinate system of Scanner device's parent
4) Do parabolic fit to determine mapping between voltage and position
"""
camera = str(self.ui.cameraCombo.currentText())
laser = str(self.ui.laserCombo.currentText())
blurRadius = 5
## Do fast scan of entire allowed command range
(background, cameraResult, positions) = self.scan()
#self.calibrationResult = {'bg': background, 'frames': cameraResult, 'pos': positions}
with pg.ProgressDialog("Calibrating scanner: Computing spot positions...", 0, 100) as dlg:
dlg.show()
dlg.raise_() # Not sure why this is needed here..
## Forget first 2 frames since some cameras can't seem to get these right.
frames = cameraResult.asArray()
frames = frames[2:]
positions = positions[2:]
## Do background subtraction
## On MemoryError, drop half of the frames and retry until the subtraction fits in memory.
finished = False
gc.collect()
while not finished:
try:
frames = frames.astype(np.float32)
frames -= background.astype(np.float32)
finished=True
except MemoryError:
frames = frames[::2,:,:]
positions = positions[::2]
finished = False
## Find a frame with a spot close to the center (within center 1/3)
cx = frames.shape[1] / 3
cy = frames.shape[2] / 3
centerSlice = blur(frames[:, cx:cx*2, cy:cy*2], (0, 5, 5)).max(axis=1).max(axis=1)
maxIndex = argmax(centerSlice)
maxFrame = frames[maxIndex]
dlg.setValue(5)
## Determine spot intensity and width
mfBlur = blur(maxFrame, blurRadius)
amp = mfBlur.max() - median(mfBlur) ## guess intensity of spot
(x, y) = argwhere(mfBlur == mfBlur.max())[0] ## guess location of spot
fit = fitGaussian2D(maxFrame, [amp, x, y, maxFrame.shape[0] / 10, 0.])[0] ## gaussian fit to locate spot exactly
# convert sigma to full width at 1/e
fit[3] = abs(2 * (2 ** 0.5) * fit[3]) ## sometimes the fit for width comes out negative. *shrug*
someFrame = cameraResult.frames()[0]
frameTransform = pg.SRTTransform(someFrame.globalTransform())
pixelSize = someFrame.info()['pixelSize'][0]
spotAmplitude = fit[0]
spotWidth = abs(fit[3] * pixelSize)
size = self.spotSize(mfBlur)
dlg.setValue(50)
## Determine location of spot within each frame,
## ignoring frames where the spot is too dim or too close to the frame edge
spotLocations = []
globalSpotLocations = []
spotCommands = []
spotFrames = []
margin = fit[3]
for i in range(len(positions)):
dlg.setValue(50. + 50. * i / frames.shape[0])
if dlg.wasCanceled():
raise HelpfulException('Calibration canceled by user.', msgType='warning')
frame = frames[i]
fBlur = blur(frame.astype(np.float32), blurRadius)
mx = fBlur.max()
diff = mx - fBlur.min()
ss = self.spotSize(fBlur)
if ss < size * 0.6:
#print "Ignoring spot:", ss
continue
#else:
#print "Keeping spot:", ss
(x, y) = argwhere(fBlur == mx)[0] # guess location of spot
if x < margin or x > frame.shape[0] - margin:
#print " ..skipping; too close to edge", x, y
continue
if y < margin or y > frame.shape[1] - margin:
#print " ..skipping; too close to edge", x, y
continue
frame[x,y] = -1 ## mark location of peak in image
## convert pixel location to coordinate system of scanner's parent
globalPos = frameTransform.map(pg.Point(x, y)) ## Map from frame pixel location to global coordinates
localPos = self.dev.mapGlobalToParent(globalPos) ## map from global to parent coordinate system. This is the position we calibrate to.
#print (x, y), (globalPos.x(), globalPos.y()), (localPos.x(), localPos.y())
spotLocations.append([localPos.x(), localPos.y()])
globalSpotLocations.append([globalPos.x(), globalPos.y()])
spotCommands.append(positions[i])
spotFrames.append(frame[newaxis])
## sanity check on spot frame
if len(spotFrames) == 0:
self.ui.view.setImage(frames)
raise HelpfulException('Calibration never detected laser spot! Looking for spots that are %f pixels wide.'% fit[3], reasons=['shutter is disabled', 'mirrors are disabled', 'objective is not clean', 'spot is not visible or not bright enough when shutter is open'])
spotFrameMax = concatenate(spotFrames).max(axis=0)
self.ui.view.setImage(spotFrameMax, transform=frameTransform)
self.clearSpots()
for sl in globalSpotLocations:
#self.addSpot(sl, fit[3]*binning[0])
self.addSpot(sl, spotWidth)
self.ui.view.autoRange()
if len(spotFrames) < 10:
raise HelpfulException('Calibration detected only %d frames with laser spot; need minimum of 10.' % len(spotFrames), reasons=['spot is too dim for camera sensitivity', 'objective is not clean', 'mirrors are scanning too quickly', 'mirror scanning region is not within the camera\'s view'])
## Fit all data to a map function
mapParams = self.generateMap(array(spotLocations), array(spotCommands))
#print
#print "Map parameters:", mapParams
if spotWidth < 0:
raise Exception()
return (mapParams, (spotAmplitude, spotWidth))
def generateMap(self, loc, cmd):
"""Generates parameters for functions that map spot locations (Loc) to command values (Cmd).
We assume that command values can be approximated by parabolic functions:
Cmd.X = A + B * Loc.X + C * Loc.Y + D * Loc.X^2 + E * Loc.Y^2
Cmd.Y = F + G * Loc.X + H * Loc.Y + I * Loc.X^2 + J * Loc.Y^2
Returns [[A, B, C, D, E], [F, G, H, I, J]]
"""
# print "==========="
# print loc
# print "============"
# print cmd
#for i in range(loc.shape[0]):
#print tuple(loc[i]), tuple(cmd[i])
## do a two-stage fit, using only linear parameters first.
## this is to make sure the second-order parameters do not interfere with the first-order fit.
def fn1(v, loc):
return v[0] + v[1] * loc[:, 0] + v[2] * loc[:, 1]
def fn2(v, loc):
return v[0] + v[1] * loc[:, 0] + v[2] * loc[:, 1] + v[3] * loc[:, 0]**2 + v[4] * loc[:, 1]**2
def erf1(v, loc, cmd):
return fn1(v, loc) - cmd
def erf2(v, loc, cmd):
return fn2(v, loc) - cmd
### sanity checks here on loc and cmd
if loc.shape[0] < 6:
raise Exception("Calibration only detected %d spots; this is not enough." % loc.shape[0])
## fit linear parameters first
xFit = leastsq(erf1, [0, 0, 0], (loc, cmd[:,0]))[0]
yFit = leastsq(erf1, [0, 0, 0], (loc, cmd[:,1]))[0]
#print "fit stage 1:", xFit, yFit
## then fit the parabolic equations, using the linear fit as the seed
#xFit = leastsq(erf2, list(xFit)+[0, 0], (loc, cmd[:,0]))[0]
#yFit = leastsq(erf2, list(yFit)+[0, 0], (loc, cmd[:,1]))[0]
# 2nd stage disabled -- we can bring this back when we have a good method
# for optimization with constraints.
xFit = list(xFit)+[0,0]
yFit = list(yFit)+[0,0]
#print "fit stage 2:", xFit, yFit
## compute fit error
errx = abs(erf2(xFit, loc, cmd[:, 0])).mean()
erry = abs(erf2(yFit, loc, cmd[:, 1])).mean()
print "Fit error:", errx, erry
self.dev.lastCalData = (loc, cmd)
return (list(xFit), list(yFit))
def spotSize(self, frame):
"""Return the normalized integral of all values in the frame that are between max and max/e"""
med = median(frame)
fr1 = frame - med ## subtract median value so baseline is at 0
mask = fr1 > (fr1.max() / np.e) ## find all values > max/e
ss = (fr1 * mask).sum() / mask.sum() ## integrate values within mask, divide by mask area
assert(not np.isnan(ss))
return ss
def scan(self):
"""Scan over x and y ranges in a nPts x nPts grid, return the image recorded at each location."""
camera = str(self.ui.cameraCombo.currentText())
laser = str(self.ui.laserCombo.currentText())
## Camera settings to use during scan
camParams = self.dev.getCameraConfig(camera)
duration = self.ui.scanDurationSpin.value()
rate = 10000
nPts = int(rate * duration)
sweeps = 20
#cameraTrigger = ones(nPts, dtype=byte)
##(cmdMin, cmdMax) = self.dev.config['commandLimits']
xRange = (self.ui.xMinSpin.value(), self.ui.xMaxSpin.value())
yRange = (self.ui.yMinSpin.value(), self.ui.yMaxSpin.value())
xDiff = xRange[1] - xRange[0]
yDiff = yRange[1] - yRange[0]
xCommand = np.fromfunction(lambda i: xRange[0] + ((xDiff * i * float(sweeps) / nPts) % xDiff), (nPts,), dtype=float)
xCommand[-1] = 0.0
yCommand = np.empty((nPts,), dtype=float)
start = 0
for i in range(sweeps):
stop = start + (nPts / sweeps)
yCommand[start:stop] = yRange[0] + yDiff * (float(i)/(sweeps-1))
start = stop
yCommand[-1] = 0.0
daqName = self.dev.config['XAxis']['device']
## Record 10 camera frames with the shutter closed
#print "parameters:", camParams
cmd = {
'protocol': {'duration': 0.0, 'timeout': 5.0},
camera: {'record': True, 'minFrames': 10, 'params': camParams, 'pushState': 'scanProt'},
#laser: {'Shutter': {'preset': 0, 'holding': 0}}
}
#print "\n\n====> Record background\n"
task = acq4.Manager.getManager().createTask(cmd)
task.execute()
result = task.getResult()
## pull result, convert to ndarray float, take average over all frames
background = result[camera].asArray().astype(float).mean(axis=0)
#print "Background shape:", result[camera]['frames'].shape
## Record full scan.
cmd = {
'protocol': {'duration': duration, 'timeout': duration+5.0},
camera: {'record': True, 'triggerProtocol': True, 'params': camParams, 'channels': {
'exposure': {'record': True},
},
'popState': 'scanProt'},
#laser: {'shutter': {'preset': 0, 'holding': 0, 'command': np.ones(len(xCommand), dtype=byte)}},
laser: {'alignMode': True},
self.dev.name(): {'xCommand': xCommand, 'yCommand': yCommand},
daqName: {'numPts': nPts, 'rate': rate, 'triggerDevice': camera}
}
#print "\n\n====> Scan\n"
task = acq4.Manager.getManager().createTask(cmd)
task.execute(block=False)
with pg.ProgressDialog("Calibrating scanner: Running scan protocol..", 0, 100) as dlg:
while not task.isDone():
dlg.setValue(100.*task.runTime()/task.duration())
if dlg.wasCanceled():
task.abort()
raise HelpfulException('Calibration canceled by user.', msgType='warning')
time.sleep(0.2)
result = task.getResult()
frames = result[camera].asMetaArray()
if frames._info[-1]['preciseTiming'] is not True:
raise HelpfulException("Calibration could not accurately measure camera frame timing.",
reasons=["The exposure signal from the camera was not recorded by the DAQ."])
#print "scan shape:", frames.shape
#print "parameters:", camParams
## Generate a list of the scanner command values for each frame
positions = []
for i in range(frames.shape[0]):
t = frames.xvals('Time')[i]
ind = int((t/duration) * nPts)
if ind >= len(xCommand):
break
positions.append([xCommand[ind], yCommand[ind]])
if frames.ndim != 3 or frames.shape[0] < 5:
raise Exception("Camera did not collect enough frames (data shape is %s)" % str(frames.shape))
if background.shape != frames.shape[1:]:
raise Exception("Background measurement frame has different shape %s from scan frames %s" % (str(background.shape), str(frames.shape[1:])))
return (background, result[camera], positions)
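# --- Illustrative sketch (not part of the original module) ---------------------
# A minimal, self-contained version of the mapping fit described in
# ScannerDeviceGui.generateMap above, using plain numpy instead of the leastsq
# helper imported via acq4.util.imageAnalysis: fit the linear terms of
# Cmd = A + B*Loc.X + C*Loc.Y, then pad the quadratic terms D and E with zeros,
# mirroring the disabled second-stage fit in the method.
def _example_generate_map(loc, cmd):
    """loc: (N, 2) spot locations; cmd: (N, 2) scanner command values."""
    design = np.column_stack([np.ones(len(loc)), loc[:, 0], loc[:, 1]])
    linear = np.linalg.lstsq(design, cmd)[0]  # columns are the X and Y fits
    xFit = list(linear[:, 0]) + [0.0, 0.0]    # [A, B, C, D=0, E=0]
    yFit = list(linear[:, 1]) + [0.0, 0.0]    # [F, G, H, I=0, J=0]
    return (xFit, yFit)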
|
|
"""
protocolserver
"""
__author__ = "Andrea Cavalli ([email protected])"
__copyright__ = "Copyright 2016, Andrea Cavalli"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import os
import socket
import protocol.utils
from protocol.utils import Event
from protocol.utils import StringToBytes
class Server(object):
"""Server-side class"""
ip = "localhost"
port = 4
def __init__(self, debug=0):
self.debug = debug
self.evt_responseRequested = Event()
def setHost(self, ip="localhost", port=4):
self.ip = ip
self.port = port
def responseRequested(self):
# Call event object with self as a sender
self.evt_responseRequested(self)
def initialize(self, HOST=None, PORT=None):
if (HOST == None):
HOST = self.ip
if (PORT == None):
PORT = self.port
client_connection = None
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
try:
try:
listen_socket.bind((HOST, PORT))
except OSError as ee:
print("SERVER> Can't bind address "+HOST+":"+str(PORT)+"! Killing some processes...")
os.system("sudo fuser -kuv "+str(PORT)+"/tcp")
listen_socket.bind((HOST, PORT))
except:
print("SERVER> Can't bind address "+HOST+":"+str(PORT)+"! Killing some processes...")
os.system("sudo fuser -kuv "+str(PORT)+"/tcp")
listen_socket.bind((HOST, PORT))
except OSError as e0:
print("SERVER> Can't bind address "+HOST+":"+str(PORT)+"! Trying to bind localhost:"+str(PORT))
listen_socket.bind(("localhost", PORT))
except:
print("SERVER> Can't bind address "+HOST+":"+str(PORT)+"! Trying to bind localhost:"+str(PORT))
listen_socket.bind(("localhost", PORT))
listen_socket.listen(1)
print('SERVER> Serving HTTP on port %s ...' % PORT)
while True:
try:
client_connection, client_address = listen_socket.accept()
request = client_connection.recv(1024)
path = str(request).split(" ")
if (len(path) >= 2):
args = path[1].split("/")
if (len(args) >= 3):
if self.debug==1:
print("SERVER> Received a server request. Responding...")
http_response = """\
HTTP/1.1 200 OK
"""
arg1 = protocol.utils.url2pathname(args[1])
arg2 = protocol.utils.url2pathname(args[2])
respData = self.evt_responseRequested.fireFunction2(self, arg1, arg2)
if (len(args) >= 4):
if (args[3] == "graphic"):
respClass = "error" if respData == "error" else "result"
respData = """\
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
<meta http-equiv="Pragma" content="no-cache" />
<meta http-equiv="Expires" content="0" />
<title>Protocol</title>
<style>
body {
font-family: Roboto, Segoe UI, Calibri, Arial, Sans-serif;
background: aliceblue;
}
::selection {
background: #FFC107 !important;
color: black !important;
}
::-moz-selection {
background: #FFC107 !important;
color: black !important;
}
.val {
border: 2px solid #64B5F6;
border-radius: 15px;
padding: 5px 15px;
background: #E3F2FD;
color: #1565C0;
font-family: monospace;
font-size: 12px;
margin: 5px;
word-wrap: break-word;
max-height: 150px;
overflow: auto;
}
.val::selection {
background: #1565C0 !important;
color: white !important;
}
.val::-moz-selection {
background: #1565C0 !important;
color: white !important;
}
.val.result {
border-color: #81C784;
background: #E8F5E9;
color: #388E3C;
}
.val.result::-moz-selection {
background: #388E3C !important;
color: white !important;
}
.val.result::selection {
background: #388E3C !important;
color: white !important;
}
.val.error {
border-color: #E57373;
background: #FFEBEE;
color: #D32F2F;
}
.val.error::selection {
background: #D32F2F !important;
color: white !important;
}
.val.error::-moz-selection {
background: #D32F2F !important;
color: white !important;
}
hr {
border-color: #9E9E9E;
border-style: solid;
margin-top: 30px;
}
</style>
</head>
<body>
<h1>Protocol</h1>
<h2>Host device information</h2>
<h5>Hostname: """+socket.gethostname()+"""</h5>
<h5>Network address: """+HOST+":"+str(PORT)+"""</h5>
<h2>Request variables</h2>
<div class=\"val\">"""+arg1+"""</div>
<div class=\"val\">"""+arg2+"""</div>
<hr>
<h2>Response</h2>
<div class=\"val """+respClass+"""\">"""+respData+"""</div>
</body>
</html>"""
client_connection.sendall(StringToBytes(http_response+respData))
elif (len(args) >= 2 and args[1] == "favicon.ico"):
if self.debug==1:
print("SERVER> Received a favicon request. Responding...")
http_response = """\
HTTP/1.1 200 OK
"""
encoded_string = b""  # fall back to an empty body if favicon.ico cannot be read
try:
with open("favicon.ico", "rb") as image_file:
encoded_string = image_file.read()
except OSError as e2:
print("SERVER> Can't find local file favicon.ico")
except TypeError as e2:
print("SERVER> Can't open local file favicon.ico")
client_connection.sendall(StringToBytes(http_response)+encoded_string)
else:
print("SERVER> Received a bad request (URL too short). Responding with error 400...")
client_connection.sendall(StringToBytes("""\
HTTP/1.1 400 Bad Request
error"""))
else:
print("SERVER> Received a bad request (Request file too short). Responding with error 400...")
client_connection.sendall(StringToBytes("""\
HTTP/1.1 400 Bad Request
error"""))
except TypeError as e2:
print("TypeError: "+str(e2))
if client_connection != None:
client_connection.close()
except OSError as e1:
print("SERVER> Can't bind address localhost:"+str(PORT)+"!")
|
|
from django.test import TestCase
from casexml.apps.case.tests.util import delete_all_xforms
from casexml.apps.stock.const import COMMTRACK_REPORT_XMLNS
from corehq.apps.commtrack.sms import handle
from corehq.apps.commtrack.tests import util
from corehq.apps.products.models import Product
from corehq.apps.reminders.util import get_two_way_number_for_recipient
from corehq.apps.sms.tests.util import setup_default_sms_test_backend
from corehq.apps.users.dbaccessors import delete_all_users
from corehq.form_processor.exceptions import LedgerValueNotFound
from corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL
from corehq.form_processor.models import XFormInstance
class SMSTests(TestCase):
user_definitions = [util.ROAMING_USER, util.FIXED_USER]
@classmethod
def setUpClass(cls):
super(SMSTests, cls).setUpClass()
cls.backend, cls.backend_mapping = setup_default_sms_test_backend()
cls.domain = util.bootstrap_domain(util.TEST_DOMAIN)
util.bootstrap_location_types(cls.domain.name)
util.bootstrap_products(cls.domain.name)
cls.products = sorted(Product.by_domain(cls.domain.name), key=lambda p: p._id)
cls.loc = util.make_loc('loc1')
cls.sp = cls.loc.linked_supply_point()
cls.users = [util.bootstrap_user(cls, **user_def) for user_def in cls.user_definitions]
@classmethod
def tearDownClass(cls):
cls.domain.delete() # domain delete cascades to everything else
cls.backend_mapping.delete()
cls.backend.delete()
delete_all_users()
super(SMSTests, cls).tearDownClass()
def tearDown(self):
delete_all_xforms()
super(SMSTests, self).tearDown()
def check_stock(self, code, amount, case_id=None, section_id='stock'):
if not case_id:
case_id = self.sp.case_id
[product] = [p for p in self.products if p.code_ == code]
try:
state = LedgerAccessorSQL.get_ledger_value(case_id, section_id, product._id)
self.assertEqual(amount, state.stock_on_hand)
except LedgerValueNotFound:
if amount != 0:
# only error if we weren't checking for no stock
raise Exception(f'Ledger value for "{section_id}" section does not exist')
def testStockReportRoaming(self):
self.assertEqual([], list(iter_commtrack_forms(self.domain.name)))
amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# soh loc1 pp 10 pq 20...
handled = handle(get_two_way_number_for_recipient(self.users[0]), 'soh {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in amounts.items())
), None)
self.assertTrue(handled)
forms = list(iter_commtrack_forms(self.domain.name))
self.assertEqual(1, len(forms))
self.assertEqual(_get_location_from_sp(self.sp), _get_location_from_form(forms[0]))
ledger_transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(self.sp.case_id)
self.assertEqual({forms[0].form_id}, set(t.form_id for t in ledger_transactions))
self.assertEqual({'balance'}, set(t.readable_type for t in ledger_transactions))
self.assertEqual(3, len(ledger_transactions))
self.check_transaction_amounts(ledger_transactions, amounts)
def testStockReportFixed(self):
self.assertEqual([], list(iter_commtrack_forms(self.domain.name)))
amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# soh loc1 pp 10 pq 20...
handled = handle(get_two_way_number_for_recipient(self.users[1]), 'soh {report}'.format(
report=' '.join('%s %s' % (k, v) for k, v in amounts.items())
), None)
self.assertTrue(handled)
forms = list(iter_commtrack_forms(self.domain.name))
self.assertEqual(1, len(forms))
self.assertEqual(_get_location_from_sp(self.sp), _get_location_from_form(forms[0]))
ledger_transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(self.sp.case_id)
self.check_transaction_amounts(ledger_transactions, amounts)
def check_transaction_amounts(self, ledger_transactions, amounts):
transactions_by_product_id = {t.entry_id: t for t in ledger_transactions}
for code, amt in amounts.items():
[product] = [p for p in self.products if p.code_ == code]
trans = transactions_by_product_id[product._id]
self.assertEqual(self.sp.case_id, trans.case_id)
self.assertEqual(amt, trans.delta)
self.assertEqual(amt, trans.updated_balance)
def check_form_type(self, is_consumption):
transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(self.sp.case_id)
transactions = [t for t in transactions if t.readable_type != 'balance']
self.assertEqual(3, len(transactions))
for transaction in transactions:
if is_consumption:
self.assertLess(transaction.delta, 0)
else:
self.assertGreater(transaction.delta, 0)
def testReceipt(self):
original_amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# First submit an soh so we can make sure receipts function
# differently from soh
handle(get_two_way_number_for_recipient(self.users[0]), 'soh {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in original_amounts.items())
), None)
received_amounts = {
'pp': 1,
'pq': 2,
'pr': 3,
}
handled = handle(get_two_way_number_for_recipient(self.users[0]), 'r {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in received_amounts.items())
), None)
self.assertTrue(handled)
self.check_form_type(is_consumption=False)
for code in original_amounts.keys():
expected_amount = original_amounts[code] + received_amounts[code]
self.check_stock(code, expected_amount)
def testLosses(self):
original_amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# First submit an soh so we can make sure losses function properly
handle(get_two_way_number_for_recipient(self.users[0]), 'soh {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in original_amounts.items())
), None)
lost_amounts = {
'pp': 1,
'pq': 2,
'pr': 3,
}
# First character in text indicates "loss"
handled = handle(get_two_way_number_for_recipient(self.users[0]), 'l {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in lost_amounts.items())
), None)
self.assertTrue(handled)
self.check_form_type(is_consumption=True)
for code in original_amounts.keys():
expected_amount = original_amounts[code] - lost_amounts[code]
self.check_stock(code, expected_amount)
def testConsumption(self):
original_amounts = {
'pp': 10,
'pq': 20,
'pr': 30,
}
# First submit an soh so we can make sure consumption functions properly
handle(get_two_way_number_for_recipient(self.users[0]), 'soh {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in original_amounts.items())
), None)
lost_amounts = {
'pp': 1,
'pq': 2,
'pr': 3,
}
# First character in text indicates "consumption"
handled = handle(get_two_way_number_for_recipient(self.users[0]), 'c {loc} {report}'.format(
loc='loc1',
report=' '.join('%s %s' % (k, v) for k, v in lost_amounts.items())
), None)
self.assertTrue(handled)
self.check_form_type(is_consumption=True)
for code in original_amounts.keys():
expected_amount = original_amounts[code] - lost_amounts[code]
self.check_stock(code, expected_amount)
def iter_commtrack_forms(domain_name):
db = XFormInstance.objects
for form_id in db.iter_form_ids_by_xmlns(domain_name, COMMTRACK_REPORT_XMLNS):
yield db.get_form(form_id)
def _get_location_from_form(form):
return form.form_data['location']
def _get_location_from_sp(sp):
return sp.location_id
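# --- Illustrative sketch (not part of the original test module) ----------------
# A minimal helper showing how the stock-on-hand SMS text exercised by the tests
# above is assembled; the product codes and quantities are purely illustrative.
def _example_soh_message(location_code, amounts):
    """Build an 'soh' report such as 'soh loc1 pp 10 pq 20 pr 30'."""
    report = ' '.join('%s %s' % (code, qty) for code, qty in sorted(amounts.items()))
    return 'soh %s %s' % (location_code, report)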
|
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
from unittest import TestCase
from muntjac.ui.label import Label
from muntjac.ui.horizontal_layout import HorizontalLayout
from muntjac.ui.grid_layout import GridLayout
from muntjac.ui.absolute_layout import AbsoluteLayout
from muntjac.ui.css_layout import CssLayout
from muntjac.ui.abstract_ordered_layout import AbstractOrderedLayout
from muntjac.ui.component_container import \
IComponentAttachListener, IComponentDetachListener
class ComponentAttachDetachListenerTest(TestCase):
def resetVariables(self):
# Attach
self._attachCounter = 0
self._attachedComponent = None
self._attachTarget = None
self._foundInContainer = False
# Detach
self._detachCounter = 0
self._detachedComponent = None
self._detachedTarget = None
# Common
self._indexOfComponent = -1
self._componentArea = None
self._componentPosition = None
def setUp(self):
super(ComponentAttachDetachListenerTest, self).setUp()
# General variables
self._attachCounter = 0
self._attachedComponent = None
self._attachTarget = None
self._foundInContainer = False
self._detachCounter = 0
self._detachedComponent = None
self._detachedTarget = None
# Ordered layout specific variables
self._indexOfComponent = -1
# Grid layout specific variables
self._componentArea = None
# Absolute layout specific variables
self._componentPosition = None
self._olayout = HorizontalLayout()
listener = MyAttachListener(self)
self._olayout.addListener(listener, IComponentAttachListener)
listener = MyDetachListener(self)
self._olayout.addListener(listener, IComponentDetachListener)
self._gridlayout = GridLayout()
listener = MyAttachListener(self)
self._gridlayout.addListener(listener, IComponentAttachListener)
listener = MyDetachListener(self)
self._gridlayout.addListener(listener, IComponentDetachListener)
self._absolutelayout = AbsoluteLayout()
listener = MyAttachListener(self)
self._absolutelayout.addListener(listener, IComponentAttachListener)
listener = MyDetachListener(self)
self._absolutelayout.addListener(listener, IComponentDetachListener)
self._csslayout = CssLayout()
listener = MyAttachListener(self)
self._csslayout.addListener(listener, IComponentAttachListener)
listener = MyDetachListener(self)
self._csslayout.addListener(listener, IComponentDetachListener)
def testOrderedLayoutAttachListener(self):
# Reset state variables
self.resetVariables()
# Add component -> Should trigger attach listener
comp = Label()
self._olayout.addComponent(comp)
# Attach counter should get incremented
self.assertEquals(1, self._attachCounter)
# The attached component should be the label
self.assertEquals(comp, self._attachedComponent)
# The attached target should be the layout
self.assertEquals(self._olayout, self._attachTarget)
# The attached component should be found in the container
self.assertTrue(self._foundInContainer)
# The index of the component should not be -1
self.assertFalse(self._indexOfComponent == -1)
def testOrderedLayoutDetachListener(self):
# Add a component to detach
comp = Label()
self._olayout.addComponent(comp)
# Reset state variables (since they are set by the attach listener)
self.resetVariables()
# Detach the component -> triggers the detach listener
self._olayout.removeComponent(comp)
# Detach counter should get incremented
self.assertEquals(1, self._detachCounter)
# The detached component should be the label
self.assertEquals(comp, self._detachedComponent)
# The detached target should be the layout
self.assertEquals(self._olayout, self._detachedTarget)
# The detached component should not be found in the container
self.assertFalse(self._foundInContainer)
# The index of the component should be -1
self.assertEquals(-1, self._indexOfComponent)
def testGridLayoutAttachListener(self):
# Reset state variables
self.resetVariables()
# Add component -> Should trigger attach listener
comp = Label()
self._gridlayout.addComponent(comp)
# Attach counter should get incremented
self.assertEquals(1, self._attachCounter)
# The attached component should be the label
self.assertEquals(comp, self._attachedComponent)
# The attached target should be the layout
self.assertEquals(self._gridlayout, self._attachTarget)
# The attached component should be found in the container
self.assertTrue(self._foundInContainer)
# The grid area should not be null
self.assertIsNotNone(self._componentArea)
def testGridLayoutDetachListener(self):
# Add a component to detach
comp = Label()
self._gridlayout.addComponent(comp)
# Reset state variables (since they are set by the attach listener)
self.resetVariables()
# Detach the component -> triggers the detach listener
self._gridlayout.removeComponent(comp)
# Detach counter should get incremented
self.assertEquals(1, self._detachCounter)
# The detached component should be the label
self.assertEquals(comp, self._detachedComponent)
# The detached target should be the layout
self.assertEquals(self._gridlayout, self._detachedTarget)
# The detached component should not be found in the container
self.assertFalse(self._foundInContainer)
# The grid area should be null
self.assertIsNone(self._componentArea)
def testAbsoluteLayoutAttachListener(self):
# Reset state variables
self.resetVariables()
# Add component -> Should trigger attach listener
comp = Label()
self._absolutelayout.addComponent(comp)
# Attach counter should get incremented
self.assertEquals(1, self._attachCounter)
# The attached component should be the label
self.assertEquals(comp, self._attachedComponent)
# The attached target should be the layout
self.assertEquals(self._absolutelayout, self._attachTarget)
# The attached component should be found in the container
self.assertTrue(self._foundInContainer)
# The component position should not be null
self.assertIsNotNone(self._componentPosition)
def testAbsoluteLayoutDetachListener(self):
# Add a component to detach
comp = Label()
self._absolutelayout.addComponent(comp)
# Reset state variables (since they are set by the attach listener)
self.resetVariables()
# Detach the component -> triggers the detach listener
self._absolutelayout.removeComponent(comp)
# Detach counter should get incremented
self.assertEquals(1, self._detachCounter)
# The detached component should be the label
self.assertEquals(comp, self._detachedComponent)
# The detached target should be the layout
self.assertEquals(self._absolutelayout, self._detachedTarget)
# The detached component should not be found in the container
self.assertFalse(self._foundInContainer)
# The component position should be null
self.assertIsNone(self._componentPosition)
def testCSSLayoutAttachListener(self):
# Reset state variables
self.resetVariables()
# Add component -> Should trigger attach listener
comp = Label()
self._csslayout.addComponent(comp)
# Attach counter should get incremented
self.assertEquals(1, self._attachCounter)
# The attached component should be the label
self.assertEquals(comp, self._attachedComponent)
# The attached target should be the layout
self.assertEquals(self._csslayout, self._attachTarget)
# The attached component should be found in the container
self.assertTrue(self._foundInContainer)
def testCSSLayoutDetachListener(self):
# Add a component to detach
comp = Label()
self._csslayout.addComponent(comp)
# Reset state variables (since they are set by the attach listener)
self.resetVariables()
# Detach the component -> triggers the detach listener
self._csslayout.removeComponent(comp)
# Detach counter should get incremented
self.assertEquals(1, self._detachCounter)
# The detached component should be the label
self.assertEquals(comp, self._detachedComponent)
# The detached target should be the layout
self.assertEquals(self._csslayout, self._detachedTarget)
# The detached component should not be found in the container
self.assertFalse(self._foundInContainer)
class MyAttachListener(IComponentAttachListener):
def __init__(self, test):
self._test = test
def componentAttachedToContainer(self, event):
self._test._attachCounter += 1
self._test._attachedComponent = event.getAttachedComponent()
self._test._attachTarget = event.getContainer()
# Search for component in container (should be found)
it = self._test._attachTarget.getComponentIterator()
while True:
try:
if it.next() == self._test._attachedComponent:
self._test._foundInContainer = True
break
except StopIteration:
break
# Get layout specific variables
if isinstance(self._test._attachTarget, AbstractOrderedLayout):
self._test._indexOfComponent = \
self._test._attachTarget.getComponentIndex(
self._test._attachedComponent)
elif isinstance(self._test._attachTarget, GridLayout):
self._test._componentArea = \
self._test._attachTarget.getComponentArea(
self._test._attachedComponent)
elif isinstance(self._test._attachTarget, AbsoluteLayout):
self._test._componentPosition = \
self._test._attachTarget.getPosition(
self._test._attachedComponent)
class MyDetachListener(IComponentDetachListener):
def __init__(self, test):
self._test = test
def componentDetachedFromContainer(self, event):
self._test._detachCounter += 1
self._test._detachedComponent = event.getDetachedComponent()
self._test._detachedTarget = event.getContainer()
# Search for component in container (should NOT be found)
it = self._test._detachedTarget.getComponentIterator()
while True:
try:
if it.next() == self._test._detachedComponent:
self._test._foundInContainer = True
break
except StopIteration:
break
# Get layout specific variables
if isinstance(self._test._detachedTarget, AbstractOrderedLayout):
self._test._indexOfComponent = \
self._test._detachedTarget.getComponentIndex(
self._test._detachedComponent)
elif isinstance(self._test._detachedTarget, GridLayout):
self._test._componentArea = \
self._test._detachedTarget.getComponentArea(
self._test._detachedComponent)
elif isinstance(self._test._detachedTarget, AbsoluteLayout):
self._test._componentPosition = \
self._test._detachedTarget.getPosition(
self._test._detachedComponent)
|
|
# -------------------------------------------------------------------------------------------------
# Name: ToolBar
# Purpose: Represents Canvas Toolbox. Manages Canvas Tools and Inks
#
# Author: Rafael Vasco
#
# Created: 06/10/13
# Copyright: (c) Rafael Vasco 2014
# -------------------------------------------------------------------------------------------------
from PyQt5.QtCore import pyqtSignal, Qt, QSize
from PyQt5.QtGui import QColor, QIcon, QPixmap
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QButtonGroup, \
QStackedWidget, QLabel, \
QListWidget, QPushButton
from src.model.resources_cache import ResourcesCache
class ToolBar(QWidget):
mouseEntered = pyqtSignal()
mouseLeft = pyqtSignal()
toolChanged = pyqtSignal(str)
primaryInkChanged = pyqtSignal(str)
secondaryInkChanged = pyqtSignal(str)
def __init__(self):
super(ToolBar, self).__init__()
self.setAttribute(Qt.WA_StaticContents)
self.setAttribute(Qt.WA_NoSystemBackground)
self.setFont(ResourcesCache.get("BigFont"))
self._registeredTools = {}
self._registeredInks = {}
self._toolSlots = []
self._inkSlots = []
self._currentActiveToolSlot = None
self._previousActiveToolSlot = None
self._currentEditedInkSlot = None
self._previousEditedInkSlot = None
self._editMode = False
self._backgroundColor = QColor(40, 40, 40)
self._toolLabelColor = QColor(112, 231, 255)
self._layout = QVBoxLayout()
self._layout.setAlignment(Qt.AlignTop)
self._layout.setContentsMargins(4, 4, 4, 4)
top_layout = QHBoxLayout()
self._layout.addLayout(top_layout)
self._toolsLayout = QHBoxLayout()
self._inksLayout = QHBoxLayout()
self._toolsLayout.setContentsMargins(0, 0, 0, 0)
self._toolsLayout.setAlignment(Qt.AlignLeft)
self._inksLayout.setContentsMargins(0, 0, 0, 0)
self._inksLayout.setAlignment(Qt.AlignRight)
top_layout.addLayout(self._toolsLayout)
top_layout.addLayout(self._inksLayout)
self._toolsButtonGroup = QButtonGroup()
self._toolsButtonGroup.buttonClicked.connect(
self._on_tool_slot_triggered)
self._inksButtonGroup = QButtonGroup()
self._inksButtonGroup.setExclusive(False)
self._inksButtonGroup.buttonClicked.connect(self._on_ink_slot_triggered)
self.setLayout(self._layout)
self._toolbarSubPanel = None
self._toolsListWidget = None
self._toolsOptionsPanel = None
self._init_edit_panel()
self._add_ink_slot(0)
self._add_ink_slot(1)
self.resize(0, 50)
# -------------------------------------------------------------------------
def get_tool_by_name(self, name):
return self._registeredTools[name]
def register_tool(self, tool, is_default=None):
if tool.name not in self._registeredTools:
self._registeredTools[tool.name] = tool
self._toolsListWidget.addItem(tool.name)
if is_default is True:
self._toolsListWidget.setCurrentRow(0)
self._build_tool_options_pane(tool)
if len(self._toolSlots) < 4:
slot_index = self._add_tool_slot(is_default)
self._assign_tool_to_slot(tool, slot_index)
def register_ink(self, ink, slot):
if not ink.name in self._registeredInks:
self._registeredInks[ink.name] = ink
self._inksListWidget.addItem(ink.name)
self._build_ink_options_pane(ink)
if self._inkSlots[slot]['id'] is None:
self._assign_ink_to_slot(ink, slot)
def switch_tool_slot(self, slot):
self._previousActiveToolSlot = self._currentActiveToolSlot
self._currentActiveToolSlot = slot
if self._currentActiveToolSlot == self._previousActiveToolSlot:
return
tool_name = self._toolSlots[slot]['id']
self._toolSlots[slot]['button'].setChecked(True)
self.toolChanged.emit(tool_name)
self._select_tool_on_list(tool_name)
# -------------------------------------------------------------------------
def _go_back_to_last_tool(self):
self.switch_tool_slot(self._previousActiveToolSlot)
def _add_tool_slot(self, selected=None):
slot_button = QPushButton()
slot_button.setCheckable(True)
index = len(self._toolSlots)
if selected is not None and selected is True:
slot_button.setChecked(True)
slot = {
'id': None,
'button': slot_button
}
if selected:
self._currentActiveToolSlot = index
self._toolSlots.append(slot)
self._toolsButtonGroup.addButton(slot_button, index)
self._toolsLayout.addWidget(slot_button)
return index
def _add_ink_slot(self, slot_number):
slot_button = QPushButton()
slot_button.setFont(self.font())
slot_button.setStyleSheet(
"border-color: rgb(56,56,56); background-color: rgb(17,17,"
"17); font-size: 12pt;")
index = len(self._inkSlots)
if slot_number == 0:
icon = QIcon()
icon.addPixmap(QPixmap(":/icons/ico_mouse_button1"), QIcon.Normal,
QIcon.Off)
slot_button.setIcon(icon)
slot_button.setIconSize(QSize(18, 23))
elif slot_number == 1:
icon = QIcon()
icon.addPixmap(QPixmap(":/icons/ico_mouse_button2"), QIcon.Normal,
QIcon.Off)
slot_button.setIcon(icon)
slot_button.setIconSize(QSize(18, 23))
slot = {
'id': None,
'button': slot_button
}
self._inkSlots.append(slot)
self._inksButtonGroup.addButton(slot_button)
self._inksButtonGroup.setId(slot_button, index)
self._inksLayout.addWidget(slot_button)
return index
def _assign_tool_to_slot(self, tool, slot):
if slot < 0 or slot > len(self._toolSlots) - 1:
raise Exception(
'[ToolBar] > _assignToolToSlot : invalid slot parameter')
self._toolSlots[slot]['id'] = tool.name
icon = tool.icon
if icon is not None:
tool_button = self._toolSlots[slot]['button']
tool_button.setIcon(tool.icon)
tool_button.setIconSize(QSize(24, 24))
def _assign_ink_to_slot(self, ink, slot):
if slot != 0 and slot != 1:
raise Exception(
'[ToolBar] > _assignInkToSlot : invalid slot parameter')
ink_name = ink.name
self._inkSlots[slot]['id'] = ink_name
self._inkSlots[slot]['button'].setText(ink_name)
if slot == 0:
self.primaryInkChanged.emit(ink_name)
elif slot == 1:
self.secondaryInkChanged.emit(ink_name)
def _init_edit_panel(self):
self._toolbarSubPanel = QStackedWidget()
# 1. Initialize Tools Control Panel -----------------------------------
self._toolsListWidget = QListWidget()
self._toolsListWidget.currentRowChanged.connect(
lambda v: self._toolsOptionsPanel.setCurrentIndex(v))
self._toolsListWidget.setMaximumSize(QSize(150, 200))
self._toolsListWidget.itemClicked.connect(
self._on_tool_list_item_clicked)
# Tools Subpanel ------------------------------------------------------
tools_control_panel = QWidget()
tools_control_panel_layout = QHBoxLayout()
tools_control_panel.setLayout(tools_control_panel_layout)
tools_control_panel_layout.setAlignment(Qt.AlignLeft)
# Tools List ----------------------------------------------------------
tools_list_sublayout = QVBoxLayout()
tools_list_sublayout.setAlignment(Qt.AlignTop)
tools_list_sublayout.setContentsMargins(0, 0, 0, 0)
tools_list_sublayout.addWidget(QLabel("Tools"))
tools_list_sublayout.addWidget(self._toolsListWidget)
tools_control_panel_layout.addLayout(tools_list_sublayout)
# Tools Options -------------------------------------------------------
tools_options_sublayout = QVBoxLayout()
tools_options_sublayout.setAlignment(Qt.AlignTop)
tools_control_panel_layout.addLayout(tools_options_sublayout)
self._toolsOptionsPanel = QStackedWidget()
tools_options_sublayout.addWidget(QLabel("Tools Options"))
tools_options_sublayout.addWidget(self._toolsOptionsPanel)
self._toolbarSubPanel.addWidget(tools_control_panel)
# 2. Initialize Inks Control Panel ------------------------------------
self._inksListWidget = QListWidget()
self._inksListWidget.currentRowChanged.connect(
lambda v: self._inksOptionsPanel.setCurrentIndex(v))
self._inksListWidget.setMaximumSize(QSize(150, 200))
self._inksListWidget.itemClicked.connect(self._on_ink_list_item_clicked)
# Inks Subpanel -------------------------------------------------------
inks_control_panel = QWidget()
inks_control_panel_layout = QHBoxLayout()
inks_control_panel.setLayout(inks_control_panel_layout)
inks_control_panel_layout.setAlignment(Qt.AlignLeft)
# Inks List -----------------------------------------------------------
inks_list_sublayout = QVBoxLayout()
inks_list_sublayout.setAlignment(Qt.AlignTop)
inks_list_sublayout.setContentsMargins(0, 0, 0, 0)
inks_list_sublayout.addWidget(QLabel("Inks"))
inks_list_sublayout.addWidget(self._inksListWidget)
inks_control_panel_layout.addLayout(inks_list_sublayout)
# Inks Options --------------------------------------------------------
inks_options_sublayout = QVBoxLayout()
inks_options_sublayout.setAlignment(Qt.AlignTop)
inks_control_panel_layout.addLayout(inks_options_sublayout)
self._inksOptionsPanel = QStackedWidget()
inks_options_sublayout.addWidget(QLabel("Ink Options"))
inks_options_sublayout.addWidget(self._inksOptionsPanel)
self._toolbarSubPanel.addWidget(inks_control_panel)
# ---------------------------------------------------------------------
self._layout.addWidget(self._toolbarSubPanel)
self._toolbarSubPanel.setVisible(False)
def _build_tool_options_pane(self, tool):
pane = QWidget()
pane_layout = QVBoxLayout()
pane_layout.setAlignment(Qt.AlignTop)
pane.setLayout(pane_layout)
for prop in tool.properties.values():
field_layout = QHBoxLayout()
field_layout.addWidget(QLabel(prop.description))
prop_widget = prop.build_property_widget()
field_layout.addWidget(prop_widget)
pane_layout.addLayout(field_layout)
self._toolsOptionsPanel.addWidget(pane)
def _build_ink_options_pane(self, ink):
pane = QWidget()
pane_layout = QVBoxLayout()
pane_layout.setAlignment(Qt.AlignTop)
pane.setLayout(pane_layout)
for prop in ink.properties.values():
field_layout = QHBoxLayout()
field_layout.addWidget(QLabel(prop.description))
prop_widget = prop.build_property_widget()
field_layout.addWidget(prop_widget)
pane_layout.addLayout(field_layout)
self._inksOptionsPanel.addWidget(pane)
def _select_tool_on_list(self, tool_name):
tool_list_item = \
self._toolsListWidget.findItems(tool_name, Qt.MatchExactly)[0]
if tool_list_item is not None:
self._toolsListWidget.setCurrentItem(tool_list_item)
def _select_ink_on_list(self, ink_name):
ink_list_item = \
self._inksListWidget.findItems(ink_name, Qt.MatchExactly)[0]
if ink_list_item is not None:
self._inksListWidget.setCurrentItem(ink_list_item)
def _toggle_edit_mode(self):
if not self._editMode:
self._show_sub_panel()
else:
self._hide_sub_panel()
self.update()
def _show_sub_panel(self):
self._editMode = True
self.resize(self.width(), 300)
self._toolbarSubPanel.setVisible(True)
def _hide_sub_panel(self):
self._editMode = False
self.resize(self.width(), 50)
self._toolbarSubPanel.setVisible(False)
self._finish_ink_edit_mode()
def _finish_ink_edit_mode(self):
if self._currentEditedInkSlot is not None:
self._inksButtonGroup.button(self._currentEditedInkSlot). \
setStyleSheet("border-color: rgb(56,56,56);")
self._currentEditedInkSlot = None
self._previousEditedInkSlot = None
self._inksListWidget.setCurrentRow(0)
self._toolbarSubPanel.setCurrentIndex(0)
# -------------------------------------------------------------------------
def mousePressEvent(self, e):
self._toggle_edit_mode()
e.accept()
def wheelEvent(self, e):
e.accept()
def enterEvent(self, e):
self.mouseEntered.emit()
self.setCursor(Qt.PointingHandCursor)
def leaveEvent(self, e):
self.mouseLeft.emit()
def _on_tool_slot_triggered(self):
self._toolbarSubPanel.setCurrentIndex(0)
triggered_slot = self._toolsButtonGroup.checkedId()
if self._currentEditedInkSlot is not None:
self._finish_ink_edit_mode()
self.switch_tool_slot(triggered_slot)
self.update()
def _on_ink_slot_triggered(self, slot_button):
if not self._editMode:
self._show_sub_panel()
triggered_slot_id = self._inksButtonGroup.id(slot_button)
if triggered_slot_id != self._currentEditedInkSlot:
self._previousEditedInkSlot = self._currentEditedInkSlot
self._currentEditedInkSlot = triggered_slot_id
if self._previousEditedInkSlot is not None:
self._inksButtonGroup. \
button(self._previousEditedInkSlot). \
setStyleSheet("border-color: rgb(56,56,56);")
slot_button.setStyleSheet("border-color: rgb(255,0,0);")
self._toolbarSubPanel.setCurrentIndex(1)
ink_name = self._inkSlots[triggered_slot_id]['id']
self._select_ink_on_list(ink_name)
if triggered_slot_id == 0:
self.primaryInkChanged.emit(ink_name)
elif triggered_slot_id == 1:
self.secondaryInkChanged.emit(ink_name)
else:
self._hide_sub_panel()
def _on_tool_list_item_clicked(self, new_item):
new_item_name = new_item.text()
self._assign_tool_to_slot(self.get_tool_by_name(new_item_name),
self._currentActiveToolSlot)
self.toolChanged.emit(new_item_name)
self._toolbarSubPanel.update()
def _on_ink_list_item_clicked(self, item):
item_name = item.text()
ink = self._registeredInks[item_name]
if ink is not None:
self._assign_ink_to_slot(ink, self._currentEditedInkSlot)
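# --- Illustrative usage sketch (not part of the original module) ---------------
# Assumes a QApplication is already running and that `pencil_tool`, `solid_ink`
# and `eraser_ink` are hypothetical stand-ins exposing the attributes ToolBar
# actually reads: .name, .icon and .properties.
def _example_toolbar_setup(pencil_tool, solid_ink, eraser_ink):
    toolbar = ToolBar()
    # The first registered tool can be flagged as the default for tool slot 0.
    toolbar.register_tool(pencil_tool, is_default=True)
    # Ink slots 0 and 1 correspond to the primary and secondary mouse buttons.
    toolbar.register_ink(solid_ink, slot=0)
    toolbar.register_ink(eraser_ink, slot=1)
    # React to tool switches triggered from the toolbar UI.
    toolbar.toolChanged.connect(print)
    return toolbar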
|
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./setup_marathon_job.py <service.instance> [options]
Deploy a service instance to Marathon from a configuration file.
Attempts to load the marathon configuration at
/etc/paasta/marathon.json, and read
from the soa_dir /nail/etc/services by default.
This script will attempt to load a service's configuration
from the soa_dir and generate a marathon job configuration for it,
as well as handle deploying that configuration with a bounce strategy
if there's an old version of the service. To determine whether or not
a deployment is 'old', each marathon job has a complete id of
service.instance.configuration_hash, where configuration_hash
is an MD5 hash of the configuration dict to be sent to marathon (without
the configuration_hash in the id field, of course; we change that after
the hash is calculated).
The script will emit a sensu event based on how the deployment went:
if something went wrong, it will alert the team responsible for the service
(as defined in that service's monitoring.yaml), and it will send resolves
when the deployment completes successfully.
Command line options:
- -d <SOA_DIR>, --soa-dir <SOA_DIR>: Specify a SOA config dir to read from
- -v, --verbose: Verbose output
"""
import argparse
import asyncio
import logging
import sys
import traceback
from collections import defaultdict
from typing import Any
from typing import Callable
from typing import Collection
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
import a_sync
import pysensu_yelp
import requests_cache
from marathon.exceptions import MarathonHttpError
from marathon.models.app import MarathonApp
from marathon.models.app import MarathonTask
from mypy_extensions import Arg
from mypy_extensions import DefaultNamedArg
from requests.exceptions import HTTPError
from requests.exceptions import ReadTimeout
from paasta_tools import bounce_lib
from paasta_tools import drain_lib
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.marathon_tools import get_num_at_risk_tasks
from paasta_tools.marathon_tools import kill_given_tasks
from paasta_tools.marathon_tools import MarathonClient
from paasta_tools.mesos.exceptions import NoSlavesAvailableError
from paasta_tools.mesos_maintenance import get_draining_hosts
from paasta_tools.mesos_maintenance import reserve_all_resources
from paasta_tools.utils import _log
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import NoDockerImageError
from paasta_tools.utils import SPACER
from paasta_tools.utils import SystemPaastaConfig
# Marathon REST API:
# https://github.com/mesosphere/marathon/blob/master/REST.md#post-v2apps
log = logging.getLogger(__name__)
LogDeployError = Callable[[Arg(str, 'errormsg'), DefaultNamedArg(str, 'level')], None]
LogBounceAction = Callable[[Arg(str, 'line'), DefaultNamedArg(str, 'level')], None]
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Creates marathon jobs.')
parser.add_argument(
'service_instance_list', nargs='+',
help="The list of marathon service instances to create or update",
metavar="SERVICE%sINSTANCE" % SPACER,
)
parser.add_argument(
'-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=marathon_tools.DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
'-v', '--verbose', action='store_true',
dest="verbose", default=False,
)
args = parser.parse_args()
return args
def send_event(name: str, instance: str, soa_dir: str, status: int, output: str) -> None:
"""Send an event to sensu via pysensu_yelp with the given information.
:param name: The service name the event is about
:param instance: The instance of the service the event is about
:param soa_dir: The service directory to read monitoring information from
:param status: The status to emit for this event
:param output: The output to emit for this event
"""
cluster = load_system_paasta_config().get_cluster()
monitoring_overrides = marathon_tools.load_marathon_service_config(
name,
instance,
cluster,
soa_dir=soa_dir,
load_deployments=False,
).get_monitoring()
# In order to let sensu know how often to expect this check to fire,
# we need to set the ``check_every`` to the frequency of our cron job, which
# is 10s.
monitoring_overrides['check_every'] = '10s'
# Most setup_marathon_job failures are transient and represent issues
# that will probably be fixed eventually, so we set an alert_after
# to suppress extra noise
monitoring_overrides['alert_after'] = '10m'
check_name = 'setup_marathon_job.%s' % compose_job_id(name, instance)
monitoring_tools.send_event(name, check_name, monitoring_overrides, status, output, soa_dir)
def drain_tasks_and_find_tasks_to_kill(
tasks_to_drain: Collection[Tuple[MarathonTask, MarathonClient]],
already_draining_tasks: Collection[Tuple[MarathonTask, MarathonClient]],
drain_method: drain_lib.DrainMethod,
log_bounce_action: LogBounceAction,
bounce_method: str,
at_risk_tasks: Collection[Tuple[MarathonTask, MarathonClient]],
) -> Set[Tuple[MarathonTask, MarathonClient]]:
"""Drain the tasks_to_drain, and return the set of tasks that are safe to kill."""
all_draining_tasks: Set[Tuple[MarathonTask, MarathonClient]] = set(already_draining_tasks) | set(at_risk_tasks)
tasks_to_kill: Set[Tuple[MarathonTask, MarathonClient]] = set()
if len(tasks_to_drain) > 0:
tasks_to_drain_by_app_id: Dict[str, Set[MarathonTask]] = defaultdict(set)
for task, client in tasks_to_drain:
tasks_to_drain_by_app_id[task.app_id].add(task)
for app_id, tasks in tasks_to_drain_by_app_id.items():
log_bounce_action(
line='%s bounce draining %d old tasks with app_id %s' %
(bounce_method, len(tasks), app_id),
)
async def drain_and_kill_if_draining_fails(task: MarathonTask, client: MarathonClient) -> None:
all_draining_tasks.add((task, client))
if task.state == 'TASK_UNREACHABLE':
return
try:
await drain_method.drain(task)
except Exception:
log_bounce_action(
line=f"{bounce_method} bounce killing task {task.id} "
f"due to exception when draining: {traceback.format_exc()}",
)
tasks_to_kill.add((task, client))
if tasks_to_drain:
a_sync.block(
asyncio.wait,
[asyncio.ensure_future(drain_and_kill_if_draining_fails(t, c)) for t, c in tasks_to_drain],
)
async def add_to_tasks_to_kill_if_safe_to_kill(task: MarathonTask, client: MarathonClient) -> None:
try:
if task.state != 'TASK_RUNNING' or await drain_method.is_safe_to_kill(task):
tasks_to_kill.add((task, client))
log_bounce_action(
line='%s bounce killing not_running or drained task %s %s' % (
bounce_method, task.id, task.state,
),
)
except Exception:
tasks_to_kill.add((task, client))
log_bounce_action(
line=f'{bounce_method} bounce killing task {task.id} '
f'due to exception in is_safe_to_kill: {traceback.format_exc()}',
)
if all_draining_tasks:
a_sync.block(
asyncio.wait,
[asyncio.ensure_future(add_to_tasks_to_kill_if_safe_to_kill(t, c)) for t, c in all_draining_tasks],
)
return tasks_to_kill
def old_app_tasks_to_task_client_pairs(
old_app_tasks: Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
) -> Set[Tuple[MarathonTask, MarathonClient]]:
ret: Set[Tuple[MarathonTask, MarathonClient]] = set()
for (app, client), tasks in old_app_tasks.items():
for task in tasks:
ret.add((task, client))
return ret
def do_bounce(
bounce_func: bounce_lib.BounceMethod,
drain_method: drain_lib.DrainMethod,
config: marathon_tools.FormattedMarathonAppDict,
new_app_running: bool,
happy_new_tasks: List[Tuple[MarathonTask, MarathonClient]],
old_app_live_happy_tasks: Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
old_app_live_unhappy_tasks: Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
old_app_draining_tasks: Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
old_app_at_risk_tasks: Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
service: str,
bounce_method: str,
serviceinstance: str,
cluster: str,
instance: str,
marathon_jobid: str,
clients: marathon_tools.MarathonClients,
soa_dir: str,
job_config: marathon_tools.MarathonServiceConfig,
bounce_margin_factor: float=1.0,
) -> Optional[float]:
def log_bounce_action(line: str, level: str='debug') -> None:
return _log(
service=service,
line=line,
component='deploy',
level=level,
cluster=cluster,
instance=instance,
)
# log if we're not in a steady state.
if any([
(not new_app_running),
old_app_live_happy_tasks.keys(),
]):
log_bounce_action(
line=' '.join([
'%s bounce in progress on %s.' % (bounce_method, serviceinstance),
'New marathon app %s %s.' % (marathon_jobid, ('exists' if new_app_running else 'not created yet')),
'%d new tasks to bring up.' % (config['instances'] - len(happy_new_tasks)),
'%d old tasks receiving traffic and happy.' % len(bounce_lib.flatten_tasks(old_app_live_happy_tasks)),
'%d old tasks unhappy.' % len(bounce_lib.flatten_tasks(old_app_live_unhappy_tasks)),
'%d old tasks draining.' % len(bounce_lib.flatten_tasks(old_app_draining_tasks)),
'%d old tasks at risk.' % len(bounce_lib.flatten_tasks(old_app_at_risk_tasks)),
'%d old apps.' % len(old_app_live_happy_tasks.keys()),
]),
level='event',
)
else:
log.debug("Nothing to do, bounce is in a steady state")
new_client = clients.get_current_client_for_service(job_config)
old_non_draining_tasks = list(
old_app_tasks_to_task_client_pairs(old_app_live_happy_tasks),
) + list(
old_app_tasks_to_task_client_pairs(old_app_live_unhappy_tasks),
) + list(
old_app_tasks_to_task_client_pairs(old_app_at_risk_tasks),
)
actions = bounce_func(
new_config=config,
new_app_running=new_app_running,
happy_new_tasks=happy_new_tasks,
old_non_draining_tasks=old_non_draining_tasks,
margin_factor=bounce_margin_factor,
)
if actions['create_app'] and not new_app_running:
log_bounce_action(
line='%s bounce creating new app with app_id %s' % (bounce_method, marathon_jobid),
)
with requests_cache.disabled():
try:
bounce_lib.create_marathon_app(
app_id=marathon_jobid,
config=config,
client=new_client,
)
except MarathonHttpError as e:
if e.status_code == 409:
log.warning(
"Failed to create, app %s already exists. This means another bounce beat us to it."
" Skipping the rest of the bounce for this run" % marathon_jobid,
)
return 60
raise
tasks_to_kill = drain_tasks_and_find_tasks_to_kill(
tasks_to_drain=actions['tasks_to_drain'],
already_draining_tasks=old_app_tasks_to_task_client_pairs(old_app_draining_tasks),
drain_method=drain_method,
log_bounce_action=log_bounce_action,
bounce_method=bounce_method,
at_risk_tasks=old_app_tasks_to_task_client_pairs(old_app_at_risk_tasks),
)
tasks_to_kill_by_client: Dict[MarathonClient, List[MarathonTask]] = defaultdict(list)
for task, client in tasks_to_kill:
tasks_to_kill_by_client[client].append(task)
for client, tasks in tasks_to_kill_by_client.items():
kill_given_tasks(client=client, task_ids=[task.id for task in tasks], scale=True)
for task in bounce_lib.flatten_tasks(old_app_at_risk_tasks):
if task in tasks_to_kill:
hostname = task.host
try:
reserve_all_resources([hostname])
except HTTPError:
log.warning("Failed to reserve resources on %s" % hostname)
apps_to_kill: List[Tuple[str, MarathonClient]] = []
for app, client in old_app_live_happy_tasks.keys():
if app != '/%s' % marathon_jobid or client != new_client:
live_happy_tasks = old_app_live_happy_tasks[(app, client)]
live_unhappy_tasks = old_app_live_unhappy_tasks[(app, client)]
draining_tasks = old_app_draining_tasks[(app, client)]
at_risk_tasks = old_app_at_risk_tasks[(app, client)]
remaining_tasks = (live_happy_tasks | live_unhappy_tasks | draining_tasks | at_risk_tasks)
for task, _ in tasks_to_kill:
remaining_tasks.discard(task)
if not remaining_tasks:
apps_to_kill.append((app, client))
if apps_to_kill:
log_bounce_action(
line='%s bounce removing old unused apps with app_ids: %s' %
(
bounce_method,
', '.join([app for app, client in apps_to_kill]),
),
)
with requests_cache.disabled():
for app_id, client in apps_to_kill:
bounce_lib.kill_old_ids([app_id], client)
all_old_tasks: Set[MarathonTask] = set()
all_old_tasks = set.union(all_old_tasks, *old_app_live_happy_tasks.values())
all_old_tasks = set.union(all_old_tasks, *old_app_live_unhappy_tasks.values())
all_old_tasks = set.union(all_old_tasks, *old_app_draining_tasks.values())
all_old_tasks = set.union(all_old_tasks, *old_app_at_risk_tasks.values())
if all_old_tasks or (not new_app_running):
# Still have more work to do, try again in 60 seconds
return 60
else:
# log if we appear to be finished
if all([
(apps_to_kill or tasks_to_kill),
apps_to_kill == list(old_app_live_happy_tasks),
tasks_to_kill == all_old_tasks,
]):
log_bounce_action(
line='%s bounce on %s finishing. Now running %s' %
(
bounce_method,
serviceinstance,
marathon_jobid,
),
level='event',
)
return None
TasksByStateDict = Dict[str, Set[MarathonTask]]
def get_tasks_by_state_for_app(
app: MarathonApp,
drain_method: drain_lib.DrainMethod,
service: str,
nerve_ns: str,
bounce_health_params: Dict[str, Any],
system_paasta_config: SystemPaastaConfig,
log_deploy_error: LogDeployError,
draining_hosts: Collection[str],
) -> TasksByStateDict:
tasks_by_state: TasksByStateDict = {
'happy': set(),
'unhappy': set(),
'draining': set(),
'at_risk': set(),
}
happy_tasks = bounce_lib.get_happy_tasks(app, service, nerve_ns, system_paasta_config, **bounce_health_params)
async def categorize_task(task: MarathonTask) -> None:
try:
is_draining = await drain_method.is_draining(task)
except Exception:
log_deploy_error(
f"Ignoring exception during is_draining of task {task.id}: "
f"{traceback.format_exc()}. Treating task as 'unhappy'.",
)
state = 'unhappy'
else:
if is_draining is True:
state = 'draining'
elif task in happy_tasks:
if task.host in draining_hosts:
state = 'at_risk'
else:
state = 'happy'
else:
state = 'unhappy'
tasks_by_state[state].add(task)
if app.tasks:
a_sync.block(
asyncio.wait,
[asyncio.ensure_future(categorize_task(task)) for task in app.tasks],
)
return tasks_by_state
def get_tasks_by_state(
other_apps_with_clients: Collection[Tuple[MarathonApp, MarathonClient]],
drain_method: drain_lib.DrainMethod,
service: str,
nerve_ns: str,
bounce_health_params: Dict[str, Any],
system_paasta_config: SystemPaastaConfig,
log_deploy_error: LogDeployError,
draining_hosts: Collection[str],
) -> Tuple[
Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
Dict[Tuple[str, MarathonClient], Set[MarathonTask]],
]:
"""Split tasks from old apps into 4 categories:
- live (not draining) and happy (according to get_happy_tasks)
- live (not draining) and unhappy
- draining
- at-risk (running on a host marked draining in Mesos in preparation for maintenance)
"""
old_app_live_happy_tasks = {}
old_app_live_unhappy_tasks = {}
old_app_draining_tasks = {}
old_app_at_risk_tasks = {}
for app, client in other_apps_with_clients:
tasks_by_state = get_tasks_by_state_for_app(
app=app,
drain_method=drain_method,
service=service,
nerve_ns=nerve_ns,
bounce_health_params=bounce_health_params,
system_paasta_config=system_paasta_config,
log_deploy_error=log_deploy_error,
draining_hosts=draining_hosts,
)
old_app_live_happy_tasks[(app.id, client)] = tasks_by_state['happy']
old_app_live_unhappy_tasks[(app.id, client)] = tasks_by_state['unhappy']
old_app_draining_tasks[(app.id, client)] = tasks_by_state['draining']
old_app_at_risk_tasks[(app.id, client)] = tasks_by_state['at_risk']
return old_app_live_happy_tasks, old_app_live_unhappy_tasks, old_app_draining_tasks, old_app_at_risk_tasks
def undrain_tasks(
to_undrain: Collection[MarathonTask],
leave_draining: Collection[MarathonTask],
drain_method: drain_lib.DrainMethod,
log_deploy_error: LogDeployError,
) -> None:
# If any tasks on the new app happen to be draining (e.g. someone reverts to an older version with
# `paasta mark-for-deployment`), then we should undrain them.
async def undrain_task(task: MarathonTask) -> None:
if task not in leave_draining:
if task.state == 'TASK_UNREACHABLE':
return
try:
await drain_method.stop_draining(task)
except Exception:
log_deploy_error(f"Ignoring exception during stop_draining of task {task.id}: {traceback.format_exc()}")
if to_undrain:
a_sync.block(
asyncio.wait,
[asyncio.ensure_future(undrain_task(task)) for task in to_undrain],
)
def deploy_service(
service: str,
instance: str,
marathon_jobid: str,
config: marathon_tools.FormattedMarathonAppDict,
clients: marathon_tools.MarathonClients,
marathon_apps_with_clients: Collection[Tuple[MarathonApp, MarathonClient]],
bounce_method: str,
drain_method_name: str,
drain_method_params: Dict[str, Any],
nerve_ns: str,
bounce_health_params: Dict[str, Any],
soa_dir: str,
job_config: marathon_tools.MarathonServiceConfig,
bounce_margin_factor: float=1.0,
) -> Tuple[int, str, Optional[float]]:
"""Deploy the service to marathon, either directly or via a bounce if needed.
Called by setup_service when it's time to actually deploy.
:param service: The name of the service to deploy
:param instance: The instance of the service to deploy
:param marathon_jobid: Full id of the marathon job
:param config: The complete configuration dict to send to marathon
:param clients: A MarathonClients object
:param bounce_method: The bounce method to use, if needed
:param drain_method_name: The name of the traffic draining method to use.
:param nerve_ns: The nerve namespace to look in.
:param bounce_health_params: A dictionary of options for bounce_lib.get_happy_tasks.
:param bounce_margin_factor: the multiplication factor used to calculate the number of instances to be drained
:returns: A tuple of (status, output, bounce_in_seconds) to be used with send_sensu_event"""
def log_deploy_error(errormsg: str, level: str='event') -> None:
return _log(
service=service,
line=errormsg,
component='deploy',
level=level,
cluster=cluster,
instance=instance,
)
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
existing_apps_with_clients = marathon_tools.get_matching_apps_with_clients(
service=service,
instance=instance,
marathon_apps_with_clients=marathon_apps_with_clients,
)
new_client = clients.get_current_client_for_service(job_config)
new_apps_with_clients_list: List[Tuple[MarathonApp, MarathonClient]] = []
other_apps_with_clients: List[Tuple[MarathonApp, MarathonClient]] = []
for a, c in existing_apps_with_clients:
if a.id == '/%s' % config['id'] and c == new_client:
new_apps_with_clients_list.append((a, c))
else:
other_apps_with_clients.append((a, c))
serviceinstance = "%s.%s" % (service, instance)
if new_apps_with_clients_list:
new_app, new_client = new_apps_with_clients_list[0]
if len(new_apps_with_clients_list) != 1:
raise ValueError("Only expected one app per ID per shard; found %d" % len(new_apps_with_clients_list))
new_app_running = True
happy_new_tasks = bounce_lib.get_happy_tasks(
new_app, service, nerve_ns, system_paasta_config,
**bounce_health_params,
)
else:
new_app_running = False
happy_new_tasks = []
try:
drain_method = drain_lib.get_drain_method(
drain_method_name,
service=service,
instance=instance,
nerve_ns=nerve_ns,
drain_method_params=drain_method_params,
)
except KeyError:
errormsg = 'ERROR: drain_method not recognized: %s. Must be one of (%s)' % \
(drain_method_name, ', '.join(drain_lib.list_drain_methods()))
log_deploy_error(errormsg)
return (1, errormsg, None)
try:
draining_hosts = get_draining_hosts()
except ReadTimeout as e:
errormsg = "ReadTimeout encountered trying to get draining hosts: %s" % e
return (1, errormsg, 60)
(
old_app_live_happy_tasks,
old_app_live_unhappy_tasks,
old_app_draining_tasks,
old_app_at_risk_tasks,
) = get_tasks_by_state(
other_apps_with_clients=other_apps_with_clients,
drain_method=drain_method,
service=service,
nerve_ns=nerve_ns,
bounce_health_params=bounce_health_params,
system_paasta_config=system_paasta_config,
log_deploy_error=log_deploy_error,
draining_hosts=draining_hosts,
)
# The first thing we need to do is take up the "slack" of old apps, to stop
# them from launching new tasks that we would just end up draining
# and killing anyway.
for a, c in other_apps_with_clients:
marathon_tools.take_up_slack(app=a, client=c)
num_at_risk_tasks = 0
if new_app_running:
num_at_risk_tasks = get_num_at_risk_tasks(new_app, draining_hosts=draining_hosts)
if new_app.instances < config['instances'] + num_at_risk_tasks:
log.info("Scaling %s up from %d to %d instances." %
(new_app.id, new_app.instances, config['instances'] + num_at_risk_tasks))
new_client.scale_app(app_id=new_app.id, instances=config['instances'] + num_at_risk_tasks, force=True)
# If we have more than the specified number of instances running, we will want to drain some of them.
# We will start by draining any tasks running on at-risk hosts.
elif new_app.instances > config['instances']:
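# Number of excess tasks to hand over to the old-task bookkeeping below:
# capped by the number of tasks that have actually launched, and never so
# many that we would dip below the configured instance count.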
num_tasks_to_scale = max(min(len(new_app.tasks), new_app.instances) - config['instances'], 0)
task_dict = get_tasks_by_state_for_app(
app=new_app,
drain_method=drain_method,
service=service,
nerve_ns=nerve_ns,
bounce_health_params=bounce_health_params,
system_paasta_config=system_paasta_config,
log_deploy_error=log_deploy_error,
draining_hosts=draining_hosts,
)
scaling_app_happy_tasks = list(task_dict['happy'])
scaling_app_unhappy_tasks = list(task_dict['unhappy'])
scaling_app_draining_tasks = list(task_dict['draining'])
scaling_app_at_risk_tasks = list(task_dict['at_risk'])
tasks_to_move_draining = min(len(scaling_app_draining_tasks), num_tasks_to_scale)
old_app_draining_tasks[(new_app.id, new_client)] = set(scaling_app_draining_tasks[:tasks_to_move_draining])
num_tasks_to_scale = num_tasks_to_scale - tasks_to_move_draining
tasks_to_move_unhappy = min(len(scaling_app_unhappy_tasks), num_tasks_to_scale)
old_app_live_unhappy_tasks[(new_app.id, new_client)] = set(
scaling_app_unhappy_tasks[:tasks_to_move_unhappy],
)
num_tasks_to_scale = num_tasks_to_scale - tasks_to_move_unhappy
tasks_to_move_at_risk = min(len(scaling_app_at_risk_tasks), num_tasks_to_scale)
old_app_at_risk_tasks[(new_app.id, new_client)] = set(scaling_app_at_risk_tasks[:tasks_to_move_at_risk])
num_tasks_to_scale = num_tasks_to_scale - tasks_to_move_at_risk
tasks_to_move_happy = min(len(scaling_app_happy_tasks), num_tasks_to_scale)
old_app_live_happy_tasks[(new_app.id, new_client)] = set(scaling_app_happy_tasks[:tasks_to_move_happy])
happy_new_tasks = scaling_app_happy_tasks[tasks_to_move_happy:]
# slack represents the extra instances that are configured in marathon but
# don't have a launched task yet. When scaling down we want to reduce this
# slack so marathon doesn't get a chance to launch a new task in that space
# that we will then have to drain and kill again.
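# Illustrative numbers: if the app is configured with instances=10 in marathon
# but only 7 tasks have actually launched, taking up the slack scales it to 7
# so marathon cannot start 3 fresh tasks that this bounce would immediately
# have to drain and kill.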
marathon_tools.take_up_slack(client=new_client, app=new_app)
# TODO: don't take actions in deploy_service.
undrain_tasks(
to_undrain=new_app.tasks,
leave_draining=old_app_draining_tasks.get((new_app.id, new_client), []),
drain_method=drain_method,
log_deploy_error=log_deploy_error,
)
# log all uncaught exceptions and raise them again
try:
try:
bounce_func = bounce_lib.get_bounce_method_func(bounce_method)
except KeyError:
errormsg = 'ERROR: bounce_method not recognized: %s. Must be one of (%s)' % \
(bounce_method, ', '.join(bounce_lib.list_bounce_methods()))
log_deploy_error(errormsg)
return (1, errormsg, None)
bounce_again_in_seconds = do_bounce(
bounce_func=bounce_func,
drain_method=drain_method,
config=config,
new_app_running=new_app_running,
happy_new_tasks=happy_new_tasks,
old_app_live_happy_tasks=old_app_live_happy_tasks,
old_app_live_unhappy_tasks=old_app_live_unhappy_tasks,
old_app_draining_tasks=old_app_draining_tasks,
old_app_at_risk_tasks=old_app_at_risk_tasks,
service=service,
bounce_method=bounce_method,
serviceinstance=serviceinstance,
cluster=cluster,
instance=instance,
marathon_jobid=marathon_jobid,
clients=clients,
soa_dir=soa_dir,
job_config=job_config,
bounce_margin_factor=bounce_margin_factor,
)
except bounce_lib.LockHeldException:
logline = 'Failed to get lock to create marathon app for %s.%s' % (service, instance)
log_deploy_error(logline, level='debug')
return (0, "Couldn't get marathon lock, skipping until next time", None)
except Exception:
logline = 'Exception raised during deploy of service %s:\n%s' % (service, traceback.format_exc())
log_deploy_error(logline, level='debug')
raise
if num_at_risk_tasks:
bounce_again_in_seconds = 60
elif new_app_running:
if new_app.instances > config['instances']:
bounce_again_in_seconds = 60
return (0, 'Service deployed.', bounce_again_in_seconds)
def setup_service(
service: str,
instance: str,
clients: marathon_tools.MarathonClients,
job_config: marathon_tools.MarathonServiceConfig,
marathon_apps_with_clients: Collection[Tuple[MarathonApp, MarathonClient]],
soa_dir: str,
) -> Tuple[int, str, Optional[float]]:
"""Setup the service instance given and attempt to deploy it, if possible.
Doesn't do anything if the service is already in Marathon and hasn't changed.
If it's not, attempt to find old instances of the service and bounce them.
:param service: The service name to setup
:param instance: The instance of the service to setup
:param clients: A MarathonClients object
:param job_config: The service instance's configuration dict
:returns: A tuple of (status, output, bounce_in_seconds) to be used with send_sensu_event"""
log.info("Setting up instance %s for service %s", instance, service)
try:
marathon_app_dict = job_config.format_marathon_app_dict()
except NoDockerImageError:
error_msg = (
"Docker image for {0}.{1} not in deployments.json. Exiting. Has Jenkins deployed it?\n"
).format(
service,
instance,
)
log.error(error_msg)
return (1, error_msg, None)
full_id = marathon_app_dict['id']
service_namespace_config = marathon_tools.load_service_namespace_config(
service=service, namespace=job_config.get_nerve_namespace(), soa_dir=soa_dir,
)
log.info("Desired Marathon instance id: %s", full_id)
return deploy_service(
service=service,
instance=instance,
marathon_jobid=full_id,
config=marathon_app_dict,
clients=clients,
marathon_apps_with_clients=marathon_apps_with_clients,
bounce_method=job_config.get_bounce_method(),
drain_method_name=job_config.get_drain_method(service_namespace_config),
drain_method_params=job_config.get_drain_method_params(service_namespace_config),
nerve_ns=job_config.get_nerve_namespace(),
bounce_health_params=job_config.get_bounce_health_params(service_namespace_config),
soa_dir=soa_dir,
job_config=job_config,
bounce_margin_factor=job_config.get_bounce_margin_factor(),
)
def main() -> None:
"""Attempt to set up a list of marathon service instances given.
Exits 1 if any service.instance deployment failed.
This is done in the following order:
- Load the marathon configuration
- Connect to marathon
- Do the following for each service.instance:
- Load the service instance's configuration
- Create the complete marathon job configuration
- Deploy/bounce the service
- Emit an event about the deployment to sensu"""
args = parse_args()
soa_dir = args.soa_dir
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
# Setting up transparent cache for http API calls
requests_cache.install_cache("setup_marathon_jobs", backend="memory")
system_paasta_config = load_system_paasta_config()
clients = marathon_tools.get_marathon_clients(marathon_tools.get_marathon_servers(system_paasta_config))
unique_clients = clients.get_all_clients()
marathon_apps_with_clients = marathon_tools.get_marathon_apps_with_clients(unique_clients, embed_tasks=True)
num_failed_deployments = 0
for service_instance in args.service_instance_list:
try:
service, instance, _, __ = decompose_job_id(service_instance)
except InvalidJobNameError:
log.error("Invalid service instance specified. Format is service%sinstance." % SPACER)
num_failed_deployments = num_failed_deployments + 1
else:
if deploy_marathon_service(service, instance, clients, soa_dir, marathon_apps_with_clients)[0]:
num_failed_deployments = num_failed_deployments + 1
requests_cache.uninstall_cache()
log.debug("%d out of %d service.instances failed to deploy." %
(num_failed_deployments, len(args.service_instance_list)))
sys.exit(1 if num_failed_deployments else 0)
def deploy_marathon_service(
service: str,
instance: str,
clients: marathon_tools.MarathonClients,
soa_dir: str,
marathon_apps_with_clients: Optional[Collection[Tuple[MarathonApp, MarathonClient]]],
) -> Tuple[int, Optional[float]]:
"""deploy the service instance given and proccess return code
if there was an error we send a sensu alert.
:param service: The service name to setup
:param instance: The instance of the service to setup
:param clients: A MarathonClients object
:param soa_dir: Path to yelpsoa configs
:param marathon_apps_with_clients: A list of all (MarathonApp, MarathonClient) tuples
:returns: A tuple of (status, bounce_in_seconds) to be used by paasta-deployd
bounce_in_seconds indicates how long deployd should wait before trying another bounce
None means that it is in a steady state and doesn't need to bounce again
"""
short_id = marathon_tools.format_job_id(service, instance)
try:
with bounce_lib.bounce_lock_zookeeper(short_id):
try:
service_instance_config = marathon_tools.load_marathon_service_config_no_cache(
service,
instance,
load_system_paasta_config().get_cluster(),
soa_dir=soa_dir,
)
except NoDeploymentsAvailable:
log.debug("No deployments found for %s.%s in cluster %s. Skipping." %
(service, instance, load_system_paasta_config().get_cluster()))
return 0, None
except NoConfigurationForServiceError:
error_msg = "Could not read marathon configuration file for %s.%s in cluster %s" % \
(service, instance, load_system_paasta_config().get_cluster())
log.error(error_msg)
return 1, None
if marathon_apps_with_clients is None:
marathon_apps_with_clients = marathon_tools.get_marathon_apps_with_clients(
clients=clients.get_all_clients_for_service(job_config=service_instance_config),
embed_tasks=True,
)
try:
with a_sync.idle_event_loop():
status, output, bounce_again_in_seconds = setup_service(
service=service,
instance=instance,
clients=clients,
job_config=service_instance_config,
marathon_apps_with_clients=marathon_apps_with_clients,
soa_dir=soa_dir,
)
sensu_status = pysensu_yelp.Status.CRITICAL if status else pysensu_yelp.Status.OK
send_event(service, instance, soa_dir, sensu_status, output)
return 0, bounce_again_in_seconds
except (KeyError, TypeError, AttributeError, InvalidInstanceConfig, NoSlavesAvailableError):
error_str = traceback.format_exc()
log.error(error_str)
send_event(service, instance, soa_dir, pysensu_yelp.Status.CRITICAL, error_str)
return 1, None
except bounce_lib.LockHeldException:
log.error("Instance %s already being bounced. Exiting", short_id)
return 0, None
if __name__ == "__main__":
main()
|
|
import os
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy import stats
from scipy.optimize import differential_evolution
from .test_continuous_basic import distcont
from scipy.stats._distr_params import distdiscrete
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
fit_sizes = [1000, 5000, 10000] # sample sizes to try
thresh_percent = 0.25 # fraction of the true parameter used as the fail cut-off
thresh_min = 0.75 # minimum absolute difference (estimate - true) needed to fail
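# Worked example of the cut-off below: for a true shape value of 2.0 the
# allowed deviation is max(0.25 * 2.0, 0.75) = 0.75, so an estimate of 2.6
# passes while an estimate of 2.9 fails.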
mle_failing_fits = [
'burr',
'chi2',
'gausshyper',
'genexpon',
'gengamma',
'kappa4',
'ksone',
'kstwo',
'mielke',
'ncf',
'ncx2',
'pearson3',
'powerlognorm',
'truncexpon',
'tukeylambda',
'vonmises',
'levy_stable',
'trapezoid',
'truncweibull_min',
'studentized_range',
]
mm_failing_fits = ['alpha', 'betaprime', 'burr', 'burr12', 'cauchy', 'chi',
'chi2', 'crystalball', 'dgamma', 'dweibull', 'f',
'fatiguelife', 'fisk', 'foldcauchy', 'genextreme',
'gengamma', 'genhyperbolic', 'gennorm', 'genpareto',
'halfcauchy', 'invgamma', 'invweibull', 'johnsonsu',
'kappa3', 'ksone', 'kstwo', 'levy', 'levy_l',
'levy_stable', 'loglaplace', 'lomax', 'mielke', 'nakagami',
'ncf', 'nct', 'ncx2', 'pareto', 'powerlognorm', 'powernorm',
'skewcauchy', 't', 'trapezoid', 'triang',
'truncweibull_min', 'tukeylambda', 'studentized_range']
# not sure if these fail, but they caused my patience to fail
mm_slow_fits = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
'kappa4', 'kstwobign', 'recipinvgauss', 'skewnorm',
'truncexpon', 'vonmises', 'vonmises_line']
failing_fits = {"MM": mm_failing_fits + mm_slow_fits, "MLE": mle_failing_fits}
# Don't run the fit test on these:
skip_fit = [
'erlang', # Subclass of gamma, generates a warning.
'genhyperbolic', # too slow
]
def cases_test_cont_fit():
# this tests the closeness of the estimated parameters to the true
# parameters with fit method of continuous distributions
# Note: this is slow; some distributions don't converge with sample
# size <= 10000
for distname, arg in distcont:
if distname not in skip_fit:
yield distname, arg
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg', cases_test_cont_fit())
@pytest.mark.parametrize('method', ["MLE", 'MM'])
def test_cont_fit(distname, arg, method):
if distname in failing_fits[method]:
# Skip failing fits unless overridden
try:
xfail = not int(os.environ['SCIPY_XFAIL'])
except Exception:
xfail = True
if xfail:
msg = "Fitting %s doesn't work reliably yet" % distname
msg += (" [Set environment variable SCIPY_XFAIL=1 to run this"
" test nevertheless.]")
pytest.xfail(msg)
distfn = getattr(stats, distname)
truearg = np.hstack([arg, [0.0, 1.0]])
diffthreshold = np.max(np.vstack([truearg*thresh_percent,
np.full(distfn.numargs+2, thresh_min)]),
0)
for fit_size in fit_sizes:
# Note that if a fit succeeds, the other fit_sizes are skipped
np.random.seed(1234)
with np.errstate(all='ignore'):
rvs = distfn.rvs(size=fit_size, *arg)
est = distfn.fit(rvs, method=method) # start with default values
diff = est - truearg
# threshold for location
diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,
thresh_min])
if np.any(np.isnan(est)):
raise AssertionError('nan returned in fit')
else:
if np.all(np.abs(diff) <= diffthreshold):
break
else:
txt = 'parameter: %s\n' % str(truearg)
txt += 'estimated: %s\n' % str(est)
txt += 'diff : %s\n' % str(diff)
raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
def _check_loc_scale_mle_fit(name, data, desired, atol=None):
d = getattr(stats, name)
actual = d.fit(data)[-2:]
assert_allclose(actual, desired, atol=atol,
err_msg='poor mle fit of (loc, scale) in %s' % name)
def test_non_default_loc_scale_mle_fit():
data = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00])
_check_loc_scale_mle_fit('uniform', data, [1.01, 0.99], 1e-3)
_check_loc_scale_mle_fit('expon', data, [1.01, 0.73875], 1e-3)
def test_expon_fit():
"""gh-6167"""
data = [0, 0, 0, 0, 2, 2, 2, 2]
phat = stats.expon.fit(data, floc=0)
assert_allclose(phat, [0, 1.0], atol=1e-3)
@pytest.mark.parametrize("dist, params",
[(stats.norm, (0.5, 2.5)), # type: ignore[attr-defined] # noqa
(stats.binom, (10, 0.3, 2))]) # type: ignore[attr-defined] # noqa
def test_nnlf_and_related_methods(dist, params):
rng = np.random.default_rng(983459824)
if hasattr(dist, 'pdf'):
logpxf = dist.logpdf
else:
logpxf = dist.logpmf
x = dist.rvs(*params, size=100, random_state=rng)
ref = -logpxf(x, *params).sum()
res1 = dist.nnlf(params, x)
res2 = dist._penalized_nnlf(params, x)
assert_allclose(res1, ref)
assert_allclose(res2, ref)
def cases_test_fit():
skip_basic_fit = {'nhypergeom', 'boltzmann', 'nbinom',
'randint', 'yulesimon', 'nchypergeom_fisher',
'nchypergeom_wallenius'}
slow_basic_fit = {'binom'}
xslow_basic_fit = {'skellam', 'hypergeom', 'zipfian', 'betabinom'}
for dist in dict(distdiscrete):
if dist in skip_basic_fit or not isinstance(dist, str):
reason = "tested separately"
yield pytest.param(dist, marks=pytest.mark.skip(reason=reason))
elif dist in slow_basic_fit:
reason = "too slow (>= 0.25s)"
yield pytest.param(dist, marks=pytest.mark.slow(reason=reason))
elif dist in xslow_basic_fit:
reason = "too slow (>= 1.0s)"
yield pytest.param(dist, marks=pytest.mark.xslow(reason=reason))
class TestFit:
dist = stats.binom # type: ignore[attr-defined]
seed = 654634816187
rng = np.random.default_rng(seed)
data = stats.binom.rvs(5, 0.5, size=100, random_state=rng) # type: ignore[attr-defined] # noqa
shape_bounds_a = [(1, 10), (0, 1)]
shape_bounds_d = {'n': (1, 10), 'p': (0, 1)}
atol = 5e-2
rtol = 1e-2
tols = {'atol': atol, 'rtol': rtol}
def opt(self, *args, **kwds):
return differential_evolution(*args, seed=0, **kwds)
def test_dist_iv(self):
message = "`dist` must be an instance of..."
with pytest.raises(ValueError, match=message):
stats.fit(10, self.data, self.shape_bounds_a)
message = "Distribution `laplace` is not yet supported by..."
with pytest.raises(ValueError, match=message):
stats.fit(stats.laplace, self.data)
def test_data_iv(self):
message = "`data` must be exactly one-dimensional."
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, [[1, 2, 3]], self.shape_bounds_a)
message = "All elements of `data` must be finite numbers."
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, [1, 2, 3, np.nan], self.shape_bounds_a)
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, [1, 2, 3, np.inf], self.shape_bounds_a)
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, ['1', '2', '3'], self.shape_bounds_a)
def test_bounds_iv(self):
message = "Bounds provided for the following unrecognized..."
shape_bounds = {'n': (1, 10), 'p': (0, 1), '1': (0, 10)}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "Each element of a `bounds` sequence must be a tuple..."
shape_bounds = [(1, 10, 3), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "Each element of `bounds` must be a tuple specifying..."
shape_bounds = [(1, 10, 3), (0, 1, 0.5)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
shape_bounds = [1, 0]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "A `bounds` sequence must contain at least 2 elements..."
shape_bounds = [(1, 10)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "A `bounds` sequence may not contain more than 3 elements..."
bounds = [(1, 10), (1, 10), (1, 10), (1, 10)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, bounds)
message = "There are no values for `p` on the interval..."
shape_bounds = {'n': (1, 10), 'p': (1, 0)}
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "There are no values for `n` on the interval..."
shape_bounds = [(10, 1), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "There are no integer values for `n` on the interval..."
shape_bounds = [(1.4, 1.6), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
message = "The intersection of user-provided bounds for `n`"
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data)
shape_bounds = [(-np.inf, np.inf), (0, 1)]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, shape_bounds)
def test_guess_iv(self):
message = "Guesses provided for the following unrecognized..."
guess = {'n': 1, 'p': 0.5, '1': 255}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Each element of `guess` must be a scalar..."
guess = {'n': 1, 'p': 'hi'}
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
guess = [1, 'f']
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
guess = [[1, 2]]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "A `guess` sequence must contain at least 2..."
guess = [1]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "A `guess` sequence may not contain more than 3..."
guess = [1, 2, 3, 4]
with pytest.raises(ValueError, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `n` rounded..."
guess = {'n': 4.5, 'p': -0.5}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `loc` rounded..."
guess = [5, 0.5, 0.5]
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `p` clipped..."
guess = {'n': 5, 'p': -0.5}
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
message = "Guess for parameter `loc` clipped..."
guess = [5, 0.5, 1]
with pytest.warns(RuntimeWarning, match=message):
stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
@pytest.mark.parametrize("dist_name", cases_test_fit())
def test_basic_fit(self, dist_name):
N = 5000
dist_data = dict(distcont + distdiscrete)
rng = np.random.default_rng(self.seed)
dist = getattr(stats, dist_name)
shapes = np.array(dist_data[dist_name])
bounds = np.empty((len(shapes) + 2, 2), dtype=np.float64)
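# one (lower, upper) row per shape parameter, then one row each for loc and scale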
bounds[:-2, 0] = shapes/10 # essentially all shapes are > 0
bounds[:-2, 1] = shapes*10
bounds[-2] = (0, 10)
bounds[-1] = (0, 10)
loc = rng.uniform(*bounds[-2])
scale = rng.uniform(*bounds[-1])
ref = list(dist_data[dist_name]) + [loc, scale]
if getattr(dist, 'pmf', False):
ref = ref[:-1]
ref[-1] = np.floor(loc)
data = dist.rvs(*ref, size=N, random_state=rng)
res = stats.fit(dist, data, bounds[:-1], optimizer=self.opt)
if getattr(dist, 'pdf', False):
data = dist.rvs(*ref, size=N, random_state=rng)
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, ref, **self.tols)
@pytest.mark.skip("Tested in test_basic_fit")
def test_hypergeom(self):
# hypergeometric distribution (M, n, N) \equiv (M, N, n)
N = 1000
rng = np.random.default_rng(self.seed)
dist = stats.hypergeom
shapes = (20, 7, 12)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = [(0, 30)]*3
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params[:-1], shapes, **self.tols)
@pytest.mark.xslow
def test_nhypergeom(self):
# DE doesn't find optimum for the bounds in `test_basic_fit`. NBD.
N = 2000
rng = np.random.default_rng(self.seed)
dist = stats.nhypergeom
shapes = (20, 7, 12)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = [(0, 30)]*3
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params[:-1], (20, 7, 12), **self.tols)
def test_boltzmann(self):
# Boltzmann distribution shape is very insensitive to parameter N
N = 1000
rng = np.random.default_rng(self.seed)
dist = stats.boltzmann
shapes = (1.4, 19, 4)
data = dist.rvs(*shapes, size=N, random_state=rng)
bounds = [(0, 30)]*2 + [(0, 10)]
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params[0], 1.4, **self.tols)
assert_allclose(res.params[2], 4, **self.tols)
def test_nbinom(self):
# Fitting nbinom doesn't always get original shapes if loc is free
N = 7000
rng = np.random.default_rng(self.seed)
dist = stats.nbinom
shapes = (5, 0.5)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = [(0.5, 50), (0.05, 5)]
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params[:-1], shapes, **self.tols)
def test_randint(self):
# randint is overparameterized; test_basic_fit finds equally good fit
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.randint
shapes = (7, 31)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = [(0, 70), (0, 310)]
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params[:2], shapes, **self.tols)
def test_yulesimon(self):
# yulesimon fit is not very sensitive to alpha except for small alpha
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.yulesimon
params = (1.5, 4)
data = dist.rvs(*params, size=N, random_state=rng)
bounds = [(0.15, 15), (0, 10)]
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, params, **self.tols)
@pytest.mark.xslow
def test_nchypergeom_fisher(self):
# The NC hypergeometric distributions are more challenging
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.nchypergeom_fisher
shapes = (14, 8, 6, 0.5)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = [(0, 20), (8, 8), (0, 10), (0, 1)]
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params[:-1], shapes, **self.tols)
@pytest.mark.xslow
def test_nchypergeom_wallenius(self):
# The NC hypergeometric distributions are more challenging
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.nchypergeom_wallenius
shapes = (14, 8, 6, 0.5)
data = dist.rvs(*shapes, size=N, random_state=rng)
shape_bounds = [(0, 20), (0, 10), (0, 10), (0, 0.5)]
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params[:-1], shapes, **self.tols)
def test_missing_shape_bounds(self):
# some distributions have a small domain w.r.t. a parameter, e.g.
# $p \in [0, 1]$ for binomial distribution
# User does not need to provide these because the intersection of the
# user's bounds (none) and the distribution's domain is finite
N = 1000
rng = np.random.default_rng(self.seed)
dist = stats.binom
n, p, loc = 10, 0.65, 0
data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
shape_bounds = {'n': np.array([0, 20])} # check arrays are OK, too
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params, (n, p, loc), **self.tols)
dist = stats.bernoulli
p, loc = 0.314159, 0
data = dist.rvs(p, loc=loc, size=N, random_state=rng)
res = stats.fit(dist, data, optimizer=self.opt)
assert_allclose(res.params, (p, loc), **self.tols)
def test_fit_only_loc_scale(self):
# fit only loc
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.norm
loc, scale = 1.5, 1
data = dist.rvs(loc=loc, size=N, random_state=rng)
loc_bounds = (0, 5)
bounds = {'loc': loc_bounds}
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, (loc, scale), **self.tols)
# fit only scale
loc, scale = 0, 2.5
data = dist.rvs(scale=scale, size=N, random_state=rng)
scale_bounds = (0, 5)
bounds = {'scale': scale_bounds}
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, (loc, scale), **self.tols)
# fit only loc and scale
dist = stats.norm
loc, scale = 1.5, 2.5
data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
bounds = {'loc': loc_bounds, 'scale': scale_bounds}
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert_allclose(res.params, (loc, scale), **self.tols)
def test_everything_fixed(self):
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.norm
loc, scale = 1.5, 2.5
data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
# loc, scale fixed to 0, 1 by default
res = stats.fit(dist, data)
assert_allclose(res.params, (0, 1), **self.tols)
# loc, scale explicitly fixed
bounds = {'loc': (loc, loc), 'scale': (scale, scale)}
res = stats.fit(dist, data, bounds)
assert_allclose(res.params, (loc, scale), **self.tols)
# `n` gets fixed during polishing
dist = stats.binom
n, p, loc = 10, 0.65, 0
data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
shape_bounds = {'n': (0, 20), 'p': (0.65, 0.65)}
res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
assert_allclose(res.params, (n, p, loc), **self.tols)
def test_failure(self):
N = 5000
rng = np.random.default_rng(self.seed)
dist = stats.nbinom
shapes = (5, 0.5)
data = dist.rvs(*shapes, size=N, random_state=rng)
assert data.min() == 0
# With lower bounds on location at 0.5, likelihood is zero
bounds = [(0, 30), (0, 1), (0.5, 10)]
res = stats.fit(dist, data, bounds)
message = "Optimization converged to parameter values that are"
assert res.message.startswith(message)
assert res.success is False
@pytest.mark.xslow
def test_guess(self):
# Test that guess helps DE find the desired solution
N = 2000
rng = np.random.default_rng(self.seed)
dist = stats.nhypergeom
params = (20, 7, 12, 0)
bounds = [(2, 200), (0.7, 70), (1.2, 120), (0, 10)]
data = dist.rvs(*params, size=N, random_state=rng)
res = stats.fit(dist, data, bounds, optimizer=self.opt)
assert not np.allclose(res.params, params, **self.tols)
res = stats.fit(dist, data, bounds, guess=params, optimizer=self.opt)
assert_allclose(res.params, params, **self.tols)
|
|
"""
homeassistant.components.media_player.squeezebox
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides an interface to the Logitech SqueezeBox API
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.squeezebox/
"""
import logging
import telnetlib
import urllib.parse
from homeassistant.components.media_player import (
MediaPlayerDevice, SUPPORT_PAUSE, SUPPORT_SEEK, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_MUTE, SUPPORT_PREVIOUS_TRACK, SUPPORT_NEXT_TRACK,
SUPPORT_TURN_ON, SUPPORT_TURN_OFF,
MEDIA_TYPE_MUSIC, DOMAIN)
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD,
STATE_IDLE, STATE_PLAYING, STATE_PAUSED, STATE_OFF, STATE_UNKNOWN)
_LOGGER = logging.getLogger(__name__)
SUPPORT_SQUEEZEBOX = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_SEEK | SUPPORT_TURN_ON | SUPPORT_TURN_OFF
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the squeezebox platform. """
if not config.get(CONF_HOST):
_LOGGER.error(
"Missing required configuration items in %s: %s",
DOMAIN,
CONF_HOST)
return False
lms = LogitechMediaServer(
config.get(CONF_HOST),
config.get('port', '9090'),
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD))
if not lms.init_success:
return False
add_devices(lms.create_players())
return True
class LogitechMediaServer(object):
""" Represents a Logitech media server. """
def __init__(self, host, port, username, password):
self.host = host
self.port = port
self._username = username
self._password = password
self.http_port = self._get_http_port()
self.init_success = bool(self.http_port)
def _get_http_port(self):
""" Get http port from media server, it is used to get cover art. """
http_port = None
try:
http_port = self.query('pref', 'httpport', '?')
if not http_port:
_LOGGER.error(
"Unable to read data from server %s:%s",
self.host,
self.port)
return
return http_port
except ConnectionError as ex:
_LOGGER.error(
"Failed to connect to server %s:%s - %s",
self.host,
self.port,
ex)
return
def create_players(self):
""" Create a list of SqueezeBoxDevices connected to the LMS. """
players = []
count = self.query('player', 'count', '?')
for index in range(0, int(count)):
player_id = self.query('player', 'id', str(index), '?')
player = SqueezeBoxDevice(self, player_id)
players.append(player)
return players
def query(self, *parameters):
""" Send request and await response from server. """
telnet = telnetlib.Telnet(self.host, self.port)
if self._username and self._password:
telnet.write('login {username} {password}\n'.format(
username=self._username,
password=self._password).encode('UTF-8'))
telnet.read_until(b'\n', timeout=3)
message = '{}\n'.format(' '.join(parameters))
telnet.write(message.encode('UTF-8'))
response = telnet.read_until(b'\n', timeout=3)\
.decode('UTF-8')\
.split(' ')[-1]\
.strip()
telnet.write(b'exit\n')
return urllib.parse.unquote(response)
def get_player_status(self, player):
""" Get ithe status of a player. """
# Requested information (the tags string below):
#   (title):       Song title
# a (artist):      Artist name
# d (duration):    Song duration in seconds
# K (artwork_url): URL to remote artwork
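# Illustrative parsing example (the raw values are assumptions): an item such
# as "artist%3ASome%20Band" is URL-decoded to "artist:Some Band" and then
# partitioned on the first ':' into new_status['artist'] = 'Some Band'.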
tags = 'adK'
new_status = {}
telnet = telnetlib.Telnet(self.host, self.port)
telnet.write('{player} status - 1 tags:{tags}\n'.format(
player=player,
tags=tags
).encode('UTF-8'))
response = telnet.read_until(b'\n', timeout=3)\
.decode('UTF-8')\
.split(' ')
telnet.write(b'exit\n')
for item in response:
parts = urllib.parse.unquote(item).partition(':')
new_status[parts[0]] = parts[2]
return new_status
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
class SqueezeBoxDevice(MediaPlayerDevice):
""" Represents a SqueezeBox device. """
# pylint: disable=too-many-arguments, abstract-method
def __init__(self, lms, player_id):
super(SqueezeBoxDevice, self).__init__()
self._lms = lms
self._id = player_id
self._name = self._lms.query(self._id, 'name', '?')
self._status = self._lms.get_player_status(self._id)
@property
def name(self):
""" Returns the name of the device. """
return self._name
@property
def state(self):
""" Returns the state of the device. """
if 'power' in self._status and self._status['power'] == '0':
return STATE_OFF
if 'mode' in self._status:
if self._status['mode'] == 'pause':
return STATE_PAUSED
if self._status['mode'] == 'play':
return STATE_PLAYING
if self._status['mode'] == 'stop':
return STATE_IDLE
return STATE_UNKNOWN
def update(self):
""" Retrieve latest state. """
self._status = self._lms.get_player_status(self._id)
@property
def volume_level(self):
""" Volume level of the media player (0..1). """
if 'mixer volume' in self._status:
return int(float(self._status['mixer volume'])) / 100.0
@property
def is_volume_muted(self):
if 'mixer volume' in self._status:
return self._status['mixer volume'].startswith('-')
@property
def media_content_id(self):
""" Content ID of current playing media. """
if 'current_title' in self._status:
return self._status['current_title']
@property
def media_content_type(self):
""" Content type of current playing media. """
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
""" Duration of current playing media in seconds. """
if 'duration' in self._status:
return int(float(self._status['duration']))
@property
def media_image_url(self):
""" Image url of current playing media. """
if 'artwork_url' in self._status:
media_url = self._status['artwork_url']
elif 'id' in self._status:
media_url = ('/music/{track_id}/cover.jpg').format(
track_id=self._status['id'])
else:
media_url = ('/music/current/cover.jpg?player={player}').format(
player=self._id)
base_url = 'http://{server}:{port}/'.format(
server=self._lms.host,
port=self._lms.http_port)
return urllib.parse.urljoin(base_url, media_url)
@property
def media_title(self):
""" Title of current playing media. """
if 'artist' in self._status and 'title' in self._status:
return '{artist} - {title}'.format(
artist=self._status['artist'],
title=self._status['title']
)
if 'current_title' in self._status:
return self._status['current_title']
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_SQUEEZEBOX
def turn_off(self):
""" turn_off media player. """
self._lms.query(self._id, 'power', '0')
self.update_ha_state()
def volume_up(self):
""" volume_up media player. """
self._lms.query(self._id, 'mixer', 'volume', '+5')
self.update_ha_state()
def volume_down(self):
""" volume_down media player. """
self._lms.query(self._id, 'mixer', 'volume', '-5')
self.update_ha_state()
def set_volume_level(self, volume):
""" set volume level, range 0..1. """
volume_percent = str(int(volume*100))
self._lms.query(self._id, 'mixer', 'volume', volume_percent)
self.update_ha_state()
def mute_volume(self, mute):
""" mute (true) or unmute (false) media player. """
mute_numeric = '1' if mute else '0'
self._lms.query(self._id, 'mixer', 'muting', mute_numeric)
self.update_ha_state()
def media_play_pause(self):
""" media_play_pause media player. """
self._lms.query(self._id, 'pause')
self.update_ha_state()
def media_play(self):
""" media_play media player. """
self._lms.query(self._id, 'play')
self.update_ha_state()
def media_pause(self):
""" media_pause media player. """
self._lms.query(self._id, 'pause', '1')
self.update_ha_state()
def media_next_track(self):
""" Send next track command. """
self._lms.query(self._id, 'playlist', 'index', '+1')
self.update_ha_state()
def media_previous_track(self):
""" Send next track command. """
self._lms.query(self._id, 'playlist', 'index', '-1')
self.update_ha_state()
def media_seek(self, position):
""" Send seek command. """
self._lms.query(self._id, 'time', position)
self.update_ha_state()
def turn_on(self):
""" turn the media player on. """
self._lms.query(self._id, 'power', '1')
self.update_ha_state()
|
|
"""Support for Nederlandse Spoorwegen public transport."""
from datetime import datetime, timedelta
import logging
import ns_api
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by NS"
CONF_ROUTES = "routes"
CONF_FROM = "from"
CONF_TO = "to"
CONF_VIA = "via"
ICON = "mdi:train"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
ROUTE_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_FROM): cv.string,
vol.Required(CONF_TO): cv.string,
vol.Optional(CONF_VIA): cv.string,
}
)
ROUTES_SCHEMA = vol.All(cv.ensure_list, [ROUTE_SCHEMA])
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_ROUTES): ROUTES_SCHEMA}
)
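# Minimal illustrative configuration.yaml entry for the schema above (station
# codes and the platform key name are assumptions, not taken from this file):
#
# sensor:
#   - platform: nederlandse_spoorwegen
#     api_key: YOUR_NS_API_KEY
#     routes:
#       - name: Rotterdam-Amsterdam
#         from: RTD
#         to: ASD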
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the departure sensor."""
nsapi = ns_api.NSAPI(config[CONF_API_KEY])
try:
stations = nsapi.get_stations()
except (
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
) as error:
_LOGGER.error("Couldn't fetch stations, API password correct?: %s", error)
return
sensors = []
for departure in config.get(CONF_ROUTES):
if not valid_stations(
stations,
[departure.get(CONF_FROM), departure.get(CONF_VIA), departure.get(CONF_TO)],
):
continue
sensors.append(
NSDepartureSensor(
nsapi,
departure.get(CONF_NAME),
departure.get(CONF_FROM),
departure.get(CONF_TO),
departure.get(CONF_VIA),
)
)
if sensors:
add_entities(sensors, True)
def valid_stations(stations, given_stations):
"""Verify the existence of the given station codes."""
for station in given_stations:
if station is None:
continue
if not any(s.code == station.upper() for s in stations):
_LOGGER.warning("Station '%s' is not a valid station.", station)
return False
return True
class NSDepartureSensor(Entity):
"""Implementation of a NS Departure Sensor."""
def __init__(self, nsapi, name, departure, heading, via):
"""Initialize the sensor."""
self._nsapi = nsapi
self._name = name
self._departure = departure
self._via = via
self._heading = heading
self._state = None
self._trips = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return ICON
@property
def state(self):
"""Return the next departure time."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
if not self._trips:
return
route = []
if self._trips[0].trip_parts:
route = [self._trips[0].departure]
for k in self._trips[0].trip_parts:
route.append(k.destination)
# Static attributes
attributes = {
"going": self._trips[0].going,
"departure_time_planned": self._trips[0].departure_time_planned.strftime(
"%H:%M"
),
"departure_time_actual": None,
"departure_delay": False,
"departure_platform_planned": self._trips[0].departure_platform_planned,
"departure_platform_actual": None,
"arrival_time_planned": self._trips[0].arrival_time_planned.strftime(
"%H:%M"
),
"arrival_time_actual": None,
"arrival_delay": False,
"arrival_platform_platform": self._trips[0].arrival_platform_planned,
"arrival_platform_actual": None,
"next": None,
"status": self._trips[0].status.lower(),
"transfers": self._trips[0].nr_transfers,
"route": route,
"remarks": None,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
# Departure attributes
if self._trips[0].departure_time_actual is not None:
attributes["departure_time_actual"] = self._trips[
0
].departure_time_actual.strftime("%H:%M")
attributes["departure_delay"] = True
attributes["departure_platform_actual"] = self._trips[
0
].departure_platform_actual
# Arrival attributes
if self._trips[0].arrival_time_actual is not None:
attributes["arrival_time_actual"] = self._trips[
0
].arrival_time_actual.strftime("%H:%M")
attributes["arrival_delay"] = True
attributes["arrival_platform_actual"] = self._trips[
0
].arrival_platform_actual
# Next attributes
if len(self._trips) > 1 and self._trips[1].departure_time_actual is not None:
attributes["next"] = self._trips[1].departure_time_actual.strftime("%H:%M")
elif len(self._trips) > 1 and self._trips[1].departure_time_planned is not None:
attributes["next"] = self._trips[1].departure_time_planned.strftime("%H:%M")
return attributes
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the trip information."""
try:
self._trips = self._nsapi.get_trips(
datetime.now().strftime("%d-%m-%Y %H:%M"),
self._departure,
self._via,
self._heading,
True,
0,
2,
)
if self._trips:
if self._trips[0].departure_time_actual is None:
planned_time = self._trips[0].departure_time_planned
self._state = planned_time.strftime("%H:%M")
else:
actual_time = self._trips[0].departure_time_actual
self._state = actual_time.strftime("%H:%M")
except (
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
) as error:
_LOGGER.error("Couldn't fetch trip info: %s", error)
|
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
"""
Module containing the UniversalDetector detector class, which is the primary
class a user of ``chardet`` should use.
:author: Mark Pilgrim (initial port to Python)
:author: Shy Shalom (original C code)
:author: Dan Blanchard (major refactoring for 3.0)
:author: Ian Cordasco
"""
import codecs
import logging
import re
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .mbcsgroupprober import MBCSGroupProber
from .sbcsgroupprober import SBCSGroupProber
class UniversalDetector(object):
"""
The ``UniversalDetector`` class underlies the ``chardet.detect`` function
and coordinates all of the different charset probers.
To get a ``dict`` containing an encoding and its confidence, you can simply
run:
.. code::
u = UniversalDetector()
u.feed(some_bytes)
u.close()
detected = u.result
"""
MINIMUM_THRESHOLD = 0.20
HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
ESC_DETECTOR = re.compile(b'(\033|~{)')
WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
'iso-8859-2': 'Windows-1250',
'iso-8859-5': 'Windows-1251',
'iso-8859-6': 'Windows-1256',
'iso-8859-7': 'Windows-1253',
'iso-8859-8': 'Windows-1255',
'iso-8859-9': 'Windows-1254',
'iso-8859-13': 'Windows-1257'}
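# ISO_WIN_MAP works together with WIN_BYTE_DETECTOR: once bytes in the
# 0x80-0x9F range have been seen, an ISO-8859-* guess is promoted to the
# corresponding Windows superset code page when the final result is assembled.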
def __init__(self, lang_filter=LanguageFilter.ALL):
self._esc_charset_prober = None
self._charset_probers = []
self.result = None
self.done = None
self._got_data = None
self._input_state = None
self._last_char = None
self.lang_filter = lang_filter
self.logger = logging.getLogger(__name__)
self._has_win_bytes = None
self.reset()
def reset(self):
"""
Reset the UniversalDetector and all of its probers back to their
initial states. This is called by ``__init__``, so you only need to
call this directly in between analyses of different documents.
"""
self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
self.done = False
self._got_data = False
self._has_win_bytes = False
self._input_state = InputState.PURE_ASCII
self._last_char = b''
if self._esc_charset_prober:
self._esc_charset_prober.reset()
for prober in self._charset_probers:
prober.reset()
def feed(self, byte_str):
"""
Takes a chunk of a document and feeds it through all of the relevant
charset probers.
After calling ``feed``, you can check the value of the ``done``
attribute to see if you need to continue feeding the
``UniversalDetector`` more data, or if it has made a prediction
(in the ``result`` attribute).
.. note::
You should always call ``close`` when you're done feeding in your
document if ``done`` is not already ``True``.
"""
if self.done:
return
if not len(byte_str):
return
if not isinstance(byte_str, bytearray):
byte_str = bytearray(byte_str)
# First check for known BOMs, since these are guaranteed to be correct
if not self._got_data:
# If the data starts with BOM, we know it is UTF
if byte_str.startswith(codecs.BOM_UTF8):
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_UTF32_LE,
codecs.BOM_UTF32_BE)):
# FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0,
'language': ''}
elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
# FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16",
'confidence': 1.0,
'language': ''}
self._got_data = True
if self.result['encoding'] is not None:
self.done = True
return
# If none of those matched and we've only see ASCII so far, check
# for high bytes and escape sequences
if self._input_state == InputState.PURE_ASCII:
if self.HIGH_BYTE_DETECTOR.search(byte_str):
self._input_state = InputState.HIGH_BYTE
elif self._input_state == InputState.PURE_ASCII and \
self.ESC_DETECTOR.search(self._last_char + byte_str):
self._input_state = InputState.ESC_ASCII
self._last_char = byte_str[-1:]
# If we've seen escape sequences, use the EscCharSetProber, which
# uses a simple state machine to check for known escape sequences in
# HZ and ISO-2022 encodings, since those are the only encodings that
# use such sequences.
if self._input_state == InputState.ESC_ASCII:
if not self._esc_charset_prober:
self._esc_charset_prober = EscCharSetProber(self.lang_filter)
if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding':
self._esc_charset_prober.charset_name,
'confidence':
self._esc_charset_prober.get_confidence(),
'language':
self._esc_charset_prober.language}
self.done = True
# If we've seen high bytes (i.e., those with values greater than 127),
# we need to do more complicated checks using all our multi-byte and
# single-byte probers that are left. The single-byte probers
# use character bigram distributions to determine the encoding, whereas
# the multi-byte probers use a combination of character unigram and
# bigram distributions.
elif self._input_state == InputState.HIGH_BYTE:
if not self._charset_probers:
self._charset_probers = [MBCSGroupProber(self.lang_filter)]
# If we're checking non-CJK encodings, use single-byte prober
if self.lang_filter & LanguageFilter.NON_CJK:
self._charset_probers.append(SBCSGroupProber())
self._charset_probers.append(Latin1Prober())
for prober in self._charset_probers:
if prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {'encoding': prober.charset_name,
'confidence': prober.get_confidence(),
'language': prober.language}
self.done = True
break
if self.WIN_BYTE_DETECTOR.search(byte_str):
self._has_win_bytes = True
def close(self):
"""
Stop analyzing the current document and come up with a final
prediction.
:returns: The ``result`` attribute, a ``dict`` with the keys
`encoding`, `confidence`, and `language`.
"""
# Don't bother with checks if we're already done
if self.done:
return self.result
self.done = True
if not self._got_data:
self.logger.debug('no data received!')
# Default to ASCII if it is all we've seen so far
elif self._input_state == InputState.PURE_ASCII:
self.result = {'encoding': 'ascii',
'confidence': 1.0,
'language': ''}
# If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
elif self._input_state == InputState.HIGH_BYTE:
prober_confidence = None
max_prober_confidence = 0.0
max_prober = None
for prober in self._charset_probers:
if not prober:
continue
prober_confidence = prober.get_confidence()
if prober_confidence > max_prober_confidence:
max_prober_confidence = prober_confidence
max_prober = prober
if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
charset_name = max_prober.charset_name
lower_charset_name = max_prober.charset_name.lower()
confidence = max_prober.get_confidence()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
if lower_charset_name.startswith('iso-8859'):
if self._has_win_bytes:
charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
charset_name)
self.result = {'encoding': charset_name,
'confidence': confidence,
'language': max_prober.language}
# Log all prober confidences if none met MINIMUM_THRESHOLD
if self.logger.getEffectiveLevel() == logging.DEBUG:
if self.result['encoding'] is None:
self.logger.debug('no probers hit minimum threshold')
for prober in self._charset_probers[0].probers:
if not prober:
continue
self.logger.debug('%s %s confidence = %s',
prober.charset_name,
prober.language,
prober.get_confidence())
return self.result
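# Hedged usage sketch (added; not part of the original module): stream a file
# through UniversalDetector and stop early once ``done`` flips, as the feed()
# docstring suggests. The path argument is a placeholder supplied by the caller.
def _detect_file_encoding(path, chunk_size=4096):
    """Return the detector's result dict for the file at ``path``."""
    detector = UniversalDetector()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b''):
            detector.feed(chunk)
            if detector.done:
                break
    return detector.close()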
|
|
# Copyright 2016 Mario Graff (https://github.com/mgraffg)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_tweet_iterator():
import os
import gzip
from microtc.utils import tweet_iterator
fname = os.path.dirname(__file__) + '/text.json'
a = [x for x in tweet_iterator(fname)]
fname_gz = fname + '.gz'
with open(fname, 'r') as fpt:
with gzip.open(fname_gz, 'w') as fpt2:
fpt2.write(fpt.read().encode('ascii'))
b = [x for x in tweet_iterator(fname_gz)]
assert len(a) == len(b)
for a0, b0 in zip(a, b):
assert a0['text'] == b0['text']
os.unlink(fname_gz)
def test_textmodel():
from b4msa.textmodel import TextModel
from microtc.utils import tweet_iterator
import os
fname = os.path.dirname(__file__) + '/text.json'
tw = list(tweet_iterator(fname))
text = TextModel([x['text'] for x in tw])
# print(text.tokenize("hola amiguitos gracias por venir :) http://hello.com @chanfle"))
# assert False
assert isinstance(text[tw[0]['text']], list)
def test_params():
import os
import itertools
from b4msa.params import BASIC_OPTIONS
from b4msa.textmodel import TextModel
from microtc.utils import tweet_iterator
params = dict(del_diac=[True, False], usr_option=BASIC_OPTIONS,
url_option=BASIC_OPTIONS)
params = sorted(params.items())
fname = os.path.dirname(__file__) + '/text.json'
tw = [x for x in tweet_iterator(fname)]
text = [x['text'] for x in tw]
for x in itertools.product(*[x[1] for x in params]):
args = dict(zip([x[0] for x in params], x))
ins = TextModel(text, **args)
assert isinstance(ins[text[0]], list)
def test_emoticons():
from microtc.textmodel import norm_chars
from microtc.emoticons import EmoticonClassifier
emo = EmoticonClassifier()
for a, b in [
("Hi :) :P XD", "~Hi~_pos~_pos~_pos~"),
("excelente dia xc", "~excelente~dia~_neg~")
]:
_a = norm_chars(a)
assert ' ' not in _a, "norm_chars normalizes spaces {0} ==> {1}".format(a, _a)
_b = emo.replace(_a)
print("[{0}] => [{1}]; should be [{2}]".format(a, _b, b))
assert _b == b
def test_lang():
from b4msa.textmodel import TextModel
text = [
"Hi :) :P XD",
"excelente dia xc",
"el alma de la fiesta XD"
]
model = TextModel(text, **{
"del_dup": True,
"emo_option": "group",
"lc": True,
"negation": True,
"num_option": "group",
"stemming": True,
"stopwords": "group",
"del_diac": False,
"token_list": [
-1,
# 5,
],
"url_option": "group",
"usr_option": "group",
"lang": "spanish",
})
text = "El alma de la fiesta :) conociendo la maquinaria @user bebiendo nunca manches que onda"
print(model.tokenize)
a = model.tokenize(text)
b = ['_sw', 'alma', '_sw', '_sw', 'fiest', '_pos', 'conoc', '_sw', 'maquinari', '_usr', 'beb', 'no_manch', '_sw', 'onda']
print(text)
assert a == b, "got: {0}, expected: {1}".format(a, b)
def test_negations():
from b4msa.textmodel import TextModel
text = [
"el alma de la fiesta XD"
]
model = TextModel(text, **{
'num_option': 'group',
'del_diac': False,
'stopwords': 'delete',
'negation': True,
'stemming': True,
'lc': False, 'token_list': [-1],
'usr_option': 'group', 'del_dup': False, 'emo_option': 'group', 'lang': 'spanish', 'url_option': 'delete'
})
text = """@usuario los pollos y las vacas nunca hubiesen permitido que no se hubiese hecho nada al respecto"""
a = model.tokenize(text)
b = ['_usr', 'poll', 'vac', 'hub', 'no_permit', 'hub', 'no_hech', 'no_respect']
print(a, b)
assert a == b
def test_negations_italian():
from b4msa.textmodel import TextModel
text = [
"XD"
]
model = TextModel(text, **{
'num_option': 'group',
'del_diac': False,
'stopwords': 'delete',
'negation': True,
'stemming': True,
'lc': False, 'token_list': [-1],
'usr_option': 'group',
'del_dup': False,
'emo_option': 'group',
'lang': 'italian',
'url_option': 'delete'
})
text = """@User Come non condividere; me ne frega niente"""
a = model.tokenize(text)
print("Input:", text)
print("Output:", a)
b = ['_usr', 'com', 'no_condividere', 'me', 'no_freg', 'nient']
assert a == b
def test_textmodel_entropy():
from b4msa.textmodel import TextModel
from microtc.utils import tweet_iterator
import os
fname = os.path.dirname(__file__) + '/text.json'
tw = list(tweet_iterator(fname))
text = TextModel(tw, threshold=0.01)
assert isinstance(text, TextModel)
print(len(text.model._w2id))
assert len(text.model._w2id) == 39
def test_textmodel_token_min_filter():
from b4msa.textmodel import TextModel
from microtc.utils import tweet_iterator
import os
fname = os.path.dirname(__file__) + '/text.json'
tw = list(tweet_iterator(fname))
text = TextModel(tw, token_min_filter=1)
print(len(text.model._w2id))
assert len(text.model._w2id) == 62
text = TextModel(tw, token_min_filter=0.3)
print(len(text.model._w2id))
assert len(text.model._w2id) == 13
text = TextModel(tw, token_min_filter=1, threshold=0.01)
def test_textmodel_default():
from b4msa.textmodel import TextModel
for lang in ['spanish', 'english', 'arabic']:
text = TextModel(lang=lang)
print(text.token_list, TextModel.default_parameters(lang=lang)['token_list'])
for a, b in zip(text.token_list,
TextModel.default_parameters(lang=lang)['token_list']):
print(a, b)
assert a == b
text = TextModel(lang='arabic', stopwords='xxx')
assert text._lang_kw['stopwords'] == 'xxx'
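# Hedged end-to-end sketch (added; not one of the original tests): the flow the
# tests above exercise is "build a TextModel from raw texts, then tokenize or
# vectorize new text". It reuses the bundled text.json fixture and only calls
# already shown in the tests.
def example_textmodel_usage():
    import os
    from b4msa.textmodel import TextModel
    from microtc.utils import tweet_iterator
    fname = os.path.dirname(__file__) + '/text.json'
    texts = [x['text'] for x in tweet_iterator(fname)]
    model = TextModel(texts, lang='spanish')
    tokens = model.tokenize(texts[0])   # list of normalized tokens
    vector = model[texts[0]]            # list representation (see test_textmodel)
    return tokens, vector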
|
|
# EventMaster Library for Python
# Unofficial Python Library for connecting to, reading from and controlling
# a Barco E2, S3 or other EventMaster Switcher
# Author: Kye Lewis <[email protected]>
# GitHub: http://github.com/kyelewisstgc/pye2s3/
# Version: 160716.a
import socket
import re
from threading import Thread
from threading import Timer
from uuid import uuid4
from time import sleep
from math import ceil
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
# Globals
global SDI_TYPE_SD
global SDI_TYPE_HD
global SDI_TYPE_LEVELA
global SDI_TYPE_LEVELB
SDI_TYPE_SD = 0
SDI_TYPE_HD = 1
SDI_TYPE_LEVELA = 2
SDI_TYPE_LEVELB = 3
global COLORSPACE_RGB
global COLORSPACE_SMPTE
COLORSPACE_RGB = 0
COLORSPACE_SMPTE = 1
global BLACKONINVALID_ON
global BLACKONINVALID_OFF
BLACKONINVALID_OFF = 0
BLACKONINVALID_ON = 1
global COLORRANGE_REDUCED
global COLORRANGE_FULL
COLORRANGE_REDUCED = 0
COLORRANGE_FULL = 1
global VF_1024x768_4795
global VF_1024x768_4800
global VF_1024x768_5000
global VF_1024x768_5994
global VF_1024x768_6000
global VF_1024x768_7000
global VF_1024x768_7193
global VF_1024x768_7200
global VF_1024x768_7500
global VF_1024x768_8500
VF_1024x768_4795 = 700
VF_1024x768_4800 = 701
VF_1024x768_5000 = 702
VF_1024x768_5994 = 703
VF_1024x768_6000 = 704
VF_1024x768_7000 = 705
VF_1024x768_7193 = 706
VF_1024x768_7200 = 707
VF_1024x768_7500 = 708
VF_1024x768_8500 = 709
global VF_1152x864_7500
VF_1152x864_7500 = 900
global VF_1280x1024_4795
global VF_1280x1024_4800
global VF_1280x1024_5000
global VF_1280x1024_5994
global VF_1280x1024_6000
global VF_1280x1024_7000
global VF_1280x1024_7193
global VF_1280x1024_7200
global VF_1280x1024_7500
global VF_1280x1024_8500
VF_1280x1024_4795 = 1500
VF_1280x1024_4800 = 1501
VF_1280x1024_5000 = 1502
VF_1280x1024_5994 = 1503
VF_1280x1024_6000 = 1504
VF_1280x1024_7000 = 1505
VF_1280x1024_7193 = 1506
VF_1280x1024_7200 = 1507
VF_1280x1024_7500 = 1508
VF_1280x1024_8500 = 1509
global VF_1280x720p_2398
global VF_1280x720p_2400
global VF_1280x720p_2500
global VF_1280x720p_2997
global VF_1280x720p_3000
global VF_1280x720p_4800
global VF_1280x720p_5000
global VF_1280x720p_5994
global VF_1280x720p_6000
global VF_1280x720p_10000
global VF_1280x720p_11988
global VF_1280x720p_12000
VF_1280x720p_2398 = 1000
VF_1280x720p_2400 = 1001
VF_1280x720p_2500 = 1002
VF_1280x720p_2997 = 1003
VF_1280x720p_3000 = 1004
VF_1280x720p_4800 = 1005
VF_1280x720p_5000 = 1006
VF_1280x720p_5994 = 1007
VF_1280x720p_6000 = 1008
VF_1280x720p_10000 = 1013
VF_1280x720p_11988 = 1014
VF_1280x720p_12000 = 1015
global VF_1920x1080p_2398
global VF_1920x1080p_2400
global VF_1920x1080p_2500
global VF_1920x1080p_2997
global VF_1920x1080p_3000
global VF_1920x1080p_4795
global VF_1920x1080p_4800
global VF_1920x1080p_5000
global VF_1920x1080p_5994
global VF_1920x1080p_6000
VF_1920x1080p_2398 = 2700
VF_1920x1080p_2400 = 2701
VF_1920x1080p_2500 = 2702
VF_1920x1080p_2997 = 2703
VF_1920x1080p_3000 = 2704
VF_1920x1080p_4795 = 2705
VF_1920x1080p_4800 = 2706
VF_1920x1080p_5000 = 2707
VF_1920x1080p_5994 = 2708
VF_1920x1080p_6000 = 2709
global VF_1920x1080i_5000
global VF_1920x1080i_5994
global VF_1920x1080i_6000
VF_1920x1080i_5000 = 2900
VF_1920x1080i_5994 = 2901
VF_1920x1080i_6000 = 2902
global HDCPMODE_ON
global HDCPMODE_OFF
HDCPMODE_ON = 1
HDCPMODE_OFF = 0
global FRZMODE_ON
global FRZMODE_OFF
FRZMODE_ON = 1
FRZMODE_OFF = 0
global AUXSTREAMMODE_2K
global AUXSTREAMMODE_DL
global AUXSTREAMMODE_4K
global AUXSTREAMMODE_8L
AUXSTREAMMODE_2K = 1
AUXSTREAMMODE_DL = 2
AUXSTREAMMODE_4K = 4
AUXSTREAMMODE_8L = 8
global TESTPATTERNMODE_OFF
global TESTPATTERNMODE_HRAMP
global TESTPATTERNMODE_VRAMP
global TESTPATTERNMODE_CBAR100
global TESTPATTERNMODE_GRID16
global TESTPATTERNMODE_GRID32
global TESTPATTERNMODE_BURST
global TESTPATTERNMODE_CBAR75
global TESTPATTERNMODE_GRAY50
global TESTPATTERNMODE_HSTEPS
global TESTPATTERNMODE_VSTEPS
global TESTPATTERNMODE_WHITE
global TESTPATTERNMODE_BLACK
global TESTPATTERNMODE_SMPTE
global TESTPATTERNMODE_HALIGN
global TESTPATTERNMODE_VALIGN
global TESTPATTERNMODE_HVALIGN
TESTPATTERNMODE_OFF = 0
TESTPATTERNMODE_HRAMP = 1
TESTPATTERNMODE_VRAMP = 2
TESTPATTERNMODE_CBAR100 = 3
TESTPATTERNMODE_GRID16 = 4
TESTPATTERNMODE_GRID32 = 5
TESTPATTERNMODE_BURST = 6
TESTPATTERNMODE_CBAR75 = 7
TESTPATTERNMODE_GRAY50 = 8
TESTPATTERNMODE_HSTEPS = 9
TESTPATTERNMODE_VSTEPS = 10
TESTPATTERNMODE_WHITE = 11
TESTPATTERNMODE_BLACK = 12
TESTPATTERNMODE_SMPTE = 13
TESTPATTERNMODE_HALIGN = 14
TESTPATTERNMODE_VALIGN = 15
TESTPATTERNMODE_HVALIGN = 16
global RASTERBOX_ON
global RASTERBOX_OFF
RASTERBOX_ON = 1
RASTERBOX_OFF = 0
global DIAGMOTION_ON
global DIAGMOTION_OFF
DIAGMOTION_ON = 1
DIAGMOTION_OFF = 0
global CONNCAPACITY_NONE
global CONNCAPACITY_SL
CONNCAPACITY_NONE = 0
CONNCAPACITY_SL = 1
class EventMasterBase(object):
def _log(self, logtext_string):
"""Log to the console or logfile"""
if not logtext_string: return -1
class_name_string = self.__class__.__name__
if(hasattr(self, "parent")):
if(self.parent.getVerbose()==1):
print("[{0!s}] {1!s}".format(class_name_string, logtext_string))
else:
if(self.getVerbose()==1):
print("[{0!s}] {1!s}".format(class_name_string, logtext_string))
return 1
class EventMasterCollection(EventMasterBase):
def __init__(self, parent):
"""Generic Init"""
self.parent = parent
self.state = {}
def update_state(self, state_dict):
"""Update state dict values"""
for key, val in state_dict.items():
self.state[key] = val
return 1
def _simple_set(self, key_string, value, xml_path_list=None):
"""Set a basic XML value"""
xml_path_open_string = ""
xml_path_close_string = ""
if((xml_path_list != None) and (type(xml_path_list)==list)):
for item in xml_path_list:
xml_path_open_string += "<{0!s}>".format(item)
for item in xml_path_list[::-1]:
xml_path_close_string += "</{0!s}>".format(item.split(' ', 1)[0])
value_string = str(value)
xml_string = "{0!s}<{1!s}>{2!s}</{1!s}>{3!s}".format(xml_path_open_string, key_string, value_string, xml_path_close_string)
return self.send(xml_string)
def _simple_get(self, key_string):
"""Get a basic value from state dict"""
if key_string not in self.state:
return None
return self.state[key_string]
def send(self, xml_string):
"""Send a basic XML string"""
xml_path_open_string = ""
xml_path_close_string = ""
for item in self.xml_path_list:
xml_path_open_string += "<{0!s}>".format(item)
for item in self.xml_path_list[::-1]:
xml_path_close_string += "</{0!s}>".format(item.split(' ', 1)[0])
xml_ext_string = "{0!s}{1!s}{2!s}".format(str(xml_path_open_string),
str(xml_string),
str(xml_path_close_string))
return self.parent.send(str(xml_ext_string))
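# Added illustration (comment only): for a collection whose xml_path_list is
# ['SrcMgr id="0"', 'InputCfgCol id="0"', 'InputCfg id="3"'], a call such as
# _simple_set("FrzCmd", 1) first builds <FrzCmd>1</FrzCmd>, and send() then
# nests it as
#   <SrcMgr id="0"><InputCfgCol id="0"><InputCfg id="3">
#     <FrzCmd>1</FrzCmd></InputCfg></InputCfgCol></SrcMgr>
# before the parent switcher wraps it in the <System id='0' GUID='...'>
# envelope (see EventMasterCommsXML.write below).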
class EventMasterLayer(EventMasterCollection):
def __init__(self, parent, layer_int, destination_int, state_dict={}):
"""Create a new Layer Instance
Keyword arguments:
layer_int -- Unique Layer ID number
destination_int -- The destination ID number the layer is assigned to
state_dict -- Optional, a dict of Layer state items as defined below
"""
super(EventMasterLayer, self).__init__(parent)
        if parent is None:
            raise Exception( "EventMasterLayer init - parent must be supplied" )
        if destination_int is None or type(destination_int) != int:
            raise Exception( "EventMasterLayer init - Destination must be supplied and must be an Integer" )
        if layer_int is None or type(layer_int) != int:
            raise Exception( "EventMasterLayer init - Layer must be supplied and must be an Integer" )
self.layer = layer_int
self.destination = destination_int
self.state = state_dict
self.xml_path_list = [ "DestMgr id=\"0\"",
"ScreenDestCol id=\"0\"",
"ScreenDest id=\"{0!s}\"".format(self.destination),
"LayerCollection id=\"0\"",
"Layer id=\"{0!s}".format(self.layer),
"LayerCfg id=\"0\"" ]
return
def getName(self):
"""(string) Gets Layer Name"""
return self._simple_get("Name")
def getPvwMode(self):
return self._simple_get("PvwMode")
def getPgmMode(self):
return self._simple_get("PgmMode")
def getOWIN(self):
"""(dict) Gets Layer Outside Window Position & Size"""
owin_dict = {}
owin_dict["HPos"] = self._simple_get("OWIN_HPos")
owin_dict["VPos"] = self._simple_get("OWIN_VPos")
owin_dict["HSize"] = self._simple_get("OWIN_HSize")
owin_dict["VSize"] = self._simple_get("OWIN_VSize")
return owin_dict
def setOWIN(self, HPos=None, VPos=None, HSize=None, VSize=None):
""" Sets Layer Outside Window Position & Size
Returns None
(int)HPos -- Horizontal Position in pixels
(int)VPos -- Vertical Position in pixels
(int)HSize -- Horizontal Size in pixels
(int)VSize -- Vertical Size in pixels
"""
xml_path_list = ["LayerState id=\"0\"",
"WinAdjust id=\"0\"",
"PostMaskOWIN id=\"0\""]
if HPos and type(HPos) == int:
self._simple_set("HPosCmd", HPos, xml_path_list=xml_path_list)
if VPos and type(VPos) == int:
self._simple_set("VPosCmd", VPos, xml_path_list=xml_path_list)
if HSize and type(HSize) == int:
self._simple_set("HSizeCmd", HSize, xml_path_list=xml_path_list)
if VSize and type(VSize) == int:
self._simple_set("VSizeCmd", VSize, xml_path_list=xml_path_list)
def cmdFreeze(self, freeze_int):
""" Sets Layer Freeze Mode On/Off
Returns a unique query UUID as string
Args: (int)freeze_int -- Freeze (FRZMODE_ON, FRZMODE_OFF) """
        if freeze_int is None or type(freeze_int) != int: return None
if freeze_int != FRZMODE_ON and freeze_int != FRZMODE_OFF: return None
return self._simple_set("FrzCmd", freeze_int)
def getFreeze(self):
""" Gets Layer Freeze Mode
Returns FRZMODE_ON or FRZMODE_OFF """
return self._simple_get("FrzMode")
def toggleFreeze(self):
""" Toggles Layer Freeze Mode
Returns a unique query UUID as string """
frz_int = self.getFreeze()
if(frz_int == FRZMODE_ON):
return self.cmdFreeze(FRZMODE_OFF)
elif(frz_int == FRZMODE_OFF):
return self.cmdFreeze(FRZMODE_ON)
else:
return None
def getSource(self):
src = self._simple_get("Source")
src_type = self._simple_get("SrcType")
""" SrcType 0 is Input Source """
if(src_type == 0):
if src in self.parent.inputs:
return self.parent.inputs[src]
return None
def cmdRouteSource(self, source_int):
if not source_int or type(source_int) != int:
return None
""" TODO: Check if source is valid """
return self._simple_set("SrcIdx", source_int)
def cmdApplyUserKey(self, userkey_int):
if userkey_int is None:
return None
return self._simple_set("ApplyUserKey", userkey_int)
class EventMasterScreenDestination(EventMasterCollection):
def __init__(self, parent, destination_int, state_dict={}):
""" Create a new Screen Destination Instance
Returns an instance of EventMasterScreenDestination
Args: (object)parent -- EventMasterSwitcher instance
(int)input_int - Unique Destination ID number
optional (dict)state_dict -- Dict of state items """
super(EventMasterScreenDestination, self).__init__(parent)
        if parent is None:
            raise Exception( "EventMasterScreenDestination init - parent must be supplied" )
        if destination_int is None or type(destination_int) != int:
            raise Exception( "EventMasterScreenDestination init - destination_int must be supplied and must be an Integer" )
self.destination = destination_int
self.state = state_dict
self.layers = {}
self.xml_path_list = [ "DestMgr id=\"0\"",
"ScreenDestCol id=\"0\"",
"ScreenDest id=\"{0!s}\"".format(self.destination) ]
return
def getName(self):
return self._simple_get("Name")
def getSize(self):
return { "HSize": self._simple_get("HSize"),
"VSize": self._simple_get("VSize") }
def getLayers(self):
        if not self.layers:
return {}
layers = {}
for key, inst in self.layers.items():
real_layer_number = int(ceil(key/2))
if real_layer_number not in layers:
layers[real_layer_number] = {}
if inst.getPvwMode():
layers[real_layer_number]["Pvw"] = inst
if inst.getPgmMode():
layers[real_layer_number]["Pgm"] = inst
return layers
def _updateLayer(self, layer_int, state_dict):
if layer_int in self.layers:
self.layers[layer_int].update_state(state_dict)
else:
self.layers[layer_int] = EventMasterLayer( self.parent,
layer_int=layer_int,
destination_int=self.destination,
state_dict=state_dict)
return 1
class EventMasterOutput(EventMasterCollection):
def __init__(self, parent, output_int, state_dict={}):
""" Create a new Output Instance
Returns an instance of EventMasterOutput
Args: (object)parent -- EventMasterSwitcher instance
(int)output_int - Unique Output ID number
optional (dict)state_dict -- Dict of state items """
super(EventMasterOutput, self).__init__(parent)
if parent is None:
raise Exception( "EventMasterOutput init - parent must be supplied" )
if output_int is None or type(output_int) != int:
raise Exception( "EventMasterOutput init - output_int must be supplied and must be an Integer" )
self.output = output_int
self.state = state_dict
self.xml_path_list = [ "OutCfgMgr id=\"0\"",
"OutputCfg id=\"{0!s}\"".format(self.output) ]
return
def getName(self):
return self._simple_get("Name")
def getTestPatternMode(self):
return self._simple_get("TestPattern_Mode")
def getRasterBox(self):
return self._simple_get("TestPattern_RasterBox")
def getTestPatternDiagMotion(self):
return self._simple_get("TestPattern_DiagMotion")
class EventMasterPreset(EventMasterCollection):
def __init__(self, parent, preset_int, state_dict={}):
""" Create a new Preset Instance
Returns an instance of EventMasterPreset
Args: (object)parent -- EventMasterSwitcher instance
(int)output_int - Unique Preset ID number
optional (dict)state_dict -- Dict of state items """
super(EventMasterPreset, self).__init__(parent)
if parent is None:
raise Exception( "EventMasterPreset init - parent must be supplied" )
if preset_int is None or type(preset_int) != int:
raise Exception( "EventMasterPreset init - preset_int must be supplied and must be an Integer" )
self.preset = preset_int
self.state = state_dict
self.xml_path_list = [ "PresetMgr id=\"0\"",
"Preset id=\"{0!s}\"".format(self.preset) ]
return
def getName(self):
return self._simple_get("Name")
def setName(self, name_string):
if name_string is None or type(name_string) != str:
return None
        return self._simple_set("Name", name_string)
def cmdRecall(self):
xml = "<PresetMgr><RecallPreset>{0!s}</RecallPreset></PresetMgr>".format(self.preset)
return self.parent.send(xml)
class EventMasterInput(EventMasterCollection):
# S3 Default Input Map.
# TODO: Get this from unit
S3_DEFAULT_INPUT_MAP = {
1: {"conn_index": "0", "slot_index": "3",
"card_type": "1", "frame_connector_type": "SDI"},
2: {"conn_index": "1", "slot_index": "3",
"card_type": "1", "frame_connector_type": "SDI"},
3: {"conn_index": "2", "slot_index": "3",
"card_type": "1", "frame_connector_type": "SDI"},
4: {"conn_index": "3", "slot_index": "3",
"card_type": "1", "frame_connector_type": "SDI"},
5: {"conn_index": "0", "slot_index": "4",
"card_type": "2", "frame_connector_type": "DP"},
6: {"conn_index": "1", "slot_index": "4",
"card_type": "2", "frame_connector_type": "DP"},
7: {"conn_index": "2", "slot_index": "4",
"card_type": "2", "frame_connector_type": "HDMI"},
8: {"conn_index": "3", "slot_index": "4",
"card_type": "2", "frame_connector_type": "HDMI"},
9: {"conn_index": "0", "slot_index": "5",
"card_type": "2", "frame_connector_type": "DP"},
10: {"conn_index": "1", "slot_index": "5",
"card_type": "2", "frame_connector_type": "DP"},
11: {"conn_index": "2", "slot_index": "5",
"card_type": "2", "frame_connector_type": "HDMI"},
12: {"conn_index": "3", "slot_index": "5",
"card_type": "2", "frame_connector_type": "HDMI"}
}
def __init__(self, parent, input_int, state_dict={}):
""" Create a new Input Instance
Returns an instance of EventMasterInput
Args: (object)parent -- EventMasterSwitcher instance
(int)input_int - Unique Input ID number
optional (dict)state_dict -- Dict of state items """
super(EventMasterInput, self).__init__(parent)
if parent is None:
raise Exception( "EventMasterInput init - parent must be supplied" )
if input_int is None or type(input_int) != int:
raise Exception( "EventMasterInput init - input_int must be supplied and must be an Integer" )
self.input = input_int
self.state = state_dict
self.xml_path_list = [ "SrcMgr id=\"0\"",
"InputCfgCol id=\"0\"",
"InputCfg id=\"{0!s}\"".format(self.input) ]
return
def cmdFreeze(self, freeze_int):
""" Sets Freeze Mode On/Off
Returns a unique query UUID as string
Args: (int)freeze_int -- Freeze (FRZMODE_ON, FRZMODE_OFF) """
        if freeze_int is None or type(freeze_int) != int: return None
if freeze_int != FRZMODE_ON and freeze_int != FRZMODE_OFF: return None
return self._simple_set("FrzCmd", freeze_int)
def getFreeze(self):
""" Gets Input Freeze Mode
Returns FRZMODE_ON or FRZMODE_OFF """
return self._simple_get("FrzMode")
def toggleFreeze(self):
""" Toggles Input Freeze Mode
Returns a unique query UUID as string """
frz_int = self.getFreeze()
if(frz_int == FRZMODE_ON):
return self.cmdFreeze(FRZMODE_OFF)
elif(frz_int == FRZMODE_OFF):
return self.cmdFreeze(FRZMODE_ON)
else:
return None
def setName(self, name_string):
""" Sets Input Name
Returns a unique query UUID as string
Args: (str)name_string -- Input name """
        if name_string is None or type(name_string) != str:
return None
return self._simple_set("Name", name_string)
def getName(self):
"""(str) Gets Input Name"""
return self._simple_get("Name")
def getInputCfgType(self):
"""(int) Gets Input Configuration Type"""
return self._simple_get("InputCfgType")
def getAutoAcqMode(self):
"""(int) Gets Input Auto Acquire Mode Status"""
return self._simple_get("AutoAcqMode")
def getType3G(self):
"""(int) Gets Input 3G Type"""
return self._simple_get("Type3G")
class EventMasterFrame(EventMasterCollection):
# TODO: Complete VF List (or get from unit?)
VF_MAP = { 1000: VF_1280x720p_2398,
1001: VF_1280x720p_2400,
1002: VF_1280x720p_2500,
1003: VF_1280x720p_2997,
1004: VF_1280x720p_3000,
1005: VF_1280x720p_4800,
1006: VF_1280x720p_5000,
1007: VF_1280x720p_5994,
1008: VF_1280x720p_6000,
1013: VF_1280x720p_10000,
1014: VF_1280x720p_11988,
1015: VF_1280x720p_12000,
2700: VF_1920x1080p_2398,
2701: VF_1920x1080p_2400,
2702: VF_1920x1080p_2500,
2703: VF_1920x1080p_2997,
2704: VF_1920x1080p_3000,
2705: VF_1920x1080p_4795,
2706: VF_1920x1080p_4800,
2707: VF_1920x1080p_5000,
2708: VF_1920x1080p_5994,
2709: VF_1920x1080p_6000 }
    STR_MAP = { "1280x720p@23.98": VF_1280x720p_2398,
                "1280x720p@24": VF_1280x720p_2400,
                "1280x720p@25": VF_1280x720p_2500,
                "1280x720p@29.97": VF_1280x720p_2997,
                "1280x720p@30": VF_1280x720p_3000,
                "1280x720p@48": VF_1280x720p_4800,
                "1280x720p@50": VF_1280x720p_5000,
                "1280x720p@59.94": VF_1280x720p_5994,
                "1280x720p@60": VF_1280x720p_6000,
                "1280x720p@100": VF_1280x720p_10000,
                "1280x720p@119.88": VF_1280x720p_11988,
                "1280x720p@120": VF_1280x720p_12000,
                "1920x1080p@23.98": VF_1920x1080p_2398,
                "1920x1080p@24": VF_1920x1080p_2400,
                "1920x1080p@25": VF_1920x1080p_2500,
                "1920x1080p@29.97": VF_1920x1080p_2997,
                "1920x1080p@30": VF_1920x1080p_3000,
                "1920x1080p@47.95": VF_1920x1080p_4795,
                "1920x1080p@48": VF_1920x1080p_4800,
                "1920x1080p@50": VF_1920x1080p_5000,
                "1920x1080p@59.94": VF_1920x1080p_5994,
                "1920x1080p@60": VF_1920x1080p_6000 }
# S3 Default Input Map.
# TODO: Get this from unit
S3_DEFAULT_INPUT_MAP = {
3: {0: {"card_type": 1, "frame_connector_type": "SDI"},
1: {"card_type": 1, "frame_connector_type": "SDI"},
2: {"card_type": 1, "frame_connector_type": "SDI"},
3: {"card_type": 1, "frame_connector_type": "SDI"},
},
4: {0: {"card_type": 2, "frame_connector_type": "DP"},
1: {"card_type": 2, "frame_connector_type": "DP"},
2: {"card_type": 2, "frame_connector_type": "HDMI"},
3: {"card_type": 2, "frame_connector_type": "HDMI"},
},
5: {0: {"card_type": 2, "frame_connector_type": "DP"},
1: {"card_type": 2, "frame_connector_type": "DP"},
2: {"card_type": 2, "frame_connector_type": "HDMI"},
3: {"card_type": 2, "frame_connector_type": "HDMI"},
}
}
def __init__(self, parent, frame_str, state_dict={}):
"""Create a new Frame Instance
Keyword arguments:
frame_str -- Unique Frame MAC address
state_dict -- Optional, a dict of Layer state items as defined below
Frame state items:
FrameType, BlackOnInvalid
"""
super(EventMasterFrame, self).__init__(parent)
if(frame_str == -1):
# TODO: Should fail here
return
self.frame = frame_str
if state_dict: self.state = state_dict
self.xml_path_list = ["FrameCollection id='0'",
"Frame id='{0!s}'".format(self.frame)]
return
def getFrameType(self):
"""(int) Gets Frame Type (E2 or S3)"""
return self._simple_get("FrameType")
def setEDIDAsString(self, slot_int, connector_int, edid_string):
"""Sets EDID for a given slot & connector by EDID String
Keyword arguments:
(int)slot_int -- Slot number to set EDID on.
On an S3, this will be 4-6
On an E2, this will be 3-10
(int)connector_int -- Connector number to set EDID on.
(str)edid_string -- Formatted EDID string
for example: "1920x1080p@50"
Returns a unique request ID as string
"""
if slot_int is None or type(slot_int) != int:
return None
if connector_int is None or type(connector_int) != int:
return None
if edid_string is None or edid_string not in self.STR_MAP:
return None
slot_int = int(slot_int)
connector_int = int(connector_int)
card_params = self.S3_DEFAULT_INPUT_MAP[slot_int][connector_int]
        edid_params = self.STR_MAP[edid_string]
if(card_params["frame_connector_type"] == "SDI"):
return None
edid_xml = "<VideoFormat id='0'><VFEnum>{11!s}</VFEnum></VideoFormat>"
edid_type_node_in = ("<{0!s}In id='0'>"
"").format(card_params["frame_connector_type"])
edid_type_node_out = ("</{0!s}In>"
"").format(card_params["frame_connector_type"])
xml_string = ("<Slot id='{0!s}'><Card id='0'><CardIn id='0'>"
"<In id='{1!s}'>{2!s}<EDIDIn id='0'>{3!s}</EDIDIn>"
"{4!s}</In></CardIn></Card></Slot>"
"").format(card_params["slot_index"],
card_params["conn_index"],
edid_type_node_in,
edid_xml,
edid_type_node_out)
return self.send(xml_string)
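# Hedged usage sketch (added; not part of the original library): push a
# 1080p50 EDID to an S3 slot-4 HDMI connector (connector index 2 in
# S3_DEFAULT_INPUT_MAP). ``frame`` is assumed to be an EventMasterFrame
# obtained from EventMasterSwitcher.getFrames().
def _example_set_edid(frame):
    """Illustrative helper only."""
    return frame.setEDIDAsString(4, 2, "1920x1080p@50")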
class EventMasterCommsXML(EventMasterBase):
readlock = 0
def __init__(self, parent, ip, port):
"""Initialise Comms over XML"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.readlock = 0
self.ip = ip
self.port = port
self.parent = parent
return
def __gen_guid(self):
"""Generate a UUID to use as unique request identifier"""
return str(uuid4())
def close(self):
"""Close Comms over XML"""
self.socket.close()
return 1
def connect(self):
"""Connect over XML"""
address = (self.ip, self.port)
try:
self.socket.connect(address)
except:
return 0
return 1
def write(self, xml_string, reset=None):
"""Write an XML string
Keyword arguments:
xml_string -- XML-formatted string to send,
inside System namespace
Returns a unique request ID as string
"""
guid_string = self.__gen_guid()
if reset == "yes":
xml_ext_string = ("<System id='0' reset='yes' GUID='{0!s}'>"
"{1!s}</System>\r\n").format(guid_string,
xml_string)
else:
xml_ext_string = ("<System id='0' GUID='{0!s}'>"
"{1!s}</System>\r\n").format(guid_string,
xml_string)
self.socket.sendall(xml_ext_string.encode("UTF-8"))
return guid_string
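    # Added note: write() wraps every request in the switcher's System envelope,
    # so write("<PresetMgr><Cut></Cut></PresetMgr>") sends roughly
    #   <System id='0' GUID='<uuid4>'><PresetMgr><Cut></Cut></PresetMgr></System>\r\n
    # and returns the GUID, which can later be checked against
    # EventMasterSwitcher.getQueryStatus().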
def read_next(self):
"""(ElementTree) Read Next Message over XML"""
if(self.readlock == 1):
return 0
else:
self.readlock = 1
f = self.socket.makefile("rb")
c = ""
newc = ""
while True:
newc = f.read(1).decode("utf-8")
c = "{0!s}{1!s}".format(c, newc)
if c[-9:] == "</System>":
break
self.readlock = 0
return c
class EventMasterCommsDiscovery(EventMasterBase):
readlock = 0
def __init__(self, parent):
"""Initialise Barco Discovery Protocol"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.readlock = 0
self.parent = parent
self.discovered = {}
self._discover()
return
def _discover(self):
"""Send Discovery Message to Barco EM Compatible Devices"""
self.socket.sendto("?\0".encode('UTF-8'), ('255.255.255.255', 40961))
Timer(0.1, self._recieve_discovery).start()
return 1
def _recieve_discovery(self):
"""Loop to recieve and process incoming discovery messages"""
while True:
data, addr = self.socket.recvfrom(1024)
if(data):
data = data.decode('UTF-8')
discovered_list = {}
rtn_list = {}
x = data.split("\0")
for item in x:
if "=" in item:
key, value = item.split("=")
discovered_list[key] = value
if("hostname" in discovered_list):
rtn_list['Name'] = discovered_list["hostname"].split(":")[0]
rtn_list['Port'] = discovered_list["hostname"].split(":")[1]
rtn_list['SystemName'] = discovered_list["hostname"].split(":")[2]
rtn_list['MACAddress'] = discovered_list["hostname"].split(":")[5]
rtn_list['OSVersion'] = discovered_list["hostname"].split(":")[6]
if("type" in discovered_list):
rtn_list['Type'] = discovered_list['type']
number_of_items = 0
rtn_list['IP'] = addr[0]
for key, value in self.discovered.items():
number_of_items += 1
if value['IP'] == rtn_list['IP']:
self.discovered[key] = rtn_list
return 1
self.discovered[number_of_items] = rtn_list
return 1
def getDiscovery(self):
"""Get a dict of currently discovered Barco EM Compatible Devices"""
return self.discovered
class EventMasterConfigParser(EventMasterBase):
def __init__(self, parent):
self.parent = parent
self.query_status = {}
return
def __quick_parse_xml(self, xml_et, key_str, type_str):
for elem in xml_et.iterfind(key_str):
if(type_str == "int"):
return int(elem.text)
if(type_str == "str"):
return str(elem.text)
def parse(self, xml):
""" Parse XML Config """
data_et = ET.ElementTree(ET.fromstring(xml))
data = data_et.getroot()
parsers = { "FrameCollection": self.__update_framecollection,
"SrcMgr/InputCfgCol": self.__update_inputcfgcol,
"DestMgr/ScreenDestCol": self.__update_screendestcol,
"OutCfgMgr": self.__update_outcfgmgr,
"PresetMgr": self.__update_presetmgr }
for key,val in parsers.items():
if data.iterfind(key):
for iter_et in data.iterfind(key):
val(iter_et)
for resp in data.iterfind("Resp"):
if resp.text:
if int(resp.text) == 0:
if "GUID" in data.attrib:
self.query_status[data.attrib["GUID"]] = 0
for guiid in data.iterfind("GuiId"):
if guiid.text:
self.query_status[guiid.text] = 1
def getQueryStatus(self, guid_string):
if guid_string in self.query_status:
if self.query_status[guid_string] == 0:
return 0
else:
return 1
else:
return None
def __update_inputcfgcol(self, inputcfgcol_et):
""" Add Node """
for add_et in inputcfgcol_et.iterfind("Add"):
if add_et:
                for inputcfg_et in add_et.iterfind("InputCfg"):
self.__update_inputcfg(inputcfg_et)
""" Remove Node """
for remove_et in inputcfgcol_et.iterfind("Remove"):
if remove_et:
                for inputcfg_et in remove_et.iterfind("InputCfg"):
input_id = int(inputcfg_et.attrib["id"])
self.parent.inputs.pop(input_id, None)
""" Update """
for inputcfg_et in inputcfgcol_et.iterfind("InputCfg"):
self.__update_inputcfg(inputcfg_et)
def __update_inputcfg(self, inputcfg_et):
state_dict = {}
state_dict["ConnMap"] = []
if inputcfg_et is None:
return None
""" Input ID Attribute necessary """
if "id" not in inputcfg_et.attrib:
return None
input_int = int(inputcfg_et.attrib["id"])
""" Parse Connector Mapping """
for connmap_et in inputcfg_et.iterfind("Config/ConnMap"):
for inuse_et in connmap_et.iterfind("InUse"):
if(inuse_et.text == "1"):
for slotindex_et in connmap_et.iterfind("SlotIndex"):
slot_id = int(slotindex_et.text)
for connectorindex_et in connmap_et.iterfind("ConnectorIndex"):
connector_id = int(connectorindex_et.text)
state_dict["ConnMap"].append({"slot":slot_id,"connector:":connector_id})
""" If no ConnMap attributes, remove from state dict
(ie. an update with no ConnMap changes) """
if not state_dict["ConnMap"]:
state_dict.pop("ConnMap", None)
""" Parse a number of known items into the state dict"""
quick_parse_dict = { "FrzMode": "int",
"Name": "str",
"InputCfgType": "int",
"AutoAcqMode": "int",
"Type3G": "int" }
for key,val in quick_parse_dict.items():
state_dict[key] = self.__quick_parse_xml( inputcfg_et,
key,
val)
""" If input already exists, update state only """
if input_int in self.parent.inputs:
            self.parent.inputs[input_int].update_state(state_dict)
else:
input_obj = EventMasterInput(self.parent,
input_int=int(input_int),
state_dict=state_dict)
self.parent.inputs[input_int] = input_obj
return 1
def __update_framecollection(self, framecollection_et):
for frame_et in framecollection_et.iterfind("Frame"):
self.__update_frame(frame_et)
def __update_frame(self, frame_et):
state_dict = {}
if frame_et is None:
return None
""" Frame ID Attribute necessary """
if "id" not in frame_et.attrib:
return None
frame_str = str(frame_et.attrib["id"])
""" Parse a number of known items into the state dict"""
quick_parse_dict = { "FrameType": "int" }
for key,val in quick_parse_dict.items():
state_dict[key] = self.__quick_parse_xml( frame_et, key, val)
""" If frame already exists, update state only """
if frame_str in self.parent.frames:
self.parent.frames[frame_str].update_state(state_dict)
else:
frame_obj = EventMasterFrame(self.parent,
frame_str=frame_str,
state_dict=state_dict)
self.parent.frames[frame_str] = frame_obj
return 1
def __update_screendestcol(self, screendestcol_et):
""" Add Node """
for add_et in screendestcol_et.iterfind("Add"):
if add_et:
                for screendest_et in add_et.iterfind("ScreenDest"):
self.__update_screendest(screendest_et)
""" Remove Node """
for remove_et in screendestcol_et.iterfind("Remove"):
if remove_et:
                for screendest_et in remove_et.iterfind("ScreenDest"):
destination_id = int(screendest_et.attrib["id"])
self.parent.screendests.pop(destination_id, None)
""" Update """
for screendest_et in screendestcol_et.iterfind("ScreenDest"):
self.__update_screendest(screendest_et)
def __update_screendest(self, screendest_et):
state_dict = {}
if screendest_et is None:
return None
""" Destination ID Attribute necessary """
if "id" not in screendest_et.attrib:
return None
destination_int = int(screendest_et.attrib["id"])
""" Parse a number of known items into the state dict"""
quick_parse_dict = { "Name": "str",
"HSize": "int",
"VSize": "int" }
for key,val in quick_parse_dict.items():
state_dict[key] = self.__quick_parse_xml( screendest_et, key, val)
""" If frame already exists, update state only """
if destination_int in self.parent.screendests:
self.parent.screendests[destination_int].update_state(state_dict)
else:
screendest_obj = EventMasterScreenDestination(self.parent,
destination_int=destination_int,
state_dict=state_dict)
self.parent.screendests[destination_int] = screendest_obj
for layer_et in screendest_et.iterfind("LayerCollection/Layer"):
self.__update_layer(layer_et, destination_int)
return 1
def __update_layer(self, layer_et, destination_int):
state_dict = {}
if layer_et is None:
return None
if destination_int is None or destination_int not in self.parent.screendests:
return None
""" Layer ID Attribute necessary """
if "id" not in layer_et.attrib:
return None
layer_int = int(layer_et.attrib["id"])
xml_owin_prefix = "LayerCfg/LayerState[0]/WinAdjust/OWIN/"
owin_nodes = { xml_owin_prefix + "VPos": "OWIN_VPos",
xml_owin_prefix + "HPos": "OWIN_HPos",
xml_owin_prefix + "VSize": "OWIN_VSize",
xml_owin_prefix + "HSize": "OWIN_HSize" }
for key, val in owin_nodes.items():
for elem in layer_et.iterfind(key):
state_dict[val] = elem.text
""" Check for SrcType """
for elem in layer_et.iterfind("LayerCfg/Source[0]/SrcType"):
src_type = int(elem.text)
state_dict["SrcType"] = src_type
""" SrcType = 0 is Input Source """
for elem in layer_et.iterfind("LayerCfg/Source[0]/InputCfgIndex"):
input_id = int(elem.text)
state_dict["Source"] = input_id
""" Parse a number of known items into the state dict"""
quick_parse_dict = { "Name": "str",
"PvwMode": "int",
"PgmMode": "int",
"FrzMode": "int" }
for key,val in quick_parse_dict.items():
state_dict[key] = self.__quick_parse_xml( layer_et, key, val)
self.parent.screendests[destination_int]._updateLayer(layer_int, state_dict)
return 1
def __update_outcfgmgr(self, outcfgmgr_et):
""" Add Node """
for add_et in outcfgmgr_et.iterfind("Add"):
if add_et:
                for outputcfg_et in add_et.iterfind("OutputCfg"):
self.__update_outputcfg(outputcfg_et)
""" Remove Node """
for remove_et in outcfgmgr_et.iterfind("Remove"):
if remove_et:
                for outputcfg_et in remove_et.iterfind("OutputCfg"):
output_id = int(outputcfg_et.attrib["id"])
self.parent.outputs.pop(output_id, None)
""" Update """
for outputcfg_et in outcfgmgr_et.iterfind("OutputCfg"):
self.__update_outputcfg(outputcfg_et)
def __update_outputcfg(self, outputcfg_et):
state_dict = {}
if outputcfg_et is None:
return None
""" Output ID Attribute necessary """
if "id" not in outputcfg_et.attrib:
return None
output_int = int(outputcfg_et.attrib["id"])
""" Parse a number of known items into the state dict"""
quick_parse_dict = { "Name": "str" }
for key,val in quick_parse_dict.items():
state_dict[key] = self.__quick_parse_xml( outputcfg_et, key, val)
xml_owin_prefix = "OutputAOI/TestPattern"
owin_nodes = { xml_owin_prefix + "TestPatternMode": "TestPattern_Mode",
xml_owin_prefix + "DiagMotion": "TestPattern_DiagMotion",
xml_owin_prefix + "RasterBox": "TestPattern_RasterBox" }
for key, val in owin_nodes.items():
for elem in outputcfg_et.iterfind(key):
state_dict[val] = elem.text
""" If output already exists, update state only """
if output_int in self.parent.outputs:
self.parent.outputs[output_int].update_state(state_dict)
else:
output_obj = EventMasterOutput(self.parent,
output_int=output_int,
state_dict=state_dict)
self.parent.outputs[output_int] = output_obj
return 1
def __update_presetmgr(self, presetmgr_et):
""" Add Node """
for add_et in presetmgr_et.iterfind("Add"):
if add_et:
                for preset_et in add_et.iterfind("Preset"):
self.__update_preset(preset_et)
""" Remove Node """
for remove_et in presetmgr_et.iterfind("Remove"):
if remove_et:
                for preset_et in remove_et.iterfind("Preset"):
preset_id = int(preset_et.attrib["id"])
self.parent.presets.pop(preset_id, None)
""" Update """
for preset_et in presetmgr_et.iterfind("Preset"):
self.__update_preset(preset_et)
def __update_preset(self, preset_et):
state_dict = {}
if preset_et is None:
return None
""" Preset ID Attribute necessary """
if "id" not in preset_et.attrib:
return None
preset_int = int(preset_et.attrib["id"])
""" Parse a number of known items into the state dict"""
quick_parse_dict = { "Name": "str" }
for key,val in quick_parse_dict.items():
state_dict[key] = self.__quick_parse_xml( preset_et, key, val)
""" If preset already exists, update state only """
if preset_int in self.parent.presets:
self.parent.presets[preset_int].update_state(state_dict)
else:
preset_obj = EventMasterPreset(self.parent,
preset_int=preset_int,
state_dict=state_dict)
self.parent.presets[preset_int] = preset_obj
return 1
class EventMasterSwitcher(EventMasterBase):
QUERY_HANDSHAKE = ("<XMLType>3</XMLType><Query>3</Query>"
"<Recursive>1</Recursive>")
# TODO: Get this from the device config
VALID_REFRESH_RATES = {"23.98", "24", "25", "29.97", "30", "47.95", "48",
"50", "59.94", "60"}
S3_DEFAULT_REV_INPUT_MAP = {3: {0: 1, 1: 2, 2: 3, 3: 4},
4: {0: 5, 1: 6, 2: 7, 3: 8},
5: {0: 9, 1: 10, 2: 11, 3: 12}}
def __init__(self):
"""Initialise new Switcher instance"""
self.sys = {"port": 9876, "ip": u"127.0.0.1"}
self.verbose = 1
self.connected = self.ready = 0
self.inputs = {}
self.screendests = {}
self.frames = {}
self.outputs = {}
self.presets = {}
self.recieved_guids = {}
self.CommsDiscovery = EventMasterCommsDiscovery(self)
self.ConfigParser = EventMasterConfigParser(self)
self.updateThread = Thread(target=self.__update)
self.updateThread.daemon = True
self.updateThread.start()
return None
def __do_handshake(self):
"""Switcher do_handshake()"""
return self.send(self.QUERY_HANDSHAKE, reset="yes")
def start(self):
"""Switcher start()"""
self.ready = 2
if(self.connected):
self.ready = 0
return None
if "ip" not in self.sys:
self.ready = 0
return None
self.CommsXML = EventMasterCommsXML(self, self.sys["ip"], 9876)
if not self.CommsXML.connect():
self.ready = 0
return None
self.__do_handshake()
self.connected = 1
return True
def stop(self):
"""Switcher stop()"""
if not self.CommsXML.close():
return None
self.connected = 0
self.ready = 0
return True
def loadFromXML(self, xml_string):
if xml_string is None:
return None
else:
self.ConfigParser.parse(xml_string)
def send(self, data):
return self.CommsXML.write(data)
def __update(self):
"""Switcher __update() loop"""
while(1):
if(self.connected == 1):
                data = None
                try:
                    data = self.CommsXML.read_next()
                    self.ConfigParser.parse(data)
                    self.ready = 1
                except Exception:
                    pass
if data is None:
pass
else:
sleep(1)
return None
def getDiscovery(self):
return self.CommsDiscovery.getDiscovery()
def cmdSend(self, data):
return self.CommsXML.write(data)
def getQueryStatus(self, guid_string):
return self.ConfigParser.getQueryStatus(guid_string)
def setVerbose(self, verbose_bool):
"""Switcher set_verbose()"""
if not verbose_bool and type(verbose_bool) != bool:
return None
self.verbose = int(verbose_bool)
return 1
def getVerbose(self):
"""Switcher get_verbose()"""
if not hasattr(self, "verbose"):
return -1
return int(self.verbose)
def setIP(self, ip_string):
"""Switcher set_CommsXML_IP()"""
if not ip_string and type(ip_string) != str:
return None
self.sys["ip"] = str(ip_string)
return 1
def getIP(self):
"""Switcher get_CommsXML_IP()"""
        if "ip" not in self.sys:
return -1
return str(self.sys["ip"])
def isReady(self):
"""Switcher is_ready()"""
if not hasattr(self, "ready"):
return None
return int(self.ready)
def getInputs(self):
"""Switcher enum_inputs()"""
if not self.inputs:
return {}
inputs_list = {}
for key, inst in self.inputs.items():
inputs_list[key] = inst
return inputs_list
def getScreenDests(self):
if not self.screendests:
return {}
screendests_list = {}
for key, inst in self.screendests.items():
screendests_list[key] = inst
return screendests_list
def getFrames(self):
if not self.frames:
return {}
frames_list = {}
for key, inst in self.frames.items():
frames_list[key] = inst
return frames_list
def getOutputs(self):
if not self.outputs:
return {}
return self.outputs
def getPresets(self):
if not self.presets:
return {}
return self.presets
def setNativeRate(self, rate_hz):
"""Switcher set_NativeRate"""
if rate_hz not in self.VALID_REFRESH_RATES:
return -1
xml = "<NativeRate>{$0!s}</NativeRate>".format(rate_hz)
return self.send(xml)
def setName(self, name):
"""Switcher set_Name()"""
if not name:
return -1
xml = "<Name>{$0!s}</Name>"
return self.send(xml)
def cmdCut(self):
"""Switcher cmd_Cut()"""
xml = "<PresetMgr><Cut></Cut></PresetMgr>"
return self.send(xml)
def cmdAutoTrans(self):
"""Switcher cmd_AutoTrans()"""
xml = "<PresetMgr><AutoTrans></AutoTrans></PresetMgr>"
return self.send(xml)
def cmdSavePreset(self, preset_id):
if preset_id is None:
return None
xml = "<PresetMgr><SavePreset>{0!s}</SavePreset></PresetMgr>".format(preset_id)
return self.send(xml)
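# Hedged usage sketch (added; not part of the original library): connect to a
# switcher, wait for the first configuration parse, list the inputs and take a
# Cut. The IP address is a placeholder.
if __name__ == "__main__":
    switcher = EventMasterSwitcher()
    switcher.setVerbose(False)
    switcher.setIP("192.168.0.10")        # placeholder address
    if switcher.start():
        while switcher.isReady() != 1:    # 1 => first config has been parsed
            sleep(0.5)
        for key, inp in switcher.getInputs().items():
            print("Input {0}: {1}".format(key, inp.getName()))
        switcher.cmdCut()
        switcher.stop()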
|
|
#!/usr/bin/env python
# revised 4/10/15
# writes output in FITS format
#
# revised 7/29/14
# Generates a catalog by combining existing mock catalogs
# for each type into a binary format accepted by "assign".
# based on code by Martin White
# modified to achieve densities used in CDR and to include
# standard stars and sky fibers
# goal densities:
# QSOI (z<2.15) = 120/sq deg
# QSOII (z>2.15) = 50/sq deg
# bad QSO = 90/sq deg
# LRG 300/sq deg good, 50/sq deg bad
# ELG 2400/sq deg
# 03/05/15 need to add standard stars and sky fibers
# can take as random, with correct density
# Schlegel suggests 20 SS per petal, so 20 x 5.3 (multiple coverage) / 0.75 sq deg = 140/sq deg
# for sky fibers Schlegel suggests 200, i.e. 1400/sq deg
#
# densities in Martin's mocks v2 on 06/25/14
# ELGs 2607/sq deg > 2611
# LRGs 301/sq deg > 323
# QSOI 110/sq deg > 118
# QSO II 63.8/sq deg >
# random 632 /sq deg
import numpy as N
import fitsio as F
import sys
from astropy.io import fits
import os.path
from desitarget import desi_mask, bgs_mask, mws_mask
import numpy.lib.recfunctions as rfn
__author__ = "Martin White modified by Robert Cahn 6/30/14"
__version__ = "1.0"
__email__ = "[email protected]/[email protected]"
dbase = "/project/projectdirs/desi/mocks/preliminary/"
footprint_area=20.*45.*N.sin(45.*N.pi/180.)/(45.*N.pi/180.)
print("footprint area %f"%(footprint_area))
total_area=0.
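# Added note: footprint_area is the area, in square degrees, of the RA 170-190,
# Dec 0-45 box used below to measure mock densities; the expression is
# 20 deg * sin(45 deg) * (180/pi) with the factor of 45 cancelled against the
# radian conversion, roughly 810 sq deg.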
def read_objects(fn):
"""
read_objects(fn):
Reads the RA, DEC and Z of the objects from a FITS file.
"""
data = F.read(fn,columns=['RA','DEC','Z'])
ra = data[ 'RA'].astype('f4')
dec = data['DEC'].astype('f4')
zz = data[ 'Z'].astype('f4')
"""
use small region to determine densities of mocks
"""
smalldata=data[(data['RA']>170.) & (data['RA']<190.) & (data['DEC']>0.) & (data['DEC']<45.)]
galaxies=len(smalldata)
density=galaxies/footprint_area
return( (ra,dec,zz,density) )
#
def read_objects_noZ(fn):
"""
read_objects(fn):
Reads the RA, DEC of the objects from a FITS file.
"""
data = F.read(fn,columns=['RA','DEC'])
ra = data[ 'RA'].astype('f4')
dec = data['DEC'].astype('f4')
    zz = N.zeros(len(data), dtype='f4')
smalldata=data[(data['RA']>170.) & (data['RA']<190.) & (data['DEC']>0.) & (data['DEC']<45.)]
galaxies=len(smalldata)
density=galaxies/footprint_area
return( (ra,dec,zz),density )
#
def reduce(ra,dec,zz,frac):
xra=N.array(ra)
xdec=N.array(dec)
xzz=N.array(zz)
keepornot=N.random.uniform(0.,1.,len(ra))
limit=N.zeros(len(xra)) +frac
#create boolean array of which to keep
#find new length
kept=keepornot<limit
yra=xra[kept]
ydec=xdec[kept]
yzz=xzz[kept]
return( (yra,ydec,yzz))
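# Added note: reduce() keeps each object with probability ``frac``, so the
# calls below thin a mock down to its goal density, e.g. roughly
# reduce(ra, dec, zz, 2400./2607.) to bring the ~2607/sq deg ELG mock to the
# 2400/sq deg goal quoted in the header comment.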
def write_mtl(ra, dc, no, pp, lastpass, zz, types, icat=0, basename="targets"):
fitsname="{}_{}_mtl_0.fits".format(basename,icat)
print("writing to file {}".format(fitsname))
if(os.path.exists(fitsname)):
os.remove(fitsname)
# structure for output data
type_table = [
('TARGETID', '>i4'),
('BRICKNAME', '|S8'),
('RA', '>f4'),
('DEC', '>f4'),
('NUMOBS', '>i4'),
('PRIORITY', '>i4'),
('LASTPASS', '>i4')
]
data = N.ndarray(shape=ra.size, dtype=type_table)
data['TARGETID'] = N.arange(ra.size)
data['RA'] = ra
data['DEC'] = dc
data['BRICKNAME'][:] = "00000000"
data['NUMOBS'] = no
data['PRIORITY'] = pp
data['LASTPASS'] = lastpass
desi_target = N.int_(types)
bgs_target = N.zeros(ra.size, dtype=N.int64)
mws_target = N.zeros(ra.size, dtype=N.int64)
data = rfn.append_fields(data,
['DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET'],
[desi_target, bgs_target, mws_target], usemask=False)
#- Create header to include versions, etc.
hdr = F.FITSHDR()
hdr['DEPNAM00'] = 'makecatalog'
F.write(fitsname, data, extname='MTL', header=hdr, clobber=True)
print('wrote {} items to target file'.format(len(ra)))
#structure for truth file
type_table = [
('TARGETID', '>i8'),
('BRICKNAME', '|S20'),
('Z', '>f8'),
('TYPE', '>i8')
]
data = N.ndarray(shape=ra.size, dtype=type_table)
data['TARGETID'] = N.arange(ra.size)
data['BRICKNAME'][:] = "00000000"
data['Z'] = zz
data['TYPE'] = types
hdr = F.FITSHDR()
hdr['DEPNAM00'] = 'makecatalog-truth'
F.write('truth_'+fitsname, data, extname='MTL', header=hdr, clobber=True)
print('wrote {} items to truth target file'.format(len(ra)))
return
def write_catalog(icat=0):
"""
write_catalog(icat=0):
Writes the catalog information to a file.
This fills in the number of observations required and the
priorities as well as the position information from the mock
catalog files.
"""
goal_qsoI=120.
goal_qsoII=50.
goal_badqso=90.
goal_lrg=300.
goal_badqso=90.
goal_badlrg=50.
goal_elg=2400.
goal_standardstar=140.
goal_skyfiber=1400.
possible_types = ['ELG', 'LRG', 'QSO', 'STDSTAR', 'GAL', 'OTHER']
types = N.empty((0))
# LyA QSOs are priority 1, other QSOs are priority 2.
xra,xdc,xzz,density = read_objects(dbase+"v2_qso_%d.fits"%icat)
low=xzz<2.1
high=xzz>2.1
ra_low=xra[low]
dc_low=xdc[low]
zz_low=xzz[low]
total_area=len(xra)/density
    print('total area in sq deg', total_area)
ra_high=xra[high]
dc_high=xdc[high]
zz_high=xzz[high]
mockdensity_qsoI=density*len(ra_low)/(len(ra_high)+len(ra_low))
mockdensity_qsoII=density*len(ra_high)/(len(ra_high)+len(ra_low))
print ('mock density qsoI %8.1f mock density qsoII %8.1f\n'%(mockdensity_qsoI,mockdensity_qsoII))
# need to increase number of qsois probably'
frac_qsoI=goal_qsoI/mockdensity_qsoI
qsoI_needed=frac_qsoI-1.
    print('need %8.3f more qsoI \n' % qsoI_needed)
    print('first qsoIs', len(ra_low))
id = N.zeros(ra_low.size,dtype='i4') + 2
pp = N.zeros(ra_low.size,dtype='f4') + desi_mask['QSO'].priorities['UNOBS']
no = N.zeros(ra_low.size,dtype='i4') + 1
frac_qsoII=goal_qsoII/mockdensity_qsoII
print ('goals qsoI %8.1f qsoII %8.1f\n'%(goal_qsoI,goal_qsoII))
nra,ndc,nzz = reduce(ra_high,dc_high,zz_high,frac_qsoII)
#combine all qsoIs with qsoIIs
nid = N.zeros(nra.size,dtype='i4') + 1
npp = N.zeros(nra.size,dtype='f4') + desi_mask['QSO'].priorities['UNOBS']
nno = N.zeros(nra.size,dtype='i4') + 5
ra = N.append(ra_low,nra)
dc = N.append(dc_low,ndc)
zz = N.append(zz_low,nzz)
id = N.append(id,nid)
pp = N.append(pp,npp)
no = N.append(no,nno)
    print('qsoIIs', len(nra))
tmp_type = N.ones(ra.size, dtype='i8') * desi_mask.QSO
types = N.append(types, tmp_type)
    print('total ra, types', N.size(ra), N.size(types))
# get extra qsos for qsoI
icatp=icat+1
xra,xdc,xzz,density = read_objects(dbase+"v2_qso_%d.fits"%icatp)
low=xzz<2.1
nra_low=xra[low]
ndc_low=xdc[low]
nzz_low=xzz[low]
nra,ndc,nzz=reduce(nra_low,ndc_low,nzz_low,qsoI_needed)
nid = N.zeros(nra.size,dtype='i4')+2
npp = N.zeros(nra.size,dtype='f4') + desi_mask['QSO'].priorities['UNOBS']
nno = N.zeros(nra.size,dtype='i4')+1
ra = N.append(ra,nra)
dc = N.append(dc,ndc)
zz = N.append(zz,nzz)
id = N.append(id,nid)
pp = N.append(pp,npp)
no = N.append(no,nno)
tmp_type = N.ones(nra.size, dtype='i8') * desi_mask.QSO
types = N.append(types, tmp_type)
    print('total ra, types', N.size(ra), N.size(types))
    print('total ra, types', len(ra), N.size(types))
    print(' added qsoIs', len(nra))
# LRGs are priority 3.
# density is just right
nra,ndc,nzz,density=read_objects(dbase+"v2_lrg_%d.fits"%icat)
print 'lrg mock density', density
nid = N.zeros(nra.size,dtype='i4') + 3
npp = N.zeros(nra.size,dtype='f4') + desi_mask['LRG'].priorities['UNOBS']
nno = N.zeros(nra.size,dtype='i4') + 2 #only use 2 exposures for LRG
ra = N.append(ra,nra)
dc = N.append(dc,ndc)
zz = N.append(zz,nzz)
id = N.append(id,nid)
pp = N.append(pp,npp)
no = N.append(no,nno)
tmp_type = N.ones(nra.size, dtype='i8') * desi_mask.LRG
types = N.append(types, tmp_type)
print 'lrgs added',len(nra)
print 'lrg density',len(nra)/total_area
print 'total ra, types', len(ra), N.size(types)
# ELGs are priority 4.
mra,mdc,mzz,density=read_objects(dbase+"v2_elg_%d.fits"%icat)
print 'mock density elgs', density
nra,ndc,nzz=reduce(mra,mdc,mzz,(goal_elg/density))
nid = N.zeros(nra.size,dtype='i4') + 4
npp = N.zeros(nra.size,dtype='f4') + desi_mask['ELG'].priorities['UNOBS']
nno = N.zeros(nra.size,dtype='i4') + 1
ra = N.append(ra,nra)
dc = N.append(dc,ndc)
zz = N.append(zz,nzz)
id = N.append(id,nid)
pp = N.append(pp,npp)
no = N.append(no,nno)
tmp_type = N.ones(nra.size, dtype='i8') * desi_mask.ELG
types = N.append(types, tmp_type)
print 'elgs added',len(nra)
print 'elg density',len(nra)/total_area
print 'total ra, types', len(ra), len(types)
# and now we have "fake" qsos, placed randomly.
data = F.read(dbase+"v2_randoms_big.fits",columns=['RA','DEC'])
smalldata=data[(data['RA']>170.) & (data['RA']<190.) & (data['DEC']>0.) & (data['DEC']<45.)]
galaxies=len(smalldata)
mockdensity_randoms=galaxies/footprint_area
print 'randoms density', mockdensity_randoms
end_badqso=int(goal_badqso*total_area)
end_badlrg=int(end_badqso+goal_badlrg*total_area)
end_standardstar=int(end_badlrg+goal_standardstar*total_area)
end_skyfiber=int(end_standardstar+goal_skyfiber*total_area)
print 'end_badqso %d end_badlrg %d\n'%(end_badqso,end_badlrg)
nra = data[ 'RA'][:end_badqso].astype('f4')
ndc = data['DEC'][:end_badqso].astype('f4')
nzz = N.zeros(nra.size,dtype='f4')
nid = N.zeros(nra.size,dtype='i4')+5
npp = N.zeros(nra.size,dtype='f4') + desi_mask['QSO'].priorities['UNOBS']
nno = N.zeros(nra.size,dtype='i4')+1
ra = N.append(ra,nra)
dc = N.append(dc,ndc)
zz = N.append(zz,nzz)
id = N.append(id,nid)
pp = N.append(pp,npp)
no = N.append(no,nno)
density=len(nra)/total_area
tmp_type = N.ones(nra.size, dtype='i8') * desi_mask.QSO
types = N.append(types, tmp_type)
print 'fake qso density',density
print 'fake qsos',len(nra), len(types)
print 'total ra, types', len(ra), len(types)
#now need bad lrgs at 50/sq deg
    nra = data['RA'][end_badqso:end_badlrg].astype('f4')
    ndc = data['DEC'][end_badqso:end_badlrg].astype('f4')
nzz = N.zeros(nra.size,dtype='f4')
nid = N.zeros(nra.size,dtype='i4') + 6
npp = N.zeros(nra.size,dtype='f4') + desi_mask['LRG'].priorities['UNOBS']
nno = N.zeros(nra.size,dtype='i4') + 1
ra = N.append(ra,nra)
dc = N.append(dc,ndc)
zz = N.append(zz,nzz)
id = N.append(id,nid)
pp = N.append(pp,npp)
no = N.append(no,nno)
density=len(nra)/total_area
tmp_type = N.ones(nra.size, dtype='i8') * desi_mask.LRG
types = N.append(types, tmp_type)
print 'fake lrg density',density
print 'fake lrgs added', len(nra), len(types)
print 'total ra, types', len(ra), len(types)
lastpass = N.zeros(ra.size, dtype='int')
#now need standard stars at 140/sq deg
    star_ra = data['RA'][end_badlrg:end_standardstar].astype('f4')
    star_dc = data['DEC'][end_badlrg:end_standardstar].astype('f4')
star_zz = N.zeros(star_ra.size,dtype='f4')
star_id = N.zeros(star_ra.size,dtype='i4')+7
star_pp = N.zeros(star_ra.size,dtype='f4') + 100
star_no = N.zeros(star_ra.size,dtype='i4') + 10
star_type = N.ones(star_ra.size, dtype='i8') * desi_mask.STD_FSTAR
star_lastpass = N.zeros(star_ra.size, dtype='int')
density=len(star_ra)/total_area
print 'standardstar density',density
#now need sky fibers at 1400/sq deg
    sky_ra = data['RA'][end_standardstar:end_skyfiber].astype('f4')
    sky_dc = data['DEC'][end_standardstar:end_skyfiber].astype('f4')
sky_zz = N.zeros(sky_ra.size, dtype='f4')
sky_id = N.zeros(sky_ra.size, dtype='i4')+8
sky_pp = N.zeros(sky_ra.size, dtype='f4') + 200
sky_no = N.zeros(sky_ra.size, dtype='i4')+10
sky_type = N.ones(sky_ra.size, dtype='i8') * desi_mask.SKY
sky_lastpass = N.zeros(sky_ra.size, dtype='int')
density=len(sky_ra)/total_area
print 'sky fiber density',density
#
print "Writing information for ",ra.size," objects."
fout = open("objects_ss_sf%d.rdzipn"%icat,"w")
Nt = N.array([ra.size],dtype='i4')
Nt.tofile(fout)
ra.tofile(fout)
dc.tofile(fout)
zz.tofile(fout)
id.tofile(fout)
pp.tofile(fout)
no.tofile(fout)
fout.close()
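    # Read-back sketch (not part of the original script): the .rdzipn file
    # written above is a flat binary stream -- one int32 count followed by the
    # ra, dc, zz, id, pp, no arrays in that order. The float dtypes depend on
    # what read_objects() returns, so they are read back here via the
    # in-memory array dtypes rather than hard-coded ones:
    #
    #     fin = open("objects_ss_sf%d.rdzipn" % icat, "rb")
    #     nobj = N.fromfile(fin, dtype='i4', count=1)[0]
    #     ra_back = N.fromfile(fin, dtype=ra.dtype, count=nobj)
    #     dc_back = N.fromfile(fin, dtype=dc.dtype, count=nobj)
    #     # ...and similarly for zz, id, pp and no.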
#
print 'total ra, types', len(ra), len(types)
write_mtl(ra, dc, no, pp, lastpass, zz, types, basename="targets", icat=icat)
write_mtl(star_ra, star_dc, star_no, star_pp, star_lastpass, star_zz, star_type, basename="stdstar", icat=icat)
write_mtl(sky_ra, sky_dc, sky_no, sky_pp, sky_lastpass, sky_zz, sky_type, basename="sky", icat=icat)
if __name__=="__main__":
write_catalog()
|
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import math
from . import mesh
from six.moves import map
from six.moves import range
from six.moves import zip
class VoroMesh(mesh.Mesh):
""" The voro mesh loads data from
voro++ voronoi generator by Chris H. Rycroft.
The class expects a *.vol file formatted
using the following command on voro++:
voro++ -c "%q$%v$%C$%P$%t$%f$%l" x_min x_max y_min y_max z_min z_max [points_file]
"""
def build_mesh(self, voro_file_name, K):
""" Takes permeability tensor function K and
voronoi file output from voro++ and builds mesh.
"""
done_cells = []
neighbor_to_face_hash = {}
voro_file = open(voro_file_name)
self.set_boundary_markers([0, 1, 2, 3, 4, 5],
['BottomX', 'TopX',
'BottomY', 'TopY',
"BottomZ,", "TopZ",])
use_shifted_points = True
if use_shifted_points:
self.use_face_shifted_centroid()
self.use_cell_shifted_centroid()
        ## We need two passes: the first pass just counts the cells.
number_of_cells = 0
for line in voro_file:
number_of_cells += 1
self.add_cell([],[])
voro_file.seek(0)
shifted_face_out = open("shifted_out", 'w')
ortho_line_out = open("ortho_line", 'w')
for line in voro_file:
full_input = line.split("$")
current_cell = int(full_input[0])
voronoi_point = full_input[1]
voronoi_point = np.array([float(point) for point in voronoi_point.split()])
volume = full_input[2]
volume = float(volume)
centroid = full_input[3]
centroid = np.array([float(point) for point in centroid.split()])
vertices = full_input[4]
vertices = vertices.replace(")", "")
vertices = vertices.replace("(", "")
vertices = vertices.split()
vertices = [np.array([float(v) for v in vertex.split(",")])
for vertex in vertices]
faces = full_input[5]
faces = faces.replace(")", "")
faces = faces.replace("(", "")
faces = faces.split()
faces = [[int(f) for f in face.split(",")]
for face in faces]
face_areas = full_input[6]
face_areas = [float(a) for a in face_areas.split()]
face_normals = full_input[7]
face_normals = face_normals.replace(")", "")
face_normals = face_normals.replace("(", "")
face_normals = [np.array([float(n) for n in normal.split(",")])
for normal in face_normals.split()]
for face in faces:
face.reverse()
neighbors = full_input[8]
neighbors = neighbors.split()
neighbors = [int(n) for n in neighbors]
local_to_global_point_index = [self.add_point(vertex) for vertex in vertices]
current_cell_faces = []
current_cell_orientations = []
for (local_face_index, global_cell_index) in enumerate(neighbors):
if global_cell_index >= 0:
if global_cell_index not in done_cells:
## Maps the neighbor cell to the face that was just
## added. This is to avoid doubling the number
## of faces.
## We first check that there are no points that are too close
## to each other.
new_face_points = [local_to_global_point_index[vertex_index]\
for vertex_index in faces[local_face_index]]
points_to_add = []
for point in new_face_points:
too_close = False
for existing_point in points_to_add:
if np.linalg.norm(self.get_point(existing_point)-
self.get_point(point))<1.e-8:
too_close = True
if not too_close:
points_to_add.append(point)
                        ## Exclude degenerate faces with fewer than three points.
if len(points_to_add)>2:
new_face_index = self.add_face(points_to_add)
self.set_face_normal(new_face_index, face_normals[local_face_index])
self.set_face_area(new_face_index,face_areas[local_face_index])
self.set_face_real_centroid(new_face_index,
self.find_face_centroid(new_face_index)[1])
neighbor_to_face_hash[(current_cell, global_cell_index)] = new_face_index
current_cell_faces.append(new_face_index)
current_cell_orientations.append(1)
else:
pass
else:
if (global_cell_index, current_cell) in neighbor_to_face_hash:
current_cell_faces.append(neighbor_to_face_hash[(global_cell_index, current_cell)])
current_cell_orientations.append(-1)
if use_shifted_points:
self.set_face_shifted_centroid(neighbor_to_face_hash[(global_cell_index, current_cell)],
(voronoi_point+
self.get_cell_shifted_centroid(global_cell_index))*.5)
shifted_point = (voronoi_point+\
self.get_cell_shifted_centroid(global_cell_index))*.5
else:
pass
## Add boundary face
else:
                    ## We first check that there are no points that are too close
## to each other.
new_face_points = [local_to_global_point_index[vertex_index]\
for vertex_index in faces[local_face_index]]
points_to_add = []
for point in new_face_points:
too_close = False
for existing_point in points_to_add:
if np.linalg.norm(self.get_point(existing_point)-
self.get_point(point))<1.e-12:
too_close = True
if not too_close:
points_to_add.append(point)
if len(points_to_add)>2:
new_face_index = self.add_face(points_to_add)
current_cell_faces.append(new_face_index)
current_cell_orientations.append(1)
self.set_face_normal(new_face_index, face_normals[local_face_index])
self.set_face_area(new_face_index, face_areas[local_face_index])
self.set_face_real_centroid(new_face_index, self.find_face_centroid(new_face_index)[1])
self.add_boundary_face(abs(global_cell_index)-1, new_face_index, 1)
if use_shifted_points:
self.set_face_shifted_centroid(new_face_index, self.find_face_centroid(new_face_index)[1])
self.set_cell_faces(current_cell, current_cell_faces)
self.set_cell_domain(current_cell, 0)
self.set_cell_orientation(current_cell, current_cell_orientations)
self.set_cell_volume(current_cell, volume)
self.set_cell_real_centroid(current_cell, centroid)
self.set_cell_shifted_centroid(current_cell, voronoi_point)
done_cells.append(current_cell)
for cell_index in range(self.get_number_of_cells()):
[cx, cy, cz] = self.get_cell_real_centroid(cell_index)
k_e = K(np.array([cx, cy, cz]))
self.set_cell_k(cell_index, k_e)
def output_vector_field_gnuplot(self, filename, vectorMagnitude):
""" For plotting vectors in a 2D plane using gnuplot.
The plane is assumed to be x-y. Plot using command:
plot "filename.dat" using 1:2:3:4 with vectors.
"""
output = open(filename + ".dat", "w")
for face in range(self.get_number_of_faces()):
if not self.is_boundary_face(face, [0, 1, 2, 3, 4, 5]):
center = self.get_face_real_centroid(face)
normal = self.get_face_normal(face)
print(center[0], center[1], end=' ', file=output)
print(normal[0] * vectorMagnitude[face], end=' ', file=output)
print(normal[1] * vectorMagnitude[face], end=' ', file=output)
print(" ", file=output)
    def output_vtk_mesh_w_data(self, filename, CellValues=None, CellValueLabels=None):
        # Avoid mutable default arguments; behaviour is unchanged.
        if CellValues is None:
            CellValues = []
        if CellValueLabels is None:
            CellValueLabels = []
output = open(filename + ".vtk", 'w')
print("# vtk DataFile Version 2.0", file=output)
print("#unstructured mesh", file=output)
print("ASCII", file=output)
print("DATASET UNSTRUCTURED_GRID", file=output)
print("POINTS", self.get_number_of_points(), "float", file=output)
for p in self.points:
print(p[0], p[1], p[2], file=output)
print("CELLS", self.get_number_of_cells(), 9 * self.get_number_of_cells(), file=output)
for cell in self.vtk_cells:
print(8, " ".join(map(str, cell)), file=output)
print("CELL_TYPES", self.get_number_of_cells(), file=output)
for cell in self.vtk_cells:
print(12, file=output)
if CellValues:
print("CELL_DATA", self.get_number_of_cells(), file=output)
for (entry, entryname) in zip(CellValues, CellValueLabels):
print("SCALARS", entryname, "double 1", file=output)
print("LOOKUP_TABLE default", file=output)
for value in entry:
print(value, file=output)
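# Minimal usage sketch (not part of the original module). Assumptions: the
# parent mesh.Mesh class needs no constructor arguments, the permeability
# function returns a 3x3 numpy array for a cell centroid, and "points.vol" is
# a hypothetical voro++ output produced with the command documented in the
# VoroMesh docstring. Because of the relative import of mesh, this only runs
# inside the package (e.g. with python -m).
if __name__ == "__main__":
    def constant_permeability(centroid):
        # Isotropic unit permeability everywhere (illustrative assumption).
        return np.eye(3)

    voro_mesh = VoroMesh()
    voro_mesh.build_mesh("points.vol", constant_permeability)
    print("Loaded", voro_mesh.get_number_of_cells(), "Voronoi cells")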
|
|
import copy
import functools
import hashlib
import logging
import re
import socket
import time
from keystoneclient import adapter
from oslo_utils import importutils
from oslo_utils import netutils
import requests
from requests import adapters
try:
import json
except ImportError:
import simplejson as json
from six.moves.urllib import parse
from . import exceptions
from .i18n import _
from . import service_catalog
from . import SERVICE_TYPE
class TCPKeepAliveAdapter(adapters.HTTPAdapter):
"""The custom adapter used to set TCP Keep-Alive on all connections."""
def init_poolmanager(self, *args, **kwargs):
        # Compare parsed version tuples; plain string comparison breaks for
        # requests >= 2.10 (e.g. '2.10.0' < '2.4.1' as strings).
        requests_version = tuple(
            int(part) for part in requests.__version__.split('.')[:3]
            if part.isdigit())
        if requests_version >= (2, 4, 1):
kwargs.setdefault('socket_options', [
(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
])
super(TCPKeepAliveAdapter, self).init_poolmanager(*args, **kwargs)
class _ClientConnectionPool(object):
def __init__(self):
self._adapters = {}
def get(self, url):
"""
Store and reuse HTTP adapters per Service URL.
"""
if url not in self._adapters:
self._adapters[url] = TCPKeepAliveAdapter()
return self._adapters[url]
class SessionClient(adapter.LegacyJsonAdapter):
def __init__(self, *args, **kwargs):
self.times = []
super(SessionClient, self).__init__(*args, **kwargs)
def request(self, url, method, **kwargs):
# NOTE(jamielennox): The standard call raises errors from
# keystoneclient, where we need to raise the icgwclient errors.
raise_exc = kwargs.pop('raise_exc', True)
start_time = time.time()
resp, body = super(SessionClient, self).request(url,
method,
raise_exc=False,
**kwargs)
end_time = time.time()
self.times.append(('%s %s' % (method, url),
start_time, end_time))
if raise_exc and resp.status_code >= 400:
raise exceptions.from_response(resp, body, url, method)
return resp, body
def get_timings(self):
return self.times
def reset_timings(self):
self.times = []
def _original_only(f):
"""Indicates and enforces that this function can only be used if we are
using the original HTTPClient object.
We use this to specify that if you use the newer Session HTTP client then
you are aware that the way you use your client has been updated and certain
functions are no longer allowed to be used.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if isinstance(self.client, SessionClient):
msg = ('This call is no longer available. The operation should '
'be performed on the session object instead.')
raise exceptions.InvalidUsage(msg)
return f(self, *args, **kwargs)
return wrapper
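# Illustration (hypothetical, not from the original module): the decorator is
# meant for manager-style objects that expose a self.client attribute, e.g.
#
#     class ServerManager(object):
#         def __init__(self, client):
#             self.client = client
#
#         @_original_only
#         def legacy_call(self):
#             ...
#
# If self.client is a SessionClient, the wrapped call raises
# exceptions.InvalidUsage instead of relying on HTTPClient-only behaviour.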
class HTTPClient(object):
USER_AGENT = 'python-icgwclient'
def __init__(self, user, password, projectid=None, auth_url=None,
insecure=False, timeout=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', service_type=None,
service_name=None, volume_service_name=None,
timings=False, bypass_url=None,
os_cache=False, no_cache=True,
http_log_debug=False, auth_system='keystone',
auth_plugin=None, auth_token=None,
cacert=None, tenant_id=None, user_id=None,
connection_pool=False):
self.user = user
self.user_id = user_id
self.password = password
self.projectid = projectid
self.tenant_id = tenant_id
self._connection_pool = (_ClientConnectionPool()
if connection_pool else None)
# This will be called by #_get_password if self.password is None.
# EG if a password can only be obtained by prompting the user, but a
# token is available, you don't want to prompt until the token has
# been proven invalid
self.password_func = None
if auth_system and auth_system != 'keystone' and not auth_plugin:
raise exceptions.AuthSystemNotFound(auth_system)
if not auth_url and auth_system and auth_system != 'keystone':
auth_url = auth_plugin.get_auth_url()
if not auth_url:
raise exceptions.EndpointNotFound()
self.auth_url = auth_url.rstrip('/') if auth_url else auth_url
self.version = 'v1.1'
self.region_name = region_name
self.endpoint_type = endpoint_type
self.service_type = service_type
self.service_name = service_name
self.volume_service_name = volume_service_name
self.timings = timings
self.bypass_url = bypass_url.rstrip('/') if bypass_url else bypass_url
self.os_cache = os_cache or not no_cache
self.http_log_debug = http_log_debug
if timeout is not None:
self.timeout = float(timeout)
else:
self.timeout = None
self.times = [] # [("item", starttime, endtime), ...]
self.management_url = self.bypass_url or None
self.auth_token = auth_token
self.proxy_token = proxy_token
self.proxy_tenant_id = proxy_tenant_id
self.keyring_saver = None
self.keyring_saved = False
if insecure:
self.verify_cert = False
else:
if cacert:
self.verify_cert = cacert
else:
self.verify_cert = True
self.auth_system = auth_system
self.auth_plugin = auth_plugin
self._session = None
self._current_url = None
if self.http_log_debug:
logging.basicConfig(level=logging.DEBUG)
self._logger = logging.getLogger(__name__)
if self.http_log_debug and not self._logger.handlers:
# Logging level is already set on the root logger
ch = logging.StreamHandler()
self._logger.addHandler(ch)
self._logger.propagate = False
if hasattr(requests, 'logging'):
rql = requests.logging.getLogger(requests.__name__)
rql.addHandler(ch)
# Since we have already setup the root logger on debug, we
# have to set it up here on WARNING (its original level)
# otherwise we will get all the requests logging messages
rql.setLevel(logging.WARNING)
# NOTE(melwitt): Service catalog is only set if bypass_url isn't
# used. Otherwise, we can cache using services_url.
self.service_catalog = None
self.services_url = {}
def use_token_cache(self, use_it):
self.os_cache = use_it
def unauthenticate(self):
"""Forget all of our authentication information."""
self.management_url = None
self.auth_token = None
def set_management_url(self, url):
self.management_url = url
def get_timings(self):
return self.times
def reset_timings(self):
self.times = []
def _redact(self, target, path, text=None):
"""Replace the value of a key in `target`.
The key can be at the top level by specifying a list with a single
key as the path. Nested dictionaries are also supported by passing a
list of keys to be navigated to find the one that should be replaced.
In this case the last one is the one that will be replaced.
:param dict target: the dictionary that may have a key to be redacted;
modified in place
:param list path: a list representing the nested structure in `target`
that should be redacted; modified in place
:param string text: optional text to use as a replacement for the
redacted key. if text is not specified, the
default text will be sha1 hash of the value being
redacted
"""
key = path.pop()
# move to the most nested dict
for p in path:
try:
target = target[p]
            except Exception:
return
if key in target:
if text:
target[key] = text
else:
# because in python3 byte string handling is ... ug
value = target[key].encode('utf-8')
sha1sum = hashlib.sha1(value)
target[key] = "{SHA1}%s" % sha1sum.hexdigest()
def http_log_req(self, method, url, kwargs):
if not self.http_log_debug:
return
string_parts = ['curl -g -i']
if not kwargs.get('verify', True):
string_parts.append(' --insecure')
string_parts.append(" '%s'" % url)
string_parts.append(' -X %s' % method)
headers = copy.deepcopy(kwargs['headers'])
self._redact(headers, ['X-Auth-Token'])
# because dict ordering changes from 2 to 3
keys = sorted(headers.keys())
for name in keys:
value = headers[name]
header = ' -H "%s: %s"' % (name, value)
string_parts.append(header)
if 'data' in kwargs:
data = json.loads(kwargs['data'])
self._redact(data, ['auth', 'passwordCredentials', 'password'])
string_parts.append(" -d '%s'" % json.dumps(data))
self._logger.debug("REQ: %s" % "".join(string_parts))
def http_log_resp(self, resp):
if not self.http_log_debug:
return
if resp.text and resp.status_code != 400:
try:
body = json.loads(resp.text)
self._redact(body, ['access', 'token', 'id'])
except ValueError:
body = None
else:
body = None
self._logger.debug("RESP: [%(status)s] %(headers)s\nRESP BODY: "
"%(text)s\n", {'status': resp.status_code,
'headers': resp.headers,
'text': json.dumps(body)})
def open_session(self):
if not self._connection_pool:
self._session = requests.Session()
def close_session(self):
if self._session and not self._connection_pool:
self._session.close()
self._session = None
def _get_session(self, url):
if self._connection_pool:
magic_tuple = parse.urlsplit(url)
scheme, netloc, path, query, frag = magic_tuple
service_url = '%s://%s' % (scheme, netloc)
if self._current_url != service_url:
# Invalidate Session object in case the url is somehow changed
if self._session:
self._session.close()
self._current_url = service_url
self._logger.debug(
"New session created for: (%s)" % service_url)
self._session = requests.Session()
self._session.mount(service_url,
self._connection_pool.get(service_url))
return self._session
elif self._session:
return self._session
def request(self, url, method, **kwargs):
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['body'])
del kwargs['body']
if self.timeout is not None:
kwargs.setdefault('timeout', self.timeout)
kwargs['verify'] = self.verify_cert
self.http_log_req(method, url, kwargs)
request_func = requests.request
session = self._get_session(url)
if session:
request_func = session.request
resp = request_func(
method,
url,
**kwargs)
self.http_log_resp(resp)
if resp.text:
# TODO(dtroyer): verify the note below in a requests context
# NOTE(alaski): Because force_exceptions_to_status_code=True
# httplib2 returns a connection refused event as a 400 response.
# To determine if it is a bad request or refused connection we need
# to check the body. httplib2 tests check for 'Connection refused'
# or 'actively refused' in the body, so that's what we'll do.
if resp.status_code == 400:
if ('Connection refused' in resp.text or
'actively refused' in resp.text):
raise exceptions.ConnectionRefused(resp.text)
try:
body = json.loads(resp.text)
except ValueError:
body = None
else:
body = None
if resp.status_code >= 400:
raise exceptions.from_response(resp, body, url, method)
return resp, body
def _time_request(self, url, method, **kwargs):
start_time = time.time()
resp, body = self.request(url, method, **kwargs)
self.times.append(("%s %s" % (method, url),
start_time, time.time()))
return resp, body
def _cs_request(self, url, method, **kwargs):
if not self.management_url:
self.authenticate()
if url is None:
            # To get API version information, it is necessary to GET
            # an icgw endpoint directly without "v2/<tenant-id>".
magic_tuple = parse.urlsplit(self.management_url)
scheme, netloc, path, query, frag = magic_tuple
path = re.sub(r'v[1-9]/[a-z0-9]+$', '', path)
url = parse.urlunsplit((scheme, netloc, path, None, None))
else:
if self.service_catalog:
url = self.get_service_url(self.service_type) + url
else:
# NOTE(melwitt): The service catalog is not available
# when bypass_url is used.
url = self.management_url + url
# Perform the request once. If we get a 401 back then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
kwargs['headers']['Content-Type'] = 'application/json'
if self.projectid:
kwargs['headers']['X-Auth-Project-Id'] = self.projectid
resp, body = self._time_request(url, method, **kwargs)
return resp, body
except exceptions.Unauthorized as e:
try:
# first discard auth token, to avoid the possibly expired
# token being re-used in the re-authentication attempt
self.unauthenticate()
# overwrite bad token
self.keyring_saved = False
self.authenticate()
kwargs['headers']['X-Auth-Token'] = self.auth_token
resp, body = self._time_request(url, method, **kwargs)
return resp, body
except exceptions.Unauthorized:
raise e
def _get_password(self):
if not self.password and self.password_func:
self.password = self.password_func()
return self.password
def get(self, url, **kwargs):
return self._cs_request(url, 'GET', **kwargs)
def post(self, url, **kwargs):
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
return self._cs_request(url, 'DELETE', **kwargs)
def get_service_url(self, service_type):
if service_type not in self.services_url:
url = self.service_catalog.url_for(
attr='region',
filter_value=self.region_name,
endpoint_type=self.endpoint_type,
service_type=service_type,
service_name=self.service_name,
volume_service_name=self.volume_service_name,)
url = url.rstrip('/')
self.services_url[service_type] = url
return self.services_url[service_type]
def _extract_service_catalog(self, url, resp, body, extract_token=True):
"""See what the auth service told us and process the response.
We may get redirected to another site, fail or actually get
back a service catalog with a token and our endpoints.
"""
        # content must always be present
if resp.status_code == 200 or resp.status_code == 201:
try:
self.auth_url = url
self.service_catalog = \
service_catalog.ServiceCatalog(body)
if extract_token:
self.auth_token = self.service_catalog.get_token()
self.tenant_id = self.service_catalog.get_tenant_id()
self.management_url = self.get_service_url(self.service_type)
return None
except exceptions.AmbiguousEndpoints:
print(_("Found more than one valid endpoint. Use a more "
"restrictive filter"))
raise
except KeyError:
raise exceptions.AuthorizationFailure()
except exceptions.EndpointNotFound:
print(_("Could not find any suitable endpoint. Correct "
"region?"))
raise
elif resp.status_code == 305:
return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _fetch_endpoints_from_auth(self, url):
"""We have a token, but don't know the final endpoint for
the region. We have to go back to the auth service and
ask again. This request requires an admin-level token
to work. The proxy token supplied could be from a low-level enduser.
We can't get this from the keystone service endpoint, we have to use
the admin endpoint.
This will overwrite our admin token with the user token.
"""
# GET ...:5001/v2.0/tokens/#####/endpoints
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
% (self.proxy_token, self.proxy_tenant_id)])
self._logger.debug("Using Endpoint URL: %s" % url)
resp, body = self._time_request(
url, "GET", headers={'X-Auth-Token': self.auth_token})
return self._extract_service_catalog(url, resp, body,
extract_token=False)
def authenticate(self):
if not self.auth_url:
msg = _("Authentication requires 'auth_url', which should be "
"specified in '%s'") % self.__class__.__name__
raise exceptions.AuthorizationFailure(msg)
magic_tuple = netutils.urlsplit(self.auth_url)
scheme, netloc, path, query, frag = magic_tuple
port = magic_tuple.port
if port is None:
port = 80
path_parts = path.split('/')
for part in path_parts:
if len(part) > 0 and part[0] == 'v':
self.version = part
break
if self.auth_token and self.management_url:
self._save_keys()
return
# TODO(sandy): Assume admin endpoint is 35357 for now.
# Ideally this is going to have to be provided by the service catalog.
new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
admin_url = parse.urlunsplit(
(scheme, new_netloc, path, query, frag))
auth_url = self.auth_url
if self.version == "v2.0": # FIXME(chris): This should be better.
while auth_url:
if not self.auth_system or self.auth_system == 'keystone':
auth_url = self._v2_auth(auth_url)
else:
auth_url = self._plugin_auth(auth_url)
# Are we acting on behalf of another user via an
# existing token? If so, our actual endpoints may
# be different than that of the admin token.
if self.proxy_token:
if self.bypass_url:
self.set_management_url(self.bypass_url)
else:
self._fetch_endpoints_from_auth(admin_url)
# Since keystone no longer returns the user token
# with the endpoints any more, we need to replace
# our service account token with the user token.
self.auth_token = self.proxy_token
else:
try:
while auth_url:
auth_url = self._v1_auth(auth_url)
                # In some configurations icgw redirects to the v2.0 keystone
                # endpoint. Also, the new location does not contain the real
                # endpoint, only the hostname and port.
except exceptions.AuthorizationFailure:
if auth_url.find('v2.0') < 0:
auth_url = auth_url + '/v2.0'
self._v2_auth(auth_url)
if self.bypass_url:
self.set_management_url(self.bypass_url)
elif not self.management_url:
raise exceptions.Unauthorized('Icgw Client')
self._save_keys()
def _save_keys(self):
# Store the token/mgmt url in the keyring for later requests.
if (self.keyring_saver and self.os_cache and not self.keyring_saved
and self.auth_token and self.management_url
and self.tenant_id):
self.keyring_saver.save(self.auth_token,
self.management_url,
self.tenant_id)
# Don't save it again
self.keyring_saved = True
def _v1_auth(self, url):
if self.proxy_token:
raise exceptions.NoTokenLookupException()
headers = {'X-Auth-User': self.user,
'X-Auth-Token': self._get_password(),
'Content-Type': 'application/json'}
if self.projectid:
headers['X-Auth-Project-Id'] = self.projectid
resp, body = self._time_request(url, 'GET', headers=headers)
if resp.status_code in (200, 204): # in some cases we get No Content
try:
mgmt_header = 'x-server-management-url'
self.management_url = resp.headers[mgmt_header].rstrip('/')
self.auth_token = resp.headers['x-auth-token']
self.auth_url = url
except (KeyError, TypeError):
raise exceptions.AuthorizationFailure()
elif resp.status_code == 305:
return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _plugin_auth(self, auth_url):
return self.auth_plugin.authenticate(self, auth_url)
def _v2_auth(self, url):
"""Authenticate against a v2.0 auth service."""
if self.auth_token:
body = {"auth": {
"token": {"id": self.auth_token}}}
elif self.user_id:
body = {"auth": {
"passwordCredentials": {"userId": self.user_id,
"password": self._get_password()}}}
else:
body = {"auth": {
"passwordCredentials": {"username": self.user,
"password": self._get_password()}}}
if self.tenant_id:
body['auth']['tenantId'] = self.tenant_id
elif self.projectid:
body['auth']['tenantName'] = self.projectid
return self._authenticate(url, body)
def _authenticate(self, url, body, **kwargs):
"""Authenticate and extract the service catalog."""
method = "POST"
token_url = url + "/tokens"
# Make sure we follow redirects when trying to reach Keystone
resp, respbody = self._time_request(
token_url,
method,
body=body,
allow_redirects=True,
**kwargs)
return self._extract_service_catalog(url, resp, respbody)
def _construct_http_client(username=None, password=None, project_id=None,
auth_url=None, insecure=False, timeout=None,
proxy_tenant_id=None, proxy_token=None,
region_name=None, endpoint_type='publicURL',
extensions=None, service_type=SERVICE_TYPE,
service_name=None, volume_service_name=None,
timings=False, bypass_url=None, os_cache=False,
no_cache=True, http_log_debug=False,
auth_system='keystone', auth_plugin=None,
auth_token=None, cacert=None, tenant_id=None,
user_id=None, connection_pool=False, session=None,
auth=None, user_agent='python-icgwclient',
interface=None, **kwargs):
if session:
return SessionClient(session=session,
auth=auth,
interface=interface or endpoint_type,
service_type=service_type,
region_name=region_name,
service_name=service_name,
user_agent=user_agent,
**kwargs)
else:
# FIXME(jamielennox): username and password are now optional. Need
# to test that they were provided in this mode.
return HTTPClient(username,
password,
user_id=user_id,
projectid=project_id,
tenant_id=tenant_id,
auth_url=auth_url,
auth_token=auth_token,
insecure=insecure,
timeout=timeout,
auth_system=auth_system,
auth_plugin=auth_plugin,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name,
timings=timings,
bypass_url=bypass_url,
os_cache=os_cache,
http_log_debug=http_log_debug,
cacert=cacert,
connection_pool=connection_pool)
def get_client_class(version):
version_map = {
'1.1': 'icgwclient.v2.client.Client',
'2': 'icgwclient.v2.client.Client',
'3': 'icgwclient.v2.client.Client',
}
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
msg = _("Invalid client version '%(version)s'. must be one of: "
"%(keys)s") % {'version': version,
'keys': ', '.join(version_map.keys())}
raise exceptions.UnsupportedVersion(msg)
return importutils.import_class(client_path)
def Client(version, *args, **kwargs):
client_class = get_client_class(version)
return client_class(*args, **kwargs)
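# Minimal usage sketch (not part of the original module; the endpoint and
# credentials are hypothetical). Constructing HTTPClient performs no network
# I/O; authenticate() would POST to <auth_url>/tokens and fill in auth_token
# and management_url from the returned service catalog. Because of the
# relative imports, this only runs inside the package (e.g. with python -m).
if __name__ == "__main__":
    demo = HTTPClient('demo-user', 'demo-password',
                      projectid='demo-project',
                      auth_url='http://keystone.example.com:5000/v2.0',
                      timings=True)
    # demo.authenticate()                        # needs a reachable keystone
    # resp, body = demo.get('/servers/detail')   # routed through _cs_request()
    print('constructed %s client for %s' % (demo.USER_AGENT, demo.auth_url))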
|
|
from .. import rfb_icons
from .. import rman_bl_nodes
from ..rman_operators.rman_operators_utils import get_bxdf_items, get_light_items, get_lightfilter_items
from ..rfb_utils import scene_utils
from ..rfb_utils import shadergraph_utils
from ..rman_config import __RFB_CONFIG_DICT__ as rfb_config
from ..rfb_logger import rfb_log  # assumed module path; rfb_log() is called in unregister()
from bpy.types import Menu
import bpy
class VIEW3D_MT_renderman_add_object_menu(Menu):
bl_label = "RenderMan"
bl_idname = "VIEW3D_MT_renderman_add_object_menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("rman_blender").icon_id
def draw(self, context):
layout = self.layout
selected_light_objects = []
if context.selected_objects:
for obj in context.selected_objects:
if shadergraph_utils.is_rman_light(obj, include_light_filters=False):
selected_light_objects.append(obj)
layout.menu('VIEW3D_MT_RM_Add_Light_Menu', icon_value=bpy.types.VIEW3D_MT_RM_Add_Light_Menu.get_icon_id())
if selected_light_objects:
layout.menu('VIEW3D_MT_RM_Add_LightFilter_Menu', text='Attach Light Filter', icon_value=bpy.types.VIEW3D_MT_RM_Add_LightFilter_Menu.get_icon_id())
else:
layout.menu('VIEW3D_MT_RM_Add_LightFilter_Menu', icon_value=bpy.types.VIEW3D_MT_RM_Add_LightFilter_Menu.get_icon_id())
layout.separator()
layout.menu('VIEW3D_MT_renderman_add_object_quadrics_menu', icon='MESH_UVSPHERE')
layout.separator()
layout.separator()
layout.menu('VIEW3D_MT_renderman_add_object_volumes_menu', icon_value=bpy.types.VIEW3D_MT_renderman_add_object_volumes_menu.get_icon_id())
layout.separator()
rman_icon = rfb_icons.get_icon("rman_CreateArchive")
op = layout.operator('object.rman_add_rman_geo', text='RIB Archive', icon_value=rman_icon.icon_id)
op.rman_prim_type = 'DELAYED_LOAD_ARCHIVE'
op.rman_default_name = 'RIB_Archive'
op.bl_prim_type = ''
op.rman_open_filebrowser = True
if rfb_config['ui_preferences']['render_runprograms']['default']:
op = layout.operator('object.rman_add_rman_geo', text='RunProgram')
op.rman_prim_type = 'PROCEDURAL_RUN_PROGRAM'
op.rman_default_name = 'RiRunProgram'
op.bl_prim_type = ''
op.rman_open_filebrowser = True
rman_icon = rfb_icons.get_icon("rman_alembic")
op = layout.operator('object.rman_add_rman_geo', text='Alembic Archive', icon_value=rman_icon.icon_id)
op.rman_prim_type = 'ALEMBIC'
op.rman_default_name = 'rman_AlembicArchive'
op.bl_prim_type = ''
op.rman_open_filebrowser = True
op.rman_convert_to_zup = True
op = layout.operator('object.rman_add_rman_geo', text='RiProcedural')
op.rman_prim_type = 'DYNAMIC_LOAD_DSO'
op.rman_default_name = 'RiProcedural'
op.bl_prim_type = ''
op.rman_open_filebrowser = True
op = layout.operator('object.rman_add_rman_geo', text='Brickmap Geometry')
op.rman_prim_type = 'BRICKMAP'
op.rman_default_name = 'BrickmapGeo'
op.bl_prim_type = ''
op.rman_open_filebrowser = True
class VIEW3D_MT_renderman_add_object_volumes_menu(Menu):
bl_label = "Volumes"
bl_idname = "VIEW3D_MT_renderman_add_object_volumes_menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("out_PxrVolume").icon_id
def draw(self, context):
layout = self.layout
rman_icon = rfb_icons.get_icon('rman_openvdb')
op = layout.operator('object.rman_add_rman_geo', text='Import OpenVDB', icon_value=rman_icon.icon_id)
op.rman_prim_type = ''
op.bl_prim_type = 'VOLUME'
op.rman_default_name = 'OpenVDB'
op.rman_open_filebrowser = True
op.rman_convert_to_zup = True
rman_icon = rfb_icons.get_node_icon('PxrVolume')
op = layout.operator('object.rman_add_rman_geo', text='Volume Box', icon_value=rman_icon.icon_id)
op.rman_prim_type = 'RI_VOLUME'
op.rman_default_name = 'RiVolume'
op.rman_open_filebrowser = False
class VIEW3D_MT_renderman_add_object_quadrics_menu(Menu):
bl_label = "Quadrics"
bl_idname = "VIEW3D_MT_renderman_add_object_quadrics_menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
def draw(self, context):
layout = self.layout
op = layout.operator('object.rman_add_rman_geo', text='Sphere', icon='MESH_UVSPHERE')
op.rman_prim_type = 'QUADRIC'
op.rman_quadric_type = 'SPHERE'
op.bl_prim_type = ''
op.rman_open_filebrowser = False
op = layout.operator('object.rman_add_rman_geo', text='Cylinder', icon='MESH_CYLINDER')
op.rman_prim_type = 'QUADRIC'
op.rman_quadric_type = 'CYLINDER'
op.bl_prim_type = ''
op.rman_open_filebrowser = False
op = layout.operator('object.rman_add_rman_geo', text='Cone', icon='MESH_CONE')
op.rman_prim_type = 'QUADRIC'
op.rman_quadric_type = 'CONE'
op.bl_prim_type = ''
op.rman_open_filebrowser = False
op = layout.operator('object.rman_add_rman_geo', text='Disk', icon='MESH_CIRCLE')
op.rman_prim_type = 'QUADRIC'
op.rman_quadric_type = 'DISK'
op.bl_prim_type = ''
op.rman_open_filebrowser = False
op = layout.operator('object.rman_add_rman_geo', text='Torus', icon='MESH_TORUS')
op.rman_prim_type = 'QUADRIC'
op.rman_quadric_type = 'TORUS'
op.bl_prim_type = ''
op.rman_open_filebrowser = False
class VIEW3D_MT_renderman_object_context_menu(Menu):
bl_label = "RenderMan"
bl_idname = "VIEW3D_MT_renderman_object_context_menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("rman_blender").icon_id
def draw(self, context):
layout = self.layout
is_rman_running = context.scene.renderman.is_rman_running
is_rman_interactive_running = context.scene.renderman.is_rman_interactive_running
if is_rman_running and not is_rman_interactive_running:
rman_icon = rfb_icons.get_icon("rman_ipr_cancel")
layout.operator('renderman.stop_render', text="Stop Render",
icon_value=rman_icon.icon_id)
return
selected_objects = []
selected_light_objects = []
if context.selected_objects:
for obj in context.selected_objects:
if shadergraph_utils.is_rman_light(obj, include_light_filters=False):
selected_light_objects.append(obj)
elif obj.type not in ['CAMERA', 'LIGHT', 'SPEAKER']:
selected_objects.append(obj)
layout.menu('VIEW3D_MT_RM_Add_Render_Menu', icon_value=VIEW3D_MT_RM_Add_Render_Menu.get_icon_id())
if selected_light_objects:
layout.separator()
layout.operator_menu_enum(
"object.rman_add_light_filter", 'rman_lightfilter_name', text="Attach New Light Filter", icon='LIGHT')
layout.separator()
if selected_objects:
# Add Bxdf
layout.menu('VIEW3D_MT_RM_Add_bxdf_Menu', text='Add New Material', icon_value=bpy.types.VIEW3D_MT_RM_Add_bxdf_Menu.get_icon_id())
# Make Selected Geo Emissive
rman_meshlight = rfb_icons.get_icon("out_PxrMeshLight")
layout.operator("object.rman_create_meshlight", text="Convert to Mesh Light",
icon_value=rman_meshlight.icon_id)
            # Add Subdiv Scheme
rman_subdiv = rfb_icons.get_icon("rman_subdiv")
layout.operator("mesh.rman_convert_subdiv",
text="Convert to Subdiv", icon_value=rman_subdiv.icon_id)
layout.separator()
layout.menu('VIEW3D_MT_RM_Add_Export_Menu', icon_value=VIEW3D_MT_RM_Add_Export_Menu.get_icon_id())
# Diagnose
layout.separator()
column = layout.column()
column.enabled = not is_rman_interactive_running
row = column.row()
rman_rib = rfb_icons.get_icon('rman_rib_small')
row.operator("renderman.open_scene_rib", text='View RIB', icon_value=rman_rib.icon_id)
if selected_objects or selected_light_objects:
row = column.row()
row.operator("renderman.open_selected_rib", text='View Selected RIB', icon_value=rman_rib.icon_id)
layout.separator()
layout.label(text='Groups')
layout.menu('VIEW3D_MT_RM_Add_Selected_To_ObjectGroup_Menu', text='Trace Sets')
layout.menu('VIEW3D_MT_RM_Add_Selected_To_LightMixer_Menu', text='Light Mixer Groups')
layout.operator("scene.rman_open_light_linking", text="Light Linking Editor")
layout.separator()
layout.menu('VIEW3D_MT_RM_Stylized_Menu', text='Stylized Looks')
class VIEW3D_MT_RM_Stylized_Menu(bpy.types.Menu):
bl_label = "Stylized Looks"
bl_idname = "VIEW3D_MT_RM_Stylized_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("rman_blender").icon_id
def draw(self, context):
rm = context.scene.renderman
layout = self.layout
'''
if rm.render_rman_stylized:
layout.operator_menu_enum('node.rman_attach_stylized_pattern', 'stylized_pattern')
layout.operator("scene.rman_open_stylized_editor", text="Stylized Looks Editor")
else:
op = layout.operator("scene.rman_enable_stylized_looks", text="Enable Stylized Looks")
op.open_editor = True
'''
if rm.render_rman_stylized:
layout.operator_menu_enum('node.rman_attach_stylized_pattern', 'stylized_pattern')
layout.operator("scene.rman_open_stylized_editor", text="Stylized Looks Editor")
class VIEW3D_MT_RM_Add_Render_Menu(bpy.types.Menu):
bl_label = "Render"
bl_idname = "VIEW3D_MT_RM_Add_Render_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("rman_blender").icon_id
def draw(self, context):
layout = self.layout
rm = context.scene.renderman
if not rm.is_rman_interactive_running:
rman_rerender_controls = rfb_icons.get_icon("rman_ipr_on")
layout.operator('renderman.start_ipr', text="IPR",
icon_value=rman_rerender_controls.icon_id)
rman_render_icon = rfb_icons.get_icon("rman_render")
layout.operator("render.render", text="Render",
icon_value=rman_render_icon.icon_id)
else:
rman_rerender_controls = rfb_icons.get_icon("rman_ipr_cancel")
layout.operator('renderman.stop_ipr', text="Stop IPR",
icon_value=rman_rerender_controls.icon_id)
layout.separator()
rman_icon = rfb_icons.get_icon('rman_vp_viz')
layout.menu('PRMAN_MT_Viewport_Integrator_Menu', icon_value=rman_icon.icon_id)
layout.menu('PRMAN_MT_Viewport_Refinement_Menu', icon='IMPORT')
if rm.is_rman_viewport_rendering:
rman_icon = rfb_icons.get_icon('rman_vp_resolution')
layout.menu('PRMAN_MT_Viewport_Res_Mult_Menu', icon_value=rman_icon.icon_id)
rman_icon = rfb_icons.get_icon('rman_vp_aovs')
layout.menu('PRMAN_MT_Viewport_Channel_Sel_Menu', icon_value=rman_icon.icon_id)
rman_icon = rfb_icons.get_icon('rman_vp_crop')
layout.operator("renderman_viewport.cropwindow", icon_value=rman_icon.icon_id)
rman_icon = rfb_icons.get_icon('rman_vp_snapshot')
layout.operator("renderman_viewport.snapshot", icon_value=rman_icon.icon_id)
layout.operator('renderman_viewport.enhance', icon='VIEW_ZOOM')
# texture cache clear
rman_icon = rfb_icons.get_icon('rman_lightning_grey')
layout.operator('rman_txmgr_list.clear_all_cache', icon_value=rman_icon.icon_id)
class VIEW3D_MT_RM_Add_Export_Menu(bpy.types.Menu):
bl_label = "Export"
bl_idname = "VIEW3D_MT_RM_Add_Export_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("rman_CreateArchive").icon_id
def draw(self, context):
layout = self.layout
rman_archive = rfb_icons.get_icon("rman_CreateArchive")
layout.operator("export.rman_export_rib_archive",
icon_value=rman_archive.icon_id)
layout.operator("renderman.bake_selected_brickmap", text="Bake Object to Brickmap")
class VIEW3D_MT_RM_Add_Selected_To_ObjectGroup_Menu(bpy.types.Menu):
bl_label = "Trace Sets"
bl_idname = "VIEW3D_MT_RM_Add_Selected_To_ObjectGroup_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
def draw(self, context):
layout = self.layout
scene = context.scene
op = layout.operator("scene.rman_open_groups_editor", text="Trace Sets Editor")
selected_objects = []
if context.selected_objects:
for obj in context.selected_objects:
if obj.type not in ['CAMERA', 'LIGHT', 'SPEAKER']:
selected_objects.append(obj)
if not selected_objects:
return
layout.separator()
op = layout.operator('renderman.add_remove_object_groups', text='Create New Trace Set')
op.context = 'scene.renderman'
op.collection = 'object_groups'
op.collection_index = 'object_groups_index'
op.defaultname = 'objectGroup_%d' % len(scene.renderman.object_groups)
op.action = 'ADD'
obj_grps = scene.renderman.object_groups
if obj_grps:
layout.separator()
layout.label(text='Add Selected To: ')
for i, obj_grp in enumerate(obj_grps.keys()):
op = layout.operator('renderman.add_to_group', text=obj_grp)
op.do_scene_selected = True
op.open_editor = True
op.group_index = i
class VIEW3D_MT_RM_Add_Selected_To_LightMixer_Menu(bpy.types.Menu):
bl_label = "Light Mixer"
bl_idname = "VIEW3D_MT_RM_Add_Selected_To_LightMixer_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
def draw(self, context):
layout = self.layout
scene = context.scene
layout.operator('scene.rman_open_light_mixer_editor', text='Light Mixer Editor')
layout.separator()
selected_light_objects = []
if context.selected_objects:
for obj in context.selected_objects:
if shadergraph_utils.is_rman_light(obj, include_light_filters=False):
selected_light_objects.append(obj)
if not selected_light_objects:
return
op = layout.operator('collection.add_remove', text='Create Light Mixer Group')
op.context = 'scene.renderman'
op.collection = 'light_mixer_groups'
op.collection_index = 'light_mixer_groups_index'
op.defaultname = 'mixerGroup_%d' % len(scene.renderman.light_mixer_groups)
op.action = 'ADD'
lgt_mixer_grps = scene.renderman.light_mixer_groups
if lgt_mixer_grps:
layout.separator()
layout.label(text='Add Selected To: ')
for i, obj_grp in enumerate(lgt_mixer_grps.keys()):
op = layout.operator('renderman.add_light_to_light_mixer_group', text=obj_grp)
op.do_scene_selected = True
op.open_editor = True
op.group_index = i
class VIEW3D_MT_RM_Add_Light_Menu(bpy.types.Menu):
bl_label = "Light"
bl_idname = "VIEW3D_MT_RM_Add_Light_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("rman_arealight").icon_id
def draw(self, context):
layout = self.layout
        for _, nm, description, icon, i in get_light_items():
op = layout.operator('object.rman_add_light', text=nm, icon_value=icon)
op.rman_light_name = nm
class VIEW3D_MT_RM_Add_LightFilter_Menu(bpy.types.Menu):
bl_label = "Light Filter"
bl_idname = "VIEW3D_MT_RM_Add_LightFilter_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("rman_lightfilter").icon_id
def draw(self, context):
layout = self.layout
        for _, nm, description, icon, i in get_lightfilter_items():
op = layout.operator('object.rman_add_light_filter', text=nm, icon_value=icon)
op.rman_lightfilter_name = nm
class VIEW3D_MT_RM_Add_bxdf_Menu(bpy.types.Menu):
bl_label = "Material"
bl_idname = "VIEW3D_MT_RM_Add_bxdf_Menu"
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.engine == 'PRMAN_RENDER'
@classmethod
def get_icon_id(cls):
return rfb_icons.get_icon("out_PxrSurface").icon_id
def draw(self, context):
layout = self.layout
for nm, label, description, icon, i in get_bxdf_items():
if not nm:
layout.separator()
layout.label(text=label)
continue
op = layout.operator('object.rman_add_bxdf', text=nm, icon_value=icon)
op.bxdf_name = nm
def rman_add_object_menu(self, context):
rd = context.scene.render
if rd.engine != 'PRMAN_RENDER':
return
layout = self.layout
layout.menu('VIEW3D_MT_renderman_add_object_menu', text='RenderMan', icon_value=bpy.types.VIEW3D_MT_renderman_add_object_menu.get_icon_id())
def rman_object_context_menu(self, context):
rd = context.scene.render
layout = self.layout
if rd.engine != 'PRMAN_RENDER':
layout.operator('renderman.use_renderman', text='Use RenderMan', icon_value=rfb_icons.get_icon("rman_blender").icon_id)
layout.separator()
else:
layout.menu('VIEW3D_MT_renderman_object_context_menu', text='RenderMan', icon_value=bpy.types.VIEW3D_MT_renderman_add_object_menu.get_icon_id())
classes = [
VIEW3D_MT_renderman_add_object_menu,
VIEW3D_MT_renderman_add_object_quadrics_menu,
VIEW3D_MT_renderman_add_object_volumes_menu,
VIEW3D_MT_renderman_object_context_menu,
VIEW3D_MT_RM_Add_Selected_To_ObjectGroup_Menu,
VIEW3D_MT_RM_Add_Selected_To_LightMixer_Menu,
VIEW3D_MT_RM_Add_Light_Menu,
VIEW3D_MT_RM_Add_LightFilter_Menu,
VIEW3D_MT_RM_Add_bxdf_Menu,
VIEW3D_MT_RM_Add_Export_Menu,
VIEW3D_MT_RM_Add_Render_Menu,
VIEW3D_MT_RM_Stylized_Menu
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.VIEW3D_MT_add.prepend(rman_add_object_menu)
bpy.types.VIEW3D_MT_object_context_menu.prepend(rman_object_context_menu)
def unregister():
bpy.types.VIEW3D_MT_add.remove(rman_add_object_menu)
bpy.types.VIEW3D_MT_object_context_menu.remove(rman_object_context_menu)
for cls in classes:
try:
bpy.utils.unregister_class(cls)
        except RuntimeError:
            rfb_log().debug('Could not unregister class: %s' % str(cls))
|
|
# Copyright (c) 2015, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import (integer, boolean, s3_bucket_name, notification_type,
notification_event, json_checker, task_type,
operating_system, compliance_level)
class NotificationConfig(AWSProperty):
props = {
'NotificationArn': (basestring, False),
'NotificationEvents': (notification_event, False),
'NotificationType': (notification_type, False),
}
class LoggingInfo(AWSProperty):
props = {
'Region': (basestring, True),
'S3Bucket': (s3_bucket_name, True),
'S3Prefix': (basestring, False),
}
class MaintenanceWindowAutomationParameters(AWSProperty):
props = {
'DocumentVersion': (basestring, False),
'Parameters': (dict, False),
}
class MaintenanceWindowLambdaParameters(AWSProperty):
props = {
'ClientContext': (basestring, False),
'Payload': (json_checker, False),
'Qualifier': (basestring, False),
}
class MaintenanceWindowRunCommandParameters(AWSProperty):
props = {
'Comment': (basestring, False),
'DocumentHash': (basestring, False),
'DocumentHashType': (basestring, False),
'NotificationConfig': (NotificationConfig, False),
'OutputS3BucketName': (s3_bucket_name, False),
'OutputS3KeyPrefix': (basestring, False),
'Parameters': (dict, False),
'ServiceRoleArn': (basestring, False),
'TimeoutSeconds': (integer, False),
}
class MaintenanceWindowStepFunctionsParameters(AWSProperty):
props = {
'Input': (basestring, False),
'Name': (basestring, False),
}
class PatchFilter(AWSProperty):
props = {
'Key': (basestring, True),
'Values': ([basestring], True),
}
class PatchFilterGroup(AWSProperty):
props = {
'PatchFilters': ([PatchFilter], False),
}
class Rule(AWSProperty):
props = {
'ApproveAfterDays': (integer, False),
'ComplianceLevel': (compliance_level, False),
'PatchFilterGroup': (PatchFilterGroup, False),
}
class RuleGroup(AWSProperty):
props = {
'PatchRules': ([Rule], False),
}
class TaskInvocationParameters(AWSProperty):
props = {
'MaintenanceWindowAutomationParameters':
(MaintenanceWindowAutomationParameters, False),
'MaintenanceWindowLambdaParameters':
(MaintenanceWindowLambdaParameters, False),
'MaintenanceWindowRunCommandParameters':
(MaintenanceWindowRunCommandParameters, False),
'MaintenanceWindowStepFunctionsParameters':
(MaintenanceWindowStepFunctionsParameters, False),
}
class Targets(AWSProperty):
props = {
'Key': (basestring, True),
'Values': ([basestring], True),
}
class S3OutputLocation(AWSProperty):
props = {
'OutputS3BucketName': (basestring, False),
'OutputS3KeyPrefix': (basestring, False),
}
class InstanceAssociationOutputLocation(AWSProperty):
props = {
'S3Location': (S3OutputLocation, False),
}
class Association(AWSObject):
resource_type = "AWS::SSM::Association"
props = {
'AssociationName': (basestring, False),
'DocumentVersion': (basestring, False),
'InstanceId': (basestring, False),
'Name': (basestring, True),
'OutputLocation': (InstanceAssociationOutputLocation, False),
'Parameters': (dict, False),
'ScheduleExpression': (basestring, False),
'Targets': ([Targets], False),
}
class Document(AWSObject):
resource_type = "AWS::SSM::Document"
props = {
# Need a better implementation of the SSM Document
'Content': (dict, True),
'DocumentType': (basestring, False),
'Tags': (Tags, False),
}
class MaintenanceWindow(AWSObject):
resource_type = "AWS::SSM::MaintenanceWindow"
props = {
'AllowUnassociatedTargets': (boolean, True),
'Cutoff': (integer, True),
'Description': (basestring, False),
'Duration': (integer, True),
'Name': (basestring, True),
'Schedule': (basestring, True),
}
class MaintenanceWindowTarget(AWSObject):
resource_type = "AWS::SSM::MaintenanceWindowTarget"
props = {
'Description': (basestring, False),
'Name': (basestring, False),
'OwnerInformation': (basestring, False),
'ResourceType': (basestring, True),
'Targets': ([Targets], True),
'WindowId': (basestring, True),
}
class MaintenanceWindowTask(AWSObject):
resource_type = "AWS::SSM::MaintenanceWindowTask"
props = {
'Description': (basestring, False),
'LoggingInfo': (LoggingInfo, False),
'MaxConcurrency': (basestring, False),
'MaxErrors': (basestring, True),
'Name': (basestring, False),
'Priority': (integer, True),
'ServiceRoleArn': (basestring, True),
'Targets': ([Targets], True),
'TaskArn': (basestring, True),
'TaskInvocationParameters': (TaskInvocationParameters, False),
'TaskParameters': (dict, False),
'TaskType': (task_type, True),
'WindowId': (basestring, False),
}
class Parameter(AWSObject):
resource_type = "AWS::SSM::Parameter"
props = {
'AllowedPattern': (basestring, False),
'Description': (basestring, False),
'Name': (basestring, False),
'Type': (basestring, True),
'Value': (basestring, True),
}
class PatchBaseline(AWSObject):
resource_type = "AWS::SSM::PatchBaseline"
props = {
'ApprovalRules': (RuleGroup, False),
'ApprovedPatches': ([basestring], False),
'ApprovedPatchesComplianceLevel': (compliance_level, False),
'Description': (basestring, False),
'GlobalFilters': (PatchFilterGroup, False),
'Name': (basestring, True),
'OperatingSystem': (operating_system, False),
'PatchGroups': ([basestring], False),
'RejectedPatches': ([basestring], False),
}
class ResourceDataSync(AWSObject):
resource_type = "AWS::SSM::ResourceDataSync"
props = {
'BucketName': (basestring, True),
'BucketPrefix': (basestring, False),
'BucketRegion': (basestring, True),
'KMSKeyArn': (basestring, False),
'SyncFormat': (basestring, True),
'SyncName': (basestring, True),
}
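# Minimal usage sketch (not part of troposphere's ssm module; the logical id
# and parameter values are hypothetical). It attaches one SSM Parameter to a
# Template and prints the resulting CloudFormation JSON; run it with the
# troposphere package installed, e.g. via "python -m troposphere.ssm".
if __name__ == '__main__':
    from troposphere import Template

    template = Template()
    template.add_resource(Parameter(
        'AppLogLevel',                  # logical resource id (hypothetical)
        Name='/demo/app/log-level',     # hypothetical SSM parameter name
        Type='String',
        Value='INFO',
    ))
    print(template.to_json())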
|
|
from datetime import timedelta
import numpy as np
import pandas as pd
from netCDF4 import Dataset
from scipy.signal import fftconvolve
from skimage.morphology import disk
from hagelslag.data.MRMSGrid import MRMSGrid
from hagelslag.evaluation.ProbabilityMetrics import DistributedReliability, DistributedROC
class NeighborEvaluator(object):
"""
A framework for statistically evaluating neighborhood probability forecasts.
Attributes:
run_date (datetime.datetime object): Date of the beginning of the model run
start_hour (int): First forecast hour evaluated
end_hour (int): Last forecast hour evaluated
ensemble_name (str): Name of the ensemble system being evaluated
model_name (str): Name of the physical or machine learning model being evaluated
forecast_variable (str): Name of the forecast variable being evaluated.
mrms_variable (str): Name of the NSSL MRMS product being used for gridded observations
neighbor_radii (list or array): neighborhood radii in number of grid points
smoothing_radii (list or array): radius of Gaussian filter used by the forecast
obs_thresholds (list or array): Observed intensity threshold that corresponds with each element of
size_thresholds
size_thresholds (list or array): Intensity threshold for neighborhood probabilities
obs_mask (bool): Whether or not another MRMS product is used to mask invalid grid points
mask_variable (str): MRMS variable used for masking invalid grid points
forecast_path (str): Path to forecast files
mrms_path (str): Path to MRMS data
"""
def __init__(self, run_date, start_hour, end_hour, ensemble_name, model_name, forecast_variable, mrms_variable,
neighbor_radii, smoothing_radii, obs_thresholds, size_thresholds, probability_levels, obs_mask,
mask_variable, forecast_path, mrms_path, coordinate_file=None, lon_bounds=None, lat_bounds=None):
self.run_date = run_date
self.start_hour = start_hour
self.end_hour = end_hour
self.ensemble_name = ensemble_name
self.model_name = model_name
self.forecast_variable = forecast_variable
self.mrms_variable = mrms_variable
self.obs_mask = obs_mask
self.mask_variable = mask_variable
self.neighbor_radii = neighbor_radii
self.smoothing_radii = smoothing_radii
self.obs_thresholds = obs_thresholds
self.size_thresholds = size_thresholds
self.probability_levels = probability_levels
self.forecast_path = forecast_path
self.mrms_path = mrms_path
self.hourly_forecasts = {}
self.period_forecasts = {}
self.raw_obs = {}
self.period_obs = {}
self.coordinate_file = coordinate_file
self.coordinates = {}
self.lon_bounds = lon_bounds
self.lat_bounds = lat_bounds
def load_forecasts(self):
"""
Load neighborhood probability forecasts.
"""
run_date_str = self.run_date.strftime("%Y%m%d")
forecast_file = self.forecast_path + "{0}/{1}_{2}_{3}_consensus_{0}.nc".format(run_date_str,
self.ensemble_name,
self.model_name,
self.forecast_variable)
print("Forecast file: " + forecast_file)
forecast_data = Dataset(forecast_file)
for size_threshold in self.size_thresholds:
for smoothing_radius in self.smoothing_radii:
for neighbor_radius in self.neighbor_radii:
hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius, smoothing_radius,
self.forecast_variable,
float(size_threshold))
period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(self.end_hour -
self.start_hour + 1,
neighbor_radius,
smoothing_radius,
self.forecast_variable,
float(size_threshold))
print("Loading forecasts {0} {1} {2} {3} {4}".format(self.run_date, self.model_name,
self.forecast_variable, size_threshold,
smoothing_radius))
if hour_var in forecast_data.variables.keys():
self.hourly_forecasts[hour_var] = forecast_data.variables[hour_var][:]
if period_var in forecast_data.variables.keys():
self.period_forecasts[period_var] = forecast_data.variables[period_var][:]
forecast_data.close()
def load_obs(self, mask_threshold=0.5):
"""
Loads observations and masking grid (if needed).
Args:
            mask_threshold: Values greater than or equal to the threshold are kept, others are masked.
"""
print("Loading obs ", self.run_date, self.model_name, self.forecast_variable)
start_date = self.run_date + timedelta(hours=self.start_hour)
end_date = self.run_date + timedelta(hours=self.end_hour)
mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)
mrms_grid.load_data()
if len(mrms_grid.data) > 0:
self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)
self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0)
if self.obs_mask:
mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)
mask_grid.load_data()
self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)
self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)
def load_coordinates(self):
"""
Loads lat-lon coordinates from a netCDF file.
"""
coord_file = Dataset(self.coordinate_file)
if "lon" in coord_file.variables.keys():
self.coordinates["lon"] = coord_file.variables["lon"][:]
self.coordinates["lat"] = coord_file.variables["lat"][:]
else:
self.coordinates["lon"] = coord_file.variables["XLONG"][0]
self.coordinates["lat"] = coord_file.variables["XLAT"][0]
coord_file.close()
def evaluate_hourly_forecasts(self):
"""
Calculates ROC curves and Reliability scores for each forecast hour.
Returns:
A pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects.
"""
score_columns = ["Run_Date", "Forecast_Hour", "Ensemble Name", "Model_Name", "Forecast_Variable",
"Neighbor_Radius", "Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"]
all_scores = pd.DataFrame(columns=score_columns)
for h, hour in enumerate(range(self.start_hour, self.end_hour + 1)):
for neighbor_radius in self.neighbor_radii:
n_filter = disk(neighbor_radius)
for s, size_threshold in enumerate(self.size_thresholds):
print("Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}".format(hour, self.model_name,
self.forecast_variable,
self.run_date, neighbor_radius,
size_threshold))
hour_obs = fftconvolve(self.raw_obs[self.mrms_variable][h] >= self.obs_thresholds[s],
n_filter, mode="same")
hour_obs[hour_obs > 1] = 1
hour_obs[hour_obs < 1] = 0
if self.obs_mask:
hour_obs = hour_obs[self.raw_obs[self.mask_variable][h] > 0]
for smoothing_radius in self.smoothing_radii:
hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius,
smoothing_radius,
self.forecast_variable,
size_threshold)
if self.obs_mask:
hour_forecast = self.hourly_forecasts[hour_var][h][self.raw_obs[self.mask_variable][h] > 0]
else:
hour_forecast = self.hourly_forecasts[hour_var][h]
roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
roc.update(hour_forecast, hour_obs)
rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
rel.update(hour_forecast, hour_obs)
row = [self.run_date, hour, self.ensemble_name, self.model_name, self.forecast_variable,
neighbor_radius,
smoothing_radius, size_threshold, roc, rel]
all_scores.loc[hour_var + "_{0:d}".format(hour)] = row
return all_scores
def evaluate_period_forecasts(self):
"""
Evaluates ROC and Reliability scores for forecasts over the full period from start hour to end hour
Returns:
A pandas DataFrame with full-period metadata and verification statistics
"""
score_columns = ["Run_Date", "Ensemble Name", "Model_Name", "Forecast_Variable", "Neighbor_Radius",
"Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"]
all_scores = pd.DataFrame(columns=score_columns)
if self.coordinate_file is not None:
coord_mask = np.where((self.coordinates["lon"] >= self.lon_bounds[0]) &
(self.coordinates["lon"] <= self.lon_bounds[1]) &
(self.coordinates["lat"] >= self.lat_bounds[0]) &
(self.coordinates["lat"] <= self.lat_bounds[1]) &
(self.period_obs[self.mask_variable] > 0))
else:
coord_mask = None
for neighbor_radius in self.neighbor_radii:
n_filter = disk(neighbor_radius)
for s, size_threshold in enumerate(self.size_thresholds):
period_obs = fftconvolve(self.period_obs[self.mrms_variable] >= self.obs_thresholds[s],
n_filter, mode="same")
period_obs[period_obs > 1] = 1
if self.obs_mask and self.coordinate_file is None:
period_obs = period_obs[self.period_obs[self.mask_variable] > 0]
elif self.obs_mask and self.coordinate_file is not None:
period_obs = period_obs[coord_mask[0], coord_mask[1]]
else:
period_obs = period_obs.ravel()
for smoothing_radius in self.smoothing_radii:
print("Eval period forecast {0} {1} {2} {3} {4} {5}".format(self.model_name,
self.forecast_variable,
self.run_date,
neighbor_radius,
size_threshold, smoothing_radius))
period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(self.end_hour -
self.start_hour + 1,
neighbor_radius,
smoothing_radius,
self.forecast_variable,
size_threshold)
if self.obs_mask and self.coordinate_file is None:
period_forecast = self.period_forecasts[period_var][self.period_obs[self.mask_variable] > 0]
elif self.obs_mask and self.coordinate_file is not None:
period_forecast = self.period_forecasts[period_var][coord_mask[0], coord_mask[1]]
else:
period_forecast = self.period_forecasts[period_var].ravel()
roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
roc.update(period_forecast, period_obs)
rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
rel.update(period_forecast, period_obs)
row = [self.run_date, self.ensemble_name, self.model_name, self.forecast_variable, neighbor_radius,
smoothing_radius, size_threshold, roc, rel]
all_scores.loc[period_var] = row
return all_scores
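# --- Hedged usage sketch ---
# A minimal outline of how NeighborEvaluator might be driven for a single model
# run. Every path, variable name, and threshold below is a hypothetical
# placeholder; real values depend on the ensemble and MRMS configuration.
if __name__ == "__main__":
    from datetime import datetime
    evaluator = NeighborEvaluator(
        run_date=datetime(2015, 6, 4),
        start_hour=12,
        end_hour=35,
        ensemble_name="SSEF",                     # hypothetical ensemble name
        model_name="Random-Forest",               # hypothetical model name
        forecast_variable="hail",                 # hypothetical variable
        mrms_variable="MESH_Max_60min_00.50",     # hypothetical MRMS product
        neighbor_radii=[14],
        smoothing_radii=[14],
        obs_thresholds=[25.4],
        size_thresholds=[25.4],
        probability_levels=np.arange(0, 1.1, 0.1),
        obs_mask=False,
        mask_variable="RadarQualityIndex_00.00",  # hypothetical mask product
        forecast_path="/path/to/forecasts/",      # hypothetical path
        mrms_path="/path/to/mrms/")               # hypothetical path
    evaluator.load_forecasts()
    evaluator.load_obs()
    print(evaluator.evaluate_hourly_forecasts())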
|
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <[email protected]>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import uuid
import croniter
from oslo_config import cfg
from oslo_context import context
from oslo_log import log
from oslo_utils import netutils
from oslo_utils import timeutils
import pecan
from pecan import rest
import pytz
import six
from stevedore import extension
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
import aodh
from aodh import alarm as aodh_alarm
from aodh.alarm.storage import models as alarm_models
from aodh.api.controllers.v2.alarm_rules import combination
from aodh.api.controllers.v2 import base
from aodh.api.controllers.v2 import utils as v2_utils
from aodh.api import rbac
from aodh.i18n import _
from aodh import messaging
from aodh import utils
LOG = log.getLogger(__name__)
ALARM_API_OPTS = [
cfg.BoolOpt('record_history',
default=True,
help='Record alarm change events.'
),
cfg.IntOpt('user_alarm_quota',
default=None,
help='Maximum number of alarms defined for a user.'
),
cfg.IntOpt('project_alarm_quota',
default=None,
help='Maximum number of alarms defined for a project.'
),
cfg.IntOpt('alarm_max_actions',
default=-1,
help='Maximum count of actions for each state of an alarm, '
'non-positive number means no limit.'),
]
cfg.CONF.register_opts(ALARM_API_OPTS, group='alarm')
state_kind = ["ok", "alarm", "insufficient data"]
state_kind_enum = wtypes.Enum(str, *state_kind)
severity_kind = ["low", "moderate", "critical"]
severity_kind_enum = wtypes.Enum(str, *severity_kind)
class OverQuota(base.ClientSideError):
def __init__(self, data):
d = {
'u': data.user_id,
'p': data.project_id
}
super(OverQuota, self).__init__(
_("Alarm quota exceeded for user %(u)s on project %(p)s") % d,
status_code=403)
def is_over_quota(conn, project_id, user_id):
"""Returns False if an alarm is within the set quotas, True otherwise.
:param conn: a backend connection object
:param project_id: the ID of the project setting the alarm
:param user_id: the ID of the user setting the alarm
"""
over_quota = False
# Start by checking for user quota
user_alarm_quota = cfg.CONF.alarm.user_alarm_quota
if user_alarm_quota is not None:
user_alarms = list(conn.get_alarms(user=user_id))
over_quota = len(user_alarms) >= user_alarm_quota
# If the user quota isn't reached, we check for the project quota
if not over_quota:
project_alarm_quota = cfg.CONF.alarm.project_alarm_quota
if project_alarm_quota is not None:
project_alarms = list(conn.get_alarms(project=project_id))
over_quota = len(project_alarms) >= project_alarm_quota
return over_quota
class CronType(wtypes.UserType):
"""A user type that represents a cron format."""
basetype = six.string_types
name = 'cron'
@staticmethod
def validate(value):
# raises ValueError if invalid
croniter.croniter(value)
return value
class AlarmTimeConstraint(base.Base):
"""Representation of a time constraint on an alarm."""
name = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the constraint"
_description = None # provide a default
def get_description(self):
if not self._description:
return ('Time constraint at %s lasting for %s seconds'
% (self.start, self.duration))
return self._description
def set_description(self, value):
self._description = value
description = wsme.wsproperty(wtypes.text, get_description,
set_description)
"The description of the constraint"
start = wsme.wsattr(CronType(), mandatory=True)
"Start point of the time constraint, in cron format"
duration = wsme.wsattr(wtypes.IntegerType(minimum=0), mandatory=True)
"How long the constraint should last, in seconds"
timezone = wsme.wsattr(wtypes.text, default="")
"Timezone of the constraint"
def as_dict(self):
return self.as_dict_from_keys(['name', 'description', 'start',
'duration', 'timezone'])
@staticmethod
def validate(tc):
if tc.timezone:
try:
pytz.timezone(tc.timezone)
except Exception:
raise base.ClientSideError(_("Timezone %s is not valid")
% tc.timezone)
return tc
@classmethod
def sample(cls):
return cls(name='SampleConstraint',
description='nightly build every night at 23h for 3 hours',
start='0 23 * * *',
duration=10800,
timezone='Europe/Ljubljana')
ALARMS_RULES = extension.ExtensionManager("aodh.alarm.rule")
LOG.debug("alarm rules plugin loaded: %s" % ",".join(ALARMS_RULES.names()))
class Alarm(base.Base):
"""Representation of an alarm.
.. note::
combination_rule and threshold_rule are mutually exclusive. The *type*
of the alarm should be set to *threshold* or *combination* and the
appropriate rule should be filled.
"""
alarm_id = wtypes.text
"The UUID of the alarm"
name = wsme.wsattr(wtypes.text, mandatory=True)
"The name for the alarm"
_description = None # provide a default
def get_description(self):
rule = getattr(self, '%s_rule' % self.type, None)
if not self._description:
if hasattr(rule, 'default_description'):
return six.text_type(rule.default_description)
return "%s alarm rule" % self.type
return self._description
def set_description(self, value):
self._description = value
description = wsme.wsproperty(wtypes.text, get_description,
set_description)
"The description of the alarm"
enabled = wsme.wsattr(bool, default=True)
"This alarm is enabled?"
ok_actions = wsme.wsattr([wtypes.text], default=[])
"The actions to do when alarm state change to ok"
alarm_actions = wsme.wsattr([wtypes.text], default=[])
"The actions to do when alarm state change to alarm"
insufficient_data_actions = wsme.wsattr([wtypes.text], default=[])
"The actions to do when alarm state change to insufficient data"
repeat_actions = wsme.wsattr(bool, default=False)
"The actions should be re-triggered on each evaluation cycle"
type = base.AdvEnum('type', str, *ALARMS_RULES.names(),
mandatory=True)
"Explicit type specifier to select which rule to follow below."
time_constraints = wtypes.wsattr([AlarmTimeConstraint], default=[])
"""Describe time constraints for the alarm"""
# These settings are ignored in the PUT or POST operations, but are
# filled in for GET
project_id = wtypes.text
"The ID of the project or tenant that owns the alarm"
user_id = wtypes.text
"The ID of the user who created the alarm"
timestamp = datetime.datetime
"The date of the last alarm definition update"
state = base.AdvEnum('state', str, *state_kind,
default='insufficient data')
"The state offset the alarm"
state_timestamp = datetime.datetime
"The date of the last alarm state changed"
severity = base.AdvEnum('severity', str, *severity_kind,
default='low')
"The severity of the alarm"
def __init__(self, rule=None, time_constraints=None, **kwargs):
super(Alarm, self).__init__(**kwargs)
if rule:
setattr(self, '%s_rule' % self.type,
ALARMS_RULES[self.type].plugin(**rule))
if time_constraints:
self.time_constraints = [AlarmTimeConstraint(**tc)
for tc in time_constraints]
@staticmethod
def validate(alarm):
Alarm.check_rule(alarm)
Alarm.check_alarm_actions(alarm)
ALARMS_RULES[alarm.type].plugin.validate_alarm(alarm)
if alarm.time_constraints:
tc_names = [tc.name for tc in alarm.time_constraints]
if len(tc_names) > len(set(tc_names)):
error = _("Time constraint names must be "
"unique for a given alarm.")
raise base.ClientSideError(error)
return alarm
@staticmethod
def check_rule(alarm):
rule = '%s_rule' % alarm.type
if getattr(alarm, rule) in (wtypes.Unset, None):
error = _("%(rule)s must be set for %(type)s"
" type alarm") % {"rule": rule, "type": alarm.type}
raise base.ClientSideError(error)
rule_set = None
for ext in ALARMS_RULES:
name = "%s_rule" % ext.name
if getattr(alarm, name):
if rule_set is None:
rule_set = name
else:
error = _("%(rule1)s and %(rule2)s cannot be set at the "
"same time") % {'rule1': rule_set, 'rule2': name}
raise base.ClientSideError(error)
@staticmethod
def check_alarm_actions(alarm):
actions_schema = aodh_alarm.NOTIFIER_SCHEMAS
max_actions = cfg.CONF.alarm.alarm_max_actions
for state in state_kind:
actions_name = state.replace(" ", "_") + '_actions'
actions = getattr(alarm, actions_name)
if not actions:
continue
action_set = set(actions)
if len(actions) != len(action_set):
LOG.info(_('duplicate actions are found: %s, '
'remove duplicate ones') % actions)
actions = list(action_set)
setattr(alarm, actions_name, actions)
if 0 < max_actions < len(actions):
error = _('%(name)s count exceeds maximum value '
'%(maximum)d') % {"name": actions_name,
"maximum": max_actions}
raise base.ClientSideError(error)
limited = rbac.get_limited_to_project(pecan.request.headers)
for action in actions:
try:
url = netutils.urlsplit(action)
except Exception:
error = _("Unable to parse action %s") % action
raise base.ClientSideError(error)
if url.scheme not in actions_schema:
error = _("Unsupported action %s") % action
raise base.ClientSideError(error)
if limited and url.scheme in ('log', 'test'):
error = _('You are not authorized to create '
'action: %s') % action
raise base.ClientSideError(error, status_code=401)
@classmethod
def sample(cls):
return cls(alarm_id=None,
name="SwiftObjectAlarm",
description="An alarm",
type='combination',
time_constraints=[AlarmTimeConstraint.sample().as_dict()],
user_id="c96c887c216949acbdfbd8b494863567",
project_id="c96c887c216949acbdfbd8b494863567",
enabled=True,
timestamp=datetime.datetime.utcnow(),
state="ok",
severity="moderate",
state_timestamp=datetime.datetime.utcnow(),
ok_actions=["http://site:8000/ok"],
alarm_actions=["http://site:8000/alarm"],
insufficient_data_actions=["http://site:8000/nodata"],
repeat_actions=False,
combination_rule=combination.AlarmCombinationRule.sample(),
)
def as_dict(self, db_model):
d = super(Alarm, self).as_dict(db_model)
        # iterate over a copy of the keys so entries can be deleted safely
        for k in list(d):
            if k.endswith('_rule'):
                del d[k]
d['rule'] = getattr(self, "%s_rule" % self.type).as_dict()
if self.time_constraints:
d['time_constraints'] = [tc.as_dict()
for tc in self.time_constraints]
return d
Alarm.add_attributes(**{"%s_rule" % ext.name: ext.plugin
for ext in ALARMS_RULES})
class AlarmChange(base.Base):
"""Representation of an event in an alarm's history."""
event_id = wtypes.text
"The UUID of the change event"
alarm_id = wtypes.text
"The UUID of the alarm"
type = wtypes.Enum(str,
'creation',
'rule change',
'state transition',
'deletion')
"The type of change"
detail = wtypes.text
"JSON fragment describing change"
project_id = wtypes.text
"The project ID of the initiating identity"
user_id = wtypes.text
"The user ID of the initiating identity"
on_behalf_of = wtypes.text
"The tenant on behalf of which the change is being made"
timestamp = datetime.datetime
"The time/date of the alarm change"
@classmethod
def sample(cls):
return cls(alarm_id='e8ff32f772a44a478182c3fe1f7cad6a',
type='rule change',
detail='{"threshold": 42.0, "evaluation_periods": 4}',
user_id="3e5d11fda79448ac99ccefb20be187ca",
project_id="b6f16144010811e387e4de429e99ee8c",
on_behalf_of="92159030020611e3b26dde429e99ee8c",
timestamp=datetime.datetime.utcnow(),
)
def _send_notification(event, payload):
notification = event.replace(" ", "_")
notification = "alarm.%s" % notification
transport = messaging.get_transport()
notifier = messaging.get_notifier(transport, publisher_id="aodh.api")
# FIXME(sileht): perhaps we need to copy some infos from the
# pecan request headers like nova does
notifier.info(context.RequestContext(), notification, payload)
class AlarmController(rest.RestController):
"""Manages operations on a single alarm."""
_custom_actions = {
'history': ['GET'],
'state': ['PUT', 'GET'],
}
def __init__(self, alarm_id):
pecan.request.context['alarm_id'] = alarm_id
self._id = alarm_id
def _alarm(self):
self.conn = pecan.request.alarm_storage_conn
auth_project = rbac.get_limited_to_project(pecan.request.headers)
alarms = list(self.conn.get_alarms(alarm_id=self._id,
project=auth_project))
if not alarms:
raise base.AlarmNotFound(alarm=self._id, auth_project=auth_project)
return alarms[0]
def _record_change(self, data, now, on_behalf_of=None, type=None):
if not cfg.CONF.alarm.record_history:
return
type = type or alarm_models.AlarmChange.RULE_CHANGE
scrubbed_data = utils.stringify_timestamps(data)
detail = json.dumps(scrubbed_data)
user_id = pecan.request.headers.get('X-User-Id')
project_id = pecan.request.headers.get('X-Project-Id')
on_behalf_of = on_behalf_of or project_id
payload = dict(event_id=str(uuid.uuid4()),
alarm_id=self._id,
type=type,
detail=detail,
user_id=user_id,
project_id=project_id,
on_behalf_of=on_behalf_of,
timestamp=now)
try:
self.conn.record_alarm_change(payload)
except aodh.NotImplementedError:
pass
# Revert to the pre-json'ed details ...
payload['detail'] = scrubbed_data
_send_notification(type, payload)
@wsme_pecan.wsexpose(Alarm)
def get(self):
"""Return this alarm."""
rbac.enforce('get_alarm', pecan.request)
return Alarm.from_db_model(self._alarm())
@wsme_pecan.wsexpose(Alarm, body=Alarm)
def put(self, data):
"""Modify this alarm.
:param data: an alarm within the request body.
"""
rbac.enforce('change_alarm', pecan.request)
# Ensure alarm exists
alarm_in = self._alarm()
now = timeutils.utcnow()
data.alarm_id = self._id
user, project = rbac.get_limited_to(pecan.request.headers)
if user:
data.user_id = user
elif data.user_id == wtypes.Unset:
data.user_id = alarm_in.user_id
if project:
data.project_id = project
elif data.project_id == wtypes.Unset:
data.project_id = alarm_in.project_id
data.timestamp = now
if alarm_in.state != data.state:
data.state_timestamp = now
else:
data.state_timestamp = alarm_in.state_timestamp
# make sure alarms are unique by name per project.
if alarm_in.name != data.name:
alarms = list(self.conn.get_alarms(name=data.name,
project=data.project_id))
if alarms:
raise base.ClientSideError(
_("Alarm with name=%s exists") % data.name,
status_code=409)
ALARMS_RULES[data.type].plugin.update_hook(data)
old_alarm = Alarm.from_db_model(alarm_in).as_dict(alarm_models.Alarm)
updated_alarm = data.as_dict(alarm_models.Alarm)
try:
alarm_in = alarm_models.Alarm(**updated_alarm)
except Exception:
LOG.exception(_("Error while putting alarm: %s") % updated_alarm)
raise base.ClientSideError(_("Alarm incorrect"))
alarm = self.conn.update_alarm(alarm_in)
change = dict((k, v) for k, v in updated_alarm.items()
if v != old_alarm[k] and k not in
['timestamp', 'state_timestamp'])
self._record_change(change, now, on_behalf_of=alarm.project_id)
return Alarm.from_db_model(alarm)
@wsme_pecan.wsexpose(None, status_code=204)
def delete(self):
"""Delete this alarm."""
rbac.enforce('delete_alarm', pecan.request)
# ensure alarm exists before deleting
alarm = self._alarm()
self.conn.delete_alarm(alarm.alarm_id)
change = Alarm.from_db_model(alarm).as_dict(alarm_models.Alarm)
self._record_change(change,
timeutils.utcnow(),
type=alarm_models.AlarmChange.DELETION)
@wsme_pecan.wsexpose([AlarmChange], [base.Query])
def history(self, q=None):
"""Assembles the alarm history requested.
:param q: Filter rules for the changes to be described.
"""
rbac.enforce('alarm_history', pecan.request)
q = q or []
# allow history to be returned for deleted alarms, but scope changes
# returned to those carried out on behalf of the auth'd tenant, to
# avoid inappropriate cross-tenant visibility of alarm history
auth_project = rbac.get_limited_to_project(pecan.request.headers)
conn = pecan.request.alarm_storage_conn
kwargs = v2_utils.query_to_kwargs(
q, conn.get_alarm_changes, ['on_behalf_of', 'alarm_id'])
return [AlarmChange.from_db_model(ac)
for ac in conn.get_alarm_changes(self._id, auth_project,
**kwargs)]
@wsme.validate(state_kind_enum)
@wsme_pecan.wsexpose(state_kind_enum, body=state_kind_enum)
def put_state(self, state):
"""Set the state of this alarm.
:param state: an alarm state within the request body.
"""
rbac.enforce('change_alarm_state', pecan.request)
        # note(sileht): the body is not validated by wsme
# Workaround for https://bugs.launchpad.net/wsme/+bug/1227229
if state not in state_kind:
raise base.ClientSideError(_("state invalid"))
now = timeutils.utcnow()
alarm = self._alarm()
alarm.state = state
alarm.state_timestamp = now
alarm = self.conn.update_alarm(alarm)
change = {'state': alarm.state}
self._record_change(change, now, on_behalf_of=alarm.project_id,
type=alarm_models.AlarmChange.STATE_TRANSITION)
return alarm.state
@wsme_pecan.wsexpose(state_kind_enum)
def get_state(self):
"""Get the state of this alarm."""
rbac.enforce('get_alarm_state', pecan.request)
alarm = self._alarm()
return alarm.state
class AlarmsController(rest.RestController):
"""Manages operations on the alarms collection."""
@pecan.expose()
def _lookup(self, alarm_id, *remainder):
return AlarmController(alarm_id), remainder
@staticmethod
def _record_creation(conn, data, alarm_id, now):
if not cfg.CONF.alarm.record_history:
return
type = alarm_models.AlarmChange.CREATION
scrubbed_data = utils.stringify_timestamps(data)
detail = json.dumps(scrubbed_data)
user_id = pecan.request.headers.get('X-User-Id')
project_id = pecan.request.headers.get('X-Project-Id')
payload = dict(event_id=str(uuid.uuid4()),
alarm_id=alarm_id,
type=type,
detail=detail,
user_id=user_id,
project_id=project_id,
on_behalf_of=project_id,
timestamp=now)
try:
conn.record_alarm_change(payload)
except aodh.NotImplementedError:
pass
# Revert to the pre-json'ed details ...
payload['detail'] = scrubbed_data
_send_notification(type, payload)
@wsme_pecan.wsexpose(Alarm, body=Alarm, status_code=201)
def post(self, data):
"""Create a new alarm.
:param data: an alarm within the request body.
"""
rbac.enforce('create_alarm', pecan.request)
conn = pecan.request.alarm_storage_conn
now = timeutils.utcnow()
data.alarm_id = str(uuid.uuid4())
user_limit, project_limit = rbac.get_limited_to(pecan.request.headers)
def _set_ownership(aspect, owner_limitation, header):
attr = '%s_id' % aspect
requested_owner = getattr(data, attr)
explicit_owner = requested_owner != wtypes.Unset
caller = pecan.request.headers.get(header)
if (owner_limitation and explicit_owner
and requested_owner != caller):
raise base.ProjectNotAuthorized(requested_owner, aspect)
actual_owner = (owner_limitation or
requested_owner if explicit_owner else caller)
setattr(data, attr, actual_owner)
_set_ownership('user', user_limit, 'X-User-Id')
_set_ownership('project', project_limit, 'X-Project-Id')
# Check if there's room for one more alarm
if is_over_quota(conn, data.project_id, data.user_id):
raise OverQuota(data)
data.timestamp = now
data.state_timestamp = now
ALARMS_RULES[data.type].plugin.create_hook(data)
change = data.as_dict(alarm_models.Alarm)
# make sure alarms are unique by name per project.
alarms = list(conn.get_alarms(name=data.name,
project=data.project_id))
if alarms:
raise base.ClientSideError(
_("Alarm with name='%s' exists") % data.name,
status_code=409)
try:
alarm_in = alarm_models.Alarm(**change)
except Exception:
LOG.exception(_("Error while posting alarm: %s") % change)
raise base.ClientSideError(_("Alarm incorrect"))
alarm = conn.create_alarm(alarm_in)
self._record_creation(conn, change, alarm.alarm_id, now)
return Alarm.from_db_model(alarm)
@wsme_pecan.wsexpose([Alarm], [base.Query])
def get_all(self, q=None):
"""Return all alarms, based on the query provided.
:param q: Filter rules for the alarms to be returned.
"""
rbac.enforce('get_alarms', pecan.request)
q = q or []
# Timestamp is not supported field for Simple Alarm queries
kwargs = v2_utils.query_to_kwargs(
q, pecan.request.alarm_storage_conn.get_alarms,
allow_timestamps=False)
return [Alarm.from_db_model(m)
for m in pecan.request.alarm_storage_conn.get_alarms(**kwargs)]
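# --- Hedged validation sketch ---
# A standalone illustration of the checks behind CronType.validate and
# AlarmTimeConstraint.validate above: croniter rejects malformed cron strings
# (raising a ValueError subclass) and pytz rejects unknown timezone names.
# The sample values are arbitrary and mirror AlarmTimeConstraint.sample().
if __name__ == '__main__':
    croniter.croniter('0 23 * * *')        # valid cron expression, no error
    pytz.timezone('Europe/Ljubljana')      # valid timezone, no error
    try:
        croniter.croniter('not a cron spec')
    except ValueError:
        print('invalid cron format rejected')
    try:
        pytz.timezone('Mars/Olympus_Mons')
    except pytz.UnknownTimeZoneError:
        print('invalid timezone rejected')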
|
|
"""List the reviews and corresponding branches in the current repository.
Usage examples:
Summarise all the tracked review branches:
$ barc list
ID status tracked name
10596 ok r/master/linkUtil
10594 ok r/master/notification-spew
List just the review ids and the trackers:
$ barc list --format-string "{review_id} {remote_branch}"
10596 refs/remotes/origin/dev/arcyd/trackers/rbranch/--/-/ok/r/maste...
10594 refs/remotes/origin/dev/arcyd/trackers/rbranch/--/-/ok/r/maste...
Output format examples:
--format-summary
ID status tracked name
10596 ok r/master/linkUtil
10594 ok r/master/notification-spew
--format-json
[
{
"remote_base": "refs/remotes/origin/master",
"review_id": "10596",
...
}
...
]
--format-python
[
{"remote_base": "refs/remotes/origin/master",
...
"review_id": "10596"},
...
]
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# barcmd_list
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pprint
import abdt_classicnaming
import abdt_compositenaming
import abdt_naming
import abdt_rbranchnaming
import phlgit_showref
import phlgitu_ref
import phlsys_git
def getFromfilePrefixChars():
return None
def setupParser(parser):
fmts = parser.add_argument_group(
'Output format parameters',
'Choose one only, default is "--format-summary"')
formats = fmts.add_mutually_exclusive_group()
formats.add_argument(
'--format-summary',
action='store_true',
help="list the review ids, statuses and review names.")
formats.add_argument(
'--format-json',
action='store_true',
help="print json representation of managed review branches")
formats.add_argument(
'--format-python',
action='store_true',
help="print python representation of managed review branches")
formats.add_argument(
'--format-string',
type=str,
metavar='STR',
        help='specify a custom format string for displaying the items. '
'the string will be applied using Python\'s str.format(), '
'so you can use curly brackets to substitute for field names, '
'e.g. "{review_id}". you can use "--format-python" to discover '
'the field names.')
def process(args):
repo = phlsys_git.Repo('.')
#
# First, gather all the data
#
# XXX: only supports 'origin' remote at present
remote = 'origin'
hash_ref_pairs = phlgit_showref.hash_ref_pairs(repo)
remote_branch_to_hash = _remote_branches_as_short_local(
hash_ref_pairs, remote)
# local_branch_to_hash = _short_local_branches(hash_ref_pairs)
branch_naming = abdt_compositenaming.Naming(
abdt_classicnaming.Naming(),
abdt_rbranchnaming.Naming())
branch_pairs = abdt_naming.get_branch_pairs(
remote_branch_to_hash.keys(), branch_naming)
managed_review_branches = _get_managed_review_branches(
remote_branch_to_hash, branch_pairs)
#
# Finally, decide how to display it
#
if args.format_json:
print(json.dumps(managed_review_branches, sort_keys=True, indent=2))
elif args.format_python:
pprint.pprint(managed_review_branches)
elif args.format_string:
for branch in managed_review_branches:
print(args.format_string.format(**branch))
else: # args.format_summary
if managed_review_branches:
print("{:6} {:14} {}".format("ID", "status", "tracked name"))
for branch in managed_review_branches:
print("{review_id:6} {status:14} {tracked_name}".format(
**branch))
def _remote_branches_as_short_local(hash_ref_pairs, remote):
def is_remote(ref):
return phlgitu_ref.is_under_remote(ref, remote)
full_to_short = phlgitu_ref.fq_remote_to_short_local
branch_to_hash = dict([
(full_to_short(r), h) for h, r in hash_ref_pairs if is_remote(r)
])
return branch_to_hash
def _short_local_branches(hash_ref_pairs):
is_local_branch = phlgitu_ref.is_fq_local_branch
full_to_short = phlgitu_ref.fq_to_short
branch_to_hash = dict([
(full_to_short(r), h) for h, r in hash_ref_pairs if is_local_branch(r)
])
return branch_to_hash
def _get_managed_review_branches(remote_branch_to_hash, branch_pairs):
managed_review_branches = []
for pair in branch_pairs:
if pair.tracker:
user_commit = remote_branch_to_hash.get(
pair.tracker.review_name, None)
branch = {
'review_id': pair.tracker.id,
'status': pair.tracker.status,
'description': pair.tracker.description,
'tracked_name': pair.tracker.review_name,
'remote': pair.tracker.remote,
'remote_base': pair.tracker.remote_base,
'remote_branch': pair.tracker.remote_branch,
'base': pair.tracker.base,
'branch': pair.tracker.branch,
'review_commit': remote_branch_to_hash[pair.tracker.branch],
'user_commit': user_commit,
}
managed_review_branches.append(branch)
return managed_review_branches
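# --- Hedged format-string sketch ---
# Illustrates how "--format-string" values are expanded: process() applies
# Python's str.format() to each managed-branch dictionary produced by
# _get_managed_review_branches(). The sample dictionary below is hypothetical.
if __name__ == '__main__':
    _sample_branch = {
        'review_id': '10596',
        'status': 'ok',
        'tracked_name': 'r/master/linkUtil',
        'remote_branch': 'refs/remotes/origin/dev/arcyd/trackers/...',
    }
    print('{review_id} {remote_branch}'.format(**_sample_branch))
    print('{review_id:6} {status:14} {tracked_name}'.format(**_sample_branch))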
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
|
"""
Config utilities.
For an example config file, see the manual. If you don't have the man page
installed, a raw version is available in doc/udiskie.8.txt.
"""
import logging
import os
import sys
from udiskie.compat import basestring
from udiskie.locale import _
__all__ = ['DeviceFilter',
'FilterMatcher',
'Config']
def lower(s):
try:
return s.lower()
except AttributeError:
return s
class DeviceFilter(object):
"""Associate a certain value to matching devices."""
VALID_PARAMETERS = [
'is_drive',
'is_block',
'is_partition_table',
'is_partition',
'is_filesystem',
'is_luks',
'is_toplevel',
'is_detachable',
'is_ejectable',
'has_media',
'device_file',
'device_presentation',
'device_id',
'id_usage',
'is_crypto',
'is_ignored',
'id_type',
'id_label',
'id_uuid',
'is_luks_cleartext',
'is_external',
'is_systeminternal',
'is_mounted',
'mount_paths',
'is_unlocked',
'in_use',
'should_automount',
]
def __init__(self, match, value):
"""
Construct an instance.
:param dict match: device attributes
:param list value: value
"""
self._log = logging.getLogger(__name__)
self._match = match.copy()
        # iterate over a copy of the keys so deletion inside the loop is safe:
        for k in list(self._match):
if k not in self.VALID_PARAMETERS:
self._log.warn(_('Unknown matching attribute: {!r}', k))
del self._match[k]
self._value = value
self._log.debug(_('{0} created', self))
def __str__(self):
return _('{0}(match={1!r}, value={2!r})',
self.__class__.__name__,
self._match,
self._value)
def match(self, device):
"""
Check if the device matches this filter.
:param Device device: device to be checked
"""
return all(lower(getattr(device, k)) == lower(v)
for k, v in self._match.items())
def value(self, device):
"""
Get the associated value.
:param Device device: matched device
If :meth:`match` is False for the device, the return value of this
method is undefined.
"""
self._log.debug(_('{0} used for {1}', self, device.object_path))
return self._value
class MountOptions(DeviceFilter):
"""Associate a list of mount options to matched devices."""
def __init__(self, config_item):
"""Parse the MountOptions filter from the config item."""
config_item = config_item.copy()
options = config_item.pop('options')
if isinstance(options, basestring):
options = [o.strip() for o in options.split(',')]
super(MountOptions, self).__init__(config_item, options)
class IgnoreDevice(DeviceFilter):
"""Associate a boolean ignore flag to matched devices."""
def __init__(self, config_item):
"""Parse the IgnoreDevice filter from the config item."""
config_item = config_item.copy()
ignore = config_item.pop('ignore', True)
super(IgnoreDevice, self).__init__(config_item, ignore)
class FilterMatcher(object):
"""Matches devices against multiple `DeviceFilter`s."""
def __init__(self, filters, default):
"""
Construct a FilterMatcher instance from list of DeviceFilter.
:param list filters:
"""
self._filters = list(filters)
self._default = default
def __call__(self, device):
"""
Matches devices against multiple :class:`DeviceFilter`s.
:param default: default value
:param list filters: device filters
:param Device device: device to be mounted
:returns: value of the first matching filter
"""
matches = (f.value(device) for f in self._filters if f.match(device))
return next(matches, self._default)
class Config(object):
"""Udiskie config in memory representation."""
def __init__(self, data):
"""
Initialize with preparsed data object.
:param ConfigParser data: config file accessor
"""
self._data = data
@classmethod
def default_pathes(cls):
"""
Return the default config file pathes.
:rtype: list
"""
try:
from xdg.BaseDirectory import xdg_config_home as config_home
except ImportError:
config_home = os.path.expanduser('~/.config')
return [os.path.join(config_home, 'udiskie', 'config.yml'),
os.path.join(config_home, 'udiskie', 'config.json')]
@classmethod
def from_file(cls, path=None):
"""
Read config file.
:param str path: YAML config file name
:returns: configuration object
:rtype: Config
:raises IOError: if the path does not exist
"""
# None => use default
if path is None:
for path in cls.default_pathes():
try:
return cls.from_file(path)
except IOError:
logging.getLogger(__name__).debug(
"Failed to read config file: {0}"
.format(sys.exc_info()[1]))
except ImportError:
logging.getLogger(__name__).warn(
"Failed to read {0!r}: {1}"
.format(path, sys.exc_info()[1]))
return cls({})
# False/'' => no config
if not path:
return cls({})
if os.path.splitext(path)[1].lower() == '.json':
from json import load
else:
from yaml import safe_load as load
with open(path) as f:
return cls(load(f))
@property
def mount_options(self):
"""Get a MountOptions filter list from the config data."""
config_list = self._data.get('mount_options', [])
return FilterMatcher(map(MountOptions, config_list), None)
@property
def ignore_device(self):
"""Get a IgnoreDevice filter list from the config data"""
config_list = self._data.get('ignore_device', [])
return FilterMatcher(map(IgnoreDevice, config_list), False)
@property
def program_options(self):
"""Get the program options dictionary from the config file."""
return self._data.get('program_options', {}).copy()
@property
def notifications(self):
"""Get the notification timeouts dictionary from the config file."""
return self._data.get('notifications', {}).copy()
@property
def icon_names(self):
"""Get the icon names dictionary from the config file."""
return self._data.get('icon_names', {}).copy()
@property
def notification_actions(self):
"""Get the notification actions dictionary from the config file."""
return self._data.get('notification_actions', {}).copy()
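# --- Hedged usage sketch ---
# A minimal example of building a Config from an in-memory dictionary (the same
# structure a YAML/JSON config file would yield) and evaluating its filters
# against a stand-in device object. The device attributes, mount options and
# object path below are hypothetical.
if __name__ == '__main__':
    class _FakeDevice(object):
        object_path = '/org/freedesktop/UDisks2/block_devices/sdb1'
        is_external = True
        id_type = 'vfat'
    _config = Config({
        'mount_options': [
            {'id_type': 'vfat', 'options': 'ro,nouser'},
        ],
        'ignore_device': [
            {'is_external': False},
        ],
    })
    _device = _FakeDevice()
    print(_config.mount_options(_device))   # -> ['ro', 'nouser']
    print(_config.ignore_device(_device))   # -> False (default: no filter matched)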
|
|
"""Plotting functions
Note:
It relies on Matplotlib
"""
from __future__ import division
import warnings
import logging
logger = logging.getLogger('pygrfnn.vis')
import numpy as np
from functools import wraps
from numpy.polynomial.polynomial import polyroots
from utils import find_nearest
from utils import nice_log_values
from grfnn import grfnn_update_event
from resonances import fareySequence, resonanceSequence
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
except ImportError:
warnings.warn("Failed to import matplotlib. Plotting functions will be disabled.")
# graphical output decorator
def check_display(fun):
"""Decorator to check for display capability
Args:
        fun (``function``): Function to be decorated
Returns:
``function``: decorated function
"""
@wraps(fun)
def display_wrapper(*args, **kwargs):
try:
import matplotlib as mpl
import os
return fun(*args, **kwargs)
except ImportError:
warnings.warn("Couldn't import matplotlib, so visualizations are disabled")
# logging.info("Couldn't import matplotlib, so visualizations are disabled")
except Exception as e:
logging.error(str(e))
warnings.warn(str(e))
# warnings.warn("Something went wrong when trying to plot. "
# "Are you sure there's a display available?")
logging.error("Something went wrong when trying to plot. "
"Are you sure there's a display available?")
return display_wrapper
@check_display
def tf_simple(TF, t, f, title=None, x=None, display_op=np.abs,
cmap='binary', vmin=None, vmax=None):
"""Simple time-frequency representation (TFR).
Show the TFR in the top plot and the original time signal in the bottom
plot, if ``x`` is passed.
Args:
TF (:class:`numpy.ndarray`): time-frequency representation
t (:class:`numpy.ndarray`): time vector
f (:class:`numpy.ndarray`): frequency vector
title (``string``): title of the plot
x (:class:`numpy.array`): original time domain signal. If ``None``, no
time domain plot is shown
display_op (``function``): operator to apply to the TF representation (e.g.
:func:`numpy.abs`)
cmap (``string``): colormap to use in the TF representation
vmin (``float``): lower limit of the colormap
vmax (``float``): upper limit of the colormap
Note:
        It is the caller's responsibility to issue :func:`matplotlib.pyplot.show()`
if necessary.
"""
opTF = display_op(TF)
if x is None:
fig, axTF = plt.subplots(1)
axOnset = None
else:
# fig, (axTF, axT) = plt.subplots(2, 1, sharex=True)
fig = plt.figure()
gs = gridspec.GridSpec(2, 1,
width_ratios=[1],
height_ratios=[3, 1]
)
gs.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
axTF = fig.add_subplot(gs[0])
axOnset = fig.add_subplot(gs[1], sharex=axTF)
axTF.pcolormesh(t, f, opTF, cmap=cmap, vmin=vmin, vmax=vmax)
if title is not None:
axTF.set_title(title)
axTF.set_yscale('log')
axTF.set_yticks(nice_log_values(f))
axTF.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
axTF.axis('tight')
if axOnset is not None:
plt.setp(axTF.get_xticklabels(), visible=False)
axOnset.plot(t, x)
axOnset.yaxis.set_ticks_position('right')
axOnset.axis('tight')
# plt.show()
@check_display
def tf_detail(TF, t, f, title=None, t_detail=None, x=None, display_op=np.abs,
figsize=None, cmap='binary', vmin=None, vmax=None):
"""
Detailed time-frequency representation (TFR).
    Show the TFR in the top plot. Also show the frequency representation at
    specific time instants (the last time by default) on the plot on the right. If
    specified, the original time signal ``x`` is shown in the bottom plot.
Args:
TF (:class:`numpy.ndarray`): time-frequency representation
t (:class:`numpy.ndarray`): time vector
f (:class:`numpy.ndarray`): frequency vector
title (``string``): title of the plot
t_detail (``float`` or ``list``): time instant(s) to be detailed
        x (:class:`numpy.ndarray`): original time domain signal. If *None*, no
time domain plot is shown
display_op (``function``): operator to apply to the TF representation
(e.g. :func:`numpy.angle`)
figsize (``tuple``): matplotlib's figure size (optional)
cmap (``string``): colormap to use in the TF representation
vmin (``float``): lower limit of the colormap
vmax (``float``): upper limit of the colormap
Returns:
``(handles, ...)``: tuple of handles to plotted elements. They can be
used to create animations
Note:
``vmin`` and ``vmax`` are useful when comparing different time-frequency
representations, so they all share the same color scale.
Note:
        It is the caller's responsibility to issue :func:`matplotlib.pyplot.show()`
if necessary.
"""
if figsize is not None:
fig = plt.figure(figsize=figsize)
else:
fig = plt.figure()
opTF = display_op(TF)
if t_detail is None:
wr = [1, 2, 20]
detail = None
else:
wr = [1, 2, 20, 6]
if x is None:
hr = [1]
axOnset = None
else:
hr = [3, 1]
gs = gridspec.GridSpec(len(hr), len(wr),
width_ratios=wr,
height_ratios=hr
)
gs.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
axCB = fig.add_subplot(gs[0])
axTF = fig.add_subplot(gs[2])
if x is not None:
axOnset = fig.add_subplot(gs[len(wr)+2], sharex=axTF)
if t_detail is not None:
axF = fig.add_subplot(gs[3], sharey=axTF)
nice_freqs = nice_log_values(f)
# TF image
# im = axTF.pcolormesh(t, f, opTF, cmap=cmap)
im = axTF.imshow(opTF,
extent=[min(t), max(t), min(f), max(f)],
cmap=cmap,
vmin=vmin,
vmax=vmax,
origin='lower'
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
axTF.set_yscale('log')
axTF.set_yticks(nice_freqs)
axTF.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
axTF.invert_yaxis()
if title is not None:
axTF.set_title(title)
# Add colorbar
cb = plt.colorbar(im, ax=axTF, cax=axCB)
cb.ax.yaxis.set_ticks_position('left')
# TF detail
# find detail index
tf_line = None
tf_x_min, tf_x_max = 0, np.max(opTF)
if vmin is not None:
tf_x_min = vmin
if vmax is not None:
tf_x_max = vmax
if t_detail is not None:
if isinstance(t_detail, np.ndarray):
t_detail = t_detail.tolist()
elif not isinstance(t_detail, list):
t_detail = [t_detail]
t_detail, idx = find_nearest(t, t_detail)
# axF.invert_xaxis()
detail = axF.semilogy(opTF[:, idx], f)
axF.set_yticks(nice_freqs)
axF.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
axF.xaxis.set_ticks_position('top')
axF.axis('tight')
axF.set_xlim(tf_x_min, tf_x_max)
axF.yaxis.set_ticks_position('right')
plt.setp(axF.get_xaxis().get_ticklabels(), rotation=-90 )
axTF.hold(True)
tf_line = axTF.plot([t_detail, t_detail], [np.min(f), np.max(f)])
# tf_line = [axTF.axvline(td) for td in t_detail]
axTF.hold(False)
axTF.axis('tight')
# onset signal
t_line = None
if axOnset is not None:
plt.setp(axTF.get_xticklabels(), visible=False)
axOnset.plot(t, x, color='k')
if t_detail is not None:
t_line = axOnset.plot([t_detail, t_detail], [np.min(x), np.max(x)])
# t_line = [axOnset.axvline(td) for td in t_detail]
axOnset.yaxis.set_ticks_position('right')
axOnset.axis('tight')
# plt.show()
return (fig, im, tf_line, t_line, detail)
@check_display
def plot_connections(connection, title=None, f_detail=None, display_op=np.abs,
detail_type='polar', cmap='binary', vmin=None, vmax=None):
"""plot_connections(connection, t_detail=None, display_op=np.abs,
detail_type='polar')
Args:
connection (:class:`.Connection`): connection object
title (``string``): Title to be displayed
f_detail (``float``): frequency of the detail plot
display_op (``function``): operator to apply to the connection
matrix (e.g. :func:`numpy.abs`)
detail_type (``string``): detail complex display type (``'cartesian',
'polar', 'magnitude'`` or ``'phase'``)
cmap (``string``): colormap to use in the TF representation
vmin (``float``): lower limit of the colormap
vmax (``float``): upper limit of the colormap
Note:
        It is the caller's responsibility to issue :func:`matplotlib.pyplot.show()`
if necessary.
"""
fig = plt.figure()
if f_detail is not None:
gs = gridspec.GridSpec(2, 1,
width_ratios=[1],
height_ratios=[3, 1]
)
gs.update(wspace=0.0, hspace=0.0) # set the spacing between axes.
axConn = fig.add_subplot(gs[0])
axDetail = fig.add_subplot(gs[1])
else:
axConn = fig.add_subplot(1, 1, 1)
f_source = connection.source.f
f_dest = connection.destination.f
matrix = connection.matrix
opMat = display_op(matrix)
# axConn.pcolormesh(f_source, f_dest, opMat, cmap=cmap)
axConn.imshow(opMat,
extent=[min(f_source), max(f_source),
min(f_dest), max(f_dest)],
cmap=cmap,
vmin=vmin,
vmax=vmax,
origin='lower'
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
# axConn.invert_yaxis()
axConn.set_xscale('log')
axConn.set_xticks(nice_log_values(f_source))
axConn.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
axConn.set_yscale('log')
axConn.set_yticks(nice_log_values(f_dest))
axConn.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
axConn.set_ylabel(r'$f_{\mathrm{dest}}$')
if title is not None:
axConn.set_title(title)
if f_detail is None:
axConn.set_xlabel(r'$f_{\mathrm{source}}$')
else:
(f_detail, idx) = find_nearest(f_dest, f_detail)
conn = matrix[idx, :]
axConn.hold(True)
axConn.plot([np.min(f_source), np.max(f_source)],
[f_detail, f_detail],
color='r')
axConn.hold(False)
scalar_formatter = mpl.ticker.ScalarFormatter()
        if detail_type == 'polar':
axDetail.semilogx(f_source, np.abs(conn))
axDetailb = axDetail.twinx()
axDetailb.semilogx(f_source, np.angle(conn), color='r')
axDetailb.set_xticks(nice_log_values(f_source))
axDetailb.get_xaxis().set_major_formatter(scalar_formatter)
axDetailb.set_ylim([-np.pi, np.pi])
axDetail.axis('tight')
        elif detail_type == 'magnitude':
            y_min, y_max = 0, np.max(np.abs(conn))
if vmin is not None:
y_min = vmin
if vmax is not None:
y_max = vmax
axDetail.semilogx(f_source, np.abs(conn))
axDetail.set_xticks(nice_log_values(f_source))
axDetail.get_xaxis().set_major_formatter(scalar_formatter)
# axDetail.axis('tight')
axDetail.set_ylim([y_min, y_max])
        elif detail_type == 'phase':
axDetail.semilogx(f_source, np.angle(conn), color='r')
axDetail.set_xticks(nice_log_values(f_source))
axDetail.get_xaxis().set_major_formatter(scalar_formatter)
axDetail.set_ylim([-np.pi, np.pi])
else:
axDetail.semilogx(f_source, np.real(conn))
axDetailb = axDetail.twinx()
axDetailb.semilogx(f_source, np.imag(conn), color='r')
axDetailb.set_xticks(nice_log_values(f_source))
axDetailb.get_xaxis().set_major_formatter(scalar_formatter)
axDetail.axis('tight')
axDetail.set_xlabel(r'$f_{\mathrm{dest}}$')
axConn.set(aspect=1, adjustable='box-forced')
# plt.show()
@check_display
class GrFNN_RT_plot(object):
"""
On-line GrFNN state visualization.
Args:
grfnn (:class:`.Model`): GrFNN to be plotted
update_interval (``float``): Update interval (in seconds). This is
an approximation, as the update will happen as a multiple of the
integration step time.
fig_name (``string``): Name of the figure to use. If specified, the same
figure will be reused in consecutive runs. If None, a new figure
will be created each time the caller script runs.
title (``string``): optional title of the plot
Note:
This function probably won't work on an iPython Notebook. A possible
implementation using mpld3 should be possible to code, but is not in the
short term planning.
Note:
This function calls :func:`matplotlib.pyplot.ion` internally to allow
for on-line updating of the plot.
Note:
There is probably room for optimization here. For example,
http://goo.gl/J7Yyor does some interesting analysis/optimizations for
updating plots
"""
def __init__(self, grfnn, update_interval=0, fig_name=None, title=''):
self.grfnn = grfnn
self.update_interval = update_interval
self.title = title
self.fig_name = fig_name
plt.ion()
if fig_name is None:
self.fig = plt.figure()
else:
self.fig = plt.figure(fig_name)
self.ax = self.fig.add_subplot(111)
self.ax.grid(True)
self.line1, = self.ax.semilogx(grfnn.f, np.abs(grfnn.z), 'k')
self.ax.axis((np.min(grfnn.f), np.max(grfnn.f), 0, 1))
plt.xticks(nice_log_values(grfnn.f))
self.ax.set_title('{}'.format(self.title))
self.fig.canvas.draw()
self.last_update = 0
def update_callback(sender, **kwargs):
"""
Update the plot when necessary
"""
t = kwargs['t']
if 'force' in kwargs:
force = kwargs['force']
else:
force = False
if force or (t - self.last_update >= self.update_interval):
z = sender.z
self.line1.set_ydata(np.abs(z))
self.ax.set_title('{} (t = {:0.2f}s)'.format(self.title, t))
self.fig.canvas.draw()
self.last_update = t
grfnn_update_event.connect(update_callback, sender=grfnn, weak=False)
@check_display
def vector_field(params, F=1.0):
"""
Display the vector field of an oscillator.
For a given set of intrinsic parameters, show the vector field for an
oscillator as the one defined by :func:`.zdot`.
Args:
params (:class:`.Zparam`): oscillator intrinsic parameters
F (``scalar`` or ``iterable``): Forcing values to plot
"""
colormap = plt.cm.gist_heat
try:
len(F)
    except TypeError:  # F is a scalar, not an iterable
F = [F]
# FIXME: customizable?
colors = [colormap(i) for i in np.linspace(0, 0.7, len(F))]
# \dot{r} = f(r, F)
r = np.arange(0, 1/np.sqrt(params.epsilon), 0.01)
rdot = np.add.outer(params.alpha * r +
params.beta1 * r**3 +
((params.epsilon* params.beta2 * r**5) /
(1 - params.epsilon * r**2)),
F)
# plot it
plt.figure()
ax = plt.gca()
ax.set_color_cycle(colors)
plt.plot(r, rdot, zorder=0, linewidth=2)
plt.title(r'$\alpha={:.3g},'
r'\beta_1={:.3g},'
r'\beta_2={:.3g}$'.format(params.alpha,
params.beta1,
params.beta2))
    ## asymptote
# plt.vlines(x=1/np.sqrt(epsilon), ymin=-1, ymax=2, color='r', linestyle=':')
# plt.ylim(-5,5)
ax.axhline(y=0,xmin=min(r),xmax=max(r),c="k",zorder=5, alpha=0.5)
plt.xlabel(r'$r$')
plt.ylabel(r'$\dot{r}$', labelpad=-10)
# find roots (r^*)
roots = [None] * len(F)
    for i in range(len(F)):
r = polyroots([F[i], # ^0
params.alpha, # ^1
-params.epsilon*F[i], # ^2
params.beta1-params.epsilon*params.alpha, # ^3
0, # ^4
params.epsilon*(params.beta2-params.beta1)]) # ^5
r = np.real(r[np.abs(np.imag(r)) < 1e-20])
r = r[(r>=0) & (r < 1/params.sqe)]
roots[i] = r
# print roots
# plot the roots
plt.gca().set_color_cycle(colors)
for r in roots:
plt.plot(r, np.zeros_like(r), 'o', markersize=4, zorder=10)
def plotResonanceDiagram(N, exclude_inf=True):
"""
Generate resonance plot.
As the one shown in http://goo.gl/dOSV2z
To Do:
Complete documentation
"""
ALPHA = 0.2
plt.figure()
ticks = set([])
for h, k in fareySequence(N, 1):
ticks.add((h,k))
for a, b in resonanceSequence(N, k):
if b == 0:
if not exclude_inf:
plt.plot([h/k, h/k], [0, 1], 'b:', alpha=2*ALPHA)
plt.plot([0, 1], [h/k, h/k], 'b:', alpha=2*ALPHA)
continue
m = a/b
cp, cm = m*h/k, -m*h/k
x = np.array([0, h/k, 1])
y = np.array([cp, 0, cm+m])
plt.plot( x, y, 'b', alpha=ALPHA)
plt.plot( y, x, 'b', alpha=ALPHA)
plt.plot( x, 1-y, 'b', alpha=ALPHA)
plt.plot(1-y, x, 'b', alpha=ALPHA)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xticks([h/k for h,k in ticks], [r"$\frac{{{:d}}}{{{:d}}}$".format(h,k) for h,k in ticks])
plt.yticks([h/k for h,k in ticks], [r"$\frac{{{:d}}}{{{:d}}}$".format(h,k) for h,k in ticks])
# plt.xticks([h/k for h,k in ticks], [r"${:d}/{:d}$".format(h,k) for h,k in ticks])
# plt.yticks([h/k for h,k in ticks], [r"${:d}/{:d}$".format(h,k) for h,k in ticks])
plt.title("N = {:d}".format(N))
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import datetime
from decimal import Decimal
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pyflink.common import Row, RowKind
from pyflink.fn_execution.state_impl import RemovableConcatIterator
from pyflink.table import DataTypes
from pyflink.table.data_view import ListView, MapView
from pyflink.table.expressions import col
from pyflink.table.udf import AggregateFunction, udaf
from pyflink.testing.test_case_utils import PyFlinkBlinkStreamTableTestCase
class CountAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
accumulator[0] = accumulator[0] + 1
def retract(self, accumulator, *args):
accumulator[0] = accumulator[0] - 1
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] = accumulator[0] + other_acc[0]
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.BIGINT()
class SumAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
accumulator[0] = accumulator[0] + args[0]
def retract(self, accumulator, *args):
accumulator[0] = accumulator[0] - args[0]
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] = accumulator[0] + other_acc[0]
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.BIGINT()
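# --- Hedged registration sketch (assumes the documented udaf() signature) ---
# Shows how the aggregate functions above are typically wrapped for use in the
# Table API. The explicit result/accumulator types simply mirror what the
# get_result_type()/get_accumulator_type() methods return; actually running a
# query would additionally need a TableEnvironment such as the one provided by
# PyFlinkBlinkStreamTableTestCase.
if __name__ == '__main__':
    count_agg = udaf(CountAggregateFunction(),
                     result_type=DataTypes.BIGINT(),
                     accumulator_type=DataTypes.ARRAY(DataTypes.BIGINT()),
                     func_type="general")
    sum_agg = udaf(SumAggregateFunction(),
                   result_type=DataTypes.BIGINT(),
                   accumulator_type=DataTypes.ARRAY(DataTypes.BIGINT()),
                   func_type="general")
    # e.g. t.group_by(col('b')).select(count_agg(col('a')), sum_agg(col('a')))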
class ConcatAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
str_list = [i for i in accumulator[0]]
str_list.sort()
return accumulator[1].join(str_list)
def create_accumulator(self):
return Row([], '')
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[1] = args[1]
accumulator[0].append(args[0])
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[0].remove(args[0])
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.ARRAY(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.STRING()
class ListViewConcatAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[1].join(accumulator[0])
def create_accumulator(self):
return Row(ListView(), '')
def accumulate(self, accumulator, *args):
accumulator[1] = args[1]
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
raise NotImplementedError
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.LIST_VIEW(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.STRING()
class CountDistinctAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[1]
def create_accumulator(self):
return Row(MapView(), 0)
def accumulate(self, accumulator, *args):
input_str = args[0]
if accumulator[0].is_empty() or input_str not in accumulator[0] \
or accumulator[0][input_str] is None:
accumulator[0][input_str] = 1
accumulator[1] += 1
else:
accumulator[0][input_str] += 1
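        # a literal "clear" input resets all distinct-count state
        # (exercised by test_data_view_clear below)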
if input_str == "clear":
accumulator[0].clear()
accumulator[1] = 0
def retract(self, accumulator, *args):
input_str = args[0]
if accumulator[0].is_empty() or input_str not in accumulator[0]:
return
accumulator[0].put_all({input_str: accumulator[0][input_str] - 1})
if accumulator[0][input_str] <= 0:
accumulator[1] -= 1
accumulator[0][input_str] = None
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.BIGINT()
class TestIterateAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
# test iterate keys
key_set = [i for i in accumulator[0]]
key_set.sort()
# test iterate values
value_set = [str(i) for i in accumulator[0].values()]
value_set.sort()
item_set = {}
# test iterate items
for key, value in accumulator[0].items():
item_set[key] = value
ordered_item_set = collections.OrderedDict()
for key in key_set:
ordered_item_set[key] = str(item_set[key])
try:
# test auto clear the cached iterators
next(iter(accumulator[0].items()))
except StopIteration:
pass
return Row(",".join(key_set),
','.join(value_set),
",".join([":".join(item) for item in ordered_item_set.items()]),
accumulator[1])
def create_accumulator(self):
return Row(MapView(), 0)
def accumulate(self, accumulator, *args):
input_str = args[0]
if input_str not in accumulator[0]:
accumulator[0][input_str] = 1
accumulator[1] += 1
else:
accumulator[0][input_str] += 1
def retract(self, accumulator, *args):
input_str = args[0]
if input_str not in accumulator[0]:
return
accumulator[0][input_str] -= 1
if accumulator[0][input_str] == 0:
# test removable iterator
key_iter = iter(accumulator[0].keys()) # type: RemovableConcatIterator
while True:
try:
key = next(key_iter)
if key == input_str:
key_iter.remove()
except StopIteration:
break
accumulator[1] -= 1
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.MAP_VIEW(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.STRING()),
DataTypes.FIELD("f1", DataTypes.STRING()),
DataTypes.FIELD("f2", DataTypes.STRING()),
DataTypes.FIELD("f3", DataTypes.BIGINT())])
class StreamTableAggregateTests(PyFlinkBlinkStreamTableTestCase):
def test_double_aggregate(self):
self.t_env.register_function("my_count", CountAggregateFunction())
self.t_env.create_temporary_function("my_sum", SumAggregateFunction())
        # trigger the finish bundle more frequently to exercise the communication
        # between the RemoteKeyedStateBackend and the StateGrpcService.
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "1")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi2'),
(2, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select("my_count(a) as a, my_sum(a) as b, c") \
.select("my_count(a) as a, my_sum(b) as b, sum0(b) as c, sum0(b.cast(double)) as d")
assert_frame_equal(result.to_pandas(),
pd.DataFrame([[3, 12, 12, 12.0]], columns=['a', 'b', 'c', 'd']))
def test_mixed_with_built_in_functions_with_retract(self):
self.env.set_parallelism(1)
self.t_env.create_temporary_system_function(
"concat",
ConcatAggregateFunction())
t = self.t_env.from_elements(
[(1, 'Hi_', 1),
(1, 'Hi', 2),
(2, 'Hi_', 3),
(2, 'Hi', 4),
(3, None, None),
(3, None, None),
(4, 'hello2_', 7),
(4, 'hello2', 8),
(5, 'hello_', 9),
(5, 'hello', 10)], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select a, LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
self.t_env.create_temporary_view("retract_table", table_with_retract_message)
result_table = self.t_env.sql_query(
"select concat(b, ',') as a, "
"FIRST_VALUE(b) as b, "
"LAST_VALUE(b) as c, "
"COUNT(c) as d, "
"COUNT(1) as e, "
"LISTAGG(b) as f,"
"LISTAGG(b, '|') as g,"
"MAX(c) as h,"
"MAX(cast(c as float) + 1) as i,"
"MIN(c) as j,"
"MIN(cast(c as decimal) + 1) as k,"
"SUM(c) as l,"
"SUM(cast(c as float) + 1) as m,"
"AVG(c) as n,"
"AVG(cast(c as double) + 1) as o,"
"STDDEV_POP(cast(c as float)),"
"STDDEV_SAMP(cast(c as float)),"
"VAR_POP(cast(c as float)),"
"VAR_SAMP(cast(c as float))"
" from retract_table")
result = [i for i in result_table.execute().collect()]
expected = Row('Hi,Hi,hello,hello2', 'Hi', 'hello', 4, 5, 'Hi,Hi,hello2,hello',
'Hi|Hi|hello2|hello', 10, 11.0, 2, Decimal(3.0), 24, 28.0, 6, 7.0,
3.1622777, 3.6514838, 10.0, 13.333333)
expected.set_row_kind(RowKind.INSERT)
self.assertEqual(result[len(result) - 1], expected)
def test_mixed_with_built_in_functions_without_retract(self):
self.env.set_parallelism(1)
self.t_env.create_temporary_system_function(
"concat",
ConcatAggregateFunction())
t = self.t_env.from_elements(
[('Hi', 2),
('Hi', 4),
(None, None),
('hello2', 8),
('hello', 10)], ['b', 'c'])
self.t_env.create_temporary_view("source", t)
result_table = self.t_env.sql_query(
"select concat(b, ',') as a, "
"FIRST_VALUE(b) as b, "
"LAST_VALUE(b) as c, "
"COUNT(c) as d, "
"COUNT(1) as e, "
"LISTAGG(b) as f,"
"LISTAGG(b, '|') as g,"
"MAX(c) as h,"
"MAX(cast(c as float) + 1) as i,"
"MIN(c) as j,"
"MIN(cast(c as decimal) + 1) as k,"
"SUM(c) as l,"
"SUM(cast(c as float) + 1) as m "
"from source")
result = [i for i in result_table.execute().collect()]
expected = Row('Hi,Hi,hello,hello2', 'Hi', 'hello', 4, 5, 'Hi,Hi,hello2,hello',
'Hi|Hi|hello2|hello', 10, 11.0, 2, Decimal(3.0), 24, 28.0)
expected.set_row_kind(RowKind.INSERT)
self.assertEqual(result[len(result) - 1], expected)
def test_using_decorator(self):
my_count = udaf(CountAggregateFunction(),
accumulator_type=DataTypes.ARRAY(DataTypes.INT()),
result_type=DataTypes.INT())
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c) \
.select(my_count(t.a).alias("a"), t.c.alias("b"))
plan = result.explain()
result_type = result.get_schema().get_field_data_type(0)
self.assertTrue(plan.find("PythonGroupAggregate(groupBy=[c], ") >= 0)
self.assertEqual(result_type, DataTypes.INT())
def test_list_view(self):
my_concat = udaf(ListViewConcatAggregateFunction())
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "2")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi'),
(2, 'Hi', 'Hello'),
(1, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(3, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(2, 'Hi3', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(my_concat(t.b, ',').alias("a"), t.c)
assert_frame_equal(result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
["Hi,Hi2,Hi,Hi3,Hi3", "hi"]], columns=['a', 'c']))
def test_map_view(self):
my_count = udaf(CountDistinctAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "1")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.read-cache-size", "1")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.write-cache-size", "1")
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
result = table_with_retract_message.group_by(t.c).select(my_count(t.b).alias("a"), t.c)
assert_frame_equal(result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([[2, "hello"],
[3, "hi"]], columns=['a', 'c']))
def test_data_view_clear(self):
my_count = udaf(CountDistinctAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "1")
t = self.t_env.from_elements(
[(2, 'hello', 'hello'),
(4, 'clear', 'hello'),
(6, 'hello2', 'hello'),
(8, 'hello', 'hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(my_count(t.b).alias("a"), t.c)
assert_frame_equal(result.to_pandas(),
pd.DataFrame([[2, "hello"]], columns=['a', 'c']))
def test_map_view_iterate(self):
test_iterate = udaf(TestIterateAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "2")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.read-cache-size", "2")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.write-cache-size", "2")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.iterate-response-batch-size", "2")
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
result = table_with_retract_message.group_by(t.c) \
.select(test_iterate(t.b).alias("a"), t.c) \
.select(col("a").get(0).alias("a"),
col("a").get(1).alias("b"),
col("a").get(2).alias("c"),
col("a").get(3).alias("d"),
t.c.alias("e"))
assert_frame_equal(
result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([
["Hi,Hi2,Hi3", "1,2,3", "Hi:3,Hi2:2,Hi3:1", 3, "hi"],
["hello,hello2", "1,3", 'hello:3,hello2:1', 2, "hello"]],
columns=['a', 'b', 'c', 'd', 'e']))
def test_distinct_and_filter(self):
self.t_env.create_temporary_system_function(
"concat",
ConcatAggregateFunction())
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
self.t_env.create_temporary_view("retract_table", table_with_retract_message)
result = self.t_env.sql_query(
"select concat(distinct b, '.') as a, "
"concat(distinct b, ',') filter (where c = 'hi') as b, "
"concat(distinct b, ',') filter (where c = 'hello') as c, "
"c as d "
"from retract_table group by c")
assert_frame_equal(result.to_pandas().sort_values(by='a').reset_index(drop=True),
pd.DataFrame([["Hi.Hi2.Hi3", "Hi,Hi2,Hi3", "", "hi"],
["hello.hello2", "", "hello,hello2", "hello"]],
columns=['a', 'b', 'c', 'd']))
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
"""Support for esphome devices."""
import asyncio
import logging
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Callable
import attr
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, \
EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback, Event
import homeassistant.helpers.device_registry as dr
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
from homeassistant.helpers.dispatcher import async_dispatcher_connect, \
async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.template import Template
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import HomeAssistantType, ConfigType
# Import config flow so that it's added to the registry
from .config_flow import EsphomeFlowHandler # noqa
if TYPE_CHECKING:
from aioesphomeapi import APIClient, EntityInfo, EntityState, DeviceInfo, \
ServiceCall
DOMAIN = 'esphome'
REQUIREMENTS = ['aioesphomeapi==1.3.0']
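# Dispatcher signal name templates; each is formatted with the config entry id
# and, where applicable, the platform component key and entity key.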
DISPATCHER_UPDATE_ENTITY = 'esphome_{entry_id}_update_{component_key}_{key}'
DISPATCHER_REMOVE_ENTITY = 'esphome_{entry_id}_remove_{component_key}_{key}'
DISPATCHER_ON_LIST = 'esphome_{entry_id}_on_list'
DISPATCHER_ON_DEVICE_UPDATE = 'esphome_{entry_id}_on_device_update'
DISPATCHER_ON_STATE = 'esphome_{entry_id}_on_state'
STORAGE_KEY = 'esphome.{}'
STORAGE_VERSION = 1
# The HA component types this integration supports
HA_COMPONENTS = [
'binary_sensor',
'cover',
'fan',
'light',
'sensor',
'switch',
]
_LOGGER = logging.getLogger(__name__)
# No config schema - only configuration entry
CONFIG_SCHEMA = vol.Schema({}, extra=vol.ALLOW_EXTRA)
@attr.s
class RuntimeEntryData:
"""Store runtime data for esphome config entries."""
entry_id = attr.ib(type=str)
client = attr.ib(type='APIClient')
store = attr.ib(type=Store)
reconnect_task = attr.ib(type=Optional[asyncio.Task], default=None)
state = attr.ib(type=Dict[str, Dict[str, Any]], factory=dict)
info = attr.ib(type=Dict[str, Dict[str, Any]], factory=dict)
available = attr.ib(type=bool, default=False)
device_info = attr.ib(type='DeviceInfo', default=None)
cleanup_callbacks = attr.ib(type=List[Callable[[], None]], factory=list)
disconnect_callbacks = attr.ib(type=List[Callable[[], None]], factory=list)
def async_update_entity(self, hass: HomeAssistantType, component_key: str,
key: int) -> None:
"""Schedule the update of an entity."""
signal = DISPATCHER_UPDATE_ENTITY.format(
entry_id=self.entry_id, component_key=component_key, key=key)
async_dispatcher_send(hass, signal)
def async_remove_entity(self, hass: HomeAssistantType, component_key: str,
key: int) -> None:
"""Schedule the removal of an entity."""
signal = DISPATCHER_REMOVE_ENTITY.format(
entry_id=self.entry_id, component_key=component_key, key=key)
async_dispatcher_send(hass, signal)
def async_update_static_infos(self, hass: HomeAssistantType,
infos: 'List[EntityInfo]') -> None:
"""Distribute an update of static infos to all platforms."""
signal = DISPATCHER_ON_LIST.format(entry_id=self.entry_id)
async_dispatcher_send(hass, signal, infos)
def async_update_state(self, hass: HomeAssistantType,
state: 'EntityState') -> None:
"""Distribute an update of state information to all platforms."""
signal = DISPATCHER_ON_STATE.format(entry_id=self.entry_id)
async_dispatcher_send(hass, signal, state)
def async_update_device_state(self, hass: HomeAssistantType) -> None:
"""Distribute an update of a core device state like availability."""
signal = DISPATCHER_ON_DEVICE_UPDATE.format(entry_id=self.entry_id)
async_dispatcher_send(hass, signal)
async def async_load_from_store(self) -> List['EntityInfo']:
"""Load the retained data from store and return de-serialized data."""
# pylint: disable= redefined-outer-name
from aioesphomeapi import COMPONENT_TYPE_TO_INFO, DeviceInfo
restored = await self.store.async_load()
if restored is None:
return []
self.device_info = _attr_obj_from_dict(DeviceInfo,
**restored.pop('device_info'))
infos = []
for comp_type, restored_infos in restored.items():
if comp_type not in COMPONENT_TYPE_TO_INFO:
continue
for info in restored_infos:
cls = COMPONENT_TYPE_TO_INFO[comp_type]
infos.append(_attr_obj_from_dict(cls, **info))
return infos
async def async_save_to_store(self) -> None:
"""Generate dynamic data to store and save it to the filesystem."""
store_data = {
'device_info': attr.asdict(self.device_info)
}
for comp_type, infos in self.info.items():
store_data[comp_type] = [attr.asdict(info)
for info in infos.values()]
await self.store.async_save(store_data)
def _attr_obj_from_dict(cls, **kwargs):
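    # Build an attrs object from a dict, silently dropping any keys the class
    # does not declare as attributes.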
return cls(**{key: kwargs[key] for key in attr.fields_dict(cls)})
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Stub to allow setting up this component.
Configuration through YAML is not supported at this time.
"""
return True
async def async_setup_entry(hass: HomeAssistantType,
entry: ConfigEntry) -> bool:
"""Set up the esphome component."""
# pylint: disable=redefined-outer-name
from aioesphomeapi import APIClient, APIConnectionError
hass.data.setdefault(DOMAIN, {})
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
password = entry.data[CONF_PASSWORD]
cli = APIClient(hass.loop, host, port, password)
await cli.start()
# Store client in per-config-entry hass.data
store = Store(hass, STORAGE_VERSION, STORAGE_KEY.format(entry.entry_id),
encoder=JSONEncoder)
entry_data = hass.data[DOMAIN][entry.entry_id] = RuntimeEntryData(
client=cli,
entry_id=entry.entry_id,
store=store,
)
async def on_stop(event: Event) -> None:
"""Cleanup the socket client on HA stop."""
await _cleanup_instance(hass, entry)
entry_data.cleanup_callbacks.append(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_stop)
)
try_connect = await _setup_auto_reconnect_logic(hass, cli, entry, host)
@callback
def async_on_state(state: 'EntityState') -> None:
"""Send dispatcher updates when a new state is received."""
entry_data.async_update_state(hass, state)
@callback
def async_on_service_call(service: 'ServiceCall') -> None:
"""Call service when user automation in ESPHome config is triggered."""
domain, service_name = service.service.split('.', 1)
service_data = service.data
if service.data_template:
try:
data_template = {key: Template(value) for key, value in
service.data_template.items()}
template.attach(hass, data_template)
service_data.update(template.render_complex(
data_template, service.variables))
except TemplateError as ex:
_LOGGER.error('Error rendering data template: %s', ex)
return
hass.async_create_task(hass.services.async_call(
domain, service_name, service_data, blocking=True))
async def send_home_assistant_state(entity_id: str, _,
new_state: Optional[str]) -> None:
"""Forward Home Assistant states to ESPHome."""
if new_state is None:
return
await cli.send_home_assistant_state(entity_id, new_state)
@callback
def async_on_state_subscription(entity_id: str) -> None:
"""Subscribe and forward states for requested entities."""
unsub = async_track_state_change(
hass, entity_id, send_home_assistant_state)
entry_data.disconnect_callbacks.append(unsub)
# Send initial state
hass.async_create_task(send_home_assistant_state(
entity_id, None, hass.states.get(entity_id)))
async def on_login() -> None:
"""Subscribe to states and list entities on successful API login."""
try:
entry_data.device_info = await cli.device_info()
entry_data.available = True
await _async_setup_device_registry(hass, entry,
entry_data.device_info)
entry_data.async_update_device_state(hass)
entity_infos = await cli.list_entities()
entry_data.async_update_static_infos(hass, entity_infos)
await cli.subscribe_states(async_on_state)
await cli.subscribe_service_calls(async_on_service_call)
await cli.subscribe_home_assistant_states(
async_on_state_subscription)
hass.async_create_task(entry_data.async_save_to_store())
except APIConnectionError as err:
_LOGGER.warning("Error getting initial data: %s", err)
# Re-connection logic will trigger after this
await cli.disconnect()
cli.on_login = on_login
# This is a bit of a hack: We schedule complete_setup into the
# event loop and return immediately (return True)
#
# Usually, we should avoid that so that HA can track which components
# have been started successfully and which failed to be set up.
# That doesn't work here for two reasons:
# - We have our own re-connect logic
# - Before we do the first try_connect() call, we need to make sure
# all dispatcher event listeners have been connected, so
# async_forward_entry_setup needs to be awaited. However, if we
    #   awaited async_forward_entry_setup() in async_setup_entry(),
# we would end up with a deadlock.
#
# Solution is: complete the setup outside of the async_setup_entry()
# function. HA will wait until the first connection attempt is made
# before starting up (as it should), but if the first connection attempt
    # fails, we will schedule all subsequent re-connect attempts outside of the
# tracked tasks (hass.loop.create_task). This way HA won't stall startup
# forever until a connection is successful.
async def complete_setup() -> None:
"""Complete the config entry setup."""
tasks = []
for component in HA_COMPONENTS:
tasks.append(hass.config_entries.async_forward_entry_setup(
entry, component))
await asyncio.wait(tasks)
infos = await entry_data.async_load_from_store()
entry_data.async_update_static_infos(hass, infos)
# If first connect fails, the next re-connect will be scheduled
# outside of _pending_task, in order not to delay HA startup
# indefinitely
await try_connect(is_disconnect=False)
hass.async_create_task(complete_setup())
return True
async def _setup_auto_reconnect_logic(hass: HomeAssistantType,
cli: 'APIClient',
entry: ConfigEntry, host: str):
"""Set up the re-connect logic for the API client."""
from aioesphomeapi import APIConnectionError
async def try_connect(tries: int = 0, is_disconnect: bool = True) -> None:
"""Try connecting to the API client. Will retry if not successful."""
if entry.entry_id not in hass.data[DOMAIN]:
# When removing/disconnecting manually
return
data = hass.data[DOMAIN][entry.entry_id] # type: RuntimeEntryData
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
data.disconnect_callbacks = []
data.available = False
data.async_update_device_state(hass)
if tries != 0:
            # Not the first attempt: wait with exponential backoff
            # (2, 4, 8, ... seconds, capped at 300) before reconnecting.
wait_time = min(2**tries, 300)
_LOGGER.info("Trying to reconnect in %s seconds", wait_time)
await asyncio.sleep(wait_time)
if is_disconnect and tries == 0:
            # This can happen often depending on WiFi signal strength,
            # so these connection warnings are logged as infos. The
            # "unavailable" logic will still trigger so the user knows
            # if the device is not connected.
_LOGGER.info("Disconnected from API")
try:
await cli.connect()
await cli.login()
except APIConnectionError as error:
_LOGGER.info("Can't connect to esphome API for '%s' (%s)",
host, error)
# Schedule re-connect in event loop in order not to delay HA
# startup. First connect is scheduled in tracked tasks.
data.reconnect_task = \
hass.loop.create_task(try_connect(tries + 1, is_disconnect))
else:
_LOGGER.info("Successfully connected to %s", host)
cli.on_disconnect = try_connect
return try_connect
async def _async_setup_device_registry(hass: HomeAssistantType,
entry: ConfigEntry,
device_info: 'DeviceInfo'):
"""Set up device registry feature for a particular config entry."""
sw_version = device_info.esphome_core_version
if device_info.compilation_time:
sw_version += ' ({})'.format(device_info.compilation_time)
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={
(dr.CONNECTION_NETWORK_MAC, device_info.mac_address)
},
name=device_info.name,
manufacturer='espressif',
model=device_info.model,
sw_version=sw_version,
)
async def _cleanup_instance(hass: HomeAssistantType,
entry: ConfigEntry) -> None:
"""Cleanup the esphome client if it exists."""
data = hass.data[DOMAIN].pop(entry.entry_id) # type: RuntimeEntryData
if data.reconnect_task is not None:
data.reconnect_task.cancel()
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
for cleanup_callback in data.cleanup_callbacks:
cleanup_callback()
await data.client.stop()
async def async_unload_entry(hass: HomeAssistantType,
entry: ConfigEntry) -> bool:
"""Unload an esphome config entry."""
await _cleanup_instance(hass, entry)
tasks = []
for component in HA_COMPONENTS:
tasks.append(hass.config_entries.async_forward_entry_unload(
entry, component))
await asyncio.wait(tasks)
return True
async def platform_async_setup_entry(hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities,
*,
component_key: str,
info_type,
entity_type,
state_type
) -> None:
"""Set up an esphome platform.
This method is in charge of receiving, distributing and storing
info and state updates.
"""
entry_data = hass.data[DOMAIN][entry.entry_id] # type: RuntimeEntryData
entry_data.info[component_key] = {}
entry_data.state[component_key] = {}
@callback
def async_list_entities(infos: List['EntityInfo']):
"""Update entities of this platform when entities are listed."""
old_infos = entry_data.info[component_key]
new_infos = {}
add_entities = []
for info in infos:
if not isinstance(info, info_type):
# Filter out infos that don't belong to this platform.
continue
if info.key in old_infos:
# Update existing entity
old_infos.pop(info.key)
else:
# Create new entity
entity = entity_type(entry.entry_id, component_key, info.key)
add_entities.append(entity)
new_infos[info.key] = info
# Remove old entities
for info in old_infos.values():
entry_data.async_remove_entity(hass, component_key, info.key)
entry_data.info[component_key] = new_infos
async_add_entities(add_entities)
signal = DISPATCHER_ON_LIST.format(entry_id=entry.entry_id)
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_list_entities)
)
@callback
def async_entity_state(state: 'EntityState'):
"""Notify the appropriate entity of an updated state."""
if not isinstance(state, state_type):
return
entry_data.state[component_key][state.key] = state
entry_data.async_update_entity(hass, component_key, state.key)
signal = DISPATCHER_ON_STATE.format(entry_id=entry.entry_id)
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_entity_state)
)
class EsphomeEntity(Entity):
"""Define a generic esphome entity."""
def __init__(self, entry_id: str, component_key: str, key: int):
"""Initialize."""
self._entry_id = entry_id
self._component_key = component_key
self._key = key
self._remove_callbacks = [] # type: List[Callable[[], None]]
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
kwargs = {
'entry_id': self._entry_id,
'component_key': self._component_key,
'key': self._key,
}
self._remove_callbacks.append(
async_dispatcher_connect(self.hass,
DISPATCHER_UPDATE_ENTITY.format(**kwargs),
self.async_schedule_update_ha_state)
)
self._remove_callbacks.append(
async_dispatcher_connect(self.hass,
DISPATCHER_REMOVE_ENTITY.format(**kwargs),
self.async_schedule_update_ha_state)
)
self._remove_callbacks.append(
async_dispatcher_connect(
self.hass, DISPATCHER_ON_DEVICE_UPDATE.format(**kwargs),
self.async_schedule_update_ha_state)
)
async def async_will_remove_from_hass(self):
"""Unregister callbacks."""
for remove_callback in self._remove_callbacks:
remove_callback()
self._remove_callbacks = []
@property
def _entry_data(self) -> RuntimeEntryData:
return self.hass.data[DOMAIN][self._entry_id]
@property
def _static_info(self) -> 'EntityInfo':
return self._entry_data.info[self._component_key][self._key]
@property
def _device_info(self) -> 'DeviceInfo':
return self._entry_data.device_info
@property
def _client(self) -> 'APIClient':
return self._entry_data.client
@property
def _state(self) -> 'Optional[EntityState]':
try:
return self._entry_data.state[self._component_key][self._key]
except KeyError:
return None
@property
def available(self) -> bool:
"""Return if the entity is available."""
device = self._device_info
if device.has_deep_sleep:
# During deep sleep the ESP will not be connectable (by design)
# For these cases, show it as available
return True
return self._entry_data.available
@property
def unique_id(self) -> Optional[str]:
"""Return a unique id identifying the entity."""
if not self._static_info.unique_id:
return None
return self._static_info.unique_id
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
'connections': {(dr.CONNECTION_NETWORK_MAC,
self._device_info.mac_address)}
}
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._static_info.name
@property
def should_poll(self) -> bool:
"""Disable polling."""
return False
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Volume Code.
"""
import cStringIO
import mox
from nova import context
from nova import exception
from nova import db
from nova import flags
from nova import log as logging
import nova.policy
from nova import rpc
from nova import test
from nova import utils
import nova.volume.api
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
super(VolumeTestCase, self).setUp()
self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
self.volume = utils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
self.instance_id = db.instance_create(self.context, {})['id']
def tearDown(self):
db.instance_destroy(self.context, self.instance_id)
super(VolumeTestCase, self).tearDown()
@staticmethod
def _create_volume(size='0', snapshot_id=None):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['snapshot_id'] = snapshot_id
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(context.get_admin_context(), vol)
def test_create_delete_volume(self):
"""Test volume can be created and deleted."""
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
volume_id).id)
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
self.volume.driver.delete_volume(mox.IgnoreArg()) \
.AndRaise(exception.VolumeIsBusy)
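        # With the driver stubbed to report the volume as busy, the manager is
        # expected to keep the volume and flip its status back to "available".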
self.mox.ReplayAll()
res = self.volume.delete_volume(self.context, volume_id)
self.assertEqual(True, res)
volume_ref = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume_id, volume_ref.id)
self.assertEqual("available", volume_ref.status)
self.mox.UnsetStubs()
self.volume.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
"""Test volume can be created from a snapshot."""
volume_src = self._create_volume()
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'])
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_id)
volume_dst = self._create_volume(0, snapshot_id)
self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id, db.volume_get(
context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
def test_too_big_volume(self):
"""Ensure failure if a too large of a volume is requested."""
# FIXME(vish): validation needs to move into the data layer in
# volume_create
return True
try:
volume = self._create_volume('1001')
self.volume.create_volume(self.context, volume)
self.fail("Should have thrown TypeError")
except TypeError:
pass
def test_too_many_volumes(self):
"""Ensure that NoMoreTargets is raised when we run out of volumes."""
vols = []
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
vols.append(volume['id'])
volume = self._create_volume()
self.assertRaises(db.NoMoreTargets,
self.volume.create_volume,
self.context,
volume['id'])
db.volume_destroy(context.get_admin_context(), volume['id'])
for volume_id in vols:
self.volume.delete_volume(self.context, volume_id)
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = '2' # m1.tiny
inst['ami_launch_index'] = 0
instance_id = db.instance_create(self.context, inst)['id']
mountpoint = "/dev/sdf"
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
db.volume_attached(self.context, volume_id, instance_id,
mountpoint)
else:
self.compute.attach_volume(self.context,
instance_id,
volume_id,
mountpoint)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
instance_ref = db.volume_get_instance(self.context, volume_id)
self.assertEqual(instance_ref['id'], instance_id)
self.assertRaises(exception.Error,
self.volume.delete_volume,
self.context,
volume_id)
if FLAGS.fake_tests:
db.volume_detached(self.context, volume_id)
else:
self.compute.detach_volume(self.context,
instance_id,
volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
db.instance_destroy(self.context, instance_id)
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
targets = []
def _check(volume_id):
"""Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
iscsi_target = db.volume_get_iscsi_target_num(admin_context,
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume = self._create_volume()
d = self.volume.create_volume(self.context, volume['id'])
_check(d)
for volume_id in volume_ids:
self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
@staticmethod
def _create_snapshot(volume_id, size='0'):
"""Create a snapshot object."""
snap = {}
snap['volume_size'] = size
snap['user_id'] = 'fake'
snap['project_id'] = 'fake'
snap['volume_id'] = volume_id
snap['status'] = "creating"
return db.snapshot_create(context.get_admin_context(), snap)['id']
def test_create_delete_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
self.volume.delete_snapshot(self.context, snapshot_id)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
def fake_cast(ctxt, topic, msg):
pass
self.stubs.Set(rpc, 'cast', fake_cast)
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
db.volume_attached(self.context, volume['id'], self.instance_id,
'/dev/sda1')
volume_api = nova.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
db.snapshot_destroy(self.context, snapshot_ref['id'])
db.volume_destroy(self.context, volume['id'])
def test_delete_busy_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
snapshot_id = self._create_snapshot(volume_id)
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(mox.IgnoreArg()) \
.AndRaise(exception.SnapshotIsBusy)
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot_id)
snapshot_ref = db.snapshot_get(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.mox.UnsetStubs()
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_id)
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "nova.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
self.flags(volume_driver=self.driver_name,
logging_default_format_string="%(message)s")
self.volume = utils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
self.output = ""
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
self.volume.driver.set_execute(_fake_execute)
log = logging.getLogger()
self.stream = cStringIO.StringIO()
log.logger.addHandler(logging.logging.StreamHandler(self.stream))
inst = {}
self.instance_id = db.instance_create(self.context, inst)['id']
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
class VolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver"""
driver_name = "nova.volume.driver.VolumeDriver"
def test_delete_busy_volume(self):
"""Test deleting a busy volume."""
self.stubs.Set(self.volume.driver, '_volume_not_present',
lambda x: False)
self.stubs.Set(self.volume.driver, '_delete_volume',
lambda x, y: False)
# Want DriverTestCase._fake_execute to return 'o' so that
# volume.driver.delete_volume() raises the VolumeIsBusy exception.
self.output = 'o'
self.assertRaises(exception.VolumeIsBusy,
self.volume.driver.delete_volume,
{'name': 'test1', 'size': 1024})
# when DriverTestCase._fake_execute returns something other than
# 'o' volume.driver.delete_volume() does not raise an exception.
self.output = 'x'
self.volume.driver.delete_volume({'name': 'test1', 'size': 1024})
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
driver_name = "nova.volume.driver.ISCSIDriver"
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
volume_id_list = []
for index in xrange(3):
vol = {}
vol['size'] = 0
vol_ref = db.volume_create(self.context, vol)
self.volume.create_volume(self.context, vol_ref['id'])
vol_ref = db.volume_get(self.context, vol_ref['id'])
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
db.volume_attached(self.context, vol_ref['id'], self.instance_id,
mountpoint)
volume_id_list.append(vol_ref['id'])
return volume_id_list
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self):
"""No log message when all the processes are running."""
volume_id_list = self._attach_volume()
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
for i in volume_id_list:
tid = db.volume_get_iscsi_target_num(self.context, i)
self.volume.driver.tgtadm.show_target(tid)
self.stream.truncate(0)
self.mox.ReplayAll()
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
def test_check_for_export_with_some_volume_missing(self):
"""Output a warning message when some volumes are not recognied
by ietd."""
volume_id_list = self._attach_volume()
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
self.volume.driver.tgtadm.show_target(tid).AndRaise(
exception.ProcessExecutionError())
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.volume.check_for_export,
self.context,
self.instance_id)
msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
self.assertTrue(0 <= self.stream.getvalue().find(msg))
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
class VolumePolicyTestCase(test.TestCase):
def setUp(self):
super(VolumePolicyTestCase, self).setUp()
nova.policy.reset()
nova.policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(VolumePolicyTestCase, self).tearDown()
nova.policy.reset()
def _set_rules(self, rules):
nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
def test_check_policy(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
nova.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
nova.volume.api.check_policy(self.context, 'attach')
def test_check_policy_with_target(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'id': 2,
}
nova.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
nova.volume.api.check_policy(self.context, 'attach', {'id': 2})
|
|
from collections import namedtuple
import sublime
import re
try: # Python 3
from .haxe_parse_helper import parse_package
except (ValueError): # Python 2
from haxe_parse_helper import parse_package
SCOPE_VAR = 'meta.variable.haxe.2'
SCOPE_VAR_NAME = 'entity.name.variable.haxe.2'
SCOPE_FUNC = 'meta.method.haxe.2'
SCOPE_FUNC_BLOCK = 'meta.method.block.haxe.2'
SCOPE_FUNC_NAME = 'entity.name.function.haxe.2'
SCOPE_PARAMETERS = 'meta.parameters.haxe.2'
SCOPE_STATIC = 'meta.static.haxe.2'
SCOPE_TYPE = 'meta.type'
SCOPE_TYPE_BLOCK = 'meta.type.block.haxe.2'
SCOPE_TYPE_NAME = 'entity.name.type.class.haxe.2'
FIELD_FUNC = 'function'
FIELD_VAR = 'var'
FIELD_STATIC_FUNC = 'static function'
FIELD_STATIC_VAR = 'static var'
re_import = re.compile(r'^([ \t]*)import\s+([a-z0-9._*]+);', re.I | re.M)
re_prop_params = re.compile(r'\((\w*)\s*,\s*(\w*)\)\s*:?\s*(\w*)')
re_word = re.compile(r'^[_a-zA-Z]\w*$')
def count_blank_lines(view, pos):
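    # Count consecutive newline characters before and after pos, ignoring
    # spaces and tabs, i.e. the blank lines on each side of the position.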
whitespaces = ' \t'
src = view.substr(sublime.Region(0, view.size()))
before, after = 0, 0
for i in range(pos - 1, 0, -1):
c = src[i]
if c == '\n':
before += 1
elif c not in whitespaces:
break
for i in range(pos, view.size()):
c = src[i]
if c == '\n':
after += 1
elif c not in whitespaces:
break
return before, after
def filter_regions(inners, outers):
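    # Split 'inners' into regions contained by some region of 'outers' and
    # regions contained by none; both lists are expected to be sorted by
    # position (as returned by view.find_by_selector).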
contains = []
ncontains = []
ii, io, ni, no = 0, 0, len(inners), len(outers)
if no == 0:
return contains, inners[:]
while io < no and ii < ni:
inner = inners[ii]
outer = outers[io]
if outer.contains(inner):
contains.append(inner)
io += 1
ii += 1
continue
if inner.begin() > outer.begin():
io += 1
else:
ncontains.append(inner)
ii += 1
while ii < ni:
ncontains.append(inners[ii])
ii += 1
return contains, ncontains
def find_cur_region(view, selector, as_string=False):
rgns = view.find_by_selector(selector)
pos = view.sel()[0].begin()
for rgn in rgns:
if rgn.contains(pos):
if as_string:
return view.substr(rgn)
else:
return rgn
return None
def find_line_start_pos(view, pos):
rgn = view.line(pos)
pos = rgn.begin()
line = view.substr(rgn)
for c in line:
if c == ' ' or c == '\t':
pos += 1
else:
break
return pos
def find_regions(view, selector, in_region=None, incl_string=False):
rgns = view.find_by_selector(selector)
regions = []
for rgn in rgns:
if in_region is not None and in_region.contains(rgn):
if incl_string:
regions.append((rgn, view.substr(rgn)))
else:
regions.append(rgn)
return regions
def get_blank_lines(view, name, default=0):
n = view.settings().get(name, default)
return '\n' * n
def get_context(view):
return HaxeContext(view)
def get_default_value(type_name):
if type_name == 'Float' or type_name == 'Int':
return '0'
elif type_name == 'Void':
return None
elif type_name == 'Bool':
return 'false'
return 'null'
def get_mod_order(view):
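    # modifier order string: o=override, p=public/private, i=inline, s=static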
def_order = 'opis'
order = view.settings().get('haxe_modifiers_order', def_order)
for c in def_order:
if c not in order:
order += c
return order
def get_mods(view, private=True, o=False, p=True, i=False, s=True):
mods = ''
mod_map = {}
order = get_mod_order(view)
def add_mod(use, key, value):
if use:
mod_map[key] = value
add_mod(o, 'o', 'override')
add_mod(p, 'p', 'private' if private else 'public')
add_mod(i, 'i', 'inline')
add_mod(s, 's', 'static')
for c in order:
if c not in mod_map:
continue
mods += mod_map[c] + ' '
del mod_map[c]
return mods.strip()
def get_editable_mods(
view, idx,
private=True, o=False, p=True, i=False, s=True,
eo=False, ep=False, ei=False, es=False):
mods = []
mod_map = {}
edit_map = {}
order = get_mod_order(view)
def add_mod(use, key, value, editable):
edit_map[key] = editable
if use:
mod_map[key] = value
add_mod(o, 'o', 'override', eo)
add_mod(p, 'p', 'private' if private else 'public', ep)
add_mod(i, 'i', 'inline', ei)
add_mod(s, 's', 'static', es)
for c in order:
if c not in mod_map:
continue
mod = mod_map[c] + ' '
if edit_map[c]:
mod = '${%d:%s }' % (idx, mod_map[c])
idx += 1
mods.append(mod)
del mod_map[c]
return ''.join(mods)
def is_haxe_scope(view):
return view.score_selector(0, "source.haxe.2") > 0
def is_imported(tp, imports):
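    # 'tp' counts as imported if it matches an import exactly, or if an import
    # of its package (wildcard or not) leaves only a single '.TypeName'
    # segment unaccounted for.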
for itp in imports:
itp = itp.strip('.*')
if itp == tp:
return True
if itp in tp:
rtp = tp.replace(itp, '')
if rtp.count('.') == 1:
return True
return False
def set_pos(view, pos, center=True):
view.sel().clear()
view.sel().add(sublime.Region(pos, pos))
if center:
view.show_at_center(pos)
def shorten_imported_type(tp, imports):
if '.' in tp:
if is_imported(tp, imports):
tp = tp.rpartition('.')[2]
return tp
CtxVar = namedtuple('CtxVar', ['group', 'name', 'region'])
CtxMethod = namedtuple('CtxMethod', ['group', 'name', 'region', 'block'])
CtxType = namedtuple(
'CtxType',
['group', 'name', 'package', 'full_name', 'region', 'block',
'vars', 'svars', 'methods', 'smethods', 'field_map'])
CtxWord = namedtuple('CtxWord', ['name', 'region', 'scope'])
class HaxeContext(object):
def __init__(self, view):
super(HaxeContext, self).__init__()
self.view = view
pos = view.sel()[0].begin()
self.scope = view.scope_name(pos)
self._type = None
self._var = None
self._method = None
self._word = None
self._src = None
self._imports = None
def get_imports(self):
if self._imports is None:
self._imports = \
[mo.group(2) for mo in re_import.finditer(self.src)]
return self._imports
def get_method(self):
if self._method is None:
self._method = False
if SCOPE_FUNC not in self.scope:
return False
rgn = find_cur_region(self.view, SCOPE_FUNC)
self._method = CtxMethod(
FIELD_STATIC_FUNC if SCOPE_STATIC in self.scope else
FIELD_FUNC,
find_regions(self.view, SCOPE_FUNC_NAME, rgn, True)[0][1],
rgn,
find_regions(self.view, SCOPE_FUNC_BLOCK, rgn)[0])
return self._method
def get_src(self):
if self._src is None:
self._src = self.view.substr(sublime.Region(0, self.view.size()))
return self._src
def get_type(self):
if self._type is None:
self._type = False
if SCOPE_TYPE not in self.scope:
return False
view = self.view
type_group = None
type_scope = None
type_groups = ('abstract', 'class', 'enum', 'interface', 'typedef')
for group in type_groups:
type_scope = 'meta.type.%s.haxe.2' % group
if type_scope in self.scope:
type_group = group
break
type_rgn = find_cur_region(view, type_scope)
v_rgns = find_regions(view, SCOPE_VAR, type_rgn)
vname_rgns = find_regions(view, SCOPE_VAR_NAME, type_rgn)
f_rgns = find_regions(view, SCOPE_FUNC, type_rgn)
fname_rgns = find_regions(view, SCOPE_FUNC_NAME, type_rgn)
s_rgns = find_regions(view, SCOPE_STATIC, type_rgn)
sv_rgns, v_rgns = filter_regions(v_rgns, s_rgns)
sf_rgns, f_rgns = filter_regions(f_rgns, s_rgns)
svname_rgns, vname_rgns = filter_regions(vname_rgns, sv_rgns)
sfname_rgns, fname_rgns = filter_regions(fname_rgns, sf_rgns)
def combine(field_group, field_rgns, field_name_rgns):
lst = []
is_var = 'var' in field_group
for i in range(0, len(field_rgns)):
ctx = None
if is_var:
ctx = CtxVar(
field_group,
view.substr(field_name_rgns[i]),
field_rgns[i])
else:
ctx = CtxMethod(
field_group,
view.substr(field_name_rgns[i]),
field_rgns[i],
find_regions(
view, SCOPE_FUNC_BLOCK, field_rgns[i])[0])
lst.append(ctx)
return lst
v_ctxs = combine(FIELD_VAR, v_rgns, vname_rgns)
sv_ctxs = combine(FIELD_STATIC_VAR, sv_rgns, svname_rgns)
f_ctxs = combine(FIELD_FUNC, f_rgns, fname_rgns)
sf_ctxs = combine(FIELD_STATIC_FUNC, sf_rgns, sfname_rgns)
field_map = {}
for ctx in v_ctxs:
field_map[ctx.name] = ctx
for ctx in sv_ctxs:
field_map[ctx.name] = ctx
for ctx in f_ctxs:
field_map[ctx.name] = ctx
for ctx in sf_ctxs:
field_map[ctx.name] = ctx
name = find_regions(view, SCOPE_TYPE_NAME, type_rgn, True)[0][1]
package = parse_package(self.src)
full_name = name
if package:
full_name = package + '.' + name
self._type = CtxType(
type_group,
name,
package,
full_name,
type_rgn,
find_regions(view, SCOPE_TYPE_BLOCK, type_rgn)[0],
v_ctxs,
sv_ctxs,
f_ctxs,
sf_ctxs,
field_map)
return self._type
def get_var(self):
if self._var is None:
self._var = False
if SCOPE_VAR not in self.scope:
return False
rgn = find_cur_region(self.view, SCOPE_VAR)
self._var = CtxVar(
FIELD_STATIC_VAR if SCOPE_STATIC in self.scope else FIELD_VAR,
find_regions(self.view, SCOPE_VAR_NAME, rgn, True)[0][1],
rgn)
return self._var
def get_word(self):
if self._word is None:
self._word = False
view = self.view
pos = view.sel()[0].begin()
word_rgn = view.word(pos)
word = view.substr(word_rgn)
scope = view.scope_name(word_rgn.begin())
if not re_word.match(word):
return False
ignore_scopes = (
'comment', 'constant', 'keyword', 'storage', 'string')
for sc in ignore_scopes:
if sc in scope:
return False
self._word = CtxWord(word, word_rgn, scope)
return self._word
method = property(get_method)
type = property(get_type)
var = property(get_var)
word = property(get_word)
src = property(get_src)
imports = property(get_imports)
|
|
import argparse
import logging
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from coapthon.client.helperclient import HelperClient
from coapthon.utils import parse_uri
from coapthon.defines import Codes, DEFAULT_HC_PATH, HC_PROXY_DEFAULT_PORT, COAP_DEFAULT_PORT, LOCALHOST, BAD_REQUEST, \
NOT_IMPLEMENTED, CoAP_HTTP
from coapthon.defines import COAP_PREFACE
from urlparse import urlparse
__author__ = "Marco Ieni, Davide Foti"
__email__ = "[email protected], [email protected]"
logger = logging.getLogger(__name__)
hc_path = DEFAULT_HC_PATH
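# module-level hc path, shared between HCProxy (which sets it in __init__) and
# HCProxyHandler (which reads it when matching request paths)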
""" the class that realizes the HTTP-CoAP Proxy """
class HCProxy:
"""
This program implements an HTTP-CoAP Proxy without using external libraries.
    It is assumed that the URI is formatted like this:
    http://hc_proxy_ip:proxy_port/hc/coap://server_coap_ip:server_coap_port/resource
    You can run this program passing the parameters on the command line, or you
    can use the HCProxy class in your own project.
"""
def __init__(self, path=DEFAULT_HC_PATH, hc_port=HC_PROXY_DEFAULT_PORT, ip=LOCALHOST,
coap_port=COAP_DEFAULT_PORT):
"""
Initialize the HC proxy.
:param path: the path of the hc_proxy server
:param hc_port: the port of the hc_proxy server
:param ip: the ip of the hc_proxy server
:param coap_port: the coap server port you want to reach
"""
global hc_path
hc_path = HCProxy.get_formatted_path(path)
self.hc_port = hc_port
self.ip = ip
self.coap_port = coap_port
def run(self):
"""
Start the proxy.
"""
server_address = (self.ip, self.hc_port)
hc_proxy = HTTPServer(server_address, HCProxyHandler)
logger.info('Starting HTTP-CoAP Proxy...')
        hc_proxy.serve_forever()  # the server listens on http://ip:hc_port/path
@staticmethod
def get_formatted_path(path):
"""
        Normalize the path string so that it starts and ends with '/'.
        :param path: the path
        :return: the normalized path
"""
if path[0] != '/':
path = '/' + path
if path[-1] != '/':
path = '{0}/'.format(path)
return path
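# Usage sketch (illustrative only; argument values are the module defaults, the
# command-line handling is not shown here):
#     proxy = HCProxy(path=DEFAULT_HC_PATH, hc_port=HC_PROXY_DEFAULT_PORT,
#                     ip=LOCALHOST, coap_port=COAP_DEFAULT_PORT)
#     proxy.run()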
class CoapUri:  # this class extracts the CoAP URI from the HTTP URI
    """ Class that parses and manages the CoAP URI """
def __init__(self, coap_uri):
self.uri = coap_uri
self.host, self.port, self.path = parse_uri(coap_uri)
def get_uri_as_list(self):
"""
Split the uri into <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
:return: the split uri
"""
return urlparse(self.uri)
def get_payload(self):
"""
        Return the payload embedded in the query string of the URI.
        :return: the value after '=' in the query string, or None if there is no query string
"""
temp = self.get_uri_as_list()
query_string = temp[4]
if query_string == "":
return None # Bad request error code
query_string_as_list = str.split(query_string, "=")
return query_string_as_list[1]
def __str__(self):
return self.uri
class HCProxyHandler(BaseHTTPRequestHandler):
""" It maps the requests from HTTP to CoAP """
coap_uri = None
client = None
def set_coap_uri(self):
"""
Create a CoAP Uri
"""
self.coap_uri = CoapUri(self.path[len(hc_path):])
def do_initial_operations(self):
"""
        Set up the client to interact with the remote server
"""
if not self.request_hc_path_corresponds():
            # The HTTP URI of the request does not match the hc path configured
            # by the admin for the proxy, so we do not answer.
            # For example, the admin set up the proxy URI as
            # "http://127.0.0.1:8080/my_hc_path/" but the request asks for
            # "http://127.0.0.1:8080/another_hc_path/".
return
self.set_coap_uri()
self.client = HelperClient(server=(self.coap_uri.host, self.coap_uri.port))
def do_GET(self):
"""
Perform a GET request
"""
self.do_initial_operations()
coap_response = self.client.get(self.coap_uri.path)
self.client.stop()
logger.info("Server response: %s", coap_response.pretty_print())
self.set_http_response(coap_response)
def do_HEAD(self):
"""
Perform a HEAD request
"""
self.do_initial_operations()
        # the HEAD method does not exist in CoAP, so we treat the request
        # as if it were a GET and then omit the body from the HTTP response
        # by sending back only the headers
coap_response = self.client.get(self.coap_uri.path)
self.client.stop()
logger.info("Server response: %s", coap_response.pretty_print())
self.set_http_header(coap_response)
def do_POST(self):
"""
Perform a POST request
"""
# Doesn't do anything with posted data
# print "uri: ", self.client_address, self.path
self.do_initial_operations()
payload = self.coap_uri.get_payload()
if payload is None:
logger.error("BAD POST REQUEST")
self.send_error(BAD_REQUEST)
return
coap_response = self.client.post(self.coap_uri.path, payload)
self.client.stop()
logger.info("Server response: %s", coap_response.pretty_print())
self.set_http_response(coap_response)
def do_PUT(self):
"""
Perform a PUT request
"""
self.do_initial_operations()
payload = self.coap_uri.get_payload()
if payload is None:
logger.error("BAD PUT REQUEST")
self.send_error(BAD_REQUEST)
return
logger.debug(payload)
coap_response = self.client.put(self.coap_uri.path, payload)
self.client.stop()
logger.debug("Server response: %s", coap_response.pretty_print())
self.set_http_response(coap_response)
def do_DELETE(self):
"""
Perform a DELETE request
"""
self.do_initial_operations()
coap_response = self.client.delete(self.coap_uri.path)
self.client.stop()
logger.debug("Server response: %s", coap_response.pretty_print())
self.set_http_response(coap_response)
def do_CONNECT(self):
"""
Perform a CONNECT request. Reply with error, not implemented in CoAP
"""
self.send_error(NOT_IMPLEMENTED)
def do_OPTIONS(self):
"""
        Perform an OPTIONS request. Reply with error, not implemented in CoAP
"""
self.send_error(NOT_IMPLEMENTED)
def do_TRACE(self):
"""
Perform a TRACE request. Reply with error, not implemented in CoAP
"""
self.send_error(NOT_IMPLEMENTED)
def request_hc_path_corresponds(self):
"""
Tells if the hc path of the request corresponds to that specified by the admin
:return: a boolean that says if it corresponds or not
"""
uri_path = self.path.split(COAP_PREFACE)
request_hc_path = uri_path[0]
logger.debug("HCPATH: %s", hc_path)
# print HC_PATH
logger.debug("URI: %s", request_hc_path)
        return hc_path == request_hc_path
def set_http_header(self, coap_response):
"""
Sets http headers.
:param coap_response: the coap response
"""
logger.debug(
("Server: %s\n"\
"codice risposta: %s\n"\
"PROXED: %s\n"\
"payload risposta: %s"),
coap_response.source,
coap_response.code,
CoAP_HTTP[Codes.LIST[coap_response.code].name],
coap_response.payload)
self.send_response(int(CoAP_HTTP[Codes.LIST[coap_response.code].name]))
self.send_header('Content-type', 'text/html')
self.end_headers()
def set_http_body(self, coap_response):
"""
Set http body.
:param coap_response: the coap response
"""
if coap_response.payload is not None:
body = "<html><body><h1>", coap_response.payload, "</h1></body></html>"
self.wfile.write("".join(body))
else:
self.wfile.write("<html><body><h1>None</h1></body></html>")
def set_http_response(self, coap_response):
"""
Set http response.
:param coap_response: the coap response
"""
self.set_http_header(coap_response)
self.set_http_body(coap_response)
return
def get_command_line_args():
parser = argparse.ArgumentParser(description='Run the HTTP-CoAP Proxy.')
parser.add_argument('-p', dest='path', default=DEFAULT_HC_PATH,
help='the path of the hc_proxy server')
parser.add_argument('-hp', dest='hc_port', default=HC_PROXY_DEFAULT_PORT,
help='the port of the hc_proxy server')
parser.add_argument('-ip', dest='ip', default=LOCALHOST,
help='the ip of the hc_proxy server')
parser.add_argument('-cp', dest='coap_port', default=COAP_DEFAULT_PORT,
help='the coap server port you want to reach')
return parser.parse_args()
if __name__ == "__main__":
args = get_command_line_args()
    hc_proxy = HCProxy(args.path, int(args.hc_port), args.ip,
                       int(args.coap_port))
hc_proxy.run()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import copy
import os.path
import re
from ConfigParser import ConfigParser, ParsingError
from genshi.builder import tag
from trac.admin import AdminCommandError, IAdminCommandProvider
from trac.core import Component, ExtensionPoint, TracError, implements
from trac.util import AtomicFile, as_bool
from trac.util.compat import OrderedDict, wait_for_file_mtime_change
from trac.util.text import cleandoc, printout, to_unicode, to_utf8
from trac.util.translation import _, N_, tag_
__all__ = ['Configuration', 'ConfigSection', 'Option', 'BoolOption',
'IntOption', 'FloatOption', 'ListOption', 'ChoiceOption',
'PathOption', 'ExtensionOption', 'OrderedExtensionsOption',
'ConfigurationError']
_use_default = object()
def _getint(value):
return int(value or 0)
def _getfloat(value):
return float(value or 0.0)
def _getlist(value, sep, keep_empty):
if not value:
return []
if isinstance(value, basestring):
if isinstance(sep, (list, tuple)):
splitted = re.split('|'.join(map(re.escape, sep)), value)
else:
splitted = value.split(sep)
items = [item.strip() for item in splitted]
else:
items = list(value)
if not keep_empty:
items = [item for item in items if item not in (None, '')]
return items
class ConfigurationError(TracError):
"""Exception raised when a value in the configuration file is not valid."""
title = N_("Configuration Error")
def __init__(self, message=None, title=None, show_traceback=False):
if message is None:
message = _("Look in the Trac log for more information.")
super(ConfigurationError, self).__init__(message, title,
show_traceback)
class UnicodeConfigParser(ConfigParser):
"""A Unicode-aware version of ConfigParser. Arguments are encoded to
UTF-8 and return values are decoded from UTF-8.
"""
# All of the methods of ConfigParser are overridden except
# `getboolean`, `getint`, `getfloat`, `defaults`, `read`, `readfp`,
# `optionxform` and `write`. `getboolean`, `getint` and `getfloat`
# call `get`, so it isn't necessary to reimplement them.
# The base class `RawConfigParser` doesn't inherit from `object`
# so we can't use `super`.
def __init__(self, **kwargs):
dict_type = kwargs.pop('dict_type', None) or OrderedDict
ConfigParser.__init__(self, dict_type=dict_type, **kwargs)
def sections(self):
return map(to_unicode, ConfigParser.sections(self))
def add_section(self, section):
section_str = to_utf8(section)
ConfigParser.add_section(self, section_str)
def has_section(self, section):
section_str = to_utf8(section)
return ConfigParser.has_section(self, section_str)
def options(self, section):
section_str = to_utf8(section)
return map(to_unicode, ConfigParser.options(self, section_str))
def get(self, section, option, raw=False, vars=None):
section_str = to_utf8(section)
option_str = to_utf8(option)
return to_unicode(ConfigParser.get(self, section_str,
option_str, raw, vars))
def items(self, section, raw=False, vars=None):
section_str = to_utf8(section)
return [(to_unicode(k), to_unicode(v))
for k, v in ConfigParser.items(self, section_str, raw, vars)]
def has_option(self, section, option):
section_str = to_utf8(section)
option_str = to_utf8(option)
return ConfigParser.has_option(self, section_str, option_str)
def set(self, section, option, value=None):
section_str = to_utf8(section)
option_str = to_utf8(option)
value_str = to_utf8(value) if value is not None else ''
ConfigParser.set(self, section_str, option_str, value_str)
def remove_option(self, section, option):
section_str = to_utf8(section)
option_str = to_utf8(option)
ConfigParser.remove_option(self, section_str, option_str)
def remove_section(self, section):
section_str = to_utf8(section)
ConfigParser.remove_section(self, section_str)
def __copy__(self):
parser = self.__class__()
parser._sections = copy.copy(self._sections)
return parser
def __deepcopy__(self, memo):
parser = self.__class__()
parser._sections = copy.deepcopy(self._sections)
return parser
class Configuration(object):
"""Thin layer over `ConfigParser` from the Python standard library.
In addition to providing some convenience methods, the class remembers
the last modification time of the configuration file, and reparses it
when the file has changed.
"""
def __init__(self, filename, params={}):
self.filename = filename
self.parser = UnicodeConfigParser()
self._pristine_parser = None
self.parents = []
self._lastmtime = 0
self._sections = {}
self.parse_if_needed(force=True)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.filename)
def __contains__(self, name):
"""Return whether the configuration contains a section of the given
name.
"""
return name in self.sections()
def __getitem__(self, name):
"""Return the configuration section with the specified name."""
if name not in self._sections:
self._sections[name] = Section(self, name)
return self._sections[name]
def get(self, section, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
return self[section].get(key, default)
def getbool(self, section, key, default=''):
"""Return the specified option as boolean value.
If the value of the option is one of "yes", "true", "enabled", "on",
or "1", this method wll return `True`, otherwise `False`.
Valid default input is a string or a bool. Returns a bool.
"""
return self[section].getbool(key, default)
def getint(self, section, key, default=''):
"""Return the value of the specified option as integer.
If the specified option can not be converted to an integer, a
`ConfigurationError` exception is raised.
Valid default input is a string or an int. Returns an int.
"""
return self[section].getint(key, default)
def getfloat(self, section, key, default=''):
"""Return the value of the specified option as float.
If the specified option can not be converted to a float, a
`ConfigurationError` exception is raised.
Valid default input is a string, float or int. Returns a float.
"""
return self[section].getfloat(key, default)
def getlist(self, section, key, default='', sep=',', keep_empty=False):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. The
`sep` parameter can specify multiple values using a list or a tuple.
If the `keep_empty` parameter is set to `True`, empty elements are
included in the list.
        Valid default input is a string or a list. Returns a list.
"""
return self[section].getlist(key, default, sep, keep_empty)
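    # Hedged illustration (added comment; the section/option names are made
    # up): with the defaults above, config.getlist('mysection', 'mylist')
    # turns a raw value of "a, b,, c" into ['a', 'b', 'c'], while
    # keep_empty=True would preserve the empty element as ['a', 'b', '', 'c'].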
def getpath(self, section, key, default=''):
"""Return a configuration value as an absolute path.
Relative paths are resolved relative to the location of this
configuration file.
Valid default input is a string. Returns a normalized path.
"""
return self[section].getpath(key, default)
def set(self, section, key, value):
"""Change a configuration value.
These changes are not persistent unless saved with `save()`.
"""
self[section].set(key, value)
def defaults(self, compmgr=None):
"""Returns a dictionary of the default configuration values.
If `compmgr` is specified, return only options declared in components
that are enabled in the given `ComponentManager`.
"""
defaults = {}
for (section, key), option in \
Option.get_registry(compmgr).iteritems():
defaults.setdefault(section, {})[key] = \
option.dumps(option.default)
return defaults
def options(self, section, compmgr=None):
"""Return a list of `(name, value)` tuples for every option in the
specified section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given
`ComponentManager`.
"""
return self[section].options(compmgr)
def remove(self, section, key):
"""Remove the specified option."""
self[section].remove(key)
def sections(self, compmgr=None, defaults=True):
"""Return a list of section names.
If `compmgr` is specified, only the section names corresponding to
options declared in components that are enabled in the given
`ComponentManager` are returned.
"""
sections = set(self.parser.sections())
for parent in self.parents:
sections.update(parent.sections(compmgr, defaults=False))
if defaults:
sections.update(self.defaults(compmgr))
return sorted(sections)
def has_option(self, section, option, defaults=True):
"""Returns True if option exists in section in either the project
trac.ini or one of the parents, or is available through the Option
registry.
"""
return self[section].contains(option, defaults)
def save(self):
"""Write the configuration options to the primary file."""
all_options = {}
for (section, name), option in Option.get_registry().iteritems():
all_options.setdefault(section, {})[name] = option
def normalize(section, name, value):
option = all_options.get(section, {}).get(name)
return option.normalize(value) if option else value
sections = []
for section in self.sections():
options = []
for option in self[section]:
default = None
for parent in self.parents:
if parent.has_option(section, option, defaults=False):
default = normalize(section, option,
parent.get(section, option))
break
if self.parser.has_option(section, option):
current = normalize(section, option,
self.parser.get(section, option))
if current != default:
options.append((option, current))
if options:
sections.append((section, sorted(options)))
# Prepare new file contents to write to disk.
parser = UnicodeConfigParser()
for section, options in sections:
parser.add_section(section)
for key, val in options:
parser.set(section, key, val)
try:
self._write(parser)
except Exception:
# Revert all changes to avoid inconsistencies
self.parser = copy.deepcopy(self._pristine_parser)
raise
else:
self._pristine_parser = copy.deepcopy(self.parser)
def parse_if_needed(self, force=False):
if not self.filename or not os.path.isfile(self.filename):
return False
changed = False
modtime = os.path.getmtime(self.filename)
if force or modtime != self._lastmtime:
self.parser = UnicodeConfigParser()
try:
if not self.parser.read(self.filename):
raise TracError(_("Error reading '%(file)s', make sure "
"it is readable.", file=self.filename))
except ParsingError as e:
raise TracError(e)
self._lastmtime = modtime
self._pristine_parser = copy.deepcopy(self.parser)
changed = True
if changed:
self.parents = self._get_parents()
else:
for parent in self.parents:
changed |= parent.parse_if_needed(force=force)
if changed:
self._sections = {}
return changed
def touch(self):
if self.filename and os.path.isfile(self.filename) \
and os.access(self.filename, os.W_OK):
wait_for_file_mtime_change(self.filename)
def set_defaults(self, compmgr=None, component=None):
"""Retrieve all default values and store them explicitly in the
configuration, so that they can be saved to file.
Values already set in the configuration are not overwritten.
"""
def set_option_default(option):
section = option.section
name = option.name
if not self.has_option(section, name, defaults=False):
value = option.dumps(option.default)
self.set(section, name, value)
if component:
if component.endswith('.*'):
component = component[:-2]
component = component.lower().split('.')
from trac.core import ComponentMeta
for cls in ComponentMeta._components:
clsname = (cls.__module__ + '.' + cls.__name__).lower() \
.split('.')
if clsname[:len(component)] == component:
for option in cls.__dict__.itervalues():
if isinstance(option, Option):
set_option_default(option)
else:
for option in Option.get_registry(compmgr).itervalues():
set_option_default(option)
def _get_parents(self):
_parents = []
if self.parser.has_option('inherit', 'file'):
for filename in self.parser.get('inherit', 'file').split(','):
filename = filename.strip()
if not os.path.isabs(filename):
filename = os.path.join(os.path.dirname(self.filename),
filename)
_parents.append(Configuration(filename))
return _parents
def _write(self, parser):
if not self.filename:
return
wait_for_file_mtime_change(self.filename)
with AtomicFile(self.filename, 'w') as fd:
fd.writelines(['# -*- coding: utf-8 -*-\n', '\n'])
parser.write(fd)
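# Hedged usage sketch (illustration only, not part of the original module):
# assuming a writable trac.ini, the wrapper above is typically driven like
#
#     config = Configuration('/path/to/trac.ini')   # hypothetical path
#     level = config.get('logging', 'log_level')    # registered default if unset
#     config.set('logging', 'log_level', 'DEBUG')
#     config.save()                                  # persist to disk
#     config.parse_if_needed()                       # reload if the file changed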
class Section(object):
"""Proxy for a specific configuration section.
Objects of this class should not be instantiated directly.
"""
__slots__ = ['config', 'name', '_cache']
def __init__(self, config, name):
self.config = config
self.name = name
self._cache = {}
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
def contains(self, key, defaults=True):
if self.config.parser.has_option(self.name, key):
return True
for parent in self.config.parents:
if parent[self.name].contains(key, defaults=False):
return True
return defaults and (self.name, key) in Option.registry
__contains__ = contains
def iterate(self, compmgr=None, defaults=True):
"""Iterate over the options in this section.
If `compmgr` is specified, only return default option values for
components that are enabled in the given `ComponentManager`.
"""
options = set()
if self.config.parser.has_section(self.name):
for option in self.config.parser.options(self.name):
options.add(option.lower())
yield option
for parent in self.config.parents:
for option in parent[self.name].iterate(defaults=False):
loption = option.lower()
if loption not in options:
options.add(loption)
yield option
if defaults:
for section, option in Option.get_registry(compmgr).iterkeys():
if section == self.name and option.lower() not in options:
yield option
__iter__ = iterate
def get(self, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
cached = self._cache.get(key, _use_default)
if cached is not _use_default:
return cached
if self.config.parser.has_option(self.name, key):
value = self.config.parser.get(self.name, key)
else:
for parent in self.config.parents:
value = parent[self.name].get(key, _use_default)
if value is not _use_default:
break
else:
if default is not _use_default:
option = Option.registry.get((self.name, key))
value = option.dumps(option.default) if option \
else _use_default
else:
value = _use_default
if value is _use_default:
return default
self._cache[key] = value
return value
def getbool(self, key, default=''):
"""Return the value of the specified option as boolean.
This method returns `True` if the option value is one of "yes",
"true", "enabled", "on", or non-zero numbers, ignoring case.
Otherwise `False` is returned.
Valid default input is a string or a bool. Returns a bool.
"""
return as_bool(self.get(key, default))
def getint(self, key, default=''):
"""Return the value of the specified option as integer.
If the specified option can not be converted to an integer, a
`ConfigurationError` exception is raised.
Valid default input is a string or an int. Returns an int.
"""
value = self.get(key, default)
try:
return _getint(value)
except ValueError:
raise ConfigurationError(
_('[%(section)s] %(entry)s: expected integer,'
' got %(value)s', section=self.name, entry=key,
value=repr(value)))
def getfloat(self, key, default=''):
"""Return the value of the specified option as float.
If the specified option can not be converted to a float, a
`ConfigurationError` exception is raised.
Valid default input is a string, float or int. Returns a float.
"""
value = self.get(key, default)
try:
return _getfloat(value)
except ValueError:
raise ConfigurationError(
_('[%(section)s] %(entry)s: expected float,'
' got %(value)s', section=self.name, entry=key,
value=repr(value)))
def getlist(self, key, default='', sep=',', keep_empty=True):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. The
`sep` parameter can specify multiple values using a list or a tuple.
If the `keep_empty` parameter is set to `True`, empty elements are
included in the list.
Valid default input is a string or a list. Returns a list.
"""
return _getlist(self.get(key, default), sep, keep_empty)
def getpath(self, key, default=''):
"""Return the value of the specified option as a path, relative to
the location of this configuration file.
Valid default input is a string. Returns a normalized path.
"""
path = self.get(key, default)
if not path:
return default
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(self.config.filename), path)
return os.path.normcase(os.path.realpath(path))
def options(self, compmgr=None):
"""Return `(key, value)` tuples for every option in the section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
for key in self.iterate(compmgr):
yield key, self.get(key)
def set(self, key, value):
"""Change a configuration value.
These changes are not persistent unless saved with `save()`.
"""
self._cache.pop(key, None)
if not self.config.parser.has_section(self.name):
self.config.parser.add_section(self.name)
return self.config.parser.set(self.name, key, value)
def remove(self, key):
"""Delete a key from this section.
Like for `set()`, the changes won't persist until `save()` gets
called.
"""
if self.config.parser.has_section(self.name):
self._cache.pop(key, None)
self.config.parser.remove_option(self.name, key)
def _get_registry(cls, compmgr=None):
"""Return the descriptor registry.
If `compmgr` is specified, only return descriptors for components that
are enabled in the given `ComponentManager`.
"""
if compmgr is None:
return cls.registry
from trac.core import ComponentMeta
components = {}
for comp in ComponentMeta._components:
for attr in comp.__dict__.itervalues():
if isinstance(attr, cls):
components[attr] = comp
return dict(each for each in cls.registry.iteritems()
if each[1] not in components
or compmgr.is_enabled(components[each[1]]))
class ConfigSection(object):
"""Descriptor for configuration sections."""
registry = {}
@staticmethod
def get_registry(compmgr=None):
"""Return the section registry, as a `dict` mapping section names to
`ConfigSection` objects.
If `compmgr` is specified, only return sections for components that
are enabled in the given `ComponentManager`.
"""
return _get_registry(ConfigSection, compmgr)
def __init__(self, name, doc, doc_domain='tracini'):
"""Create the configuration section."""
self.name = name
self.registry[self.name] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
return config[self.name]
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
class Option(object):
"""Descriptor for configuration options."""
registry = {}
def accessor(self, section, name, default):
return section.get(name, default)
@staticmethod
def get_registry(compmgr=None):
"""Return the option registry, as a `dict` mapping `(section, key)`
tuples to `Option` objects.
If `compmgr` is specified, only return options for components that are
enabled in the given `ComponentManager`.
"""
return _get_registry(Option, compmgr)
def __init__(self, section, name, default=None, doc='',
doc_domain='tracini'):
"""Create the configuration option.
@param section: the name of the configuration section this option
belongs to
@param name: the name of the option
@param default: the default value for the option
@param doc: documentation of the option
"""
self.section = section
self.name = name
self.default = self.normalize(default)
self.registry[(self.section, self.name)] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
section = config[self.section]
value = self.accessor(section, self.name, self.default)
return value
def __set__(self, instance, value):
raise AttributeError(_("Setting attribute is not allowed."))
def __repr__(self):
return '<%s [%s] %r>' % (self.__class__.__name__, self.section,
self.name)
def dumps(self, value):
"""Return the value as a string to write to a trac.ini file"""
if value is None:
return ''
if value is True:
return 'enabled'
if value is False:
return 'disabled'
if isinstance(value, unicode):
return value
return to_unicode(value)
def normalize(self, value):
"""Normalize the given value to write to a trac.ini file"""
return self.dumps(value)
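# Hedged sketch (illustration only; the component and option names below are
# hypothetical): Option and its subclasses are meant to be declared as class
# attributes of a Component and read through the descriptor protocol:
#
#     class MyComponent(Component):
#         default_format = Option('mymodule', 'default_format', 'text',
#                                 doc="Preferred output format.")
#         max_items = IntOption('mymodule', 'max_items', 25)
#
# Accessing MyComponent(env).max_items then goes through env.config, so the
# value comes from trac.ini or falls back to the declared default.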
class BoolOption(Option):
"""Descriptor for boolean configuration options."""
def accessor(self, section, name, default):
return section.getbool(name, default)
def normalize(self, value):
if value not in (True, False):
value = as_bool(value)
return self.dumps(value)
class IntOption(Option):
"""Descriptor for integer configuration options."""
def accessor(self, section, name, default):
return section.getint(name, default)
def normalize(self, value):
try:
value = _getint(value)
except ValueError:
pass
return self.dumps(value)
class FloatOption(Option):
"""Descriptor for float configuration options."""
def accessor(self, section, name, default):
return section.getfloat(name, default)
def normalize(self, value):
try:
value = _getfloat(value)
except ValueError:
pass
return self.dumps(value)
class ListOption(Option):
"""Descriptor for configuration options that contain multiple values
separated by a specific character.
"""
def __init__(self, section, name, default=None, sep=',', keep_empty=False,
doc='', doc_domain='tracini'):
self.sep = sep
self.keep_empty = keep_empty
Option.__init__(self, section, name, default, doc, doc_domain)
def accessor(self, section, name, default):
return section.getlist(name, default, self.sep, self.keep_empty)
def dumps(self, value):
if isinstance(value, (list, tuple)):
sep = self.sep
if isinstance(sep, (list, tuple)):
sep = sep[0]
return sep.join(Option.dumps(self, v) or '' for v in value)
return Option.dumps(self, value)
def normalize(self, value):
return self.dumps(_getlist(value, self.sep, self.keep_empty))
class ChoiceOption(Option):
"""Descriptor for configuration options providing a choice among a list
of items.
The default value is the first choice in the list.
"""
def __init__(self, section, name, choices, doc='', doc_domain='tracini'):
Option.__init__(self, section, name, to_unicode(choices[0]), doc,
doc_domain)
self.choices = set(to_unicode(c).strip() for c in choices)
def accessor(self, section, name, default):
value = section.get(name, default)
if value not in self.choices:
raise ConfigurationError(
_('[%(section)s] %(entry)s: expected one of '
'(%(choices)s), got %(value)s',
section=section.name, entry=name, value=repr(value),
choices=', '.join('"%s"' % c
for c in sorted(self.choices))))
return value
class PathOption(Option):
"""Descriptor for file system path configuration options.
Relative paths are resolved to absolute paths using the directory
containing the configuration file as the reference.
"""
def accessor(self, section, name, default):
return section.getpath(name, default)
class ExtensionOption(Option):
"""Name of a component implementing `interface`. Raises a
`ConfigurationError` if the component cannot be found in the list of
active components implementing the interface."""
def __init__(self, section, name, interface, default=None, doc='',
doc_domain='tracini'):
Option.__init__(self, section, name, default, doc, doc_domain)
self.xtnpt = ExtensionPoint(interface)
def __get__(self, instance, owner):
if instance is None:
return self
value = Option.__get__(self, instance, owner)
for impl in self.xtnpt.extensions(instance):
if impl.__class__.__name__ == value:
return impl
raise ConfigurationError(
tag_("Cannot find an implementation of the %(interface)s "
"interface named %(implementation)s. Please check "
"that the Component is enabled or update the option "
"%(option)s in trac.ini.",
interface=tag.code(self.xtnpt.interface.__name__),
implementation=tag.code(value),
option=tag.code("[%s] %s" % (self.section, self.name))))
class OrderedExtensionsOption(ListOption):
"""A comma separated, ordered, list of components implementing
`interface`. Can be empty.
If `include_missing` is true (the default) all components implementing the
interface are returned, with those specified by the option ordered first.
"""
def __init__(self, section, name, interface, default=None,
include_missing=True, doc='', doc_domain='tracini'):
ListOption.__init__(self, section, name, default, doc=doc,
doc_domain=doc_domain)
self.xtnpt = ExtensionPoint(interface)
self.include_missing = include_missing
def __get__(self, instance, owner):
if instance is None:
return self
order = ListOption.__get__(self, instance, owner)
components = []
implementing_classes = []
for impl in self.xtnpt.extensions(instance):
implementing_classes.append(impl.__class__.__name__)
if self.include_missing or impl.__class__.__name__ in order:
components.append(impl)
not_found = sorted(set(order) - set(implementing_classes))
if not_found:
raise ConfigurationError(
tag_("Cannot find implementation(s) of the %(interface)s "
"interface named %(implementation)s. Please check "
"that the Component is enabled or update the option "
"%(option)s in trac.ini.",
interface=tag.code(self.xtnpt.interface.__name__),
implementation=tag(
(', ' if idx != 0 else None, tag.code(impl))
for idx, impl in enumerate(not_found)),
option=tag.code("[%s] %s" % (self.section, self.name))))
def compare(x, y):
x, y = x.__class__.__name__, y.__class__.__name__
if x not in order:
return int(y in order)
if y not in order:
return -int(x in order)
return cmp(order.index(x), order.index(y))
components.sort(compare)
return components
class ConfigurationAdmin(Component):
"""trac-admin command provider for trac.ini administration."""
implements(IAdminCommandProvider)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('config get', '<section> <option>',
'Get the value of the given option in "trac.ini"',
self._complete_config, self._do_get)
yield ('config remove', '<section> <option>',
'Remove the specified option from "trac.ini"',
self._complete_config, self._do_remove)
yield ('config set', '<section> <option> <value>',
'Set the value for the given option in "trac.ini"',
self._complete_config, self._do_set)
def _complete_config(self, args):
if len(args) == 1:
return self.config.sections()
elif len(args) == 2:
return [name for (name, value) in self.config[args[0]].options()]
def _do_get(self, section, option):
if not self.config.has_option(section, option):
raise AdminCommandError(
_("Option '%(option)s' doesn't exist in section"
" '%(section)s'", option=option, section=section))
printout(self.config.get(section, option))
def _do_set(self, section, option, value):
self.config.set(section, option, value)
if section == 'components' and as_bool(value):
self.config.set_defaults(component=option)
self.config.save()
if section == 'inherit' and option == 'file':
self.config.parse_if_needed(force=True) # Full reload
def _do_remove(self, section, option):
if not self.config.has_option(section, option):
raise AdminCommandError(
_("Option '%(option)s' doesn't exist in section"
" '%(section)s'", option=option, section=section))
self.config.remove(section, option)
self.config.save()
if section == 'inherit' and option == 'file':
self.config.parse_if_needed(force=True) # Full reload
def get_configinfo(env):
"""Returns a list of dictionaries containing the `name` and `options`
of each configuration section. The value of `options` is a list of
dictionaries containing the `name`, `value` and `modified` state of
each configuration option. The `modified` value is True if the value
differs from its default.
:since: version 1.1.2
"""
all_options = {}
for (section, name), option in \
Option.get_registry(env.compmgr).iteritems():
all_options.setdefault(section, {})[name] = option
sections = []
for section in env.config.sections(env.compmgr):
options = []
for name, value in env.config.options(section, env.compmgr):
registered = all_options.get(section, {}).get(name)
if registered:
default = registered.default
normalized = registered.normalize(value)
else:
default = u''
normalized = unicode(value)
options.append({'name': name, 'value': value,
'modified': normalized != default})
options.sort(key=lambda o: o['name'])
sections.append({'name': section, 'options': options})
sections.sort(key=lambda s: s['name'])
return sections
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uses stochastic_descent to find local optima of bang-bang protocols.
Starting from a given (likely random) protocol, iterate through all of the
protocols that are Hamming distance at most k away and evaluate their
performance. The iteration proceeds in a random order, and when a better
protocol is found then jump to that one and restart the search from the new
origin. The algorithm terminates when none of the neighbors have better
performance, and this defines a local optimum.
"""
import itertools
from absl import logging
import numpy as np
from bangbang_qaoa import circuit_lib
def get_all_protocols(num_chunks):
"""Returns all possible bang-bang protocols of a given number of chunks.
Args:
num_chunks: Positive integer, the length of the bang-bang protocol.
Returns:
A generator containing all possible bang-bang protocols of a particular
length.
Raises:
ValueError: If num_chunks is not positive.
"""
if num_chunks <= 0:
raise ValueError('num_chunks should be positive, not %d' % num_chunks)
return itertools.product([circuit_lib.HamiltonianType.X,
circuit_lib.HamiltonianType.CONSTRAINT],
repeat=num_chunks)
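# Hedged illustration (added comment, not part of the original module): for
# num_chunks=2 the generator above yields the 2**2 = 4 protocols
#   (X, X), (X, CONSTRAINT), (CONSTRAINT, X), (CONSTRAINT, CONSTRAINT)
# where X / CONSTRAINT abbreviate circuit_lib.HamiltonianType members, e.g.
#
#   assert len(list(get_all_protocols(2))) == 4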
def get_random_protocol(num_chunks, random_state=None):
"""Gets a uniformly random protocol of a given number of chunks.
Args:
num_chunks: Positive integer, the length of the random bang-bang protocol.
random_state: np.random.RandomState, default None, RandomState(seed=None).
Returns:
A uniformly random bang-bang protocol with a certain number of chunks.
Raises:
ValueError: If num_chunks is not positive.
"""
if num_chunks <= 0:
raise ValueError('num_chunks should be positive, not %d' % num_chunks)
if random_state is None:
random_state = np.random.RandomState(None)
return [
# NOTE(leeley): random.randint(a, b) returns a random integer [a, b]
# but np.random.randint(a, b) returns a random integer [a, b).
# So we need to use np.random.randint(0, 2) to generate numbers in {0, 1}.
circuit_lib.HamiltonianType(random_state.randint(0, 2))
for _ in range(num_chunks)]
def get_random_adiabatic_protocol(
num_chunks, ascending=True, random_state=None):
"""Gets a random adabatic looking protocol of a given number of chunks.
Takes a linear interpolation from 0 to 1 and then uses that as the probability
of applying circuit_lib.HamiltonianType.CONSTRAINT.
Args:
num_chunks: Positive integer, the length of the random bang-bang protocol.
ascending: Boolean, whether the probability of binomial sampling increases
with chunk index. Default True, adiabatic approximation.
random_state: np.random.RandomState, default None, RandomState(seed=None).
Returns:
A random bang-bang protocol with a certain number of chunks.
Raises:
ValueError: If num_chunks is not positive.
"""
if num_chunks <= 0:
raise ValueError('num_chunks should be positive, not %d' % num_chunks)
if random_state is None:
random_state = np.random.RandomState(None)
if ascending:
probabilities = np.linspace(0, 1, num_chunks)
else:
probabilities = np.linspace(1, 0, num_chunks)
return [
circuit_lib.HamiltonianType(random_state.binomial(1, probability))
for probability in probabilities]
def _apply_changes_to_protocol(bangbang_protocol, changes):
"""Apply changes to a bang-bang protocol.
Args:
bangbang_protocol: List of circuit_lib.HamiltonianType describing the
protocol.
changes: List of positive integers. Each integer represents the index
of the protocol that will be changed.
Returns:
A copy of the original bang-bang protocol with the corresponding
circuit_lib.HamiltonianType changed at each index described in changes.
Raises:
IndexError: If value is not in the interval [0, len(bangbang_protocol))
"""
protocol_copy = list(bangbang_protocol)
protocol_len = len(protocol_copy)
for index in changes:
if index < 0 or index >= protocol_len:
raise IndexError('Each index should be between 0 and %d, not %d'
% (protocol_len - 1, index))
protocol_copy[index] = circuit_lib.switch_hamiltonian_type(
bangbang_protocol[index])
return protocol_copy
def _get_all_changes(num_chunks, max_num_flips):
"""Get all changes of Hamming distance up to max_num_flips.
Args:
num_chunks: Positive integer, the total number of chunks that can be
flipped.
max_num_flips: Positive integer, the maximum number of indices of the
bang-bang protocol that can be changed (k).
Returns:
    A generator over all possible changes of Hamming distance at most k, each
    described by a tuple of indices of length at most k, where each index
    corresponds to a change.
Raises:
ValueError: If 0 < max_num_flips <= num_chunks is not true.
"""
if max_num_flips <= 0:
raise ValueError('max_num_flips should be positive, not %d' % max_num_flips)
if num_chunks < max_num_flips:
raise ValueError('num_chunks should be at least max_num_flips')
return itertools.chain.from_iterable(
itertools.combinations(range(num_chunks), i)
for i in range(1, max_num_flips + 1)
)
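# Hedged illustration (added comment): for num_chunks=3 and max_num_flips=2
# the generator above produces the index tuples
#   (0,), (1,), (2,), (0, 1), (0, 2), (1, 2)
# i.e. C(3, 1) + C(3, 2) = 6 candidate changes.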
def get_all_new_protocols(bangbang_protocol, max_num_flips):
"""Gets all new protocols within max_num_flips flips of the current protocol.
  Returns a shuffled generator of all neighbors within max_num_flips flips of
the bang-bang protocol.
Args:
bangbang_protocol: List of circuit_lib.HamiltonianType describing the
protocol.
max_num_flips: Positive integer, the maximum number of indices of the
bang-bang protocol that can be changed.
Returns:
A shuffled generator of all neighbours within max_num_flips flips of the
bang-bang protocol.
Raises:
ValueError: If max_num_flips is greater than the number of chunks of the
protocol.
"""
if max_num_flips > len(bangbang_protocol):
raise ValueError(
        'max_num_flips should be at most len(bangbang_protocol), not %d'
% max_num_flips)
changes = list(_get_all_changes(len(bangbang_protocol), max_num_flips))
np.random.shuffle(changes)
return (_apply_changes_to_protocol(bangbang_protocol, change)
for change in changes)
def _more_optimal(minimize, new_val, old_val):
"""Returns if the new value is more optimal than the old value.
Args:
minimize: Bool, True if we are trying to minimize.
    new_val: Float, the new value that we want to check for being more optimal.
old_val: Float, the old value that we will compare to new_val.
Returns:
Boolean that is true if new_val is more optimal than old_val.
"""
return new_val < old_val if minimize else new_val > old_val
def _stochastic_descent_epoch(
circuit, bangbang_protocol, max_num_flips, previous_eval, minimize):
"""One epoch of the stochastic descent process.
Randomly goes through all neighbors within max_num_flips flips of the
bang-bang protocol and evaluates them. If one of them performs better, move to
that protocol, otherwise return the same protocol, signalling a local optimum.
Args:
circuit: circuit_lib.BangBangProtocolCircuit, object contains method to
evaluate the bang-bang protocols.
bangbang_protocol: The current bang-bang protocol.
max_num_flips: Positive integer, the maximum number of indices of the
bang-bang protocol that can be changed.
previous_eval: Float, the evaluation of the current bang-bang protocol.
minimize: Bool, True if we want to minimize the expectation.
Returns:
current_optimal_protocol: circuit_lib.HamiltonianType list, if
bangbang_protocol is a max_num_flips local optimum, returns
bangbang_protocol. Otherwise, returns a new bang-bang protocol from the
neighbors that performs more optimally.
    current_optimal_eval: Float, the evaluation of current_optimal_protocol.
Raises:
ValueError: If max_num_flips is not positive.
"""
if max_num_flips <= 0:
raise ValueError('max_num_flips should be positive, not %d' % max_num_flips)
current_optimal_protocol = bangbang_protocol
current_optimal_eval = previous_eval
for new_protocol in get_all_new_protocols(bangbang_protocol, max_num_flips):
new_eval = circuit.get_constraint_expectation(
circuit.get_wavefunction(new_protocol))
if _more_optimal(minimize, new_eval, current_optimal_eval):
logging.info(
'%s in the neighbors performs more optimally than %s, %f vs %f',
circuit_lib.protocol_to_string(new_protocol),
circuit_lib.protocol_to_string(current_optimal_protocol),
new_eval,
current_optimal_eval)
current_optimal_eval = new_eval
current_optimal_protocol = new_protocol
break
else:
logging.info(
'bangbang_protocol %s (%f) is a max_num_flips local optimum.',
circuit_lib.protocol_to_string(current_optimal_protocol),
current_optimal_eval)
return current_optimal_protocol, current_optimal_eval
def stochastic_descent(
circuit,
max_num_flips,
initial_protocol,
minimize,
skip_search=False):
"""Finds a locally optimal bang-bang protocol using stochastic descent.
  Iterates through protocols up to max_num_flips flips away, and moves to a
  neighbor if a better protocol is found. When no such neighbor is found, the
process stops and returns the local optimum.
Args:
circuit: circuit_lib.BangBangProtocolCircuit, object contains method to
evaluate the bang-bang protocols.
max_num_flips: Positive integer, the maximum number of indices of the
bang-bang protocol that can be changed.
initial_protocol: A list of circuit_lib.HamiltonianType, the initial guess
of bang-bang protocol for stochastic descent.
    minimize: Bool, True if we want to minimize the expectation.
skip_search: Bool, whether to skip the stochastic descent search. If True,
      only the initial protocol is evaluated. This is used as a baseline.
Returns:
current_optimal_protocol: circuit_lib.HamiltonianType list, a
locally optimal bang-bang protocol according to stochastic descent.
current_optimal_eval: Float, the evaluation of the protocol.
    num_epoch: Integer, the number of epochs from the initial protocol to the
      optimal protocol.
Raises:
ValueError: If max_num_flips is not positive.
"""
if max_num_flips <= 0:
raise ValueError('max_num_flips should be positive, not %d' % max_num_flips)
current_optimal_protocol = initial_protocol
current_optimal_eval = circuit.get_constraint_expectation(
circuit.get_wavefunction(current_optimal_protocol))
if skip_search:
return current_optimal_protocol, current_optimal_eval, 0
depth = 1
new_protocol, new_eval = _stochastic_descent_epoch(
circuit=circuit,
bangbang_protocol=current_optimal_protocol,
max_num_flips=max_num_flips,
previous_eval=current_optimal_eval,
minimize=minimize)
while _more_optimal(minimize, new_eval, current_optimal_eval):
current_optimal_eval = new_eval
current_optimal_protocol = new_protocol
depth += 1
new_protocol, new_eval = _stochastic_descent_epoch(
circuit=circuit,
bangbang_protocol=current_optimal_protocol,
max_num_flips=max_num_flips,
previous_eval=current_optimal_eval,
minimize=minimize)
logging.log_every_n(
logging.INFO, 'Stochastic Descent Depth %d.', 10, depth)
return current_optimal_protocol, current_optimal_eval, depth - 1
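# Hedged usage sketch (illustration only): a driver script would call the
# search roughly like this, assuming `circuit` is a
# circuit_lib.BangBangProtocolCircuit built elsewhere:
#
#   initial = get_random_protocol(num_chunks=20)
#   protocol, value, depth = stochastic_descent(
#       circuit=circuit, max_num_flips=2, initial_protocol=initial,
#       minimize=True)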
|
|
'''
Run Resnet50 on Imagenet data.
Generate TFrecords for Imagenet data by following instructions in:
examples/build_imagenet_data/README.md
run:
# 4 GPU machine 2 sockets typically --map-by ppr:2:socket works well.
TMPDIR=/tmp mpirun --report-bindings \
--map-by ppr:2:socket -oversubscribe -np 4 python \
./examples/resnet/resnet50_tfrecord_horovod.py \
--imgs_per_epoch=6400 # to speed up epoch
TMPDIR=/tmp mpirun --report-bindings -mca btl_tcp_if_exclude docker0,lo \
--bind-to none --map-by slot -np 8 \
run_psgcluster_singularity.sh --datamnt=/datasets \
--container=/cm/shared/singularity/tf1.4.0_hvd_ompi3.0.0-2017-11-23-154091b4d08c.img \
--venvpy=~/.virtualenvs/py-keras-gen \
--scripts=./examples/resnet/resnet50_tfrecord_horovod.py \
--datadir=/datasets/imagenet/train-val-tfrecord-480-subset \
--batch_size=64 --epochs=2 --imgs_per_epoch=6400 # to speed up epoch
TMPDIR=/tmp mpirun --report-bindings -mca btl_tcp_if_exclude docker0,lo \
--bind-to none --map-by slot -np 8 \
run_psgcluster_singularity.sh --datamnts=/datasets \
--container=/cm/shared/singularity/tf17.12_tf1.4.0_hvd_ompi3.0.0_ibverbs-2018-02-01-5540d30e4dc5.img \
--venvpy=~/.virtualenvs/py-keras-gen \
--scripts=./examples/resnet/resnet50_tfrecord_horovod.py \
--datadir=/datasets/imagenet/train-val-tfrecord-480-subset \
--batch_size=64 --epochs=2 --imgs_per_epoch=6400 # to speed up epoch
'''
from __future__ import print_function
import sys
import argparse as ap
from textwrap import dedent
import time
import tensorflow as tf
import horovod.tensorflow as hvd
import horovod.keras as hvd_keras
import keras.backend as KB
import keras.optimizers as KO
import keras.layers as KL
# from keras.models import Model
# from keras import backend as KB
from keras.layers import Input
from keras.applications.resnet50 import ResNet50
from keras_exp.callbacks.timing import SamplesPerSec, BatchTiming
# from keras_tqdm import TQDMCallback
from resnet_common import RecordInputImagenetPreprocessor
class SmartFormatterMixin(ap.HelpFormatter):
# ref:
# http://stackoverflow.com/questions/3853722/python-argparse-how-to-insert-newline-in-the-help-text
# @IgnorePep8
def _split_lines(self, text, width):
# this is the RawTextHelpFormatter._split_lines
if text.startswith('S|'):
return text[2:].splitlines()
return ap.HelpFormatter._split_lines(self, text, width)
class CustomFormatter(ap.RawDescriptionHelpFormatter, SmartFormatterMixin):
'''Convenience formatter_class for argparse help print out.'''
def _parser(desc):
parser = ap.ArgumentParser(description=dedent(desc),
formatter_class=CustomFormatter)
parser.add_argument('--epochs', type=int, default=10,
help='Number of epochs to run training for.\n'
'(Default: %(default)s)\n')
parser.add_argument(
'--batch_size', type=int, default=64,
help='S|Batch size. Default: %(default)s')
parser.add_argument(
'--imgs_per_epoch', type=int, default=0,
help='S|Number of images to run during epoch. Use for timing.\n'
'Default uses all the images for an epoch.')
imagenet_datadir = '/datasets/imagenet/train-val-tfrecord-480-subset'
parser.add_argument(
'--datadir', default=imagenet_datadir,
help='S|Data directory with Imagenet TFrecord dataset. Assumes\n'
'TFrecord subsets prefixed with train-* and validation-* are in the\n'
'directory. Default: %(default)s')
parser.add_argument(
'--distort_color', action='store_true', default=False,
help='S|Distort color during training on imagenet to "enrich" the\n'
'dataset. Default no distortion. Set this flag to enable distortion.')
args = parser.parse_args()
return args
def main(argv=None):
# Initialize Horovod.
hvd.init()
# Pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
KB.set_session(tf.Session(config=config))
# print('LOCAL RANK, OVERAL RANK: {}, {}'.format(hvd.local_rank(),
# hvd.rank()))
ngpus = hvd.size()
main.__doc__ = __doc__
    if argv is not None:
        sys.argv.extend(argv)
    argv = sys.argv
desc = main.__doc__ # .format(os.path.basename(__file__))
# CLI parser
args = _parser(desc)
num_devices_tfrecord = 1
height, width = 224, 224 # Image dimensions. Gets resized if not match.
distort_color = args.distort_color
data_dir = args.datadir
batch_size = args.batch_size # * ngpus
epochs = args.epochs
imgs_per_epoch = args.imgs_per_epoch
# Fit the model using data from the TFRecord data tensors.
device_minibatches = RecordInputImagenetPreprocessor.device_minibatches
images_tfrecord, labels_tfrecord, nrecords = device_minibatches(
num_devices_tfrecord, data_dir, batch_size,
height, width, distort_color, val=False)
images_tfrecord = images_tfrecord[0]
labels_tfrecord = labels_tfrecord[0]
# CASTING FOR KERAS
# labels[device_num] = tf.cast(labels_tfrecord, dtype)
nclasses = 1000
labels_tfrecord = tf.one_hot(labels_tfrecord, nclasses)
nimgs_to_use = imgs_per_epoch if imgs_per_epoch > 0 else nrecords
steps_per_epoch = nimgs_to_use // batch_size // hvd.size()
# steps_per_epoch = 100
# batch_shape = images_tfrecord.get_shape().as_list()
# images = Input(tensor=images_tfrecord, batch_shape=x_batch_shape)
images = Input(tensor=images_tfrecord)
model = ResNet50(input_tensor=images, weights=None)
if hvd.rank() == 0:
model.summary()
print('Num images: {}'.format(nrecords))
if nimgs_to_use < nrecords:
print('Using {} images per epoch'.format(nimgs_to_use))
# print('IMAGES_TFRECORD: {}'.format(images_tfrecord))
# print('LABELS_TFRECORD: {}'.format(labels_tfrecord))
# Add Horovod Distributed Optimizer from nvcnn.py
# momentum = 0.9
# lr = 0.1
# learning_rate = tf.train.exponential_decay(
# lr,
# self.global_step,
# decay_steps=FLAGS.lr_decay_epochs * nstep_per_epoch,
# decay_rate=FLAGS.lr_decay_rate,
# staircase=True)
# opt = tf.train.MomentumOptimizer(self.learning_rate, momentum,
# use_nesterov=True)
# lr = 0.001 * ngpus
# opt = tf.train.AdamOptimizer()
# opt = hvd.DistributedOptimizer(opt) # , use_locking=True)
# opt = KO.TFOptimizer(opt) # Required for tf.train based optimizers
opt = KO.Adam()
opt = hvd_keras.DistributedOptimizer(opt)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
# metrics=['accuracy'],
target_tensors=[labels_tfrecord])
# Broadcast variables from rank 0 to all other processes.
KB.get_session().run(hvd.broadcast_global_variables(0))
callbacks = []
if hvd.rank() == 0:
callbacks += [BatchTiming(),
SamplesPerSec(ngpus * batch_size)]
# RecordInput is a yield op which doesn't use queue runners or queues.
# Start the queue runners.
# sess = KB.get_session()
# sess.run([tf.local_variables_initializer(),
# tf.global_variables_initializer()])
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess, coord)
start_time = time.time()
model.fit(
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=callbacks,
verbose=1)
# verbose=hvd.rank() == 0)
elapsed_time = time.time() - start_time
if hvd.rank() == 0:
print('[{}] finished in {} s'
.format('TRAINING', round(elapsed_time, 3)))
# loss = model.evaluate(None, None, steps=steps_per_epoch_val)
images_tfrecord_val, labels_tfrecord_val, nrecords_val = \
device_minibatches(num_devices_tfrecord, data_dir, batch_size,
height, width, distort_color, val=True)
images_tfrecord_val = images_tfrecord_val[0]
labels_tfrecord_val = labels_tfrecord_val[0]
labels_tfrecord_val = tf.one_hot(labels_tfrecord_val, nclasses)
# print('IMAGES_TFRECORD_VAL: {}'.format(images_tfrecord_val))
# print('labels_tfrecord_val: {}'.format(labels_tfrecord_val))
steps_per_epoch_val = nrecords_val // batch_size
images_val = Input(tensor=images_tfrecord_val)
model_val = model
model_val.layers[0] = KL.InputLayer(input_tensor=images_val)
model_val.compile(
loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'],
target_tensors=[labels_tfrecord_val])
# model.summary()
loss = model_val.evaluate(x=None, y=None, steps=steps_per_epoch_val)
print('\nNum images evaluated, steps: {}, {}'.
format(nrecords_val, steps_per_epoch_val))
print('\nTest loss, acc: {}'.format(loss))
# print('\nTest accuracy: {0}'.format(acc))
# Clean up the TF session.
# coord.request_stop()
# coord.join(threads)
KB.clear_session() # do this for Horovod
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from time import sleep
import logging
import redis
from redis.sentinel import Sentinel
from rediscluster import StrictRedisCluster
from contextlib import contextmanager
from drivers import BaseDriver, DatabaseInfraStatus, DatabaseStatus
from drivers.errors import ConnectionError
from system.models import Configuration
from physical.models import Instance
from util import build_context_script, \
make_db_random_password
LOG = logging.getLogger(__name__)
CLONE_DATABASE_SCRIPT_NAME = "redis_clone.py"
REDIS_CONNECTION_DEFAULT_TIMEOUT = 5
REDIS_CONNECTION_SOCKET_TIMEOUT = 3
class Redis(BaseDriver):
default_port = 6379
@property
def ports(self):
return (6379,)
@property
def instances_filtered(self):
return self.databaseinfra.instances.filter(
instance_type=Instance.REDIS, is_active=True
)
@property
def uri_instance_type(self):
return 'redis'
@property
def database_name(self):
return '0'
@property
def connection_timeout_in_seconds(self):
return Configuration.get_by_name_as_int(
'redis_connect_timeout',
default=REDIS_CONNECTION_DEFAULT_TIMEOUT
)
@property
def connection_socket_timeout_in_seconds(self):
return Configuration.get_by_name_as_int(
'redis_socket_connect_timeout',
default=REDIS_CONNECTION_SOCKET_TIMEOUT
)
def concatenate_instances(self):
return ",".join([
"{}:{}".format(instance.address, instance.port)
for instance in self.instances_filtered
])
def concatenate_instances_dns(self):
return ",".join([
"{}:{}".format(instance.dns, instance.port)
for instance in self.instances_filtered
if not instance.dns.startswith('10.')
])
def concatenate_instances_dns_only(self):
return ",".join([
str(instance.dns) for instance in self.instances_filtered
])
def get_connection(self, database=None):
return "{}://:<password>@{}/{}".format(
self.uri_instance_type, self.concatenate_instances(),
self.database_name
)
def get_connection_dns(self, database=None):
return "{}://:<password>@{}/{}".format(
self.uri_instance_type, self.concatenate_instances_dns(),
self.database_name
)
def get_connection_dns_simple(self, database=None):
return "{}://{}".format(
self.uri_instance_type, self.concatenate_instances_dns()
)
def __get_admin_single_connection(self, instance=None):
if not instance:
instance = self.instances_filtered.first()
return instance.address, instance.port
def get_dns_port(self):
instance = self.databaseinfra.instances.first()
return instance.dns, instance.port
def __redis_client__(self, instance, default_timeout=False):
LOG.debug('Connecting to redis single infra {}'.format(
self.databaseinfra
))
address, port = self.__get_admin_single_connection(instance)
client = redis.StrictRedis(
host=address, port=int(port),
password=self.databaseinfra.password,
            socket_timeout=(REDIS_CONNECTION_DEFAULT_TIMEOUT if default_timeout
                            else self.connection_timeout_in_seconds),
            socket_connect_timeout=(REDIS_CONNECTION_SOCKET_TIMEOUT if default_timeout
                                    else self.connection_socket_timeout_in_seconds)
)
LOG.debug('Successfully connected to redis single infra {}'.format(
self.databaseinfra
))
return client
def get_client(self, instance):
return self.__redis_client__(instance, default_timeout=False)
def lock_database(self, client):
pass
def unlock_database(self, client):
pass
@contextmanager
def redis(self, instance=None, database=None, default_timeout=False):
try:
client = self.__redis_client__(instance, default_timeout=default_timeout)
return_value = client
yield return_value
except Exception as e:
raise ConnectionError(
'Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
)
)
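    # Hedged usage sketch (added comment): the other driver methods below use
    # this context manager roughly as
    #
    #     with self.redis(instance=instance) as client:
    #         info = client.info()
    #
    # so any failure while talking to Redis surfaces as
    # drivers.errors.ConnectionError.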
def check_status(self, instance=None):
with self.redis(instance=instance) as client:
try:
ok = client.ping()
except Exception as e:
raise ConnectionError(
                    'Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
)
)
if not ok:
raise ConnectionError(
'Invalid status for ping command to infra {}'.format(
self.databaseinfra
)
)
return True
def list_databases(self, instance=None):
dbs_names = []
with self.redis(instance=instance) as client:
try:
keyspace = client.info('keyspace')
if len(keyspace) == 0:
dbs_names.append('db0')
else:
for db in keyspace:
dbs_names.append(db)
except Exception as e:
raise ConnectionError(
                    'Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
)
)
return dbs_names
@property
def maxmemory(self):
return int(
self.databaseinfra.get_parameter_value_by_parameter_name('maxmemory') or
self.databaseinfra.get_dbaas_parameter_default_value('maxmemory')
)
def get_total_size_from_instance(self, instance):
return self.maxmemory
def get_used_size_from_instance(self, instance):
with self.redis(instance=instance) as client:
if instance.status == Instance.ALIVE:
database_info = client.info()
return database_info.get(
'used_memory', 0
)
def info(self):
infra_status = DatabaseInfraStatus(
databaseinfra_model=self.databaseinfra
)
with self.redis() as client:
json_server_info = client.info()
infra_status.version = json_server_info.get(
'redis_version', None
)
infra_status.used_size_in_bytes = json_server_info.get(
'used_memory', 0
)
for database in self.databaseinfra.databases.all():
database_name = database.name
db_status = DatabaseStatus(database)
try:
if self.check_status():
db_status.is_alive = True
except:
pass
db_status.total_size_in_bytes = 0
db_status.used_size_in_bytes = infra_status.used_size_in_bytes
infra_status.databases_status[database_name] = db_status
return infra_status
def get_replication_info(self, instance):
return 0
def is_replication_ok(self, instance):
replication_info = int(self.get_replication_info(instance=instance))
if replication_info == 0:
return True
return False
def update_user(self, credential):
pass
def remove_user(self, credential):
pass
def create_database(self, database):
pass
def remove_database(self, database):
pass
def change_default_pwd(self, instance):
pass
def clone(self):
return CLONE_DATABASE_SCRIPT_NAME
def check_instance_is_eligible_for_backup(self, instance):
return True
def check_instance_is_master(self, instance, default_timeout=False):
if instance.is_active:
return True
return False
def deprecated_files(self):
return ["*.pid"]
def data_dir(self):
return '/data/'
def switch_master(self, instance=None, preferred_slave_instance=None):
pass
def get_database_agents(self):
return ['httpd']
def get_default_database_port(self):
return 6379
def get_default_instance_type(self):
return Instance.REDIS
def get_configuration(self):
instance = self.databaseinfra.instances.filter(
status=Instance.ALIVE, instance_type=Instance.REDIS, is_active=True
).first()
if not instance:
raise EnvironmentError(
'Cannot get configuration for {}. No Redis instance with status '
'alive and active found'.format(self.databaseinfra)
)
with self.redis(instance) as client:
config = client.config_get()
if 'client-output-buffer-limit' in config:
config_COBL = config['client-output-buffer-limit']
config_COBL_normal = config_COBL.split("normal ")[1].split(" slave")[0]
config_COBL_slave = config_COBL.split("slave ")[1].split(" pubsub")[0]
config_COBL_pubsub = config_COBL.split("pubsub ")[1]
config['client-output-buffer-limit-normal'] = config_COBL_normal
config['client-output-buffer-limit-slave'] = config_COBL_slave
config['client-output-buffer-limit-pubsub'] = config_COBL_pubsub
return config
def set_configuration(self, instance, name, value):
with self.redis(instance) as client:
if name.startswith('client-output-buffer-limit-'):
name, prefix = name.rsplit("-", 1)
value = '{} {}'.format(prefix, value)
client.config_set(name, value)
def get_database_process_name(self):
return "redis-server"
def initialization_parameters(self, instance):
return self.parameters_redis(instance.hostname)
def configuration_parameters(self, instance, **kw):
config = self.parameters_redis(instance.hostname)
config.update(kw)
return config
def parameters_redis(self, host):
redis = host.database_instance()
redis_address = redis.address
if host.future_host:
redis_address = host.future_host.address
redis_port = redis.port
only_sentinel = False
return {
'HOSTADDRESS': redis_address,
'PORT': redis_port,
'ONLY_SENTINEL': only_sentinel,
'DATABASE_START_COMMAND': host.commands.database(
action='start'
),
'HTTPD_STOP_COMMAND_NO_OUTPUT': host.commands.httpd(
action='stop',
no_output=True
),
'HTTPD_START_COMMAND_NO_OUTPUT': host.commands.httpd(
action='start',
no_output=True
),
'SECONDARY_SERVICE_START_COMMAND': host.commands.secondary_service(
action='start'
)
}
def configuration_parameters_migration(self, instance):
return self.configuration_parameters(instance)
@classmethod
def topology_name(cls):
return ['redis_single']
def build_new_infra_auth(self):
return '', make_db_random_password(), ''
def create_metric_collector_user(self, username, password):
pass
def remove_metric_collector_user(self, username):
pass
def get_metric_collector_user(self, username):
return ""
def get_metric_collector_password(self, password):
return self.databaseinfra.password
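# The get_configuration() method above splits Redis' combined
# "client-output-buffer-limit" value ("normal ... slave ... pubsub ...") into
# per-class entries. The standalone helper below is a minimal sketch of that
# parsing, assuming the same "normal <v> slave <v> pubsub <v>" layout returned
# by CONFIG GET; the function name is illustrative only and is not used by the
# driver above.
def _example_split_client_output_buffer_limit(raw_value):
    """Sketch: split 'normal 0 0 0 slave 268435456 67108864 60 pubsub 33554432 8388608 60'."""
    normal = raw_value.split("normal ")[1].split(" slave")[0]
    slave = raw_value.split("slave ")[1].split(" pubsub")[0]
    pubsub = raw_value.split("pubsub ")[1]
    return {
        'client-output-buffer-limit-normal': normal,
        'client-output-buffer-limit-slave': slave,
        'client-output-buffer-limit-pubsub': pubsub,
    }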
class RedisSentinel(Redis):
@property
def ports(self):
return (6379, 26379)
@property
def instances_filtered(self):
return self.databaseinfra.instances.filter(
instance_type=Instance.REDIS_SENTINEL, is_active=True
)
@property
def uri_instance_type(self):
return 'sentinel'
@property
def database_name(self):
return 'service_name:{}'.format(self.databaseinfra.name)
def get_dns_port(self):
dns = self.concatenate_instances_dns_only()
port = self.instances_filtered.first().port
return dns, port
def __redis_client__(self, instance, default_timeout=False):
if instance and instance.instance_type == Instance.REDIS:
return super(RedisSentinel, self).__redis_client__(instance, default_timeout=default_timeout)
LOG.debug('Connecting to redis infra {}'.format(self.databaseinfra))
sentinel = self.get_sentinel_client(instance)
client = sentinel.master_for(
self.databaseinfra.name,
socket_timeout=REDIS_CONNECTION_DEFAULT_TIMEOUT if default_timeout else self.connection_timeout_in_seconds,
socket_connect_timeout=REDIS_CONNECTION_SOCKET_TIMEOUT if default_timeout else self.connection_socket_timeout_in_seconds,
password=self.databaseinfra.password
)
LOG.debug('Successfully connected to redis infra {}'.format(
self.databaseinfra
))
return client
def get_sentinel_client(self, instance=None):
sentinels = self.__get_admin_sentinel_connection(instance)
sentinel = Sentinel(
sentinels, socket_timeout=self.connection_timeout_in_seconds, socket_connect_timeout=self.connection_socket_timeout_in_seconds
)
return sentinel
def __get_admin_sentinel_connection(self, instance=None):
sentinels = []
if instance:
sentinels.append((instance.address, instance.port))
else:
for instance in self.databaseinfra.instances.filter(instance_type=Instance.REDIS_SENTINEL, is_active=True).all():
sentinels.append((instance.address, instance.port))
return sentinels
def get_sentinel_instance_client(self, instance, default_timeout=False):
if instance.instance_type != Instance.REDIS_SENTINEL:
error = 'Instance {} is not Sentinel'.format(instance)
raise Exception(error)
address, port = instance.address, instance.port
client = redis.StrictRedis(
host=address, port=int(port),
socket_timeout=REDIS_CONNECTION_DEFAULT_TIMEOUT if default_timeout else self.connection_timeout_in_seconds,
socket_connect_timeout=REDIS_CONNECTION_SOCKET_TIMEOUT if default_timeout else self.connection_socket_timeout_in_seconds
)
return client
def get_replication_info(self, instance):
if self.check_instance_is_master(instance=instance, default_timeout=False):
return 0
with self.redis(instance=instance) as client:
server_info = client.info()
return int(server_info['master_last_io_seconds_ago'])
def check_instance_is_eligible_for_backup(self, instance):
if instance.instance_type == Instance.REDIS_SENTINEL:
return False
with self.redis(instance=instance) as client:
try:
info = client.info()
return info['role'] == 'slave'
except Exception as e:
raise ConnectionError('Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
))
def check_instance_is_master(self, instance, default_timeout=False):
if instance.instance_type == Instance.REDIS_SENTINEL:
return False
if not instance.is_active:
return False
masters_for_sentinel = []
sentinels = self.get_non_database_instances()
for sentinel in sentinels:
client = self.get_sentinel_instance_client(sentinel)
try:
master = client.sentinel_get_master_addr_by_name(
self.databaseinfra.name)
masters_for_sentinel.append(master)
except Exception as e:
error = 'Connection error to {}. Error: {}'.format(
sentinel, e)
LOG.info(error)
sentinels_believe_is_master = 0
for master_host, master_port in masters_for_sentinel:
if (instance.address == master_host and
instance.port == master_port):
sentinels_believe_is_master += 1
if sentinels_believe_is_master > 1:
return True
return False
def switch_master(self, instance=None, preferred_slave_instance=None):
sentinel_instance = self.instances_filtered.first()
host = sentinel_instance.hostname
script = """
#!/bin/bash
die_if_error()
{
local err=$?
if [ "$err" != "0" ];
then
echo "$*"
exit $err
fi
}"""
script += """
/usr/local/redis/src/redis-cli -h {} -p {} <<EOF_DBAAS
SENTINEL failover {}
exit
\nEOF_DBAAS
die_if_error "Error reseting sentinel"
""".format(
sentinel_instance.address, sentinel_instance.port,
self.databaseinfra.name
)
script = build_context_script({}, script)
host.ssh.run_script(script)
def configuration_parameters(self, instance, **kw):
variables = {}
if kw.get('need_master', False):
for i in range(5):
master = self.get_master_instance()
if master:
break
sleep(10)
if not master:
raise Exception(
("Expect got master instance but got {} on "
"configuration_parameters").format(
master
)
)
master = self.get_master_instance()
if master:
variables.update(self.master_parameters(instance, master))
variables.update(self.parameters_redis(instance.hostname))
variables.update(self.parameters_sentinel(instance.hostname))
variables.update(kw)
return variables
def parameters_redis(self, host):
redis = host.database_instance()
redis_address = ''
redis_port = ''
only_sentinel = True
if redis:
redis_address = redis.address
redis_port = redis.port
only_sentinel = False
if redis and host.future_host:
redis_address = host.future_host.address
return {
'HOSTADDRESS': redis_address,
'PORT': redis_port,
'ONLY_SENTINEL': only_sentinel,
'DATABASE_START_COMMAND': host.commands.database(
action='start'
),
'HTTPD_STOP_COMMAND_NO_OUTPUT': host.commands.httpd(
action='stop',
no_output=True
),
'HTTPD_START_COMMAND_NO_OUTPUT': host.commands.httpd(
action='start',
no_output=True
),
'SECONDARY_SERVICE_START_COMMAND': host.commands.secondary_service(
action='start'
)
}
def master_parameters(self, instance, master):
return {
'SENTINELMASTER': master.address,
'SENTINELMASTERPORT': master.port,
'MASTERNAME': instance.databaseinfra.name
}
def parameters_sentinel(self, host):
sentinel = host.non_database_instance()
if sentinel and host.future_host:
sentinel.address = host.future_host.address
sentinel_address = ''
sentinel_port = ''
if sentinel:
sentinel_address = sentinel.address
sentinel_port = sentinel.port
return {
'SENTINELADDRESS': sentinel_address,
'SENTINELPORT': sentinel_port,
}
def configuration_parameters_migration(self, instance):
base_parameters = super(
RedisSentinel, self
).configuration_parameters_migration(instance)
all_instances = self.databaseinfra.instances.all()
future_master = all_instances[len(all_instances) // 2]
base_parameters.update(self.master_parameters(instance, future_master))
return base_parameters
@classmethod
def topology_name(cls):
return ['redis_sentinel']
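# check_instance_is_master() above asks every sentinel for the address it
# currently reports as master and only treats the instance as master when more
# than one sentinel agrees. The helper below is a minimal sketch of that
# counting step, assuming plain (host, port) tuples; it is illustrative only
# and not used by the driver above.
def _example_sentinels_agree_is_master(instance_address, instance_port, masters_reported):
    """Sketch: count how many sentinels report this instance as master."""
    votes = 0
    for master_host, master_port in masters_reported:
        if instance_address == master_host and instance_port == master_port:
            votes += 1
    return votes > 1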
class RedisCluster(Redis):
@property
def ports(self):
return (6379, 16379)
@property
def uri_instance_type(self):
return 'cluster'
def get_dns_port(self):
dns = self.concatenate_instances_dns_only()
port = self.instances_filtered.first().port
return dns, port
def __redis_client__(self, instance, default_timeout=False):
LOG.debug('Connecting to redis infra {}'.format(self.databaseinfra))
cluster = self.get_cluster_client(instance, default_timeout=default_timeout)
LOG.debug('Successfully connected to redis infra {}'.format(
self.databaseinfra
))
return cluster
def get_cluster_client(self, instance, default_timeout=False):
if instance:
return redis.StrictRedis(
host=instance.address, port=instance.port,
password=self.databaseinfra.password,
socket_timeout=REDIS_CONNECTION_DEFAULT_TIMEOUT if default_timeout else self.connection_timeout_in_seconds,
socket_connect_timeout=REDIS_CONNECTION_SOCKET_TIMEOUT if default_timeout else self.connection_socket_timeout_in_seconds,
)
return StrictRedisCluster(
startup_nodes=[
{'host': instance.address, 'port': instance.port}
for instance in self.instances_filtered
],
password=self.databaseinfra.password,
socket_timeout=REDIS_CONNECTION_DEFAULT_TIMEOUT if default_timeout else self.connection_timeout_in_seconds,
socket_connect_timeout=REDIS_CONNECTION_SOCKET_TIMEOUT if default_timeout else self.connection_socket_timeout_in_seconds,
)
def get_replication_info(self, instance):
if self.check_instance_is_master(instance=instance, default_timeout=False):
return 0
with self.redis(instance=instance) as client:
info = client.info()
return int(info['master_last_io_seconds_ago'])
def check_instance_is_eligible_for_backup(self, instance):
with self.redis(instance=instance) as client:
try:
info = client.info()
except Exception as e:
raise ConnectionError('Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
))
else:
return info['role'] == 'slave'
def check_instance_is_master(self, instance, default_timeout=False):
if not instance.is_active:
return False
with self.redis(instance=instance, default_timeout=default_timeout) as client:
try:
info = client.info()
except Exception as e:
raise ConnectionError('Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
))
else:
return info['role'] == 'master'
def switch_master(self, instance=None, preferred_slave_instance=None):
if instance is None:
raise Exception('Cannot switch master in a Redis cluster without an instance.')
slave_instance = self.get_slave_for(instance, preferred_slave_instance)
if not slave_instance:
raise Exception('There is no slave for {}'.format(instance))
host = slave_instance.hostname
script = """
#!/bin/bash
die_if_error()
{
local err=$?
if [ "$err" != "0" ];
then
echo "$*"
exit $err
fi
}"""
script += """
/usr/local/redis/src/redis-cli -h {} -p {} -a {} -c<<EOF_DBAAS
CLUSTER FAILOVER
exit
\nEOF_DBAAS
die_if_error "Error executing cluster failover"
""".format(
slave_instance.address, slave_instance.port,
self.databaseinfra.password
)
script = build_context_script({}, script)
host.ssh.run_script(script)
def get_master_instance(self):
masters = []
for instance in self.get_database_instances():
try:
if self.check_instance_is_master(instance, default_timeout=False):
masters.append(instance)
if instance.hostname.future_host:
instance.address = instance.hostname.future_host.address
if self.check_instance_is_master(instance, default_timeout=False):
masters.append(instance)
except ConnectionError:
continue
return masters
def get_master_instance2(self):
masters = []
for instance in self.get_database_instances():
try:
if self.check_instance_is_master(instance, default_timeout=False):
masters.append(instance)
except ConnectionError:
continue
return masters
def get_slave_instances(self):
instances = self.get_database_instances()
masters = self.get_master_instance()
if not masters:
raise Exception("Master could not be detected")
try:
for master in masters:
instances.remove(master)
except ValueError:
raise Exception("Master could not be detected")
return instances
def get_master_for(self, instance):
with self.redis(instance=instance) as client:
try:
info = client.info()
except Exception as e:
raise ConnectionError('Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
))
if info['role'] != 'slave':
return instance
address = info['master_host']
port = info['master_port']
return self.databaseinfra.instances.filter(
hostname__address=address, port=port
).first()
def get_slave_for(self, instance, preferred_slave_instance=None):
with self.redis(instance=instance) as client:
try:
info = client.info('replication')
except Exception as e:
raise ConnectionError('Error connecting to infra {}: {}'.format(
self.databaseinfra, str(e)
))
if info['role'] != 'master':
return
connected_slaves = info['connected_slaves']
if connected_slaves == 0:
return
if preferred_slave_instance is None:
address = info['slave0']['ip']
port = info['slave0']['port']
else:
for i in range(connected_slaves):
address = info['slave{}'.format(i)]['ip']
port = info['slave{}'.format(i)]['port']
if (address == preferred_slave_instance.address
and port == preferred_slave_instance.port):
break
return self.databaseinfra.instances.filter(
hostname__address=address, port=port
).first()
@classmethod
def topology_name(cls):
return ['redis_cluster']
def get_node_id(self, instance, address, port):
name = "{}:{}".format(address, port)
with self.redis(instance=instance) as client:
nodes = client.execute_command("CLUSTER NODES")
for node in nodes.keys():
if name in node:
return nodes[node]['node_id']
raise EnvironmentError('Node {} not in {}'.format(name, nodes))
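# get_slave_for() above reads the INFO replication section and picks either
# slave0 or the preferred slave's address/port. The helper below is a minimal
# sketch of that selection over a plain dict shaped like redis-py's parsed
# INFO output ({'role': ..., 'connected_slaves': N, 'slave0': {'ip': ..., 'port': ...}});
# it is illustrative only and not used by the driver above.
def _example_pick_slave(replication_info, preferred=None):
    """Sketch: return (ip, port) of a slave from parsed INFO replication data."""
    if replication_info.get('role') != 'master':
        return None
    connected_slaves = replication_info.get('connected_slaves', 0)
    if connected_slaves == 0:
        return None
    address = replication_info['slave0']['ip']
    port = replication_info['slave0']['port']
    if preferred is not None:
        for i in range(connected_slaves):
            candidate = replication_info['slave{}'.format(i)]
            if (candidate['ip'], candidate['port']) == preferred:
                address, port = candidate['ip'], candidate['port']
                break
    return address, port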
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata builder helper for models built with Estimator API.
This module utilizes monkey_patch_utils to monkey patch certain functions in
feature columns and estimators to observe created tensors. Then, explanation
metadata is created from these groups of tensors.
Users of EstimatorMetadataBuilder need to instantiate the builder with an
estimator instance, base feature columns they want explanations for, serving
input function they want to export the model with, output key, and other kwargs
to be passed to export_saved_model function of estimators.
Example usage is as follows:
classifier.train(...)
md_builder = EstimatorMetadataBuilder(classifier, columns, serving_input_fn)
md_builder.save_model_with_metadata(export_path)
Caveats:
- If an input is encoded multiple times, we don't include any encoded tensors
in the metadata. If those inputs are sparse, users need to use Sampled
Shapley. When we have the support for attributing from multiple encoded
tensors to a single input for methods like IG, then we can update this
module to include encoded tensors in input metadata.
- Having multiple input tensors for a feature column is a rare case. But when
it happens, we create two input metadata for that feature because there are
multiple sets of input tensors and encoded tensors in parallel.
- There is no get_metadata() function because these tensors can be observed
only when the estimator is being saved.
"""
import enum
import shutil
import tempfile
from typing import Dict, Text, List, Any, Set, Callable, Optional, Union
import tensorflow.compat.v1 as tf
from tensorflow.python.feature_column import feature_column_v2 as fc2
from explainable_ai_sdk.common import explain_metadata
from explainable_ai_sdk.metadata import constants
from explainable_ai_sdk.metadata import metadata_builder
from explainable_ai_sdk.metadata import utils
from explainable_ai_sdk.metadata.tf.v1 import monkey_patch_utils
@enum.unique
class DuplicateAction(enum.Enum):
"""Enum for action to take in case of duplicate inputs."""
NOTHING = enum.auto()
DROP = enum.auto()
GROUP = enum.auto()
class EstimatorMetadataBuilder(metadata_builder.MetadataBuilder):
"""Class for generating metadata for models built with Estimator API."""
def __init__(self,
estimator: tf.estimator.Estimator,
feature_columns: List[fc2.FeatureColumn],
serving_input_fn: Callable[..., Any],
output_key: Optional[Text] = None,
baselines: Optional[Dict[Text, List[Any]]] = None,
input_mds: Optional[List[explain_metadata.InputMetadata]] = None,
duplicate_feature_treatment: Union[
str, DuplicateAction] = DuplicateAction.NOTHING,
**kwargs):
"""Initialize an EstimatorMetadataBuilder.
Args:
estimator: Estimator instance to observe and save.
feature_columns: A group of feature columns to export metadata for. These
feature columns need to be basic feature columns and not derived
columns such as embedding, indicator, bucketized.
serving_input_fn: Serving input function to be used when exporting the
model.
output_key: Output key to find the model's relevant output tensors. Some
valid values are logits and probabilities. If not provided, it will default
to logits and regression outputs.
baselines: Baseline values for each input feature. The key name is the
feature name, and the value represents the baselines. The value is
specified as a list because multiple baselines can be supplied for the
features.
input_mds: Additional inputs to the metadata. If this is provided, it will
be added in addition to the inputs derived from feature_columns.
duplicate_feature_treatment: Treatment for duplicate inputs for the same
feature column. Either provide a DuplicateAction enum or a string where
the possible values are {'nothing', 'drop', 'group'}. If set
to 'nothing', all are included with unique suffix added to input names
to disambiguate. If set to 'drop', only one of the inputs will be
retained. If set to 'group', all inputs will be grouped.
**kwargs: Any keyword arguments to be passed to the export_saved_model() and
add_meta_graph() functions.
"""
if not isinstance(estimator, tf.estimator.Estimator):
raise ValueError('A valid estimator needs to be provided.')
self._estimator = estimator
if not feature_columns:
raise ValueError('feature_columns cannot be empty.')
if isinstance(duplicate_feature_treatment, str):
if duplicate_feature_treatment.upper() not in [a.name
for a in DuplicateAction]:
raise ValueError('Unrecognized treatment option for duplicates: '
f'{duplicate_feature_treatment}.')
duplicate_feature_treatment = DuplicateAction[
duplicate_feature_treatment.upper()]
self._feature_columns = feature_columns
self._input_mds = input_mds
self._output_key = output_key
self._baselines = baselines
self._serving_input_fn = serving_input_fn
self._duplicate_action = duplicate_feature_treatment
self._save_args = kwargs
self._metadata = None
def _get_input_tensor_names_for_metadata(
self,
feature_tensors: monkey_patch_utils.FeatureTensors) -> Dict[Text, Text]:
"""Returns a dictionary of tensor names for given FeatureTensors object."""
input_md = {}
if isinstance(feature_tensors.input_tensor, tf.Tensor):
input_md['input_tensor_name'] = feature_tensors.input_tensor.name
else: # IdWeightPair -- sparse tensor
sparse_ids, weights = feature_tensors.input_tensor
input_md['input_tensor_name'] = sparse_ids.values.name
input_md['indices_tensor_name'] = sparse_ids.indices.name
input_md['dense_shape_tensor_name'] = sparse_ids.dense_shape.name
if weights:
input_md['weight_values_name'] = weights.values.name
input_md['weight_indices_name'] = weights.indices.name
input_md['weight_dense_shape_name'] = weights.dense_shape.name
return input_md
def _get_encoded_tensor_names_for_metadata(
self,
feature_tensors: monkey_patch_utils.FeatureTensors) -> Dict[Text, Text]:
"""Returns encoded tensor names only if there is a single encoded tensor."""
input_md = {}
if len(feature_tensors.encoded_tensors) == 1:
# Currently, encoding is always the combined embedding.
input_md['encoded_tensor_name'] = feature_tensors.encoded_tensors[0].name
input_md['encoding'] = 'combined_embedding'
return input_md
def _create_input_metadata(
self,
features_dict: Dict[Text, List[monkey_patch_utils.FeatureTensors]],
crossed_columns: Set[Text],
desired_columns: List[Text]):
"""Creates and returns a list of InputMetadata.
Args:
features_dict: Dictionary from feature name to FeatureTensors class.
crossed_columns: A set of crossed column names.
desired_columns: A list of feature column names. Only the columns in
this list will be added to input metadata.
Returns:
A list of InputMetadata.
"""
input_mds = []
if self._input_mds is not None:
input_mds = self._input_mds
input_names_processed = set([i.name for i in input_mds])
for fc_name, tensor_groups in features_dict.items():
if fc_name in desired_columns and fc_name not in input_names_processed:
for tensor_group in tensor_groups:
input_md = self._get_input_tensor_names_for_metadata(tensor_group)
if fc_name not in crossed_columns:
input_md.update(
self._get_encoded_tensor_names_for_metadata(tensor_group))
input_md['name'] = fc_name
if self._baselines:
input_md['input_baselines'] = self._baselines.get(fc_name, None)
if (len(tensor_groups) == 1 or
self._duplicate_action == DuplicateAction.DROP):
input_mds.append(explain_metadata.InputMetadata(**input_md))
break # Skip other tensor_groups.
# There are multiple inputs for the same feature column.
# Append part of the tensor name until the first '/'. This usually
# specifies what kind of model it is: linear or dnn.
input_tensor_name = str(input_md['input_tensor_name'])
suffix = input_tensor_name.split('/')[0]
input_md['name'] = '%s_%s' % (fc_name, suffix)
if self._duplicate_action == DuplicateAction.GROUP:
input_md['group_name'] = fc_name
input_mds.append(explain_metadata.InputMetadata(**input_md))
return input_mds
def _create_output_metadata(self, output_dict: Dict[Text, tf.Tensor]):
"""Creates and returns a list of OutputMetadata.
Args:
output_dict: Dictionary from tf.feature_columns to list of dense tensors.
Returns:
A list of OutputMetadata.
"""
return [explain_metadata.OutputMetadata(name, tensor.name)
for name, tensor in output_dict.items()]
def _create_metadata_from_tensors(
self,
features_dict: Dict[Text, List[monkey_patch_utils.FeatureTensors]],
crossed_columns: Set[Text],
desired_columns: List[Text],
output_dict: Dict[Text, tf.Tensor]) -> explain_metadata.ExplainMetadata:
"""Creates metadata from given tensor information.
Args:
features_dict: Dictionary from feature name to FeatureTensors class.
crossed_columns: A set of crossed column names.
desired_columns: A list of feature column names. Only the columns in
this list will be added to input metadata.
output_dict: Dictionary from tf.feature_columns to list of dense tensors.
Returns:
A dictionary that abides to explanation metadata.
"""
return explain_metadata.ExplainMetadata(
inputs=self._create_input_metadata(
features_dict,
crossed_columns,
desired_columns),
outputs=self._create_output_metadata(output_dict),
framework='Tensorflow',
tags=[constants.METADATA_TAG])
def save_model_with_metadata(self, file_path: str) -> str:
"""Saves the model and the generated metadata to the given file path.
New metadata will not be generated for each call to this function since an
Estimator is static. Calling this function with different paths will save
the model and the same metadata to all paths.
Args:
file_path: Path to save the model and the metadata. It can be a GCS bucket
or a local folder. The folder needs to be empty.
Returns:
Full file path where the model and the metadata are written.
"""
monkey_patcher = monkey_patch_utils.EstimatorMonkeyPatchHelper()
with monkey_patcher.exporting_context(self._output_key):
model_path = self._estimator.export_saved_model(
file_path, self._serving_input_fn, **self._save_args)
if not self._metadata:
self._metadata = self._create_metadata_from_tensors(
monkey_patcher.feature_tensors_dict,
monkey_patcher.crossed_columns,
[fc.name for fc in self._feature_columns],
monkey_patcher.output_tensors_dict)
utils.write_metadata_to_file(self._metadata.to_dict(), model_path)
return model_path.decode('utf-8')
def get_metadata(self) -> Dict[str, Any]:
"""Returns the current metadata as a dictionary.
Since metadata creation is somewhat costly, the metadata already created is
returned as a dictionary. If it hasn't been created yet, the model is saved to a
temporary folder, because the metadata tensors are only created during model save.
That temporary folder is deleted afterwards.
"""
if self._metadata:
return self._metadata.to_dict()
temp_location = tempfile.gettempdir()
model_path = self.save_model_with_metadata(temp_location)
shutil.rmtree(model_path)
return self._metadata.to_dict()
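# The module docstring above sketches the intended workflow (train, build the
# metadata builder, then export). The function below restates that flow as a
# minimal sketch; `classifier`, `feature_columns`, `serving_input_fn` and
# `export_path` are hypothetical placeholders supplied by the caller, and the
# keyword arguments shown are only those documented above.
def _example_export_with_metadata(classifier, feature_columns, serving_input_fn,
                                  export_path):
  """Sketch: export an already-trained estimator together with explanation metadata."""
  md_builder = EstimatorMetadataBuilder(
      classifier,                      # tf.estimator.Estimator instance
      feature_columns,                 # base (non-derived) feature columns
      serving_input_fn,                # serving input receiver fn
      output_key=None,                 # default: logits / regression outputs
      duplicate_feature_treatment='group')
  # Saves the SavedModel and writes the explanation metadata next to it.
  return md_builder.save_model_with_metadata(export_path)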
|
|
import logging
from dart.model.action import ActionType
_logger = logging.getLogger(__name__)
class ElasticsearchActionTypes(object):
data_check = ActionType(
name='data_check',
description='Executes a user defined, Elasticsearch data check',
params_json_schema={
'type': 'object',
'properties': {
'index': {
'type': 'string',
'default': '_all',
'description': 'The Elasticsearch index to perform the query on. '
+ 'Leave blank or explicitly set to "_all" to perform the query on all indices.'
},
'document_type': {
'type': ['string', 'null'],
'default': None,
'description': 'The Elasticsearch document type to perform the query on. '
+ 'Leave blank to perform the query on all document types.'
},
'query_body': {
'type': 'string',
'x-schema-form': {'type': 'textarea'},
'description': 'The Elasticsearch query should return a response that contains at least one result '
+ '("hits" in Elasticsearch terminology") for the data check to pass. '
+ 'https://www.elastic.co/guide/en/elasticsearch/reference/5.1/query-dsl.html'
},
'expected_count': {
'type': 'integer',
'default': 0,
'description': 'The expected count of documents to be returned by the query. '
+ 'Use this and the operator to return a truthy value for the data check to pass.'
},
'operator': {
'type': 'string',
'default': '>',
'description': 'The operator to apply to the query and expected count. '
+ 'i.e. result count > expected count ',
'enum': [
'>',
'>=',
'<',
'<=',
'==',
'!='
]
}
},
'additionalProperties': False,
'required': ['query_body'],
},
)
create_index = ActionType(
name='create_index',
description='Creates an Elasticsearch index',
params_json_schema={
'type': 'object',
'properties': {
'index': {
'type': 'string',
'default': '_all',
'description': 'The Elasticsearch index to create.'
},
'mapping': {
'type': 'string',
'x-schema-form': {'type': 'textarea'},
'description': 'The Elasticsearch index mapping.'
},
},
'additionalProperties': False,
'required': ['index'],
},
)
create_mapping = ActionType(
name='create_mapping',
description='Creates an Elasticsearch mapping',
params_json_schema={
'type': 'object',
'properties': {
'index': {
'type': 'string',
'default': '_all',
'description': 'The Elasticsearch index to create the mapping for. '
+ 'Explicitly set to "_all" or leave blank to create the mapping for all indices.'
},
'document_type': {
'type': 'string',
'description': 'The Elasticsearch document type to create the mapping for.'
},
'mapping': {
'type': 'string',
'x-schema-form': {'type': 'textarea'},
'description': 'The Elasticsearch mapping.'
},
},
'additionalProperties': False,
'required': ['mapping', 'document_type'],
},
)
create_template = ActionType(
name='create_template',
description='Creates an Elasticsearch template',
params_json_schema={
'type': 'object',
'properties': {
'template_name': {
'type': 'string',
'description': 'The Elasticsearch template name to create.'
},
'template': {
'type': 'string',
'x-schema-form': {'type': 'textarea'},
'description': 'The Elasticsearch template.'
},
},
'additionalProperties': False,
'required': ['template', 'template_name'],
},
)
delete_index = ActionType(
name='delete_index',
description='Deletes an Elasticsearch index',
params_json_schema={
'type': 'object',
'properties': {
'index': {
'type': 'string',
'default': '_all',
'description': 'The Elasticsearch index to delete.'
}
},
'additionalProperties': False,
'required': ['index'],
},
)
delete_template = ActionType(
name='delete_template',
description='Deletes an Elasticsearch template',
params_json_schema={
'type': 'object',
'properties': {
'template_name': {
'type': 'string',
'description': 'The Elasticsearch template name to delete.'
},
},
'additionalProperties': False,
'required': ['template_name'],
},
)
force_merge_index = ActionType(
name='force_merge_index',
description='Force merges an Elasticsearch index',
params_json_schema={
'type': 'object',
'properties': {
'index': {
'type': 'string',
'default': '_all',
'description': 'A comma-separated list of index names; use "_all" or an empty string to perform the operation on all indices.'
},
'flush': {
'type': 'boolean',
'default': True,
'description': 'Specify whether the index should be flushed after performing the operation'
},
'allow_no_indices': {
'type': 'boolean',
'default': False,
'description': 'Whether to ignore if a wildcard indices expression resolves into no concrete indices. '
+ '(This includes the "_all" string or when no indices have been specified.)'
},
'expand_wildcards': {
'type': 'string',
'default': 'open',
'pattern': '^(open|closed|none|all)$',
'description': 'Whether to expand wildcard expression to concrete indices that are open, closed or '
+ 'both. default is "open". valid choices are: "open", "closed", "none", "all"'
},
'max_num_segments': {
'type': ['integer', 'null'],
'default': None,
'description': 'The number of segments the index should be merged into (default: dynamic)'
},
'only_expunge_deletes': {
'type': 'boolean',
'default': False,
'description': 'Specify whether the operation should only expunge deleted documents'
},
'wait_for_merge': {
'type': 'boolean',
'default': True,
'description': 'Specify whether the request should block until the merge process is finished'
}
},
'additionalProperties': False,
'required': ['index'],
},
)
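# The data_check schema above requires a query_body and compares the query's
# hit count against expected_count using the chosen operator. The dict below is
# a minimal example of parameters that satisfy that schema; the index name and
# query are made-up illustrations, not values used anywhere in this module.
EXAMPLE_DATA_CHECK_PARAMS = {
    'index': 'my_logs',
    'document_type': None,
    'query_body': '{"query": {"match_all": {}}}',
    'expected_count': 0,
    'operator': '>',
}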
|
|
import numpy as np
import scipy as sp
from abc import ABCMeta, abstractmethod
class Likelihood(object):
__metaclass__ = ABCMeta
def __init__(self, n=1, mask=None):
self._n = n
self._mask = mask
if mask is not None:
self._n = mask.size - mask.sum()
@property
def mask(self):
return self._mask
@mask.setter
def mask(self, value):
self._mask = value
if value is not None:
self._n = value.size - value.sum()
def _prior_energy(self):
return 0.0
def energy(self, theta, data):
e_prior = self._prior_energy()
if self._mask is None:
return self._energy(theta, data) + e_prior
else:
return self._energy_mask(theta, data, self._mask) + e_prior
@abstractmethod
def _energy(self, theta, data):
raise NotImplementedError("Not implemented in Base class")
def _energy_mask(self, theta, data, mask):
return self._energy(theta[np.logical_not(mask)],
data[np.logical_not(mask)])
def gradient(self, theta, data):
e_prior = self._prior_energy()
if self._mask is None:
energy, gradient = self._gradient(theta, data)
else:
energy, gradient = self._gradient_mask(theta, data, self._mask)
return energy + e_prior, gradient
@abstractmethod
def _gradient(self, theta, data):
raise NotImplementedError("Not implemented in Base class")
def _gradient_mask(self, theta, data, mask):
grad = np.zeros_like(theta)
indices = np.where(np.logical_not(mask))
energy, masked_grad = self._gradient(theta[indices],
data[indices])
grad[indices] = masked_grad
return energy, grad
def set_params(self, p):
if 'mask' in p:
self._mask = p['mask']
self._n = self._mask.size - self._mask.sum()
def get_parms(self, p):
if self._mask is not None:
return self._mask
def sample_nuissance_params(self, calc_data, data, parameter_dict):
pass
class GaussianLikelihood(Likelihood):
"""
We assume a scale free prior on gamma
and a gamma distribution on k
"""
def __init__(self, k=1., n=1, mask=None):
super(GaussianLikelihood, self).__init__(n, mask)
self._k = float(k)
self._gamma = 1.
self._normal_alpha = 10.
self._normal_beta = 10.
self._sample_gamma = False
self._sample_k = False
def _prior_energy(self):
k = self._k
# Gamma prior on k
energy = self._normal_beta * k + (self._normal_alpha - 1) * np.log(k)
# Jeffrey's prior on gamma (improper)
energy += np.log(self._gamma)
return energy
def _energy(self, theta, data):
n_data = self._n
chi2 = (data - self._gamma * theta)**2
E = 0.5 * self._k * chi2.sum()
# Partition function
E -= 0.5 * n_data * np.log(self._k)
return E
def _gradient(self, theta, data):
n_data = self._n
diff = (data - self._gamma * theta)
energy = 0.5 * self._k * np.sum(diff**2)
# Partition function
energy -= 0.5 * n_data * np.log(self._k)
grad = -self._k * diff
return energy, grad
def sample_nuissance_params(self, calc_data, data, parameter_dict):
if self._sample_k:
self.sample_force_constant(calc_data, data, parameter_dict)
if self._sample_gamma:
self.sample_scale(calc_data, data, parameter_dict)
def sample_scale(self, calc_data, data, parameter_dict):
"""
Sample a new scale (fluence) gamma
"""
gamma = self._gamma
k = self._k
xy = k * np.sum(data * calc_data)
xx = k * np.sum(calc_data**2)
mean = xy / xx
var = 1. / xx
gamma = np.random.normal(mean, np.sqrt(var))
parameter_dict["gamma"] = gamma
def sample_force_constant(self, calc_data, data, parameter_dict):
"""
Sample a new force constant k
We assume a gamma prior G(10, 10) on k
"""
gamma = parameter_dict["gamma"]
n = data.size
chi2 = np.sum((gamma * calc_data - data)**2)
alpha = 0.5 * n + self._normal_alpha
beta = 0.5 * chi2 + self._normal_beta
parameter_dict['k'] = np.clip(np.random.gamma(alpha, 1./beta), 0.1, 1e5)
def set_params(self, p):
super(GaussianLikelihood, self).set_params(p)
if 'k' in p:
self._k = float(p['k'])
if "gamma" in p:
self._gamma = float(p['gamma'])
def get_params(self):
return {"k": self._k,
"gamma": self._gamma}
class PoissonLikelihood(Likelihood):
"""
Poisson Error Model
Assumes Poisson distributed data
"""
def __init__(self, k=1., n=1, mask=None):
super(PoissonLikelihood, self).__init__(n, mask)
self._gamma = 1.
self._sample_gamma = False
def _energy(self, theta, data):
energy = -data * np.log(np.clip(theta, 1e-20, 1e300)) + data * np.log(np.clip(data, 1e-20, 1e300)) + theta
return energy.sum()
def _gradient(self, theta, data):
energy = -data * np.log(np.clip(theta, 1e-20, 1e300)) + data * np.log(np.clip(data, 1e-20, 1e300)) + theta
# aggressive clipping; this may make the gradient problematic
grad = 1 - data / np.clip(theta, 1e-10, 1e300)
return energy.sum(), grad
def sample_nuissance_params(self, calc_data, data, parameter_dict):
if self._sample_gamma:
self.sample_scale(calc_data, data, parameter_dict)
def sample_scale(self, calc_data, data, parameter_dict):
# add a prior
prior = 1.
beta = np.sum(calc_data) + prior
alpha = np.sum(data) + 1. + prior
parameter_dict["gamma"] = np.random.gamma(alpha, 1./beta)
def set_params(self, p):
super(PoissonLikelihood, self).set_params(p)
if "gamma" in p:
self._gamma = float(p['gamma'])
def get_params(self):
return {"gamma": self._gamma}
class LogNormalLikelihood(Likelihood):
def __init__(self, k=1., n=1, mask=None):
super(LogNormalLikelihood, self).__init__(n, mask)
self._k = float(k)
self._gamma = 1.
def _energy(self, theta, data):
a = np.log(np.clip(theta,
1e-105, 1e300))
b = np.log(np.clip(data,
1e-105, 1e300))
chi = a - b
return 0.5 * self._k * np.sum(chi*chi)
def _gradient(self, theta, data):
a = np.log(np.clip(theta,
1e-105, 1e300))
b = np.log(np.clip(data,
1e-105, 1e300))
chi = a - b
energy = 0.5 * self._k * np.sum(chi * chi)
grad = self._k * chi / np.clip(theta, 1e-105, 1e300)
return energy, grad
class TiedGaussianLikelihood(Likelihood):
"""
A likelihood in which the we assume that mean and variance are tied
plus some constant gaussian noise so the model looks like
so d_i = x_i + N(0, sqrt(x_i)) + N(0,sigma)
"""
def __init__(self, sigma=1., n=1, mask=None):
super(TiedGaussianLikelihood, self).__init__(n,mask)
self._sigma = sigma
def _energy(self, theta, data):
chi2 = (data - theta)**2
sqrt_theta = np.sqrt(np.clip(theta, 1e-30, 1e100))
u = np.log(sqrt_theta + self._sigma)\
+ 0.5 * chi2/(sqrt_theta + self._sigma)**2
return u.sum() - 0.5 * self._n * np.log(2 * np.pi)
def _gradient(self, theta, data):
ctheta = np.clip(theta, 1e-30, 1e100)
diff = (data - theta)
chi2 = diff**2
sqrt_theta = np.sqrt(ctheta)
sqrt_plus_sigma = (sqrt_theta + self._sigma)
grad = 1./(ctheta + sqrt_theta * self._sigma) * 0.5
grad -= chi2/(2 * sqrt_theta * sqrt_plus_sigma**3) + diff/sqrt_plus_sigma**2
energy = np.log(sqrt_plus_sigma)\
+ 0.5 * chi2/(sqrt_plus_sigma)**2
energy = energy.sum() - 0.5 * self._n * np.log(2 * np.pi)
return energy, grad
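# The analytic _gradient() implementations in this module can be sanity-checked
# against a central finite difference of the corresponding _energy(). The
# helper below is a minimal sketch of such a check for any likelihood instance;
# it assumes no mask is set, a 1-D theta, and that theta stays inside the
# domain when perturbed by eps.
def _example_finite_difference_gradient_check(likelihood, theta, data, eps=1e-6):
    """Sketch: compare analytic and numerical gradients elementwise."""
    theta = np.asarray(theta, dtype=float)
    _, analytic = likelihood._gradient(theta, data)
    numerical = np.zeros_like(theta)
    for i in range(theta.size):
        shift = np.zeros_like(theta)
        shift[i] = eps
        numerical[i] = (likelihood._energy(theta + shift, data) -
                        likelihood._energy(theta - shift, data)) / (2 * eps)
    return np.max(np.abs(analytic - numerical))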
class AnscombeLikelihood(Likelihood):
"""
Instead of estimating the underlying image, we estimate the
Anscombe-transformed image.
"""
def __init__(self, k=1., n=1, mask=None):
super(AnscombeLikelihood, self).__init__(n, mask)
self._k = float(k)
self._sigma = 1.
def _energy(self, theta, data):
n_data = self._n
transformed_counts = 2 * np.sqrt(np.clip(data, 0.0, 1e309) + 0.375)
chi2 = np.sum((transformed_counts.ravel()
- theta.ravel())**2)
return 0.5 * self._k * chi2 - 0.5 * n_data * np.log(self._k)
def _gradient(self, theta, data):
n_data = self._n
transformed_counts = 2 * np.sqrt(np.clip(data, -0.3, 1e309) + 0.375)
diff = transformed_counts - theta
chi2 = np.sum((diff)**2)
energy = 0.5 * self._k * chi2 - 0.5 * n_data * np.log(self._k)
return energy, self._k * -diff
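# The Anscombe transform used above, 2*sqrt(x + 3/8), approximately stabilizes
# the variance of Poisson counts to 1 for reasonably large means, which is why
# a plain Gaussian energy with constant k is applied to the transformed counts.
# The snippet below is a minimal Monte-Carlo sketch of that property; the mean
# and sample size are arbitrary illustrations.
def _example_anscombe_variance_stabilization(mean=20.0, n_samples=100000, seed=0):
    """Sketch: the variance of Anscombe-transformed Poisson counts is close to 1."""
    rng = np.random.RandomState(seed)
    counts = rng.poisson(mean, size=n_samples)
    transformed = 2.0 * np.sqrt(counts + 0.375)
    return transformed.var()  # expected to be close to 1.0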
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:22565")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:22565")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a MandarinCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a MandarinCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
import datetime
import json
import logging
from typing import Any, Callable, Dict, Optional, Set, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.id_dict import BatchKwargs, BatchSpec, IDDict
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.exceptions import InvalidBatchIdError
from great_expectations.types import SerializableDictDot, safe_deep_copy
from great_expectations.util import deep_filter_properties_iterable
from great_expectations.validator.metric_configuration import MetricConfiguration
logger = logging.getLogger(__name__)
try:
import pyspark
except ImportError:
pyspark = None
logger.debug(
"Unable to load pyspark; install optional spark dependency if you will be working with Spark dataframes"
)
class BatchDefinition(SerializableDictDot):
def __init__(
self,
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
batch_identifiers: IDDict,
batch_spec_passthrough: Optional[dict] = None,
):
self._validate_batch_definition(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
batch_identifiers=batch_identifiers,
)
assert type(batch_identifiers) == IDDict
self._datasource_name = datasource_name
self._data_connector_name = data_connector_name
self._data_asset_name = data_asset_name
self._batch_identifiers = batch_identifiers
self._batch_spec_passthrough = batch_spec_passthrough
def to_json_dict(self) -> dict:
return convert_to_json_serializable(
{
"datasource_name": self.datasource_name,
"data_connector_name": self.data_connector_name,
"data_asset_name": self.data_asset_name,
"batch_identifiers": self.batch_identifiers,
}
)
def __repr__(self) -> str:
doc_fields_dict: dict = {
"datasource_name": self._datasource_name,
"data_connector_name": self._data_connector_name,
"data_asset_name": self.data_asset_name,
"batch_identifiers": self._batch_identifiers,
}
return str(doc_fields_dict)
@staticmethod
def _validate_batch_definition(
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
batch_identifiers: IDDict,
):
if datasource_name is None:
raise ValueError("A valid datasource must be specified.")
if datasource_name and not isinstance(datasource_name, str):
raise TypeError(
f"""The type of an datasource name must be a string (Python "str"). The type given is
"{str(type(datasource_name))}", which is illegal.
"""
)
if data_connector_name is None:
raise ValueError("A valid data_connector must be specified.")
if data_connector_name and not isinstance(data_connector_name, str):
raise TypeError(
f"""The type of a data_connector name must be a string (Python "str"). The type given is
"{str(type(data_connector_name))}", which is illegal.
"""
)
if data_asset_name is None:
raise ValueError("A valid data_asset_name must be specified.")
if data_asset_name and not isinstance(data_asset_name, str):
raise TypeError(
f"""The type of a data_asset name must be a string (Python "str"). The type given is
"{str(type(data_asset_name))}", which is illegal.
"""
)
if batch_identifiers and not isinstance(batch_identifiers, IDDict):
raise TypeError(
f"""The type of batch_identifiers must be an IDDict object. The type given is \
"{str(type(batch_identifiers))}", which is illegal.
"""
)
@property
def datasource_name(self) -> str:
return self._datasource_name
@property
def data_connector_name(self) -> str:
return self._data_connector_name
@property
def data_asset_name(self) -> str:
return self._data_asset_name
@property
def batch_identifiers(self) -> IDDict:
return self._batch_identifiers
@property
def batch_spec_passthrough(self) -> dict:
return self._batch_spec_passthrough
@batch_spec_passthrough.setter
def batch_spec_passthrough(self, batch_spec_passthrough: Optional[dict]):
self._batch_spec_passthrough = batch_spec_passthrough
@property
def id(self) -> str:
return IDDict(self.to_json_dict()).to_id()
def __eq__(self, other):
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return self.id == other.id
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
def __hash__(self) -> int:
"""Overrides the default implementation"""
_result_hash: int = hash(self.id)
return _result_hash
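# BatchDefinition above validates its naming fields and derives a deterministic
# id from the JSON form of those fields. The function below is a minimal sketch
# of constructing one; the datasource/connector/asset names and identifiers are
# made-up illustrations, not values used elsewhere in this module.
def _example_batch_definition() -> BatchDefinition:
    """Sketch: build a BatchDefinition and rely on its id/__eq__ semantics."""
    return BatchDefinition(
        datasource_name="my_datasource",
        data_connector_name="my_data_connector",
        data_asset_name="my_data_asset",
        batch_identifiers=IDDict({"airflow_run_id": "2021-01-01"}),
    )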
class BatchRequestBase(SerializableDictDot):
"""
This class is for internal inter-object protocol purposes only.
As such, it contains all attributes of a batch_request, but does not validate them.
See the BatchRequest class, which extends BatchRequestBase and validates the attributes.
BatchRequestBase is used exclusively for internal protocol purposes and is not part of the API exposed to developers.
Previously, the very same BatchRequest was used both for the internal protocol purposes and as part of the API
exposed to developers. However, while convenient for internal data interchange, using the same BatchRequest class
as arguments to the externally-exported DataContext.get_batch(), DataContext.get_batch_list(), and
DataContext.get_validator() API calls for obtaining batches and/or validators was insufficiently expressive to
fulfill the needs of both. In the user-accessible API, BatchRequest must enforce that all members of the triple,
consisting of datasource_name, data_connector_name, and data_asset_name, are not NULL. For the internal
protocol, by contrast, BatchRequest is used as a flexible bag of attributes in which any field is allowed to be NULL. Hence,
BatchRequestBase is now dedicated to serving as the bag of attributes for the internal protocol, where NULL
values are allowed as the internal needs dictate. The BatchRequest class extends BatchRequestBase and adds strong
validation (described above plus additional attribute validation) so as to formally validate user-specified fields.
"""
def __init__(
self,
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
data_connector_query: Optional[dict] = None,
limit: Optional[int] = None,
runtime_parameters: Optional[dict] = None,
batch_identifiers: Optional[dict] = None,
batch_spec_passthrough: Optional[dict] = None,
):
self._datasource_name = datasource_name
self._data_connector_name = data_connector_name
self._data_asset_name = data_asset_name
self._data_connector_query = data_connector_query
self._limit = limit
self._runtime_parameters = runtime_parameters
self._batch_identifiers = batch_identifiers
self._batch_spec_passthrough = batch_spec_passthrough
@property
def datasource_name(self) -> str:
return self._datasource_name
@datasource_name.setter
def datasource_name(self, value: str):
self._datasource_name = value
@property
def data_connector_name(self) -> str:
return self._data_connector_name
@data_connector_name.setter
def data_connector_name(self, value: str):
self._data_connector_name = value
@property
def data_asset_name(self) -> str:
return self._data_asset_name
@data_asset_name.setter
def data_asset_name(self, data_asset_name):
self._data_asset_name = data_asset_name
@property
def data_connector_query(self) -> dict:
return self._data_connector_query
@data_connector_query.setter
def data_connector_query(self, value: dict):
self._data_connector_query = value
@property
def limit(self) -> int:
return self._limit
@limit.setter
def limit(self, value: int):
self._limit = value
@property
def runtime_parameters(self) -> dict:
return self._runtime_parameters
@runtime_parameters.setter
def runtime_parameters(self, value: dict):
self._runtime_parameters = value
@property
def batch_identifiers(self) -> dict:
return self._batch_identifiers
@batch_identifiers.setter
def batch_identifiers(self, value: dict):
self._batch_identifiers = value
@property
def batch_spec_passthrough(self) -> dict:
return self._batch_spec_passthrough
@batch_spec_passthrough.setter
def batch_spec_passthrough(self, value: dict):
self._batch_spec_passthrough = value
@property
def id(self) -> str:
return IDDict(self.to_json_dict()).to_id()
def to_dict(self) -> dict:
return standardize_batch_request_display_ordering(
batch_request=super().to_dict()
)
def to_json_dict(self) -> dict:
"""
# TODO: <Alex>2/4/2022</Alex>
This implementation of "SerializableDictDot.to_json_dict() occurs frequently and should ideally serve as the
reference implementation in the "SerializableDictDot" class itself. However, the circular import dependencies,
due to the location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules
make this refactoring infeasible at the present time.
"""
# if batch_data appears in BatchRequest, temporarily replace it with
# str placeholder before calling convert_to_json_serializable so that
# batch_data is not serialized
if batch_request_contains_batch_data(batch_request=self):
batch_data: Union[
BatchRequest, RuntimeBatchRequest, dict
] = self.runtime_parameters["batch_data"]
self.runtime_parameters["batch_data"]: str = str(type(batch_data))
serializeable_dict: dict = convert_to_json_serializable(data=self.to_dict())
# after getting serializable_dict, restore original batch_data
self.runtime_parameters["batch_data"]: Union[
BatchRequest, RuntimeBatchRequest, dict
] = batch_data
else:
serializeable_dict: dict = convert_to_json_serializable(data=self.to_dict())
return serializeable_dict
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for key, value in self.to_raw_dict().items():
value_copy = safe_deep_copy(data=value, memo=memo)
setattr(result, key, value_copy)
return result
def __eq__(self, other):
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return self.id == other.id
def __repr__(self) -> str:
"""
# TODO: <Alex>2/4/2022</Alex>
This implementation of a custom "__repr__()" occurs frequently and should ideally serve as the reference
implementation in the "SerializableDictDot" class. However, the circular import dependencies, due to the
location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules make this
refactoring infeasible at the present time.
"""
json_dict: dict = self.to_json_dict()
deep_filter_properties_iterable(
properties=json_dict,
inplace=True,
)
return json.dumps(json_dict, indent=2)
def __str__(self) -> str:
"""
# TODO: <Alex>2/4/2022</Alex>
This implementation of a custom "__str__()" occurs frequently and should ideally serve as the reference
implementation in the "SerializableDictDot" class. However, the circular import dependencies, due to the
location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules make this
refactoring infeasible at the present time.
"""
return self.__repr__()
@staticmethod
def _validate_init_parameters(
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
data_connector_query: Optional[dict] = None,
limit: Optional[int] = None,
):
# TODO test and check all logic in this validator!
if not (datasource_name and isinstance(datasource_name, str)):
raise TypeError(
f"""The type of an datasource name must be a string (Python "str"). The type given is
"{str(type(datasource_name))}", which is illegal.
"""
)
if not (data_connector_name and isinstance(data_connector_name, str)):
raise TypeError(
f"""The type of data_connector name must be a string (Python "str"). The type given is
"{str(type(data_connector_name))}", which is illegal.
"""
)
if not (data_asset_name and isinstance(data_asset_name, str)):
raise TypeError(
f"""The type of data_asset name must be a string (Python "str"). The type given is
"{str(type(data_asset_name))}", which is illegal.
"""
)
# TODO Abe 20201015: Switch this to DataConnectorQuery.
if data_connector_query and not isinstance(data_connector_query, dict):
raise TypeError(
f"""The type of data_connector_query must be a dict object. The type given is
"{str(type(data_connector_query))}", which is illegal.
"""
)
if limit and not isinstance(limit, int):
raise TypeError(
f"""The type of limit must be an integer (Python "int"). The type given is "{str(type(limit))}", which
is illegal.
"""
)
class BatchRequest(BatchRequestBase):
"""
This class contains all attributes of a batch_request. See the comments in BatchRequestBase for design specifics.
limit: refers to the number of batches requested (not rows per batch)
"""
include_field_names: Set[str] = {
"datasource_name",
"data_connector_name",
"data_asset_name",
"data_connector_query",
"limit",
"batch_spec_passthrough",
}
def __init__(
self,
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
data_connector_query: Optional[dict] = None,
limit: Optional[int] = None,
batch_spec_passthrough: Optional[dict] = None,
):
self._validate_init_parameters(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
data_connector_query=data_connector_query,
limit=limit,
)
super().__init__(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
data_connector_query=data_connector_query,
limit=limit,
batch_spec_passthrough=batch_spec_passthrough,
)
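# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical BatchRequest: every name below is a placeholder, and
# "limit" counts batches requested, not rows per batch (see the class docstring).
def _example_batch_request():
    return BatchRequest(
        datasource_name="my_datasource",  # hypothetical name
        data_connector_name="my_inferred_data_connector",  # hypothetical name
        data_asset_name="my_table",  # hypothetical asset
        data_connector_query={"index": -1},  # e.g. select the most recent batch
        limit=10,  # number of batches, not rows per batch
    )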
class RuntimeBatchRequest(BatchRequestBase):
include_field_names: Set[str] = {
"datasource_name",
"data_connector_name",
"data_asset_name",
"runtime_parameters",
"batch_identifiers",
"batch_spec_passthrough",
}
def __init__(
self,
datasource_name: str,
data_connector_name: str,
data_asset_name: str,
runtime_parameters: dict,
batch_identifiers: dict,
batch_spec_passthrough: Optional[dict] = None,
):
self._validate_init_parameters(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
)
self._validate_runtime_batch_request_specific_init_parameters(
runtime_parameters=runtime_parameters,
batch_identifiers=batch_identifiers,
batch_spec_passthrough=batch_spec_passthrough,
)
super().__init__(
datasource_name=datasource_name,
data_connector_name=data_connector_name,
data_asset_name=data_asset_name,
runtime_parameters=runtime_parameters,
batch_identifiers=batch_identifiers,
batch_spec_passthrough=batch_spec_passthrough,
)
@staticmethod
def _validate_runtime_batch_request_specific_init_parameters(
runtime_parameters: dict,
batch_identifiers: dict,
batch_spec_passthrough: Optional[dict] = None,
):
if not (runtime_parameters and (isinstance(runtime_parameters, dict))):
raise TypeError(
f"""The runtime_parameters must be a non-empty dict object.
The type given is "{str(type(runtime_parameters))}", which is an illegal type or an empty dictionary."""
)
if not (batch_identifiers and isinstance(batch_identifiers, dict)):
raise TypeError(
f"""The type for batch_identifiers must be a dict object, with keys being identifiers defined in the
data connector configuration. The type given is "{str(type(batch_identifiers))}", which is illegal."""
)
if batch_spec_passthrough and not (isinstance(batch_spec_passthrough, dict)):
raise TypeError(
f"""The type for batch_spec_passthrough must be a dict object. The type given is \
"{str(type(batch_spec_passthrough))}", which is illegal.
"""
)
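# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical RuntimeBatchRequest: "runtime_parameters" typically
# carries one of "batch_data", "query", or "path", and "batch_identifiers" must
# match the identifiers declared in the (hypothetical) runtime data connector.
def _example_runtime_batch_request():
    return RuntimeBatchRequest(
        datasource_name="my_datasource",  # hypothetical name
        data_connector_name="my_runtime_data_connector",  # hypothetical name
        data_asset_name="my_query_asset",  # hypothetical asset
        runtime_parameters={"query": "SELECT * FROM my_table"},  # hypothetical query
        batch_identifiers={"run_id": "example_run"},  # hypothetical identifier
    )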
# TODO: <Alex>The following class is to support the backward compatibility with the legacy design.</Alex>
class BatchMarkers(BatchKwargs):
"""A BatchMarkers is a special type of BatchKwargs (so that it has a batch_fingerprint) but it generally does
NOT require specific keys and instead captures information about the OUTPUT of a datasource's fetch
process, such as the timestamp at which a query was executed."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if "ge_load_time" not in self:
raise InvalidBatchIdError("BatchMarkers requires a ge_load_time")
@property
def ge_load_time(self):
return self.get("ge_load_time")
# TODO: <Alex>This module needs to be cleaned up.
# We have Batch used for the legacy design, and we also need Batch for the new design.
# However, right now, the Batch from the legacy design is imported into execution engines of the new design.
# As a result, we have multiple, inconsistent versions of BatchMarkers, extending legacy/new classes.</Alex>
# TODO: <Alex>See also "great_expectations/datasource/types/batch_spec.py".</Alex>
class Batch(SerializableDictDot):
def __init__(
self,
data,
batch_request: Union[BatchRequest, RuntimeBatchRequest] = None,
batch_definition: BatchDefinition = None,
batch_spec: BatchSpec = None,
batch_markers: BatchMarkers = None,
# The remaining parameters are for backward compatibility.
data_context=None,
datasource_name=None,
batch_parameters=None,
batch_kwargs=None,
):
self._data = data
if batch_request is None:
batch_request = {}
self._batch_request = batch_request
if batch_definition is None:
batch_definition = IDDict()
self._batch_definition = batch_definition
if batch_spec is None:
batch_spec = BatchSpec()
self._batch_spec = batch_spec
if batch_markers is None:
batch_markers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(
datetime.timezone.utc
).strftime("%Y%m%dT%H%M%S.%fZ")
}
)
self._batch_markers = batch_markers
# The remaining parameters are for backward compatibility.
self._data_context = data_context
self._datasource_name = datasource_name
self._batch_parameters = batch_parameters
self._batch_kwargs = batch_kwargs or BatchKwargs()
@property
def data(self):
return self._data
@property
def batch_request(self):
return self._batch_request
@batch_request.setter
def batch_request(self, batch_request):
self._batch_request = batch_request
@property
def batch_definition(self):
return self._batch_definition
@batch_definition.setter
def batch_definition(self, batch_definition):
self._batch_definition = batch_definition
@property
def batch_spec(self):
return self._batch_spec
@property
def batch_markers(self):
return self._batch_markers
# The remaining properties are for backward compatibility.
@property
def data_context(self):
return self._data_context
@property
def datasource_name(self):
return self._datasource_name
@property
def batch_parameters(self):
return self._batch_parameters
@property
def batch_kwargs(self):
return self._batch_kwargs
def to_dict(self) -> dict:
dict_obj: dict = {
"data": str(self.data),
"batch_request": self.batch_request.to_dict(),
"batch_definition": self.batch_definition.to_json_dict()
if isinstance(self.batch_definition, BatchDefinition)
else {},
"batch_spec": self.batch_spec,
"batch_markers": self.batch_markers,
}
return dict_obj
def to_json_dict(self) -> dict:
json_dict: dict = self.to_dict()
deep_filter_properties_iterable(
properties=json_dict["batch_request"],
inplace=True,
)
return json_dict
@property
def id(self):
batch_definition = self._batch_definition
return (
batch_definition.id
if isinstance(batch_definition, BatchDefinition)
else batch_definition.to_id()
)
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
def head(self, n_rows=5, fetch_all=False):
# FIXME - we should use a Validator after resolving circularity
# Validator(self._data.execution_engine, batches=(self,)).get_metric(MetricConfiguration("table.head", {"batch_id": self.id}, {"n_rows": n_rows, "fetch_all": fetch_all}))
metric = MetricConfiguration(
"table.head",
{"batch_id": self.id},
{"n_rows": n_rows, "fetch_all": fetch_all},
)
return self._data.execution_engine.resolve_metrics((metric,))[metric.id]
def materialize_batch_request(
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None
) -> Optional[Union[BatchRequest, RuntimeBatchRequest]]:
effective_batch_request: dict = get_batch_request_as_dict(
batch_request=batch_request
)
if not effective_batch_request:
return None
batch_request_class: type
if batch_request_contains_runtime_parameters(batch_request=effective_batch_request):
batch_request_class = RuntimeBatchRequest
else:
batch_request_class = BatchRequest
return batch_request_class(**effective_batch_request)
def batch_request_contains_batch_data(
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None
) -> bool:
return (
batch_request_contains_runtime_parameters(batch_request=batch_request)
and batch_request["runtime_parameters"].get("batch_data") is not None
)
def batch_request_contains_runtime_parameters(
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None
) -> bool:
return (
batch_request is not None
and batch_request.get("runtime_parameters") is not None
)
def get_batch_request_as_dict(
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None
) -> Optional[dict]:
if batch_request is None:
return None
if isinstance(batch_request, (BatchRequest, RuntimeBatchRequest)):
batch_request = batch_request.to_dict()
return batch_request
def get_batch_request_from_acceptable_arguments(
datasource_name: Optional[str] = None,
data_connector_name: Optional[str] = None,
data_asset_name: Optional[str] = None,
*,
batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest]] = None,
batch_data: Optional[Any] = None,
data_connector_query: Optional[dict] = None,
batch_identifiers: Optional[dict] = None,
limit: Optional[int] = None,
index: Optional[Union[int, list, tuple, slice, str]] = None,
custom_filter_function: Optional[Callable] = None,
batch_spec_passthrough: Optional[dict] = None,
sampling_method: Optional[str] = None,
sampling_kwargs: Optional[dict] = None,
splitter_method: Optional[str] = None,
splitter_kwargs: Optional[dict] = None,
runtime_parameters: Optional[dict] = None,
query: Optional[str] = None,
path: Optional[str] = None,
batch_filter_parameters: Optional[dict] = None,
**kwargs,
) -> BatchRequest:
"""Obtain formal BatchRequest typed object from allowed attributes (supplied as arguments).
This method applies only to the new (V3) Datasource schema.
Args:
datasource_name
data_connector_name
data_asset_name
batch_request
batch_data
query
path
runtime_parameters
data_connector_query
batch_identifiers
batch_filter_parameters
limit
index
custom_filter_function
sampling_method
sampling_kwargs
splitter_method
splitter_kwargs
batch_spec_passthrough
**kwargs
Returns:
(BatchRequest) The formal BatchRequest object
"""
if batch_request:
if not isinstance(batch_request, (BatchRequest, RuntimeBatchRequest)):
raise TypeError(
f"""batch_request must be an instance of BatchRequest or RuntimeBatchRequest object, not \
{type(batch_request)}"""
)
datasource_name = batch_request.datasource_name
# ensure that the first parameter is datasource_name, which should be a str. This check prevents users
# from passing in batch_request as an unnamed parameter.
if not isinstance(datasource_name, str):
raise ge_exceptions.GreatExpectationsTypeError(
f"the first parameter, datasource_name, must be a str, not {type(datasource_name)}"
)
if len([arg for arg in [batch_data, query, path] if arg is not None]) > 1:
raise ValueError("Must provide only one of batch_data, query, or path.")
if any(
[
batch_data is not None
and runtime_parameters
and "batch_data" in runtime_parameters,
query and runtime_parameters and "query" in runtime_parameters,
path and runtime_parameters and "path" in runtime_parameters,
]
):
raise ValueError(
"If batch_data, query, or path arguments are provided, the same keys cannot appear in the "
"runtime_parameters argument."
)
if batch_request:
# TODO: Raise a warning if any parameters besides batch_request are specified
return batch_request
batch_request_class: type
batch_request_as_dict: dict
if any([batch_data is not None, query, path, runtime_parameters]):
batch_request_class = RuntimeBatchRequest
runtime_parameters = runtime_parameters or {}
if batch_data is not None:
runtime_parameters["batch_data"] = batch_data
elif query is not None:
runtime_parameters["query"] = query
elif path is not None:
runtime_parameters["path"] = path
if batch_identifiers is None:
batch_identifiers = kwargs
else:
# Raise a warning if kwargs exist
pass
batch_request_as_dict = {
"datasource_name": datasource_name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"runtime_parameters": runtime_parameters,
"batch_identifiers": batch_identifiers,
"batch_spec_passthrough": batch_spec_passthrough,
}
else:
batch_request_class = BatchRequest
if data_connector_query is None:
if batch_filter_parameters is not None and batch_identifiers is not None:
raise ValueError(
'Must provide either "batch_filter_parameters" or "batch_identifiers", not both.'
)
if batch_filter_parameters is None and batch_identifiers is not None:
logger.warning(
'Attempting to build data_connector_query but "batch_identifiers" was provided '
'instead of "batch_filter_parameters". The "batch_identifiers" key on '
'data_connector_query has been renamed to "batch_filter_parameters". Please update '
'your code. Falling back on provided "batch_identifiers".'
)
batch_filter_parameters = batch_identifiers
elif batch_filter_parameters is None and batch_identifiers is None:
batch_filter_parameters = kwargs
else:
# Raise a warning if kwargs exist
pass
data_connector_query_params: dict = {
"batch_filter_parameters": batch_filter_parameters,
"limit": limit,
"index": index,
"custom_filter_function": custom_filter_function,
}
data_connector_query = IDDict(data_connector_query_params)
else:
# Raise a warning if batch_filter_parameters or kwargs exist
data_connector_query = IDDict(data_connector_query)
if batch_spec_passthrough is None:
batch_spec_passthrough = {}
if sampling_method is not None:
sampling_params: dict = {
"sampling_method": sampling_method,
}
if sampling_kwargs is not None:
sampling_params["sampling_kwargs"] = sampling_kwargs
batch_spec_passthrough.update(sampling_params)
if splitter_method is not None:
splitter_params: dict = {
"splitter_method": splitter_method,
}
if splitter_kwargs is not None:
splitter_params["splitter_kwargs"] = splitter_kwargs
batch_spec_passthrough.update(splitter_params)
batch_request_as_dict: dict = {
"datasource_name": datasource_name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"data_connector_query": data_connector_query,
"batch_spec_passthrough": batch_spec_passthrough,
}
deep_filter_properties_iterable(
properties=batch_request_as_dict,
inplace=True,
)
batch_request = batch_request_class(**batch_request_as_dict)
return batch_request
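# --- Illustrative usage sketch (not part of the original module) ---
# Passing "query" (or "batch_data"/"path") routes the call through the
# RuntimeBatchRequest branch above; all names below are hypothetical.
def _example_get_batch_request_from_acceptable_arguments():
    return get_batch_request_from_acceptable_arguments(
        datasource_name="my_datasource",  # hypothetical name
        data_connector_name="my_runtime_data_connector",  # hypothetical name
        data_asset_name="my_query_asset",  # hypothetical asset
        query="SELECT * FROM my_table",  # lands in runtime_parameters["query"]
        batch_identifiers={"run_id": "example_run"},  # hypothetical identifier
    )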
def standardize_batch_request_display_ordering(
batch_request: Dict[str, Union[str, int, Dict[str, Any]]]
) -> Dict[str, Union[str, Dict[str, Any]]]:
datasource_name: str = batch_request["datasource_name"]
data_connector_name: str = batch_request["data_connector_name"]
data_asset_name: str = batch_request["data_asset_name"]
runtime_parameters: str = batch_request.get("runtime_parameters")
batch_identifiers: str = batch_request.get("batch_identifiers")
batch_request.pop("datasource_name")
batch_request.pop("data_connector_name")
batch_request.pop("data_asset_name")
# NOTE: AJB 20211217 The below conditionals should be refactored
if runtime_parameters is not None:
batch_request.pop("runtime_parameters")
if batch_identifiers is not None:
batch_request.pop("batch_identifiers")
if runtime_parameters is not None and batch_identifiers is not None:
batch_request = {
"datasource_name": datasource_name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"runtime_parameters": runtime_parameters,
"batch_identifiers": batch_identifiers,
**batch_request,
}
elif runtime_parameters is not None and batch_identifiers is None:
batch_request = {
"datasource_name": datasource_name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"runtime_parameters": runtime_parameters,
**batch_request,
}
elif runtime_parameters is None and batch_identifiers is not None:
batch_request = {
"datasource_name": datasource_name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"batch_identifiers": batch_identifiers,
**batch_request,
}
else:
batch_request = {
"datasource_name": datasource_name,
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
**batch_request,
}
return batch_request
|
|
# This module does all the file handling required for creating the index.
import sys
import bz2
import heapq
import os
import operator
from collections import defaultdict
# import threading
import pdb
from config import *
#
# class writeParallel(threading.Thread):
# """
# Multi Threading , write multiple field files simultaneously
# """
# def __init__(self, field, data, offset, countFinalFile,pathOfFolder):
# threading.Thread.__init__(self)
# self.data=data
# self.field=field
# self.count=countFinalFile
# self.offset=offset
# self.pathOfFolder=pathOfFolder
#
# def run(self):
# filename= self.pathOfFolder+'/'+self.field+str(self.count)
# with bz2.BZ2File(filename+'.bz2', 'wb', compresslevel=7) as f:
# f.write('\n'.join(self.data))
# with open(filename+'.txt', 'wb') as f:
# f.write('\n'.join(self.data))
# filename= self.pathOfFolder+'/o'+self.field+str(self.count)+'.txt'
# with open(filename, 'wb') as f:
# f.write('\n'.join(self.offset))
def writeSingle(field, data, offset, countFinalFile, pathOfFolder):
filename = pathOfFolder + '/' + field + str(countFinalFile)
if COMPRESS_INDEX:
write_type = bz2.BZ2File(filename+'.bz2', 'wb', compresslevel=7)
else:
write_type = open(filename+'.txt', 'wb')
with write_type as f:
f.write('\n'.join(data) + "\n")
filename = pathOfFolder + '/o' + field + str(countFinalFile) + '.txt'
with open(filename, 'wb') as f:
f.write('\n'.join(offset) + "\n")
def get_appropriate_score_type(score):
if SCORE_TYPE_TYPE == int:
return SCORE_TYPE_TYPE(float(score))
if SCORE_TYPE_TYPE == float:
return SCORE_TYPE_TYPE(score)
def writeFinalIndex(data, countFinalFile, pathOfFolder,offsetSize):
"""
Write index after merging
"""
title, text, info, category, externalLink = defaultdict(dict), defaultdict(dict), defaultdict(dict), defaultdict(dict), defaultdict(dict)
print "Merging file:", countFinalFile
uniqueWords, offset = [], []
min_score_value = str(SCORE_TYPE_TYPE(0))
for key in sorted(data):
listOfDoc = data[key]
temp=[]
flag=0
for i in range(0, len(listOfDoc), 6):
word = listOfDoc
docid = word[i]
try:
if word[i+1] != min_score_value:
title[key][docid]=get_appropriate_score_type(word[i+1])
flag=1
if word[i+2] != min_score_value:
text[key][docid]=get_appropriate_score_type(word[i+2])
flag=1
if word[i+3] != min_score_value:
info[key][docid]=get_appropriate_score_type(word[i+3])
flag=1
if word[i+4] != min_score_value:
category[key][docid]=get_appropriate_score_type(word[i+4])
flag=1
if word[i+5] != min_score_value:
externalLink[key][docid]=get_appropriate_score_type(word[i+5])
flag=1
except Exception as e:
print e
pdb.set_trace()
if flag==1:
string = key+' '+str(countFinalFile)+' '+str(len(listOfDoc)/6)
uniqueWords.append(string)
offset.append(str(offsetSize))
offsetSize=offsetSize+len(string)+1
titleData, textData, infoData, categoryData, externalLinkData = [], [], [], [], []
titleOffset, textOffset, infoOffset, categoryOffset, externalLinkOffset = [], [], [], [], []
previousTitle, previousText, previousInfo, previousCategory, previousExternalLink = 0, 0, 0, 0, 0
for key in sorted(data.keys()): #create field wise Index
if key in title:
string=key+' '
sortedField = title[key]
sortedField = sorted(sortedField, key=sortedField.get, reverse=True)
for doc in sortedField:
string += doc + ' ' + str(title[key][doc]) + ' '
titleOffset.append(str(previousTitle)+' '+str(len(sortedField)))
previousTitle += len(string)+1
# pdb.set_trace()
titleData.append(string)
if key in text:
string=key+' '
sortedField = text[key]
sortedField = sorted(sortedField, key=sortedField.get, reverse=True)
for doc in sortedField:
string += doc + ' ' + str(text[key][doc]) + ' '
textOffset.append(str(previousText)+' '+str(len(sortedField)))
previousText += len(string)+1
# pdb.set_trace()
textData.append(string)
if key in info:
string=key+' '
sortedField=info[key]
sortedField = sorted(sortedField, key=sortedField.get, reverse=True)
for doc in sortedField:
string += doc + ' ' + str(info[key][doc]) + ' '
infoOffset.append(str(previousInfo) + ' ' + str(len(sortedField)))
previousInfo += len(string)+1
infoData.append(string)
if key in category:
string=key+' '
sortedField=category[key]
sortedField = sorted(sortedField, key=sortedField.get, reverse=True)
for doc in sortedField:
string += (doc + ' ' + str(category[key][doc]) + ' ')
categoryOffset.append(str(previousCategory)+' '+str(len(sortedField)))
previousCategory += len(string)+1
categoryData.append(string)
if key in externalLink:
string=key+' '
sortedField=externalLink[key]
sortedField = sorted(sortedField, key=sortedField.get, reverse=True)
for doc in sortedField:
string+=doc+' '+str(externalLink[key][doc])+' '
externalLinkOffset.append(str(previousExternalLink)+' '+str(len(sortedField)))
previousExternalLink+=len(string)+1
externalLinkData.append(string)
writeSingle('t', titleData, titleOffset, countFinalFile,pathOfFolder)
writeSingle('b', textData, textOffset, countFinalFile,pathOfFolder)
writeSingle('i', infoData, infoOffset, countFinalFile,pathOfFolder)
writeSingle('c', categoryData, categoryOffset, countFinalFile,pathOfFolder)
writeSingle('e', externalLinkData, externalLinkOffset, countFinalFile,pathOfFolder)
try:
# Move on to a new output file once the 'b' (text) field file exceeds ~30 MB (30485760 bytes)
if os.path.getsize(pathOfFolder+'/b'+str(countFinalFile)+('.txt.bz2' if COMPRESS_INDEX else '.txt')) > 30485760:
countFinalFile+=1
except:
pass
with open(pathOfFolder+"/vocabularyList.txt","ab") as f:
f.write('\n'.join(uniqueWords) + "\n")
with open(pathOfFolder+"/offset.txt","ab") as f:
f.write('\n'.join(offset) + "\n")
return countFinalFile, offsetSize
def writeIntoFile(pathOfFolder, index, dict_Id, countFile, titleOffset):
"""
Write partial index to file
"""
data=[] #write the primary index
previousTitleOffset = titleOffset
# Iterating over the index keys does NOT by itself sort the index by word, hence the explicit sorted() below
for key in sorted(index.keys()):
string = key.encode('utf-8') + ' ' + ' '.join(index[key])
data.append(string)
# Compress if required and then write into file
filename = pathOfFolder + '/index' + str(countFile) + ('.txt.bz2' if COMPRESS_INDEX else '.txt')
write_type = bz2.BZ2File(filename, 'wb', compresslevel=9) if COMPRESS_INDEX else open(filename, 'wb')
with write_type as f:
f.write('\n'.join(data))
data=[]
dataOffset=[]
for key in sorted(dict_Id.keys()):
data.append(str(key) + ' ' + dict_Id[key])
dataOffset.append(str(previousTitleOffset))
previousTitleOffset += len(str(key) + ' ' + dict_Id[key])
with open(pathOfFolder + '/title.txt','ab') as f:
f.write('\n'.join(data) + '\n')
'''filename=pathOfFolder+'/titleoffset.txt'
with open(filename,'ab') as f:
f.write('\n'.join(dataOffset))'''
return previousTitleOffset
def mergeFiles(pathOfFolder, countFile):
"""
Merge multiple partial indexes using HEAP merge.
"""
listOfWords, indexFile, topOfFile = {}, {}, {}
flag = [0]*countFile
data = defaultdict(list)
heap = []
countFinalFile, offsetSize = 0, 0
for i in xrange(countFile):
fileName = pathOfFolder + '/index' + str(i) + ('.txt.bz2' if COMPRESS_INDEX else '.txt')
indexFile[i] = bz2.BZ2File(fileName, 'rb') if COMPRESS_INDEX else open(fileName, 'rb')
flag[i] = 1
topOfFile[i] = indexFile[i].readline().strip()
listOfWords[i] = topOfFile[i].split()
if listOfWords[i][0] not in heap:
heapq.heappush(heap, listOfWords[i][0])
count=0
while any(flag)==1:
temp = heapq.heappop(heap)
count += 1
#print "."
for i in xrange(countFile):
if flag[i]:
if listOfWords[i][0] == temp:
data[temp].extend(listOfWords[i][1:])
topOfFile[i] = indexFile[i].readline().strip()
if topOfFile[i] == '':
flag[i] = 0
indexFile[i].close()
print "\tRemoved:", str(i)
os.remove(pathOfFolder + '/index' + str(i) + ('.txt.bz2' if COMPRESS_INDEX else '.txt'))
else:
listOfWords[i] = topOfFile[i].split()
if listOfWords[i][0] not in heap:
heapq.heappush(heap, listOfWords[i][0])
if not count%5000:
print "Done Words:", count
if count > 0 and count%20000==0:
oldCountFile = countFinalFile
countFinalFile, offsetSize = writeFinalIndex(data, countFinalFile, pathOfFolder, offsetSize)
if oldCountFile != countFinalFile:
data = defaultdict(list)
countFinalFile, offsetSize = writeFinalIndex(data, countFinalFile, pathOfFolder, offsetSize)
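# --- Illustrative sketch (not part of the original script) ---
# The same k-way merge that mergeFiles() drives with an explicit heap can be
# expressed with heapq.merge, assuming every partial index file is already
# sorted by word (as writeIntoFile() guarantees via sorted(index.keys())).
def _example_kway_merge(sorted_index_files):
    import heapq
    import itertools
    def parse(index_file):
        for line in index_file:
            line = line.strip()
            if line:
                word, _, postings = line.partition(' ')
                yield word, postings
    merged = heapq.merge(*[parse(f) for f in sorted_index_files])
    for word, group in itertools.groupby(merged, key=lambda pair: pair[0]):
        # Concatenate the posting lists contributed by each partial index.
        yield word, ' '.join(postings for _, postings in group)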
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import types as python_types
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.Masking')
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
to be fed to an LSTM layer.
You want to mask timesteps #3 and #5 because you lack data for
these timesteps. You can:
- set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
"""
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
def compute_mask(self, inputs, mask=None):
return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = K.any(
math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
return inputs * math_ops.cast(boolean_mask, inputs.dtype)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Dropout')
class Dropout(Layer):
"""Applies Dropout to the input.
Dropout consists of randomly setting
a fraction `rate` of input units to 0 at each update during training time,
which helps prevent overfitting.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(Dropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
# Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
# which will override `self.noise_shape`, and allows for custom noise
# shapes with dynamically sized inputs.
if self.noise_shape is None:
return self.noise_shape
return nn_ops._get_noise_shape(inputs, self.noise_shape) # pylint: disable=protected-access
def call(self, inputs, training=None):
original_training_value = training
if training is None:
training = K.learning_phase()
def dropped_inputs():
return nn.dropout(inputs, 1 - self.rate,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed)
output = tf_utils.smart_cond(training,
dropped_inputs,
lambda: array_ops.identity(inputs))
# EagerTensor object has no attribute _uses_learning_phase
if not context.executing_eagerly() and original_training_value is None:
output._uses_learning_phase = True # pylint: disable=protected-access
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed
}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
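# --- Illustrative sketch (not part of the original module) ---
# A hypothetical Dropout configured so the dropout mask is shared across all
# timesteps of a (batch, timesteps, features) input, per the docstring above.
# The feature size of 64 is an arbitrary placeholder; the leading None is
# assumed to stand for the batch dimension, filled in from the input shape.
def _example_shared_timestep_dropout():
    return Dropout(rate=0.3, noise_shape=(None, 1, 64))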
@tf_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
"""Spatial 1D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
Input shape:
3D tensor with shape:
`(samples, timesteps, channels)`
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, **kwargs):
super(SpatialDropout1D, self).__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
@tf_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
"""Spatial 2D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension
(the depth) is at index 1,
in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, input_shape[3])
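# --- Illustrative sketch (not part of the original module) ---
# A hypothetical SpatialDropout2D for 'channels_last' inputs of shape
# (batch, rows, cols, channels): entire feature maps are dropped because the
# noise shape computed above is (batch, 1, 1, channels).
def _example_spatial_dropout_2d():
    return SpatialDropout2D(rate=0.25, data_format='channels_last')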
@tf_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension (the depth)
is at index 1, in 'channels_last' mode it is at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
`(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, 1, input_shape[4])
@tf_export('keras.layers.Activation')
class Activation(Layer):
"""Applies an activation function to an output.
Arguments:
activation: name of activation function to use
or alternatively, a Theano or TensorFlow operation.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Reshape')
class Reshape(Layer):
"""Reshapes an output to a certain shape.
Arguments:
target_shape: target shape. Tuple of integers,
does not include the samples dimension (batch size).
Input shape:
Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
`(batch_size,) + target_shape`
Example:
```python
# as first layer in a Sequential model
model = Sequential()
model.add(Reshape((3, 4), input_shape=(12,)))
# now: model.output_shape == (None, 3, 4)
# note: `None` is the batch dimension
# as intermediate layer in a Sequential model
model.add(Reshape((6, 2)))
# now: model.output_shape == (None, 6, 2)
# also supports shape inference using `-1` as dimension
model.add(Reshape((-1, 2, 2)))
# now: model.output_shape == (None, 3, 2, 2)
```
"""
def __init__(self, target_shape, **kwargs):
super(Reshape, self).__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
"""Find and replace a missing dimension in an output shape.
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
Arguments:
input_shape: shape of array being reshaped
output_shape: desired shape of the array with at most
a single -1 which indicates a dimension that should be
derived from the input shape.
Returns:
The new output shape with a -1 replaced with its computed value.
Raises a ValueError if the total array size of the output_shape is
different from the input_shape, or more than one unknown dimension
is specified.
Raises:
ValueError: in case of invalid values
for `input_shape` or `output_shape`.
"""
output_shape = list(output_shape)
msg = 'total size of new array must be unchanged'
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError('Can only specify one unknown dimension.')
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return output_shape
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if None in input_shape[1:]:
output_shape = [input_shape[0]]
# input shape (partially) unknown? replace -1's with None's
output_shape += tuple(s if s != -1 else None for s in self.target_shape)
else:
output_shape = [input_shape[0]]
output_shape += self._fix_unknown_dimension(input_shape[1:],
self.target_shape)
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return array_ops.reshape(inputs,
(array_ops.shape(inputs)[0],) + self.target_shape)
def get_config(self):
config = {'target_shape': self.target_shape}
base_config = super(Reshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Permute')
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful for e.g. connecting RNNs and convnets together.
Example:
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# now: model.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
Arguments:
dims: Tuple of integers. Permutation pattern, does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimension
of the input.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
"""
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return array_ops.transpose(inputs, perm=(0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Flatten')
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Arguments:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Example:
```python
model = Sequential()
model.add(Convolution2D(64, 3, 3,
border_mode='same',
input_shape=(3, 32, 32)))
# now: model.output_shape == (None, 64, 32, 32)
model.add(Flatten())
# now: model.output_shape == (None, 65536)
```
"""
def __init__(self, data_format=None, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=2)
def call(self, inputs):
if self.data_format == 'channels_first':
permutation = [0]
permutation.extend([i for i in
range(2, K.ndim(inputs))])
permutation.append(1)
inputs = array_ops.transpose(inputs, perm=permutation)
outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
if not context.executing_eagerly():
outputs.set_shape(self.compute_output_shape(inputs.get_shape()))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = [input_shape[0]]
if all(input_shape[1:]):
output_shape += [np.prod(input_shape[1:])]
else:
output_shape += [None]
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(Flatten, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
"""Repeats the input n times.
Example:
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# now: model.output_shape == (None, 32)
# note: `None` is the batch dimension
model.add(RepeatVector(3))
# now: model.output_shape == (None, 3, 32)
```
Arguments:
n: integer, repetition factor.
Input shape:
2D tensor of shape `(num_samples, features)`.
Output shape:
3D tensor of shape `(num_samples, n, features)`.
"""
def __init__(self, n, **kwargs):
super(RepeatVector, self).__init__(**kwargs)
self.n = n
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])
def call(self, inputs):
return K.repeat(inputs, self.n)
def get_config(self):
config = {'n': self.n}
base_config = super(RepeatVector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Lambda')
class Lambda(Layer):
Wraps an arbitrary expression as a `Layer` object.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
Arguments:
function: The function to be evaluated.
Takes input tensor as first argument.
output_shape: Expected output shape from function.
This argument can be inferred if not explicitly provided.
Can be a tuple or function.
If a tuple, it only specifies the first dimension onward;
sample dimension is assumed either the same as the input:
`output_shape = (input_shape[0], ) + output_shape`
or, the input is `None` and
the sample dimension is also `None`:
`output_shape = (None, ) + output_shape`
If a function, it specifies the entire shape as a function of the
input shape: `output_shape = f(input_shape)`
arguments: optional dictionary of keyword arguments to be passed
to the function.
Input shape:
Arbitrary. Use the keyword argument input_shape
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Specified by `output_shape` argument
"""
def __init__(self, function, output_shape=None, mask=None, arguments=None,
**kwargs):
super(Lambda, self).__init__(**kwargs)
self.function = function
self.arguments = arguments if arguments else {}
if mask is not None:
self.supports_masking = True
self.mask = mask
if output_shape is None:
self._output_shape = None
elif isinstance(output_shape, (tuple, list)):
self._output_shape = tuple(output_shape)
else:
if not callable(output_shape):
raise TypeError('In Lambda, `output_shape` '
'must be a list, a tuple, or a function.')
self._output_shape = output_shape
def compute_output_shape(self, input_shape):
input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
if self._output_shape is None:
if context.executing_eagerly():
raise NotImplementedError
x = K.placeholder(shape=input_shape)
x = self.call(x)
if isinstance(x, list):
return [tensor_shape.TensorShape(K.int_shape(x_elem)) for x_elem in x]
else:
return tensor_shape.TensorShape(K.int_shape(x))
elif isinstance(self._output_shape, (tuple, list)):
if isinstance(input_shape, list):
num_samples = input_shape[0][0]
else:
num_samples = input_shape[0] if input_shape else None
return tensor_shape.TensorShape((num_samples,) +
tuple(self._output_shape))
else:
shape = self._output_shape(input_shape)
if not isinstance(shape, (list, tuple)):
raise ValueError(
'`output_shape` function must return a tuple or a list of tuples.')
if isinstance(shape, list):
if isinstance(shape[0], int) or shape[0] is None:
shape = tuple(shape)
return tensor_shape.TensorShape(shape)
def call(self, inputs, mask=None):
arguments = self.arguments
if generic_utils.has_arg(self.function, 'mask'):
arguments['mask'] = mask
return self.function(inputs, **arguments)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
module = self.function.__module__
if isinstance(self.function, python_types.LambdaType):
function = generic_utils.func_dump(self.function)
function_type = 'lambda'
else:
function = self.function.__name__
function_type = 'function'
output_shape_module = None
if isinstance(self._output_shape, python_types.LambdaType):
output_shape = generic_utils.func_dump(self._output_shape)
output_shape_type = 'lambda'
output_shape_module = self._output_shape.__module__
elif callable(self._output_shape):
output_shape = self._output_shape.__name__
output_shape_type = 'function'
output_shape_module = self._output_shape.__module__
else:
output_shape = self._output_shape
output_shape_type = 'raw'
config = {
'function': function,
'module': module,
'function_type': function_type,
'output_shape': output_shape,
'output_shape_type': output_shape_type,
'output_shape_module': output_shape_module,
'arguments': self.arguments
}
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
globs = globals()
module = config.pop('module', None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn('{} is not loaded, but a Lambda layer uses it. '
'It may cause errors.'.format(module)
, UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop('function_type')
if function_type == 'function':
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config['function'],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = generic_utils.func_load(config['function'], globs=globs)
else:
raise TypeError('Unknown function type:', function_type)
output_shape_module = config.pop('output_shape_module', None)
if output_shape_module in sys.modules:
globs.update(sys.modules[output_shape_module].__dict__)
elif output_shape_module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn('{} is not loaded, but a Lambda layer uses it. '
'It may cause errors.'.format(output_shape_module)
, UserWarning)
output_shape_type = config.pop('output_shape_type')
if output_shape_type == 'function':
# Simple lookup in custom objects
output_shape = generic_utils.deserialize_keras_object(
config['output_shape'],
custom_objects=custom_objects,
printable_module_name='output_shape function in Lambda layer')
elif output_shape_type == 'lambda':
# Unsafe deserialization from bytecode
output_shape = generic_utils.func_load(config['output_shape'],
globs=globs)
else:
output_shape = config['output_shape']
# If arguments were numpy array, they have been saved as
# list. We need to recover the ndarray
if 'arguments' in config:
for key in config['arguments']:
if isinstance(config['arguments'][key], dict):
arg_dict = config['arguments'][key]
if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
# Overwrite the argument with its numpy translation
config['arguments'][key] = np.array(arg_dict['value'])
config['function'] = function
config['output_shape'] = output_shape
return cls(**config)
@tf_export('keras.layers.Dense')
class Dense(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Example:
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(Dense, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.units = int(units)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.supports_masking = True
self.input_spec = InputSpec(min_ndim=2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape[-1].value is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
self.input_spec = InputSpec(min_ndim=2,
axes={-1: input_shape[-1].value})
self.kernel = self.add_weight(
'kernel',
shape=[input_shape[-1].value, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
shape = inputs.get_shape().as_list()
if len(shape) > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
[0]])
# Reshape the output back to the original ndim of the input.
if not context.executing_eagerly():
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
outputs = gen_math_ops.mat_mul(inputs, self.kernel)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if input_shape[-1].value is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Dense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
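# Hedged usage sketch (not part of the library): shows how `Dense` maps shapes,
# including the rank > 2 case handled by the tensordot branch in `call` above.
# Assumes the public `tf.keras` API is available; the sizes are arbitrary.
def _example_dense_shapes():
  import tensorflow as tf
  # 2D input: (batch_size, input_dim) -> (batch_size, units).
  out_2d = tf.keras.layers.Dense(units=8, activation='relu')(tf.zeros([4, 16]))
  # 3D input: the kernel is applied to the last axis only, so
  # (batch_size, time, input_dim) -> (batch_size, time, units).
  out_3d = tf.keras.layers.Dense(units=8)(tf.zeros([4, 10, 16]))
  return out_2d, out_3d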
@tf_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Arguments:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularization, self).__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'l1': self.l1, 'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
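# Hedged usage sketch (illustrative only): ActivityRegularization passes its
# input through unchanged and adds an L1/L2 penalty on the activations to the
# model's losses during training. The layer sizes and factors are arbitrary.
def _example_activity_regularization():
  import tensorflow as tf
  model = tf.keras.Sequential([
      tf.keras.layers.Dense(8, input_shape=(16,)),
      tf.keras.layers.ActivityRegularization(l1=0.01),
  ])
  model.compile(optimizer='sgd', loss='mse')
  return model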
|
|
from typing import List, Union
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model
from .cache import element_cache
GROUP_DEFAULT_PK = 1 # This is the hard coded pk for the default group.
GROUP_ADMIN_PK = 2 # This is the hard coded pk for the admin group.
# Hard coded collection string for users and groups
group_collection_string = "users/group"
user_collection_string = "users/user"
class UserDoesNotExist(Exception):
"""
    This is raised if permission checks are done on non-existing users.
"""
pass
def get_group_model() -> Model:
"""
Return the Group model that is active in this project.
"""
try:
return apps.get_model(settings.AUTH_GROUP_MODEL, require_ready=False)
except ValueError:
raise ImproperlyConfigured(
"AUTH_GROUP_MODEL must be of the form 'app_label.model_name'"
)
except LookupError:
raise ImproperlyConfigured(
f"AUTH_GROUP_MODEL refers to model '{settings.AUTH_GROUP_MODEL}' that has not been installed"
)
async def async_is_superadmin(user_id: int) -> bool:
"""
    Checks if the user is a superadmin (i.e. in the admin group).
    This is done by querying a non-existing permission, because has_perm
    always returns true if the user is in the admin group.
"""
return await async_has_perm(user_id, "superadmin")
def has_perm(user_id: int, perm: str) -> bool:
"""
Checks that user has a specific permission.
user_id 0 means anonymous user.
"""
    # Convert user to the right type
    # TODO: Remove this and ensure that user_id always has the right type
user_id = user_to_user_id(user_id)
return async_to_sync(async_has_perm)(user_id, perm)
async def async_has_perm(user_id: int, perm: str) -> bool:
"""
Checks that user has a specific permission.
user_id 0 means anonymous user.
"""
if not user_id and not await async_anonymous_is_enabled():
has_perm = False
elif not user_id:
# Use the permissions from the default group.
default_group = await element_cache.get_element_data(
group_collection_string, GROUP_DEFAULT_PK
)
if default_group is None:
raise RuntimeError("Default Group does not exist.")
has_perm = perm in default_group["permissions"]
else:
user_data = await element_cache.get_element_data(
user_collection_string, user_id
)
if user_data is None:
raise UserDoesNotExist()
if GROUP_ADMIN_PK in user_data["groups_id"]:
# User in admin group (pk 2) grants all permissions.
has_perm = True
else:
            # Get all groups of the user and then see if one group has the
            # required permission. If the user has no groups, use the default group.
group_ids = user_data["groups_id"] or [GROUP_DEFAULT_PK]
for group_id in group_ids:
group = await element_cache.get_element_data(
group_collection_string, group_id
)
if group is None:
raise RuntimeError(
f"User {user_id} is in non existing group {group_id}."
)
if perm in group["permissions"]:
has_perm = True
break
else:
has_perm = False
return has_perm
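# Hedged, framework-free sketch of the fallback logic in async_has_perm: users
# without explicit groups fall back to the default group, and membership in the
# admin group grants every permission. The mapping below stands in for the data
# normally read from element_cache; the permission names are placeholders.
def _example_group_fallback(user_group_ids: List[int], perm: str) -> bool:
    group_permissions = {
        GROUP_DEFAULT_PK: {"agenda.can_see"},
        3: {"motions.can_manage"},
    }
    group_ids = user_group_ids or [GROUP_DEFAULT_PK]
    if GROUP_ADMIN_PK in group_ids:
        return True
    return any(perm in group_permissions.get(gid, set()) for gid in group_ids)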
# async code doesn't work well with QuerySets, so we have to give a list of ints for groups
def in_some_groups(user_id: int, groups: List[int], exact: bool = False) -> bool:
"""
    Checks that the user is in at least one of the given groups. Groups must be
    given as a list of ids (see the note above about QuerySets).
If exact is false (default) and the user is in the admin group (pk = 2),
the result is always true, even if no groups are given.
If exact is true, the user must be in one of the groups, ignoring the possible
superadmin-status of the user.
user_id 0 means anonymous user.
"""
    # Convert user to the right type
    # TODO: Remove this and ensure that user_id always has the right type
user_id = user_to_user_id(user_id)
return async_to_sync(async_in_some_groups)(user_id, groups, exact)
async def async_in_some_groups(
user_id: int, groups: List[int], exact: bool = False
) -> bool:
"""
    Checks that the user is in at least one of the given groups. Groups must be
    given as a list of ids. Unless exact is true, a user in the admin group
    (pk = 2) always gets a true result, even if no groups are given.
user_id 0 means anonymous user.
"""
if not user_id and not await async_anonymous_is_enabled():
in_some_groups = False
elif not user_id:
        # Anonymous users count as members of the default group.
in_some_groups = GROUP_DEFAULT_PK in groups
else:
user_data = await element_cache.get_element_data(
user_collection_string, user_id
)
if user_data is None:
raise UserDoesNotExist()
if not exact and GROUP_ADMIN_PK in user_data["groups_id"]:
# User in admin group (pk 2) grants all permissions.
in_some_groups = True
else:
            # Get all groups of the user and then see if one of them is among the
            # given groups. If the user has no groups, use the default group.
group_ids = user_data["groups_id"] or [GROUP_DEFAULT_PK]
for group_id in group_ids:
if group_id in groups:
in_some_groups = True
break
else:
in_some_groups = False
return in_some_groups
def anonymous_is_enabled() -> bool:
"""
Returns True if the anonymous user is enabled in the settings.
"""
from ..core.config import config
return config["general_system_enable_anonymous"]
async def async_anonymous_is_enabled() -> bool:
"""
Like anonymous_is_enabled but async.
"""
from ..core.config import config
element = await element_cache.get_element_data(
config.get_collection_string(),
(await config.async_get_key_to_id())["general_system_enable_anonymous"],
)
return False if element is None else element["value"]
AnyUser = Union[Model, int, AnonymousUser, None]
def user_to_user_id(user: AnyUser) -> int:
"""
    Takes an object that represents a user and returns its user_id.
    user_id 0 means anonymous user.
    The user can be
    * a user object,
    * a user id or
    * an anonymous user.
    Raises a TypeError if the given user object cannot be converted.
"""
User = get_user_model()
if user is None:
user_id = 0
elif isinstance(user, int):
# Nothing to do
user_id = user
elif isinstance(user, AnonymousUser):
user_id = 0
elif isinstance(user, User):
user_id = user.pk
else:
raise TypeError(
f"Unsupported type for user. User {user} has type {type(user)}."
)
return user_id
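# Hedged sketch (assumes a configured Django project, since user_to_user_id
# calls get_user_model): the conversions that need no model instance.
def _example_user_to_user_id() -> None:
    assert user_to_user_id(None) == 0  # no user at all -> anonymous
    assert user_to_user_id(0) == 0  # anonymous user id passes through
    assert user_to_user_id(42) == 42  # a plain integer id is returned as-is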
|
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
import os
import mimetypes
import six
from tweepy.binder import bind_api
from tweepy.error import TweepError
from tweepy.parsers import ModelParser, Parser
from tweepy.utils import list_to_csv
class API(object):
"""Twitter API"""
def __init__(self, auth_handler=None,
host='api.twitter.com', search_host='search.twitter.com',
cache=None, api_root='/1.1', search_root='',
retry_count=0, retry_delay=0, retry_errors=None, timeout=60,
parser=None, compression=False, wait_on_rate_limit=False,
wait_on_rate_limit_notify=False, proxy=''):
""" Api instance Constructor
:param auth_handler:
:param host: url of the server of the rest api, default:'api.twitter.com'
:param search_host: url of the search server, default:'search.twitter.com'
:param cache: Cache to query if a GET method is used, default:None
:param api_root: suffix of the api version, default:'/1.1'
:param search_root: suffix of the search version, default:''
:param retry_count: number of allowed retries, default:0
:param retry_delay: delay in second between retries, default:0
:param retry_errors: default:None
:param timeout: delay before to consider the request as timed out in seconds, default:60
:param parser: ModelParser instance to parse the responses, default:None
:param compression: If the response is compressed, default:False
:param wait_on_rate_limit: If the api wait when it hits the rate limit, default:False
:param wait_on_rate_limit_notify: If the api print a notification when the rate limit is hit, default:False
:param proxy: Url to use as proxy during the HTTP request, default:''
:raise TypeError: If the given parser is not a ModelParser instance.
"""
self.auth = auth_handler
self.host = host
self.search_host = search_host
self.api_root = api_root
self.search_root = search_root
self.cache = cache
self.compression = compression
self.retry_count = retry_count
self.retry_delay = retry_delay
self.retry_errors = retry_errors
self.timeout = timeout
self.wait_on_rate_limit = wait_on_rate_limit
self.wait_on_rate_limit_notify = wait_on_rate_limit_notify
self.parser = parser or ModelParser()
self.proxy = {}
if proxy:
self.proxy['https'] = proxy
# Attempt to explain more clearly the parser argument requirements
# https://github.com/tweepy/tweepy/issues/421
#
parser_type = Parser
if not isinstance(self.parser, parser_type):
raise TypeError(
'"parser" argument has to be an instance of "{required}".'
' It is currently a {actual}.'.format(
required=parser_type.__name__,
actual=type(self.parser)
)
)
@property
def home_timeline(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/home_timeline
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/home_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
def statuses_lookup(self, id_, include_entities=None,
trim_user=None, map_=None):
return self._statuses_lookup(list_to_csv(id_), include_entities,
trim_user, map_)
@property
def _statuses_lookup(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/lookup
:allowed_param:'id', 'include_entities', 'trim_user', 'map'
"""
return bind_api(
api=self,
path='/statuses/lookup.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'include_entities', 'trim_user', 'map'],
require_auth=True
)
@property
def user_timeline(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/user_timeline
            :allowed_param:'id', 'user_id', 'screen_name', 'since_id', 'max_id', 'count', 'include_rts'
"""
return bind_api(
api=self,
path='/statuses/user_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'since_id',
'max_id', 'count', 'include_rts']
)
@property
def mentions_timeline(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/mentions_timeline
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/mentions_timeline.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def related_results(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/related_results/show/%3id.format
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/related_results/show/{id}.json',
payload_type='relation', payload_list=True,
allowed_param=['id'],
require_auth=False
)
@property
def retweets_of_me(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/retweets_of_me
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/statuses/retweets_of_me.json',
payload_type='status', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def get_status(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/show/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/show.json',
payload_type='status',
allowed_param=['id']
)
@property
def update_status(self):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/update
:allowed_param:'status', 'in_reply_to_status_id', 'lat', 'long', 'source', 'place_id', 'display_coordinates'
"""
return bind_api(
api=self,
path='/statuses/update.json',
method='POST',
payload_type='status',
allowed_param=['status', 'in_reply_to_status_id', 'lat', 'long', 'source', 'place_id', 'display_coordinates'],
require_auth=True
)
def update_with_media(self, filename, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/update_with_media
:allowed_param:'status', 'possibly_sensitive', 'in_reply_to_status_id', 'lat', 'long', 'place_id', 'display_coordinates'
"""
f = kwargs.pop('file', None)
headers, post_data = API._pack_image(filename, 3072, form_field='media[]', f=f)
kwargs.update({'headers': headers, 'post_data': post_data})
return bind_api(
api=self,
path='/statuses/update_with_media.json',
method='POST',
payload_type='status',
allowed_param=[
'status', 'possibly_sensitive', 'in_reply_to_status_id', 'lat', 'long',
'place_id', 'display_coordinates'
],
require_auth=True
)(*args, **kwargs)
@property
def destroy_status(self):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/destroy/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/destroy/{id}.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def retweet(self):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/retweet/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/statuses/retweet/{id}.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def retweets(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/retweets/%3Aid
:allowed_param:'id', 'count'
"""
return bind_api(
api=self,
path='/statuses/retweets/{id}.json',
payload_type='status', payload_list=True,
allowed_param=['id', 'count'],
require_auth=True
)
@property
def retweeters(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/retweeters/ids
            :allowed_param:'id', 'cursor', 'stringify_ids'
"""
return bind_api(
api=self,
path='/statuses/retweeters/ids.json',
payload_type='ids',
allowed_param=['id', 'cursor', 'stringify_ids']
)
@property
def get_user(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/show
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/users/show.json',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name']
)
@property
def get_oembed(self):
""" :reference: https://dev.twitter.com/rest/reference/get/statuses/oembed
:allowed_param:'id', 'url', 'maxwidth', 'hide_media', 'omit_script', 'align', 'related', 'lang'
"""
return bind_api(
api=self,
path='/statuses/oembed.json',
payload_type='json',
allowed_param=['id', 'url', 'maxwidth', 'hide_media', 'omit_script', 'align', 'related', 'lang']
)
def lookup_users(self, user_ids=None, screen_names=None, include_entities=None):
""" Perform bulk look up of users from user ID or screenname """
post_data = {}
if include_entities is not None:
include_entities = 'true' if include_entities else 'false'
post_data['include_entities'] = include_entities
if user_ids:
post_data['user_id'] = list_to_csv(user_ids)
if screen_names:
post_data['screen_name'] = list_to_csv(screen_names)
return self._lookup_users(post_data=post_data)
@property
def _lookup_users(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/lookup
            :allowed_param:'user_id', 'screen_name', 'include_entities'
"""
return bind_api(
api=self,
path='/users/lookup.json',
payload_type='user', payload_list=True,
method='POST',
)
def me(self):
""" Get the authenticated user """
return self.get_user(screen_name=self.auth.get_username())
@property
def search_users(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/search
:allowed_param:'q', 'count', 'page'
"""
return bind_api(
api=self,
path='/users/search.json',
payload_type='user', payload_list=True,
require_auth=True,
allowed_param=['q', 'count', 'page']
)
@property
def suggested_users(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/suggestions/%3Aslug
:allowed_param:'slug', 'lang'
"""
return bind_api(
api=self,
path='/users/suggestions/{slug}.json',
payload_type='user', payload_list=True,
require_auth=True,
allowed_param=['slug', 'lang']
)
@property
def suggested_categories(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/suggestions
:allowed_param:'lang'
"""
return bind_api(
api=self,
path='/users/suggestions.json',
payload_type='category', payload_list=True,
allowed_param=['lang'],
require_auth=True
)
@property
def suggested_users_tweets(self):
""" :reference: https://dev.twitter.com/rest/reference/get/users/suggestions/%3Aslug/members
:allowed_param:'slug'
"""
return bind_api(
api=self,
path='/users/suggestions/{slug}/members.json',
payload_type='status', payload_list=True,
allowed_param=['slug'],
require_auth=True
)
@property
def direct_messages(self):
""" :reference: https://dev.twitter.com/rest/reference/get/direct_messages
:allowed_param:'since_id', 'max_id', 'count'
"""
return bind_api(
api=self,
path='/direct_messages.json',
payload_type='direct_message', payload_list=True,
allowed_param=['since_id', 'max_id', 'count'],
require_auth=True
)
@property
def get_direct_message(self):
""" :reference: https://dev.twitter.com/rest/reference/get/direct_messages/show
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/direct_messages/show/{id}.json',
payload_type='direct_message',
allowed_param=['id'],
require_auth=True
)
@property
def sent_direct_messages(self):
""" :reference: https://dev.twitter.com/rest/reference/get/direct_messages/sent
:allowed_param:'since_id', 'max_id', 'count', 'page'
"""
return bind_api(
api=self,
path='/direct_messages/sent.json',
payload_type='direct_message', payload_list=True,
allowed_param=['since_id', 'max_id', 'count', 'page'],
require_auth=True
)
@property
def send_direct_message(self):
""" :reference: https://dev.twitter.com/rest/reference/post/direct_messages/new
:allowed_param:'user', 'screen_name', 'user_id', 'text'
"""
return bind_api(
api=self,
path='/direct_messages/new.json',
method='POST',
payload_type='direct_message',
allowed_param=['user', 'screen_name', 'user_id', 'text'],
require_auth=True
)
@property
def destroy_direct_message(self):
""" :reference: https://dev.twitter.com/rest/reference/post/direct_messages/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/direct_messages/destroy.json',
method='POST',
payload_type='direct_message',
allowed_param=['id'],
require_auth=True
)
@property
def create_friendship(self):
""" :reference: https://dev.twitter.com/rest/reference/post/friendships/create
:allowed_param:'id', 'user_id', 'screen_name', 'follow'
"""
return bind_api(
api=self,
path='/friendships/create.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name', 'follow'],
require_auth=True
)
@property
def destroy_friendship(self):
""" :reference: https://dev.twitter.com/rest/reference/post/friendships/destroy
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/friendships/destroy.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def show_friendship(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/show
            :allowed_param:'source_id', 'source_screen_name', 'target_id', 'target_screen_name'
"""
return bind_api(
api=self,
path='/friendships/show.json',
payload_type='friendship',
allowed_param=['source_id', 'source_screen_name',
'target_id', 'target_screen_name']
)
def lookup_friendships(self, user_ids=None, screen_names=None):
""" Perform bulk look up of friendships from user ID or screenname """
return self._lookup_friendships(list_to_csv(user_ids), list_to_csv(screen_names))
@property
def _lookup_friendships(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/lookup
:allowed_param:'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/friendships/lookup.json',
payload_type='relationship', payload_list=True,
allowed_param=['user_id', 'screen_name'],
require_auth=True
)
@property
def friends_ids(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friends/ids
:allowed_param:'id', 'user_id', 'screen_name', 'cursor'
"""
return bind_api(
api=self,
path='/friends/ids.json',
payload_type='ids',
allowed_param=['id', 'user_id', 'screen_name', 'cursor']
)
@property
def friends(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friends/list
:allowed_param:'id', 'user_id', 'screen_name', 'cursor'
"""
return bind_api(
api=self,
path='/friends/list.json',
payload_type='user', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'cursor']
)
@property
def friendships_incoming(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/incoming
:allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/friendships/incoming.json',
payload_type='ids',
allowed_param=['cursor']
)
@property
def friendships_outgoing(self):
""" :reference: https://dev.twitter.com/rest/reference/get/friendships/outgoing
:allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/friendships/outgoing.json',
payload_type='ids',
allowed_param=['cursor']
)
@property
def followers_ids(self):
""" :reference: https://dev.twitter.com/rest/reference/get/followers/ids
:allowed_param:'id', 'user_id', 'screen_name', 'cursor', 'count'
"""
return bind_api(
api=self,
path='/followers/ids.json',
payload_type='ids',
allowed_param=['id', 'user_id', 'screen_name', 'cursor', 'count']
)
@property
def followers(self):
""" :reference: https://dev.twitter.com/rest/reference/get/followers/list
:allowed_param:'id', 'user_id', 'screen_name', 'cursor', 'count', 'skip_status', 'include_user_entities'
"""
return bind_api(
api=self,
path='/followers/list.json',
payload_type='user', payload_list=True,
allowed_param=['id', 'user_id', 'screen_name', 'cursor', 'count',
'skip_status', 'include_user_entities']
)
def verify_credentials(self, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/get/account/verify_credentials
:allowed_param:'include_entities', 'skip_status'
"""
try:
return bind_api(
api=self,
path='/account/verify_credentials.json',
payload_type='user',
require_auth=True,
allowed_param=['include_entities', 'skip_status'],
)(**kargs)
except TweepError as e:
if e.response and e.response.status == 401:
return False
raise
@property
def rate_limit_status(self):
""" :reference: https://dev.twitter.com/rest/reference/get/application/rate_limit_status
:allowed_param:'resources'
"""
return bind_api(
api=self,
path='/application/rate_limit_status.json',
payload_type='json',
allowed_param=['resources'],
use_cache=False
)
@property
def set_delivery_device(self):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_delivery_device
:allowed_param:'device'
"""
return bind_api(
api=self,
path='/account/update_delivery_device.json',
method='POST',
allowed_param=['device'],
payload_type='user',
require_auth=True
)
@property
def update_profile_colors(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/account/update_profile_colors
:allowed_param:'profile_background_color', 'profile_text_color',
'profile_link_color', 'profile_sidebar_fill_color',
             'profile_sidebar_border_color'
"""
return bind_api(
api=self,
path='/account/update_profile_colors.json',
method='POST',
payload_type='user',
allowed_param=['profile_background_color', 'profile_text_color',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_sidebar_border_color'],
require_auth=True
)
def update_profile_image(self, filename, file_=None):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_image
:allowed_param:'include_entities', 'skip_status'
"""
headers, post_data = API._pack_image(filename, 700, f=file_)
return bind_api(
api=self,
path='/account/update_profile_image.json',
method='POST',
payload_type='user',
allowed_param=['include_entities', 'skip_status'],
require_auth=True
)(self, post_data=post_data, headers=headers)
def update_profile_background_image(self, filename, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_background_image
:allowed_param:'tile', 'include_entities', 'skip_status', 'use'
"""
f = kargs.pop('file', None)
headers, post_data = API._pack_image(filename, 800, f=f)
bind_api(
api=self,
path='/account/update_profile_background_image.json',
method='POST',
payload_type='user',
allowed_param=['tile', 'include_entities', 'skip_status', 'use'],
require_auth=True
)(post_data=post_data, headers=headers)
def update_profile_banner(self, filename, **kargs):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile_banner
:allowed_param:'width', 'height', 'offset_left', 'offset_right'
"""
f = kargs.pop('file', None)
headers, post_data = API._pack_image(filename, 700, form_field="banner", f=f)
bind_api(
api=self,
path='/account/update_profile_banner.json',
method='POST',
allowed_param=['width', 'height', 'offset_left', 'offset_right'],
require_auth=True
)(post_data=post_data, headers=headers)
@property
def update_profile(self):
""" :reference: https://dev.twitter.com/rest/reference/post/account/update_profile
:allowed_param:'name', 'url', 'location', 'description'
"""
return bind_api(
api=self,
path='/account/update_profile.json',
method='POST',
payload_type='user',
allowed_param=['name', 'url', 'location', 'description'],
require_auth=True
)
@property
def favorites(self):
""" :reference: https://dev.twitter.com/rest/reference/get/favorites/list
            :allowed_param:'screen_name', 'user_id', 'max_id', 'count', 'since_id'
"""
return bind_api(
api=self,
path='/favorites/list.json',
payload_type='status', payload_list=True,
            allowed_param=['screen_name', 'user_id', 'max_id', 'count', 'since_id']
)
@property
def create_favorite(self):
""" :reference:https://dev.twitter.com/rest/reference/post/favorites/create
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/favorites/create.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def destroy_favorite(self):
""" :reference: https://dev.twitter.com/rest/reference/post/favorites/destroy
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/favorites/destroy.json',
method='POST',
payload_type='status',
allowed_param=['id'],
require_auth=True
)
@property
def create_block(self):
""" :reference: https://dev.twitter.com/rest/reference/post/blocks/create
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/blocks/create.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def destroy_block(self):
""" :reference: https://dev.twitter.com/rest/reference/post/blocks/destroy
:allowed_param:'id', 'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/blocks/destroy.json',
method='POST',
payload_type='user',
allowed_param=['id', 'user_id', 'screen_name'],
require_auth=True
)
@property
def blocks(self):
""" :reference: https://dev.twitter.com/rest/reference/get/blocks/list
:allowed_param:'cursor'
"""
return bind_api(
api=self,
path='/blocks/list.json',
payload_type='user', payload_list=True,
allowed_param=['cursor'],
require_auth=True
)
@property
def blocks_ids(self):
""" :reference: https://dev.twitter.com/rest/reference/get/blocks/ids """
return bind_api(
api=self,
path='/blocks/ids.json',
payload_type='json',
require_auth=True
)
@property
def report_spam(self):
""" :reference: https://dev.twitter.com/rest/reference/post/users/report_spam
:allowed_param:'user_id', 'screen_name'
"""
return bind_api(
api=self,
path='/users/report_spam.json',
method='POST',
payload_type='user',
allowed_param=['user_id', 'screen_name'],
require_auth=True
)
@property
def saved_searches(self):
""" :reference: https://dev.twitter.com/rest/reference/get/saved_searches/show/%3Aid """
return bind_api(
api=self,
path='/saved_searches/list.json',
payload_type='saved_search', payload_list=True,
require_auth=True
)
@property
def get_saved_search(self):
""" :reference: https://dev.twitter.com/rest/reference/get/saved_searches/show/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/saved_searches/show/{id}.json',
payload_type='saved_search',
allowed_param=['id'],
require_auth=True
)
@property
def create_saved_search(self):
""" :reference: https://dev.twitter.com/rest/reference/post/saved_searches/create
:allowed_param:'query'
"""
return bind_api(
api=self,
path='/saved_searches/create.json',
method='POST',
payload_type='saved_search',
allowed_param=['query'],
require_auth=True
)
@property
def destroy_saved_search(self):
""" :reference: https://dev.twitter.com/rest/reference/post/saved_searches/destroy/%3Aid
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/saved_searches/destroy/{id}.json',
method='POST',
payload_type='saved_search',
allowed_param=['id'],
require_auth=True
)
@property
def create_list(self):
""" :reference: https://dev.twitter.com/rest/reference/post/lists/create
:allowed_param:'name', 'mode', 'description'
"""
return bind_api(
api=self,
path='/lists/create.json',
method='POST',
payload_type='list',
allowed_param=['name', 'mode', 'description'],
require_auth=True
)
@property
def destroy_list(self):
""" :reference: https://dev.twitter.com/rest/reference/post/lists/destroy
:allowed_param:'owner_screen_name', 'owner_id', 'list_id', 'slug'
"""
return bind_api(
api=self,
path='/lists/destroy.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'owner_id', 'list_id', 'slug'],
require_auth=True
)
@property
def update_list(self):
""" :reference: https://dev.twitter.com/rest/reference/post/lists/update
            :allowed_param:'list_id', 'slug', 'name', 'mode', 'description', 'owner_screen_name', 'owner_id'
"""
return bind_api(
api=self,
path='/lists/update.json',
method='POST',
payload_type='list',
allowed_param=['list_id', 'slug', 'name', 'mode', 'description', 'owner_screen_name', 'owner_id'],
require_auth=True
)
@property
def lists_all(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/list
:allowed_param:'screen_name', 'user_id'
"""
return bind_api(
api=self,
path='/lists/list.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id'],
require_auth=True
)
@property
def lists_memberships(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/memberships
:allowed_param:'screen_name', 'user_id', 'filter_to_owned_lists', 'cursor'
"""
return bind_api(
api=self,
path='/lists/memberships.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id', 'filter_to_owned_lists', 'cursor'],
require_auth=True
)
@property
def lists_subscriptions(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/subscriptions
:allowed_param:'screen_name', 'user_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/subscriptions.json',
payload_type='list', payload_list=True,
allowed_param=['screen_name', 'user_id', 'cursor'],
require_auth=True
)
@property
def list_timeline(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/statuses
:allowed_param:'owner_screen_name', 'slug', 'owner_id', 'list_id',
             'since_id', 'max_id', 'count', 'include_rts'
"""
return bind_api(
api=self,
path='/lists/statuses.json',
payload_type='status', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id', 'since_id', 'max_id', 'count',
'include_rts']
)
@property
def get_list(self):
""" :reference: https://dev.twitter.com/rest/reference/get/lists/show
:allowed_param:'owner_screen_name', 'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/show.json',
payload_type='list',
allowed_param=['owner_screen_name', 'owner_id', 'slug', 'list_id']
)
@property
def add_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/create
:allowed_param:'screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/members/create.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'],
require_auth=True
)
@property
def remove_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/destroy
:allowed_param:'screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'
"""
return bind_api(
api=self,
path='/lists/members/destroy.json',
method='POST',
payload_type='list',
allowed_param=['screen_name', 'user_id', 'owner_screen_name',
'owner_id', 'slug', 'list_id'],
require_auth=True
)
def add_list_members(self, screen_name=None, user_id=None, slug=None,
list_id=None, owner_id=None, owner_screen_name=None):
""" Perform bulk add of list members from user ID or screenname """
return self._add_list_members(list_to_csv(screen_name),
list_to_csv(user_id),
slug, list_id, owner_id,
owner_screen_name)
@property
def _add_list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/create_all
            :allowed_param:'screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'
"""
return bind_api(
api=self,
path='/lists/members/create_all.json',
method='POST',
payload_type='list',
            allowed_param=['screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'],
require_auth=True
)
def remove_list_members(self, screen_name=None, user_id=None, slug=None,
list_id=None, owner_id=None, owner_screen_name=None):
""" Perform bulk remove of list members from user ID or screenname """
return self._remove_list_members(list_to_csv(screen_name),
list_to_csv(user_id),
slug, list_id, owner_id,
owner_screen_name)
@property
def _remove_list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/members/destroy_all
            :allowed_param:'screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'
"""
return bind_api(
api=self,
path='/lists/members/destroy_all.json',
method='POST',
payload_type='list',
            allowed_param=['screen_name', 'user_id', 'slug', 'list_id',
'owner_id', 'owner_screen_name'],
require_auth=True
)
@property
def list_members(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/members
:allowed_param:'owner_screen_name', 'slug', 'list_id',
             'owner_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/members.json',
payload_type='user', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'list_id',
'owner_id', 'cursor']
)
@property
def show_list_member(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/members/show
:allowed_param:'list_id', 'slug', 'user_id', 'screen_name',
             'owner_screen_name', 'owner_id'
"""
return bind_api(
api=self,
path='/lists/members/show.json',
payload_type='user',
allowed_param=['list_id', 'slug', 'user_id', 'screen_name',
'owner_screen_name', 'owner_id']
)
@property
def subscribe_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/subscribers/create
:allowed_param:'owner_screen_name', 'slug', 'owner_id',
'list_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/create.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id'],
require_auth=True
)
@property
def unsubscribe_list(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/post/lists/subscribers/destroy
:allowed_param:'owner_screen_name', 'slug', 'owner_id',
'list_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/destroy.json',
method='POST',
payload_type='list',
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id'],
require_auth=True
)
@property
def list_subscribers(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/subscribers
:allowed_param:'owner_screen_name', 'slug', 'owner_id',
             'list_id', 'cursor'
"""
return bind_api(
api=self,
path='/lists/subscribers.json',
payload_type='user', payload_list=True,
allowed_param=['owner_screen_name', 'slug', 'owner_id',
'list_id', 'cursor']
)
@property
def show_list_subscriber(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/lists/subscribers/show
:allowed_param:'owner_screen_name', 'slug', 'screen_name',
             'owner_id', 'list_id', 'user_id'
"""
return bind_api(
api=self,
path='/lists/subscribers/show.json',
payload_type='user',
allowed_param=['owner_screen_name', 'slug', 'screen_name',
'owner_id', 'list_id', 'user_id']
)
@property
def trends_available(self):
""" :reference: https://dev.twitter.com/rest/reference/get/trends/available """
return bind_api(
api=self,
path='/trends/available.json',
payload_type='json'
)
@property
def trends_place(self):
""" :reference: https://dev.twitter.com/rest/reference/get/trends/place
:allowed_param:'id', 'exclude'
"""
return bind_api(
api=self,
path='/trends/place.json',
payload_type='json',
allowed_param=['id', 'exclude']
)
@property
def trends_closest(self):
""" :reference: https://dev.twitter.com/rest/reference/get/trends/closest
:allowed_param:'lat', 'long'
"""
return bind_api(
api=self,
path='/trends/closest.json',
payload_type='json',
allowed_param=['lat', 'long']
)
@property
def search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/search
:allowed_param:'q', 'lang', 'locale', 'since_id', 'geocode',
'max_id', 'since', 'until', 'result_type', 'count',
             'include_entities', 'from', 'to', 'source'
"""
return bind_api(
api=self,
path='/search/tweets.json',
payload_type='search_results',
allowed_param=['q', 'lang', 'locale', 'since_id', 'geocode',
'max_id', 'since', 'until', 'result_type',
'count', 'include_entities', 'from',
'to', 'source']
)
@property
def trends_daily(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/trends/daily
:allowed_param:'date', 'exclude'
"""
return bind_api(
api=self,
path='/trends/daily.json',
payload_type='json',
allowed_param=['date', 'exclude']
)
@property
def trends_weekly(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/trends/weekly
:allowed_param:'date', 'exclude'
"""
return bind_api(
api=self,
path='/trends/weekly.json',
payload_type='json',
allowed_param=['date', 'exclude']
)
@property
def reverse_geocode(self):
""" :reference: https://dev.twitter.com/rest/reference/get/geo/reverse_geocode
:allowed_param:'lat', 'long', 'accuracy', 'granularity', 'max_results'
"""
return bind_api(
api=self,
path='/geo/reverse_geocode.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'accuracy', 'granularity',
'max_results']
)
@property
def geo_id(self):
""" :reference: https://dev.twitter.com/rest/reference/get/geo/id/%3Aplace_id
:allowed_param:'id'
"""
return bind_api(
api=self,
path='/geo/id/{id}.json',
payload_type='place',
allowed_param=['id']
)
@property
def geo_search(self):
""" :reference: https://dev.twitter.com/docs/api/1.1/get/geo/search
:allowed_param:'lat', 'long', 'query', 'ip', 'granularity',
             'accuracy', 'max_results', 'contained_within'
"""
return bind_api(
api=self,
path='/geo/search.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'query', 'ip', 'granularity',
'accuracy', 'max_results', 'contained_within']
)
@property
def geo_similar_places(self):
""" :reference: https://dev.twitter.com/rest/reference/get/geo/similar_places
:allowed_param:'lat', 'long', 'name', 'contained_within'
"""
return bind_api(
api=self,
path='/geo/similar_places.json',
payload_type='place', payload_list=True,
allowed_param=['lat', 'long', 'name', 'contained_within']
)
@property
def supported_languages(self):
""" :reference: https://dev.twitter.com/rest/reference/get/help/languages """
return bind_api(
api=self,
path='/help/languages.json',
payload_type='json',
require_auth=True
)
@property
def configuration(self):
""" :reference: https://dev.twitter.com/rest/reference/get/help/configuration """
return bind_api(
api=self,
path='/help/configuration.json',
payload_type='json',
require_auth=True
)
""" Internal use only """
@staticmethod
def _pack_image(filename, max_size, form_field="image", f=None):
"""Pack image from file into multipart-formdata post body"""
        # image must be smaller than max_size KB
        if f is None:
            try:
                if os.path.getsize(filename) > (max_size * 1024):
                    raise TweepError('File is too big, must be less than %skb.'
                                     % max_size)
            except os.error:
                raise TweepError('Unable to access file')
            # build the multipart-formdata body
            fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
if f.tell() > (max_size * 1024):
                raise TweepError('File is too big, must be less than %skb.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
if isinstance(filename, six.text_type):
filename = filename.encode("utf-8")
BOUNDARY = b'Tw3ePy'
body = list()
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}";'
' filename="{1}"'.format(form_field, filename)
.encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': str(len(body))
}
return headers, body
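# Hedged usage sketch (not part of tweepy): wire up OAuth credentials and call
# two of the bind_api-backed endpoints defined above. The token strings are
# placeholders that the caller must supply.
def _example_api_usage():
    from tweepy.auth import OAuthHandler
    auth = OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
    auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
    api = API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    # Properties built with bind_api return callables, so they read like methods.
    tweets = api.user_timeline(screen_name='twitter', count=5)
    user = api.get_user(screen_name='twitter')
    return tweets, user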
|
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import warnings
from flask_wtf import Form
from wtforms import TextField, IntegerField, SelectField, validators
from werkzeug import redirect, abort
from jinja2 import TemplateNotFound
from nereid import request, url_for, render_template, login_required, flash, \
jsonify, route, current_user, current_website
from trytond.model import ModelView, ModelSQL, fields
from trytond.pool import Pool, PoolMeta
from .user import RegistrationForm
from .i18n import _
__all__ = ['Address', 'Party', 'ContactMechanism']
class AddressForm(Form):
"""
A form resembling the party.address
"""
name = TextField(_('Name'), [validators.DataRequired(), ])
street = TextField(_('Street'), [validators.DataRequired(), ])
streetbis = TextField(_('Street (Bis)'))
zip = TextField(_('Post Code'), [validators.DataRequired(), ])
city = TextField(_('City'), [validators.DataRequired(), ])
country = SelectField(_('Country'), [validators.DataRequired(), ], coerce=int) # noqa
subdivision = IntegerField(_('State/County'), [validators.DataRequired()])
email = TextField(_('Email'))
phone = TextField(_('Phone'))
def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
super(AddressForm, self).__init__(formdata, obj, prefix, **kwargs)
        # Fill country choices when the form is initialized
self.country.choices = [
(c.id, c.name) for c in current_website.countries
]
class Address:
"""Party Address"""
__name__ = 'party.address'
__metaclass__ = PoolMeta
registration_form = RegistrationForm
@classmethod
def get_address_form(cls, address=None):
"""
Return an initialised Address form that can be validated and used to
create/update addresses
:param address: If an active record is provided it is used to autofill
the form.
"""
if address:
form = AddressForm(
request.form,
name=address.name,
street=address.street,
streetbis=address.streetbis,
zip=address.zip,
city=address.city,
country=address.country and address.country.id,
subdivision=address.subdivision and address.subdivision.id,
email=address.party.email,
phone=address.party.phone
)
else:
address_name = "" if current_user.is_anonymous else \
current_user.display_name
form = AddressForm(request.form, name=address_name)
return form
@classmethod
@route("/create-address", methods=["GET", "POST"])
@login_required
def create_address(cls):
"""
Create an address for the current nereid_user
GET
~~~
Return an address creation form
POST
~~~~
Creates an address and redirects to the address view. If a next_url
is provided, redirects there.
        .. versionadded:: 3.0.3.0
"""
form = cls.get_address_form()
if request.method == 'POST' and form.validate():
party = current_user.party
address, = cls.create([{
'name': form.name.data,
'street': form.street.data,
'streetbis': form.streetbis.data,
'zip': form.zip.data,
'city': form.city.data,
'country': form.country.data,
'subdivision': form.subdivision.data,
'party': party.id,
}])
if form.email.data:
party.add_contact_mechanism_if_not_exists(
'email', form.email.data
)
if form.phone.data:
party.add_contact_mechanism_if_not_exists(
'phone', form.phone.data
)
return redirect(url_for('party.address.view_address'))
try:
return render_template('address-add.jinja', form=form)
except TemplateNotFound:
# The address-add template was introduced in 3.0.3.0
# so just raise a deprecation warning till 3.2.X and then
# expect the use of address-add template
warnings.warn(
"address-add.jinja template not found. "
"Will be required in future versions",
DeprecationWarning
)
return render_template('address-edit.jinja', form=form)
@classmethod
@route("/save-new-address", methods=["GET", "POST"])
@route("/edit-address/<int:address>", methods=["GET", "POST"])
@login_required
def edit_address(cls, address=None):
"""
Edit an Address
POST will update an existing address.
        GET will return an existing address edit form.
        .. versionchanged:: 3.0.3.0
            For creating a new address, use the create_address handler instead
            of this one. That functionality will be deprecated in 3.2.X.
:param address: ID of the address
"""
if address is None:
warnings.warn(
"Address creation will be deprecated from edit_address handler."
" Use party.address.create_address instead",
DeprecationWarning
)
return cls.create_address()
form = cls.get_address_form()
if address not in (a.id for a in current_user.party.addresses):
# Check if the address is in the list of addresses of the
# current user's party
abort(403)
address = cls(address)
if request.method == 'POST' and form.validate():
party = current_user.party
cls.write([address], {
'name': form.name.data,
'street': form.street.data,
'streetbis': form.streetbis.data,
'zip': form.zip.data,
'city': form.city.data,
'country': form.country.data,
'subdivision': form.subdivision.data,
})
if form.email.data:
party.add_contact_mechanism_if_not_exists(
'email', form.email.data
)
if form.phone.data:
party.add_contact_mechanism_if_not_exists(
'phone', form.phone.data
)
return redirect(url_for('party.address.view_address'))
elif request.method == 'GET' and address:
# Its an edit of existing address, prefill data
form = cls.get_address_form(address)
return render_template('address-edit.jinja', form=form, address=address)
@classmethod
@route("/view-address", methods=["GET"])
@login_required
def view_address(cls):
"View the addresses of user"
return render_template('address.jinja')
@route("/remove-address/<int:active_id>", methods=["POST"])
@login_required
def remove_address(self):
"""
Make address inactive if user removes the address from address book.
"""
if self.party == current_user.party:
self.active = False
self.save()
flash(_('Address has been deleted successfully!'))
if request.is_xhr:
return jsonify(success=True)
return redirect(request.referrer)
abort(403)
class Party(ModelSQL, ModelView):
"Party"
__name__ = 'party.party'
nereid_users = fields.One2Many('nereid.user', 'party', 'Web Users')
def add_contact_mechanism_if_not_exists(self, type, value):
"""
Adds a contact mechanism to the party if it does not exist
:return: The created contact mechanism or the one which existed
"""
ContactMechanism = Pool().get('party.contact_mechanism')
mechanisms = ContactMechanism.search([
('party', '=', self.id),
('type', '=', type),
('value', '=', value),
])
if not mechanisms:
mechanisms = ContactMechanism.create([{
'party': self.id,
'type': type,
'value': value,
}])
return mechanisms[0]
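# Hedged usage sketch (assumes an active Tryton transaction and a logged-in
# nereid user, as in the request handlers above): record an e-mail address for
# the current party without creating duplicates. The address is a placeholder.
def _example_add_contact_mechanism():
    party = current_user.party
    return party.add_contact_mechanism_if_not_exists('email', 'alice@example.com')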
class ContactMechanismForm(Form):
type = SelectField('Type', [validators.DataRequired()])
value = TextField('Value', [validators.DataRequired()])
comment = TextField('Comment')
class ContactMechanism(ModelSQL, ModelView):
"""
Allow modification of contact mechanisms
"""
__name__ = "party.contact_mechanism"
@classmethod
def get_form(cls):
"""
Returns the contact mechanism form
"""
from trytond.modules.party import contact_mechanism
form = ContactMechanismForm()
form.type.choices = contact_mechanism._TYPES
return form
@classmethod
@route("/contact-mechanisms/add", methods=["POST"])
@login_required
def add(cls):
"""
Adds a contact mechanism to the party's contact mechanisms
"""
form = cls.get_form()
if form.validate_on_submit():
cls.create([{
'party': current_user.party.id,
'type': form.type.data,
'value': form.value.data,
'comment': form.comment.data,
}])
if request.is_xhr:
return jsonify({'success': True})
return redirect(request.referrer)
if request.is_xhr:
return jsonify({'success': False})
else:
            for field, messages in form.errors.items():
flash("<br>".join(messages), "Field %s" % field)
return redirect(request.referrer)
@route("/contact-mechanisms/<int:active_id>", methods=["POST", "DELETE"])
@login_required
def remove(self):
"""
DELETE: Removes the current contact mechanism
"""
ContactMechanism = Pool().get('party.contact_mechanism')
if self.party == current_user.party:
ContactMechanism.delete([self])
else:
abort(403)
if request.is_xhr:
return jsonify({
'success': True
})
return redirect(request.referrer)
|
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
from functools import partial
from optparse import OptionGroup, SUPPRESS_HELP, Option
import warnings
from pip.index import (
FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
fmt_ctl_no_use_wheel)
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, src_prefix
from pip.utils.hashes import STRONG_HASHES
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
def resolve_wheel_no_use_binary(options):
if not options.use_wheel:
control = options.format_control
fmt_ctl_no_use_wheel(control)
def check_install_build_global(options, check_options=None):
"""Disable wheels if per-setup.py call options are set.
:param options: The OptionParser options to update.
:param check_options: The options to check, if not supplied defaults to
options.
"""
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
fmt_ctl_no_binary(control)
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2)
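# Hedged sketch (not part of pip): what check_install_build_global does when
# per-setup.py options are present. The optparse Values instance stands in for
# a parsed options object; the build option string is an arbitrary example.
def _example_check_install_build_global():
    from optparse import Values
    options = Values()
    options.format_control = FormatControl(set(), set())
    options.build_options = ['--plat-name=win32']
    options.global_options = None
    options.install_options = None
    check_install_build_global(options)
    # format_control should now exclude all binaries, and a warning about
    # disabling wheels has been emitted.
    return options.format_control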
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help=('Give less output. Option is additive, and can be used up to 3'
' times (corresponding to WARNING, ERROR, and CRITICAL logging'
' levels).')
)
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = partial(
Option,
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
def exists_action():
return Option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b', 'a'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.")
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.")
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help="Base URL of Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.")
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help="Extra URLs of package indexes to use in addition to "
"--index-url. Should follow the same rules as "
"--index-url."
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
def find_links():
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory, "
"then look for archives in the directory listing.")
def allow_external():
return Option(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
allow_all_external = partial(
Option,
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help=SUPPRESS_HELP,
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
# Remove after 7.0
no_allow_external = partial(
Option,
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
def allow_unsafe():
return Option(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help=SUPPRESS_HELP,
)
# Remove after 7.0
no_allow_unsafe = partial(
Option,
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
def constraints():
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.')
def requirements():
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
def editable():
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
# XXX: deprecated, remove in 9.0
use_wheel = partial(
Option,
'--use-wheel',
dest='use_wheel',
action='store_true',
default=True,
help=SUPPRESS_HELP,
)
# XXX: deprecated, remove in 9.0
no_use_wheel = partial(
Option,
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations. DEPRECATED in favour of --no-binary.'),
)
def _get_format_control(values, option):
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.no_binary, existing.only_binary)
def _handle_only_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.only_binary, existing.no_binary)
def no_binary():
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.")
def only_binary():
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.")
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
ignore_requires_python = partial(
Option,
'--ignore-requires-python',
dest='ignore_requires_python',
action='store_true',
help='Ignore the Requires-Python information.')
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
)
def _merge_hash(option, opt_str, value, parser):
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {}
try:
algo, digest = value.split(':', 1)
except ValueError:
parser.error('Arguments to %s must be a hash name '
'followed by a value, like --hash=sha256:abcde...' %
opt_str)
if algo not in STRONG_HASHES:
parser.error('Allowed hash algorithms for %s are %s.' %
(opt_str, ', '.join(STRONG_HASHES)))
parser.values.hashes.setdefault(algo, []).append(digest)
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...')
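# Illustrative sketch (not part of the original module): how repeated --hash
# options accumulate into a dict keyed by algorithm. The parser/values objects
# below are minimal stand-ins for optparse's.
def _example_merge_hash():
    class _Values(object):
        hashes = None
    class _Parser(object):
        values = _Values()
        def error(self, msg):
            raise SystemExit(msg)
    parser = _Parser()
    _merge_hash(None, '--hash', 'sha256:abc', parser)
    _merge_hash(None, '--hash', 'sha256:def', parser)
    return parser.values.hashes  # {'sha256': ['abc', 'def']}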
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.')
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
non_deprecated_index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
process_dependency_links,
]
}
index_group = {
'name': 'Package Index Options (including deprecated options)',
'options': non_deprecated_index_group['options'] + [
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
]
}
|
|
# -*- coding: utf-8 -*-
"""This module contains classes representing syntactical elements of SQL."""
import re
import sys
from sqlparse3 import tokens as T
class Token(object):
"""Base class for all other classes in this module.
It represents a single token and has two instance attributes:
``value`` is the unchanged value of the token and ``ttype`` is
the type of the token.
"""
__slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword')
def __init__(self, ttype, value):
self.value = value
if ttype in T.Keyword:
self.normalized = value.upper()
else:
self.normalized = value
self.ttype = ttype
self.is_keyword = ttype in T.Keyword
self.parent = None
def __str__(self):
if sys.version_info[0] == 3:
return self.value
else:
return self.__unicode__().encode('utf-8')
def __repr__(self):
short = self._get_repr_value()
if sys.version_info[0] < 3:
short = short.encode('utf-8')
return '<%s \'%s\' at 0x%07x>' % (self._get_repr_name(),
short, id(self))
def __unicode__(self):
"""Returns a unicode representation of this object."""
return self.value or ''
def to_unicode(self):
"""Returns a unicode representation of this object.
.. deprecated:: 0.1.5
Use ``unicode(token)`` (for Python 3: ``str(token)``) instead.
"""
return str(self)
def _get_repr_name(self):
return str(self.ttype).split('.')[-1]
def _get_repr_value(self):
raw = str(self)
if len(raw) > 7:
raw = raw[:6] + '...'
return re.sub(r'\s+', ' ', raw)
def flatten(self):
"""Resolve subgroups."""
yield self
def match(self, ttype, values, regex=False):
"""Checks whether the token matches the given arguments.
*ttype* is a token type. If this token doesn't match the given token
type, ``False`` is returned.
*values* is a list of possible values for this token. The values
are OR'ed together so if only one of the values matches ``True``
is returned. Except for keyword tokens the comparison is
case-sensitive. For convenience it's ok to pass in a single string.
If *regex* is ``True`` (default is ``False``) the given values are
treated as regular expressions.
"""
type_matched = self.ttype is ttype
if not type_matched or values is None:
return type_matched
if regex:
if isinstance(values, str):
values = set([values])
if self.ttype is T.Keyword:
values = set(re.compile(v, re.IGNORECASE) for v in values)
else:
values = set(re.compile(v) for v in values)
for pattern in values:
if pattern.search(self.value):
return True
return False
if isinstance(values, str):
if self.is_keyword:
return values.upper() == self.normalized
return values == self.value
if self.is_keyword:
for v in values:
if v.upper() == self.normalized:
return True
return False
return self.value in values
def is_group(self):
"""Returns ``True`` if this object has children."""
return False
def is_whitespace(self):
"""Return ``True`` if this token is a whitespace token."""
return self.ttype and self.ttype in T.Whitespace
def within(self, group_cls):
"""Returns ``True`` if this token is within *group_cls*.
Use this method for example to check if an identifier is within
a function: ``t.within(sql.Function)``.
"""
parent = self.parent
while parent:
if isinstance(parent, group_cls):
return True
parent = parent.parent
return False
def is_child_of(self, other):
"""Returns ``True`` if this token is a direct child of *other*."""
return self.parent == other
def has_ancestor(self, other):
"""Returns ``True`` if *other* is in this tokens ancestry."""
parent = self.parent
while parent:
if parent == other:
return True
parent = parent.parent
return False
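# Illustrative sketch (not part of the original module): keyword matching via
# Token.match() compares against the upper-cased ``normalized`` value, so it
# is case-insensitive for keywords.
def _example_token_match():
    tok = Token(T.Keyword, 'select')
    # First result is True (case-insensitive keyword), second is False.
    return tok.match(T.Keyword, 'SELECT'), tok.match(T.Keyword, ['INSERT'])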
class TokenList(Token):
"""A group of tokens.
It has an additional instance attribute ``tokens`` which holds a
list of child-tokens.
"""
__slots__ = ('value', 'ttype', 'tokens')
def __init__(self, tokens=None):
if tokens is None:
tokens = []
self.tokens = tokens
Token.__init__(self, None, self._to_string())
def __unicode__(self):
return self._to_string()
def __str__(self):
str_ = self._to_string()
if sys.version_info[0] < 3:
str_ = str_.encode('utf-8')
return str_
def _to_string(self):
if sys.version_info[0] == 3:
return ''.join(x.value for x in self.flatten())
else:
return ''.join(str(x) for x in self.flatten())
def _get_repr_name(self):
return self.__class__.__name__
def _pprint_tree(self, max_depth=None, depth=0):
"""Pretty-print the object tree."""
indent = ' ' * (depth * 2)
for idx, token in enumerate(self.tokens):
if token.is_group():
pre = ' +-'
else:
pre = ' | '
print('%s%s%d %s \'%s\'' % (indent, pre, idx,
token._get_repr_name(),
token._get_repr_value()))
if (token.is_group() and (max_depth is None or depth < max_depth)):
token._pprint_tree(max_depth, depth + 1)
def _remove_quotes(self, val):
"""Helper that removes surrounding quotes from strings."""
if not val:
return val
if val[0] in ('"', '\'') and val[-1] == val[0]:
val = val[1:-1]
return val
def get_token_at_offset(self, offset):
"""Returns the token that is on position offset."""
idx = 0
for token in self.flatten():
end = idx + len(token.value)
if idx <= offset <= end:
return token
idx = end
def flatten(self):
"""Generator yielding ungrouped tokens.
This method is recursively called for all child tokens.
"""
for token in self.tokens:
if isinstance(token, TokenList):
for item in token.flatten():
yield item
else:
yield token
# def __iter__(self):
# return self
#
# def next(self):
# for token in self.tokens:
# yield token
def is_group(self):
return True
def get_sublists(self):
# return [x for x in self.tokens if isinstance(x, TokenList)]
for x in self.tokens:
if isinstance(x, TokenList):
yield x
@property
def _groupable_tokens(self):
return self.tokens
def token_first(self, ignore_whitespace=True):
"""Returns the first child token.
If *ignore_whitespace* is ``True`` (the default), whitespace
tokens are ignored.
"""
for token in self.tokens:
if ignore_whitespace and token.is_whitespace():
continue
return token
def token_next_by_instance(self, idx, clss):
"""Returns the next token matching a class.
*idx* is where to start searching in the list of child tokens.
*clss* is a list of classes the token should be an instance of.
If no matching token can be found ``None`` is returned.
"""
if not isinstance(clss, (list, tuple)):
clss = (clss,)
for token in self.tokens[idx:]:
if isinstance(token, clss):
return token
def token_next_by_type(self, idx, ttypes):
"""Returns next matching token by it's token type."""
if not isinstance(ttypes, (list, tuple)):
ttypes = [ttypes]
for token in self.tokens[idx:]:
if token.ttype in ttypes:
return token
def token_next_match(self, idx, ttype, value, regex=False):
"""Returns next token where it's ``match`` method returns ``True``."""
if not isinstance(idx, int):
idx = self.token_index(idx)
for n in range(idx, len(self.tokens)):
token = self.tokens[n]
if token.match(ttype, value, regex):
return token
def token_not_matching(self, idx, funcs):
for token in self.tokens[idx:]:
passed = False
for func in funcs:
if func(token):
passed = True
break
if not passed:
return token
def token_matching(self, idx, funcs):
for token in self.tokens[idx:]:
for func in funcs:
if func(token):
return token
def token_prev(self, idx, skip_ws=True):
"""Returns the previous token relative to *idx*.
If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
``None`` is returned if there's no previous token.
"""
if idx is None:
return None
if not isinstance(idx, int):
idx = self.token_index(idx)
while idx:
idx -= 1
if self.tokens[idx].is_whitespace() and skip_ws:
continue
return self.tokens[idx]
def token_next(self, idx, skip_ws=True):
"""Returns the next token relative to *idx*.
If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
``None`` is returned if there's no next token.
"""
if idx is None:
return None
if not isinstance(idx, int):
idx = self.token_index(idx)
while idx < len(self.tokens) - 1:
idx += 1
if self.tokens[idx].is_whitespace() and skip_ws:
continue
return self.tokens[idx]
def token_index(self, token):
"""Return list index of token."""
return self.tokens.index(token)
def tokens_between(self, start, end, exclude_end=False):
"""Return all tokens between (and including) start and end.
If *exclude_end* is ``True`` (default is ``False``) the end token
is excluded.
"""
# FIXME(andi): rename exclude_end to include_end
if exclude_end:
offset = 0
else:
offset = 1
end_idx = self.token_index(end) + offset
start_idx = self.token_index(start)
return self.tokens[start_idx:end_idx]
def group_tokens(self, grp_cls, tokens, ignore_ws=False):
"""Replace tokens by an instance of *grp_cls*."""
idx = self.token_index(tokens[0])
if ignore_ws:
while tokens and tokens[-1].is_whitespace():
tokens = tokens[:-1]
for t in tokens:
self.tokens.remove(t)
grp = grp_cls(tokens)
for token in tokens:
token.parent = grp
grp.parent = self
self.tokens.insert(idx, grp)
return grp
def insert_before(self, where, token):
"""Inserts *token* before *where*."""
self.tokens.insert(self.token_index(where), token)
def insert_after(self, where, token, skip_ws=True):
"""Inserts *token* after *where*."""
next_token = self.token_next(where, skip_ws=skip_ws)
if next_token is None:
self.tokens.append(token)
else:
self.tokens.insert(self.token_index(next_token), token)
def has_alias(self):
"""Returns ``True`` if an alias is present."""
return self.get_alias() is not None
def get_alias(self):
"""Returns the alias for this identifier or ``None``."""
kw = self.token_next_match(0, T.Keyword, 'AS')
if kw is not None:
alias = self.token_next(self.token_index(kw))
if alias is None:
return None
else:
next_ = self.token_next_by_instance(0, Identifier)
if next_ is None:
next_ = self.token_next_by_type(0, T.String.Symbol)
if next_ is None:
return None
alias = next_
if isinstance(alias, Identifier):
return alias.get_name()
return self._remove_quotes(str(alias))
def get_name(self):
"""Returns the name of this identifier.
This is either its alias or its real name. The returned value can
be considered as the name under which the object corresponding to
this identifier is known within the current statement.
"""
alias = self.get_alias()
if alias is not None:
return alias
return self.get_real_name()
def get_real_name(self):
"""Returns the real name (object name) of this identifier."""
# a.b
dot = self.token_next_match(0, T.Punctuation, '.')
if dot is None:
next_ = self.token_next_by_type(0, T.Name)
if next_ is not None:
return self._remove_quotes(next_.value)
return None
next_ = self.token_next_by_type(self.token_index(dot),
(T.Name, T.Wildcard, T.String.Symbol))
if next_ is None: # invalid identifier, e.g. "a."
return None
return self._remove_quotes(next_.value)
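# Illustrative sketch (not part of the original module): navigating a
# hand-built token list with the search helpers defined above.
def _example_tokenlist_navigation():
    group = TokenList([Token(T.Name, 'foo'), Token(T.Whitespace, ' '),
                       Token(T.Keyword, 'AS'), Token(T.Whitespace, ' '),
                       Token(T.Name, 'bar')])
    kw = group.token_next_match(0, T.Keyword, 'AS')   # the 'AS' token
    alias = group.token_next(group.token_index(kw))   # the 'bar' token
    return kw, alias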
class Statement(TokenList):
"""Represents a SQL statement."""
__slots__ = ('value', 'ttype', 'tokens')
def get_type(self):
"""Returns the type of a statement.
The returned value is a string holding an upper-cased reprint of
the first DML or DDL keyword. If the first token in this group
isn't a DML or DDL keyword "UNKNOWN" is returned.
"""
first_token = self.token_first()
if first_token is None:
# An "empty" statement that either has not tokens at all
# or only whitespace tokens.
return 'UNKNOWN'
elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
return first_token.normalized
return 'UNKNOWN'
class Identifier(TokenList):
"""Represents an identifier.
Identifiers may have aliases or typecasts.
"""
__slots__ = ('value', 'ttype', 'tokens')
def get_parent_name(self):
"""Return name of the parent object if any.
A parent object is identified by the first occurring dot.
"""
dot = self.token_next_match(0, T.Punctuation, '.')
if dot is None:
return None
prev_ = self.token_prev(self.token_index(dot))
if prev_ is None: # something must be very wrong here...
return None
return self._remove_quotes(prev_.value)
def is_wildcard(self):
"""Return ``True`` if this identifier contains a wildcard."""
token = self.token_next_by_type(0, T.Wildcard)
return token is not None
def get_typecast(self):
"""Returns the typecast or ``None`` of this object as a string."""
marker = self.token_next_match(0, T.Punctuation, '::')
if marker is None:
return None
next_ = self.token_next(self.token_index(marker), False)
if next_ is None:
return None
return str(next_)
def get_ordering(self):
"""Returns the ordering or ``None`` as uppercase string."""
ordering = self.token_next_by_type(0, T.Keyword.Order)
if ordering is None:
return None
return ordering.value.upper()
class IdentifierList(TokenList):
"""A list of :class:`~sqlparse.sql.Identifier`\'s."""
__slots__ = ('value', 'ttype', 'tokens')
def get_identifiers(self):
"""Returns the identifiers.
Whitespace and punctuation tokens are not included in this generator.
"""
for x in self.tokens:
if not x.is_whitespace() and not x.match(T.Punctuation, ','):
yield x
class Parenthesis(TokenList):
"""Tokens between parenthesis."""
__slots__ = ('value', 'ttype', 'tokens')
@property
def _groupable_tokens(self):
return self.tokens[1:-1]
class Assignment(TokenList):
"""An assignment like 'var := val;'"""
__slots__ = ('value', 'ttype', 'tokens')
class If(TokenList):
"""An 'if' clause with possible 'else if' or 'else' parts."""
__slots__ = ('value', 'ttype', 'tokens')
class For(TokenList):
"""A 'FOR' loop."""
__slots__ = ('value', 'ttype', 'tokens')
class Comparison(TokenList):
"""A comparison used for example in WHERE clauses."""
__slots__ = ('value', 'ttype', 'tokens')
@property
def left(self):
return self.tokens[0]
@property
def right(self):
return self.tokens[-1]
class Comment(TokenList):
"""A comment."""
__slots__ = ('value', 'ttype', 'tokens')
class Where(TokenList):
"""A WHERE clause."""
__slots__ = ('value', 'ttype', 'tokens')
class Case(TokenList):
"""A CASE statement with one or more WHEN and possibly an ELSE part."""
__slots__ = ('value', 'ttype', 'tokens')
def get_cases(self):
"""Returns a list of 2-tuples (condition, value).
If an ELSE clause exists, its condition is ``None``.
"""
CONDITION = 1
VALUE = 2
ret = []
mode = CONDITION
for token in self.tokens:
# Set mode from the current statement
if token.match(T.Keyword, 'CASE'):
continue
elif token.match(T.Keyword, 'WHEN'):
ret.append(([], []))
mode = CONDITION
elif token.match(T.Keyword, 'THEN'):
mode = VALUE
elif token.match(T.Keyword, 'ELSE'):
ret.append((None, []))
mode = VALUE
elif token.match(T.Keyword, 'END'):
mode = None
# First condition without preceding WHEN
if mode and not ret:
ret.append(([], []))
# Append token depending of the current mode
if mode == CONDITION:
ret[-1][0].append(token)
elif mode == VALUE:
ret[-1][1].append(token)
# Return cases list
return ret
class Function(TokenList):
"""A function or procedure call."""
__slots__ = ('value', 'ttype', 'tokens')
def get_parameters(self):
"""Return a list of parameters."""
parenthesis = self.tokens[-1]
for t in parenthesis.tokens:
if isinstance(t, IdentifierList):
return t.get_identifiers()
elif isinstance(t, Identifier):
return [t,]
return []
|
|
#!/usr/bin/env python
"""Updates FileCheck checks in MIR tests.
This script is a utility to update MIR based tests with new FileCheck
patterns.
The checks added by this script will cover the entire body of each
function it handles. Virtual registers used are given names via
FileCheck patterns, so if you do want to check a subset of the body it
should be straightforward to trim out the irrelevant parts. None of
the YAML metadata will be checked, other than function names.
If there are multiple llc commands in a test, the full set of checks
will be repeated for each different check pattern. Checks for patterns
that are common between different commands will be left as-is by
default, or removed if the --remove-common-prefixes flag is provided.
"""
from __future__ import print_function
import argparse
import collections
import glob
import os
import re
import subprocess
import sys
from UpdateTestChecks import common
MIR_FUNC_NAME_RE = re.compile(r' *name: *(?P<func>[A-Za-z0-9_.-]+)')
MIR_BODY_BEGIN_RE = re.compile(r' *body: *\|')
MIR_BASIC_BLOCK_RE = re.compile(r' *bb\.[0-9]+.*:$')
VREG_RE = re.compile(r'(%[0-9]+)(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?')
VREG_DEF_RE = re.compile(
r'^ *(?P<vregs>{0}(?:, {0})*) '
r'= (?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(VREG_RE.pattern))
MIR_PREFIX_DATA_RE = re.compile(r'^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)')
IR_FUNC_NAME_RE = re.compile(
r'^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[A-Za-z0-9_.]+)\s*\(')
IR_PREFIX_DATA_RE = re.compile(r'^ *(;|$)')
MIR_FUNC_RE = re.compile(
r'^---$'
r'\n'
r'^ *name: *(?P<func>[A-Za-z0-9_.-]+)$'
r'.*?'
r'^ *body: *\|\n'
r'(?P<body>.*?)\n'
r'^\.\.\.$',
flags=(re.M | re.S))
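# Illustrative sketch (not part of the original script): what VREG_DEF_RE
# pulls out of a made-up MIR instruction line.
def _example_vreg_def_match():
    m = VREG_DEF_RE.match('  %2:gpr32 = ADDWrr %0, %1')
    return m.group('vregs'), m.group('opcode')  # ('%2:gpr32', 'ADDWrr')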
class LLC:
def __init__(self, bin):
self.bin = bin
def __call__(self, args, ir):
if ir.endswith('.mir'):
args = '{} -x mir'.format(args)
with open(ir) as ir_file:
stdout = subprocess.check_output('{} {}'.format(self.bin, args),
shell=True, stdin=ir_file)
if sys.version_info[0] > 2:
stdout = stdout.decode()
# Normalize line endings to Unix LF style.
stdout = stdout.replace('\r\n', '\n')
return stdout
class Run:
def __init__(self, prefixes, cmd_args, triple):
self.prefixes = prefixes
self.cmd_args = cmd_args
self.triple = triple
def __getitem__(self, index):
return [self.prefixes, self.cmd_args, self.triple][index]
def log(msg, verbose=True):
if verbose:
print(msg, file=sys.stderr)
def find_triple_in_ir(lines, verbose=False):
for l in lines:
m = common.TRIPLE_IR_RE.match(l)
if m:
return m.group(1)
return None
def find_run_lines(test, lines, verbose=False):
raw_lines = [m.group(1)
for m in [common.RUN_LINE_RE.match(l) for l in lines] if m]
run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
for l in raw_lines[1:]:
if run_lines[-1].endswith("\\"):
run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
else:
run_lines.append(l)
if verbose:
log('Found {} RUN lines:'.format(len(run_lines)))
for l in run_lines:
log(' RUN: {}'.format(l))
return run_lines
def build_run_list(test, run_lines, verbose=False):
run_list = []
all_prefixes = []
for l in run_lines:
if '|' not in l:
common.warn('Skipping unparseable RUN line: ' + l)
continue
commands = [cmd.strip() for cmd in l.split('|', 1)]
llc_cmd = commands[0]
filecheck_cmd = commands[1] if len(commands) > 1 else ''
common.verify_filecheck_prefixes(filecheck_cmd)
if not llc_cmd.startswith('llc '):
common.warn('Skipping non-llc RUN line: {}'.format(l), test_file=test)
continue
if not filecheck_cmd.startswith('FileCheck '):
common.warn('Skipping non-FileChecked RUN line: {}'.format(l),
test_file=test)
continue
triple = None
m = common.TRIPLE_ARG_RE.search(llc_cmd)
if m:
triple = m.group(1)
# If we find -march but not -mtriple, use that.
m = common.MARCH_ARG_RE.search(llc_cmd)
if m and not triple:
triple = '{}--'.format(m.group(1))
cmd_args = llc_cmd[len('llc'):].strip()
cmd_args = cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [
item
for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
for item in m.group(1).split(',')]
if not check_prefixes:
check_prefixes = ['CHECK']
all_prefixes += check_prefixes
run_list.append(Run(check_prefixes, cmd_args, triple))
# Remove any common prefixes. We'll just leave those entirely alone.
common_prefixes = set([prefix for prefix in all_prefixes
if all_prefixes.count(prefix) > 1])
for run in run_list:
run.prefixes = [p for p in run.prefixes if p not in common_prefixes]
return run_list, common_prefixes
def find_functions_with_one_bb(lines, verbose=False):
result = []
cur_func = None
bbs = 0
for line in lines:
m = MIR_FUNC_NAME_RE.match(line)
if m:
if bbs == 1:
result.append(cur_func)
cur_func = m.group('func')
bbs = 0
m = MIR_BASIC_BLOCK_RE.match(line)
if m:
bbs += 1
if bbs == 1:
result.append(cur_func)
return result
def build_function_body_dictionary(test, raw_tool_output, triple, prefixes,
func_dict, verbose):
for m in MIR_FUNC_RE.finditer(raw_tool_output):
func = m.group('func')
body = m.group('body')
if verbose:
log('Processing function: {}'.format(func))
for l in body.splitlines():
log(' {}'.format(l))
for prefix in prefixes:
if func in func_dict[prefix] and func_dict[prefix][func] != body:
common.warn('Found conflicting asm for prefix: {}'.format(prefix),
test_file=test)
func_dict[prefix][func] = body
def add_checks_for_function(test, output_lines, run_list, func_dict, func_name,
single_bb, verbose=False):
printed_prefixes = set()
for run in run_list:
for prefix in run.prefixes:
if prefix in printed_prefixes:
continue
if not func_dict[prefix][func_name]:
continue
# if printed_prefixes:
# # Add some space between different check prefixes.
# output_lines.append('')
printed_prefixes.add(prefix)
log('Adding {} lines for {}'.format(prefix, func_name), verbose)
add_check_lines(test, output_lines, prefix, func_name, single_bb,
func_dict[prefix][func_name].splitlines())
break
return output_lines
def add_check_lines(test, output_lines, prefix, func_name, single_bb,
func_body):
if single_bb:
# Don't bother checking the basic block label for a single BB
func_body.pop(0)
if not func_body:
common.warn('Function has no instructions to check: {}'.format(func_name),
test_file=test)
return
first_line = func_body[0]
indent = len(first_line) - len(first_line.lstrip(' '))
# A check comment, indented the appropriate amount
check = '{:>{}}; {}'.format('', indent, prefix)
output_lines.append('{}-LABEL: name: {}'.format(check, func_name))
vreg_map = {}
for func_line in func_body:
if not func_line.strip():
continue
m = VREG_DEF_RE.match(func_line)
if m:
for vreg in VREG_RE.finditer(m.group('vregs')):
name = mangle_vreg(m.group('opcode'), vreg_map.values())
vreg_map[vreg.group(1)] = name
func_line = func_line.replace(
vreg.group(1), '[[{}:%[0-9]+]]'.format(name), 1)
for number, name in vreg_map.items():
func_line = re.sub(r'{}\b'.format(number), '[[{}]]'.format(name),
func_line)
check_line = '{}: {}'.format(check, func_line[indent:]).rstrip()
output_lines.append(check_line)
def mangle_vreg(opcode, current_names):
base = opcode
# Simplify some common prefixes and suffixes
if opcode.startswith('G_'):
base = base[len('G_'):]
if opcode.endswith('_PSEUDO'):
base = base[:-len('_PSEUDO')]
# Shorten some common opcodes with long-ish names
base = dict(IMPLICIT_DEF='DEF',
GLOBAL_VALUE='GV',
CONSTANT='C',
FCONSTANT='C',
MERGE_VALUES='MV',
UNMERGE_VALUES='UV',
INTRINSIC='INT',
INTRINSIC_W_SIDE_EFFECTS='INT',
INSERT_VECTOR_ELT='IVEC',
EXTRACT_VECTOR_ELT='EVEC',
SHUFFLE_VECTOR='SHUF').get(base, base)
# Avoid ambiguity when opcodes end in numbers
if len(base.rstrip('0123456789')) < len(base):
base += '_'
i = 0
for name in current_names:
if name.rstrip('0123456789') == base:
i += 1
if i:
return '{}{}'.format(base, i)
return base
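# Illustrative sketch (not part of the original script): expected names from
# mangle_vreg for a few generic opcodes, assuming no names are taken yet.
def _example_mangle_vreg():
    first = mangle_vreg('G_CONSTANT', [])        # 'C'
    second = mangle_vreg('G_CONSTANT', [first])  # 'C1'
    other = mangle_vreg('COPY', [])              # 'COPY'
    return first, second, other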
def should_add_line_to_output(input_line, prefix_set):
# Skip any check lines that we're handling.
m = common.CHECK_RE.match(input_line)
if m and m.group(1) in prefix_set:
return False
return True
def update_test_file(args, test):
log('Scanning for RUN lines in test file: {}'.format(test), args.verbose)
with open(test) as fd:
input_lines = [l.rstrip() for l in fd]
script_name = os.path.basename(__file__)
first_line = input_lines[0] if input_lines else ""
if 'autogenerated' in first_line and script_name not in first_line:
common.warn("Skipping test which wasn't autogenerated by " +
script_name + ": " + test)
return
if args.update_only:
if not first_line or 'autogenerated' not in first_line:
common.warn("Skipping test which isn't autogenerated: " + test)
return
triple_in_ir = find_triple_in_ir(input_lines, args.verbose)
run_lines = find_run_lines(test, input_lines, args.verbose)
run_list, common_prefixes = build_run_list(test, run_lines, args.verbose)
simple_functions = find_functions_with_one_bb(input_lines, args.verbose)
func_dict = {}
for run in run_list:
for prefix in run.prefixes:
func_dict.update({prefix: dict()})
for prefixes, llc_args, triple_in_cmd in run_list:
log('Extracted LLC cmd: llc {}'.format(llc_args), args.verbose)
log('Extracted FileCheck prefixes: {}'.format(prefixes), args.verbose)
raw_tool_output = args.llc(llc_args, test)
if not triple_in_cmd and not triple_in_ir:
common.warn('No triple found: skipping file', test_file=test)
return
build_function_body_dictionary(test, raw_tool_output,
triple_in_cmd or triple_in_ir,
prefixes, func_dict, args.verbose)
state = 'toplevel'
func_name = None
prefix_set = set([prefix for run in run_list for prefix in run.prefixes])
log('Rewriting FileCheck prefixes: {}'.format(prefix_set), args.verbose)
if args.remove_common_prefixes:
prefix_set.update(common_prefixes)
elif common_prefixes:
common.warn('Ignoring common prefixes: {}'.format(common_prefixes),
test_file=test)
comment_char = '#' if test.endswith('.mir') else ';'
autogenerated_note = ('{} NOTE: Assertions have been autogenerated by '
'utils/{}'.format(comment_char, script_name))
output_lines = []
output_lines.append(autogenerated_note)
for input_line in input_lines:
if input_line == autogenerated_note:
continue
if state == 'toplevel':
m = IR_FUNC_NAME_RE.match(input_line)
if m:
state = 'ir function prefix'
func_name = m.group('func')
if input_line.rstrip('| \r\n') == '---':
state = 'document'
output_lines.append(input_line)
elif state == 'document':
m = MIR_FUNC_NAME_RE.match(input_line)
if m:
state = 'mir function metadata'
func_name = m.group('func')
if input_line.strip() == '...':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'mir function metadata':
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
m = MIR_BODY_BEGIN_RE.match(input_line)
if m:
if func_name in simple_functions:
# If there's only one block, put the checks inside it
state = 'mir function prefix'
continue
state = 'mir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=False,
verbose=args.verbose)
elif state == 'mir function prefix':
m = MIR_PREFIX_DATA_RE.match(input_line)
if not m:
state = 'mir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=True,
verbose=args.verbose)
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'mir function body':
if input_line.strip() == '...':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'ir function prefix':
m = IR_PREFIX_DATA_RE.match(input_line)
if not m:
state = 'ir function body'
add_checks_for_function(test, output_lines, run_list,
func_dict, func_name, single_bb=False,
verbose=args.verbose)
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
elif state == 'ir function body':
if input_line.strip() == '}':
state = 'toplevel'
func_name = None
if should_add_line_to_output(input_line, prefix_set):
output_lines.append(input_line)
log('Writing {} lines to {}...'.format(len(output_lines), test), args.verbose)
with open(test, 'wb') as fd:
fd.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show verbose output')
parser.add_argument('--llc-binary', dest='llc', default='llc', type=LLC,
help='The "llc" binary to generate the test case with')
parser.add_argument('--remove-common-prefixes', action='store_true',
help='Remove existing check lines whose prefixes are '
'shared between multiple commands')
parser.add_argument('-u', '--update-only', action='store_true',
help='Only update test if it was already autogened')
parser.add_argument('tests', nargs='+')
args = parser.parse_args()
test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
for test in test_paths:
try:
update_test_file(args, test)
except Exception:
common.warn('Error processing file', test_file=test)
raise
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
'''Simple script to grep for networks (net + wildcard, subnetmask or CIDR) containing a given IP address.
Copyright 2013, Steffen Imhof
Licensed under the MIT License (MIT), see LICENSE file for details
'''
import socket, struct, sys, re, fileinput
from optparse import OptionParser
PORT_NAMES = {
"aol": "5190",
"bgp": "179",
"biff": "512",
"bootpc": "68",
"bootps": "67",
"chargen": "19",
"citrix-ica": "1494",
"cmd": "514",
"ctiqbe": "2748",
"daytime": "13",
"discard": "9",
"dnsix": "195",
"domain": "53",
"drip": "3949",
"echo": "7",
"exec": "512",
"finger": "79",
"ftp": "21",
"ftp-data": "20",
"gopher": "70",
"h323": "1720",
"hostname": "101",
"https": "443",
"ident": "113",
"imap4": "143",
"irc": "194",
"isakmp": "500",
"kerberos": "750",
"klogin": "543",
"kshell": "544",
"ldap": "389",
"ldaps": "636",
"login": "513",
"lotusnotes": "1352",
"lpd": "515",
"mobile-ip": "434",
"nameserver": "42",
"netbios-dgm": "138",
"netbios-ns": "137",
"netbios-ss": "139",
"netbios-ssn": "139",
"nntp": "119",
"non500-isakmp": "4500",
"ntp": "123",
"onep-plain": "15001",
"onep-tls": "15001",
"pcanywhere-data": "5631",
"pcanywhere-status": "5632",
"pim-auto-rp": "496",
"pop2": "109",
"pop3": "110",
"pptp": "1723",
"radius": "1645",
"radius-acct": "1646",
"rip": "520",
"secureid-udp": "5510",
"smtp": "25",
"snmp": "161",
"snmptrap": "162",
"sqlnet": "1521",
"ssh": "22",
"sunrpc": "111",
"sunrpc (rpc)": "111",
"syslog": "514",
"tacacs": "49",
"talk": "517",
"telnet": "23",
"tftp": "69",
"time": "37",
"uucp": "540",
"who": "513",
"whois": "43",
"www": "80",
"xdmcp": "177"
}
class ACLParser:
"""Helper class to parse an ACL file line by line.
This finds the protocol, networks and ports for each line and keeps track
of the name of the current ACL rule."""
source_net = None
source_port = None
destination_net = None
destination_port = None
protocol = None
# Add special patterns to detect IP networks and hosts here
# Make sure they start with the most specific, as they are tried in order
net_patterns = [
r"host\D+(\d+\.\d+\.\d+\.\d+)",
r"\D(\d+\.\d+\.\d+\.\d+\D\d+\.\d+\.\d+\.\d+)",
r"\D(\d+\.\d+\.\d+\.\d+\/\d+)",
r"\s(any)",
]
# Add special patterns to detect port descriptions here
# Make sure they start with the most specific, as they are tried in order
port_patterns = [
r"\s(range\s+\d+\s+\d+)",
r"\s(n?eq\s(\d+(\s|$))+)",
r"\s(n?eq\s+\S+)",
r"\s(gt\s+\d+)",
r"\s(lt\s+\d+)",
r"\s(any)",
]
protocol_patterns = [
r"\s(icmp|ip|tcp|udp)\s"
]
def __init__(self):
# compile all patterns to regexes
self.net_patterns = [re.compile(p) for p in self.net_patterns]
self.port_patterns = [re.compile(p) for p in self.port_patterns]
self.protocol_patterns = [re.compile(p) for p in self.protocol_patterns]
# prepare port name map regex (see https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html)
self.port_names = re.compile("\\b" + "\\b|\\b".join(map(re.escape, PORT_NAMES)) + "\\b")
def reset_transients(self):
self.source_net = None
self.source_port = None
self.destination_net = None
self.destination_port = None
self.protocol = None
def match_patterns(self, line, patterns):
"""We might get invalid matches, e.g. "source_mask destination_net. This gets sorted out by taking
the first and the last match later on."""
hits = {}
for p in patterns:
m = p.search(line)
while m:
if not m.start() in hits:
hits[m.start()] = m.group(1)
m = p.search(line, m.start() + 1)
return hits
def assign_source_dest(self, hits, line):
"""Take the first and last one to weed out the invalid hits."""
result = [None, None]
sorted_keys = sorted(hits.keys())
if len(sorted_keys) > 0:
result[0] = hits[sorted_keys[0]].strip()
if len(sorted_keys) > 1:
result[1] = hits[sorted_keys[-1]].strip()
# if there is only one hit, we must decide whether it is source or destination
# This should only happen for ports, so let's see if it is at the end of the line
# (should be destination then)
if len(sorted_keys) == 1:
hit = hits[sorted_keys[0]]
if line.index(hit) + len(hit) > len(line) - 4:
result[1] = result[0]
result[0] = None
return result
def next_line(self, line):
self.reset_transients()
# transform named ports to numbers (see https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html)
line = self.port_names.sub(lambda match: PORT_NAMES[match.group(0)], line)
# first look for all net matches
hits = self.match_patterns(line, self.net_patterns)
(self.source_net, self.destination_net) = self.assign_source_dest(hits, line)
# transform simple hosts into CIDR form
if self.source_net and not "any" in self.source_net and not "/" in self.source_net and not " " in self.source_net:
self.source_net += "/32"
if self.destination_net and not "any" in self.destination_net and not "/" in self.destination_net and not " " in self.destination_net:
self.destination_net += "/32"
# second look for all port matches
hits = self.match_patterns(line, self.port_patterns)
(self.source_port, self.destination_port) = self.assign_source_dest(hits, line)
# look for all protocol matches
hits = self.match_patterns(line, self.protocol_patterns)
if len(hits) == 1:
self.protocol = hits.popitem()[1]
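# Illustrative sketch (not part of the original script): parsing one made-up
# Cisco-style ACL line and reading back the extracted fields.
def _example_acl_parser():
    p = ACLParser()
    p.next_line("permit tcp host 10.1.1.1 10.2.2.0 0.0.0.255 eq 443")
    # p.protocol == 'tcp', p.source_net == '10.1.1.1/32',
    # p.destination_net == '10.2.2.0 0.0.0.255', p.destination_port == 'eq 443'
    return p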
class ACLGrepper:
'''The main class which handles the grep process as a whole.'''
splitter = re.compile(r"[^0-9.]")
parser = ACLParser()
source_ip_string = None
source_ip_address = None
source_port = None
destination_ip_string = None
destination_ip_address = None
destination_port = None
protocol = None
match_any = False
def __init__(self, sip = None, sport = None, dip = None, dport = None, protocol = None, match_any = None):
self.source_ip_string = sip
if sip:
self.source_ip_address = self.ip_to_bits(sip)
self.source_port = sport
self.destination_ip_string = dip
if dip:
self.destination_ip_address = self.ip_to_bits(dip)
self.destination_port = dport
self.protocol = protocol
self.match_any = match_any
def ip_to_bits(self, address):
'''Turns an IP address in dot notation into a single long value.'''
# Fixup IP addresses with leading zeros
fixed_address = ".".join([str(int(x)) for x in address.split(".")])
try:
return struct.unpack("!L", socket.inet_aton(fixed_address))[0]
except socket.error:
raise ValueError("Invalid IP address")
def ip_in_net(self, ip, net):
'''Checks if an IP address is contained in a network described by a pair (net address, subnetmask).
All values are given as longs.'''
return (net[0] & net[1] == ip & net[1])
def ip_and_mask_to_pair(self, pattern):
'''Takes a mask pattern and creates a pair (net address, subnetmask) from it.
Detects automatically if the mask is a subnetmask or a wildcard mask, assuming the bits are
set continuously in either.'''
parts = re.split(self.splitter, pattern)
net = self.ip_to_bits(parts[0])
net_or_wildcard = self.ip_to_bits(parts[1])
# special case full bits -> subnet mask
if 0xffffffff == net_or_wildcard:
return (net, 0xffffffff)
# check if the mask is really a mask (only set bits from the right or left)
if net_or_wildcard & (net_or_wildcard + 1) != 0:
net_or_wildcard = 0xffffffff ^ net_or_wildcard
if net_or_wildcard & (net_or_wildcard + 1) != 0:
# it's not, never match
return (0, 0xffffffff)
return (net, 0xffffffff ^ net_or_wildcard)
def ip_and_cidr_to_pair(self, pattern):
'''Takes a CIDR pattern and creates a pair (net address, subnetmask) from it.'''
parts = pattern.split("/")
net = self.ip_to_bits(parts[0])
wildcard = (1 << (32-int(parts[1])))-1
return (net, 0xffffffff ^ wildcard)
def net_string_to_pair(self, pattern):
if pattern.find("/") == -1:
return self.ip_and_mask_to_pair(pattern)
else:
return self.ip_and_cidr_to_pair(pattern)
def grep(self, line):
self.parser.next_line(line)
try:
# FIXME check any if desired
if self.source_ip_address:
if self.parser.source_net == "any":
return self.match_any
if not self.parser.source_net:
return False
if not self.ip_in_net(self.source_ip_address, self.net_string_to_pair(self.parser.source_net)):
return False
if self.destination_ip_address:
if self.parser.destination_net == "any":
return self.match_any
if not self.parser.destination_net:
return False
if not self.ip_in_net(self.destination_ip_address, self.net_string_to_pair(self.parser.destination_net)):
return False
if self.protocol:
if not (self.parser.protocol == self.protocol or self.parser.protocol == "ip"):
return False
if self.source_port:
pattern = self.parser.source_port
if pattern:
# any is ok anyway
# eq
if pattern[:2] == "eq":
parts = pattern.split()
if not self.source_port in parts[1:]:
return False
# neq
if pattern[:3] == "neq":
if self.source_port == pattern[4:]:
return False
# gt
if pattern[:2] == "gt":
if int(self.source_port) <= int(pattern[3:]):
return False
# lt
if pattern[:2] == "lt":
if int(self.source_port) >= int(pattern[3:]):
return False
# range
if pattern[:5] == "range":
parts = pattern.split()
if int(self.source_port) < int(parts[1]) or int(self.source_port) > int(parts[2]):
return False
if self.destination_port:
pattern = self.parser.destination_port
if pattern:
# any is ok anyway
# eq
if pattern[:2] == "eq":
parts = pattern.split()
if not self.destination_port in parts[1:]:
return False
# neq
if pattern[:3] == "neq":
if self.destination_port == pattern[4:]:
return False
# gt
if pattern[:2] == "gt":
if int(self.destination_port) <= int(pattern[3:]):
return False
# lt
if pattern[:2] == "lt":
if int(self.destination_port) >= int(pattern[3:]):
return False
# range
if pattern[:5] == "range":
parts = pattern.split()
if int(self.destination_port) < int(parts[1]) or int(self.destination_port) > int(parts[2]):
return False
except ValueError:
# some trouble when parsing stuff, let's assume this is not a match
return False
return True
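# Illustrative sketch (not part of the original script): how network strings
# become (net, mask) pairs of longs; both CIDR and wildcard forms are handled.
def _example_net_pairs():
    g = ACLGrepper()
    cidr = g.net_string_to_pair("10.0.0.0/8")          # (0x0a000000, 0xff000000)
    wild = g.net_string_to_pair("10.0.0.0 0.0.0.255")  # (0x0a000000, 0xffffff00)
    return cidr, wild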
if __name__ == '__main__':
# check command line args
parser = OptionParser(usage="Usage: %prog [options] [file, file, ...]")
parser.add_option("-a", "--any", dest="match_any", action="store_true", default=False, help="Match ACLs with 'any', too")
parser.add_option("-i", "--sip", dest="source_ip", default=None, help="Source IP to look for")
parser.add_option("-p", "--sport", dest="source_port", default=None, help="Source port to look for")
parser.add_option("-I", "--dip", dest="destination_ip", default=None, help="Destination IP to look for")
parser.add_option("-P", "--dport", dest="destination_port", default=None, help="Destination port to look for")
parser.add_option("-o", "--proto", dest="protocol", default=None, help="Protocol to look for")
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit()
# initialize grepper and...
grepper = ACLGrepper(options.source_ip, options.source_port, options.destination_ip, options.destination_port, options.protocol, options.match_any)
# ...check all lines in all files (or stdin)
for line in fileinput.input(args):
if grepper.grep(line):
print(line.strip())
|
|
from __future__ import unicode_literals
from django.utils.six import moves
import sys
import warnings
import tablib
try:
from tablib.compat import xlrd
XLS_IMPORT = True
except ImportError:
try:
import xlrd # NOQA
XLS_IMPORT = True
except ImportError:
xls_warning = "Installed `tablib` library does not include"
"import support for 'xls' format and xlrd module is not found."
warnings.warn(xls_warning, ImportWarning)
XLS_IMPORT = False
try:
import openpyxl
XLSX_IMPORT = True
except ImportError:
try:
from tablib.compat import openpyxl
XLSX_IMPORT = hasattr(openpyxl, 'load_workbook')
except ImportError:
xlsx_warning = "Installed `tablib` library does not include"
"import support for 'xlsx' format and openpyxl module is not found."
warnings.warn(xlsx_warning, ImportWarning)
XLSX_IMPORT = False
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from django.utils import six
class Format(object):
def get_title(self):
return type(self)
def create_dataset(self, in_stream):
"""
Create dataset from given string.
"""
raise NotImplementedError()
def export_data(self, dataset):
"""
Returns format representation for given dataset.
"""
raise NotImplementedError()
def is_binary(self):
"""
Returns if this format is binary.
"""
return True
def get_read_mode(self):
"""
Returns mode for opening files.
"""
return 'rb'
def get_extension(self):
"""
Returns the file extension for this format.
"""
return ""
def get_content_type(self):
# For content types see
# http://www.iana.org/assignments/media-types/media-types.xhtml
return 'application/octet-stream'
def can_import(self):
return False
def can_export(self):
return False
class TablibFormat(Format):
TABLIB_MODULE = None
CONTENT_TYPE = 'application/octet-stream'
def get_format(self):
"""
Imports and returns the tablib format module.
"""
return import_module(self.TABLIB_MODULE)
def get_title(self):
return self.get_format().title
def create_dataset(self, in_stream):
data = tablib.Dataset()
self.get_format().import_set(data, in_stream)
return data
def export_data(self, dataset):
return self.get_format().export_set(dataset)
def get_extension(self):
# we support both 'extentions' and 'extensions' because currently
# tablib's master branch uses 'extentions' (which is a typo) but its
# dev branch already uses 'extensions'.
# TODO - remove this once the typo is fixed in tablib's master branch
if hasattr(self.get_format(), 'extentions'):
return self.get_format().extentions[0]
return self.get_format().extensions[0]
def get_content_type(self):
return self.CONTENT_TYPE
def can_import(self):
return hasattr(self.get_format(), 'import_set')
def can_export(self):
return hasattr(self.get_format(), 'export_set')
class TextFormat(TablibFormat):
def get_read_mode(self):
return 'rU'
def is_binary(self):
return False
class CSV(TextFormat):
TABLIB_MODULE = 'tablib.formats._csv'
CONTENT_TYPE = 'text/csv'
def create_dataset(self, in_stream):
if sys.version_info[0] < 3:
# python 2.7 csv does not do unicode
return super(CSV, self).create_dataset(in_stream.encode('utf-8'))
return super(CSV, self).create_dataset(in_stream)
class JSON(TextFormat):
TABLIB_MODULE = 'tablib.formats._json'
CONTENT_TYPE = 'application/json'
class YAML(TextFormat):
TABLIB_MODULE = 'tablib.formats._yaml'
# See http://stackoverflow.com/questions/332129/yaml-mime-type
CONTENT_TYPE = 'text/yaml'
class TSV(TextFormat):
TABLIB_MODULE = 'tablib.formats._tsv'
CONTENT_TYPE = 'text/tab-separated-values'
class ODS(TextFormat):
TABLIB_MODULE = 'tablib.formats._ods'
CONTENT_TYPE = 'application/vnd.oasis.opendocument.spreadsheet'
class HTML(TextFormat):
TABLIB_MODULE = 'tablib.formats._html'
CONTENT_TYPE = 'text/html'
class XLS(TablibFormat):
TABLIB_MODULE = 'tablib.formats._xls'
CONTENT_TYPE = 'application/vnd.ms-excel'
def can_import(self):
return XLS_IMPORT
def create_dataset(self, in_stream):
"""
Create dataset from first sheet.
"""
assert XLS_IMPORT
xls_book = xlrd.open_workbook(file_contents=in_stream)
dataset = tablib.Dataset()
sheet = xls_book.sheets()[0]
dataset.headers = sheet.row_values(0)
for i in moves.range(1, sheet.nrows):
dataset.append(sheet.row_values(i))
return dataset
class XLSX(TablibFormat):
TABLIB_MODULE = 'tablib.formats._xlsx'
CONTENT_TYPE = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
def can_import(self):
return XLSX_IMPORT
def create_dataset(self, in_stream):
"""
Create dataset from first sheet.
"""
assert XLSX_IMPORT
from io import BytesIO
xlsx_book = openpyxl.load_workbook(BytesIO(in_stream))
dataset = tablib.Dataset()
sheet = xlsx_book.active
dataset.headers = [cell.value for cell in sheet.rows[0]]
for i in moves.range(1, len(sheet.rows)):
row_values = [cell.value for cell in sheet.rows[i]]
dataset.append(row_values)
return dataset
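# Illustrative sketch (not part of the original module): round-tripping a tiny
# dataset through the CSV format class defined above.
def _example_csv_roundtrip():
    fmt = CSV()
    data = tablib.Dataset(['1', 'foo'], headers=['id', 'name'])
    text = fmt.export_data(data)     # CSV text
    return fmt.create_dataset(text)  # back to a tablib.Dataset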
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from operator import attrgetter
# These are LatencyInfo component names indicating the various components
# that the input event has travelled through.
# This is when the input event first reaches chrome.
UI_COMP_NAME = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
# This is when the input event was originally created by OS.
ORIGINAL_COMP_NAME = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
# This is when the input event was sent from browser to renderer.
BEGIN_COMP_NAME = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
# This is when the input event has reached swap buffer.
END_COMP_NAME = 'INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT'
def GetScrollInputLatencyEvents(scroll_type, browser_process, timeline_range):
"""Get scroll events' LatencyInfo from the browser process's trace buffer
that are within the timeline_range.
Scroll events (MouseWheel, GestureScrollUpdate or JS scroll on TouchMove)
dump their LatencyInfo into trace buffer as async trace event with name
"InputLatency". The trace event has a memeber 'step' containing its event
type and a memeber 'data' containing its latency history.
"""
scroll_events = []
if not browser_process:
return scroll_events
for event in browser_process.IterAllAsyncSlicesOfName("InputLatency"):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
for ss in event.sub_slices:
if 'step' not in ss.args:
continue
if 'data' not in ss.args:
continue
if ss.args['step'] == scroll_type:
scroll_events.append(ss)
return scroll_events
def ComputeMouseWheelScrollLatency(mouse_wheel_events):
""" Compute the mouse wheel scroll latency.
Mouse wheel scroll latency is the time from when mouse wheel event is sent
from browser RWH to renderer (the timestamp of component
'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT') to when the scrolled page is
buffer swapped (the timestamp of component
'INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT')
"""
mouse_wheel_latency = []
for event in mouse_wheel_events:
data = event.args['data']
if BEGIN_COMP_NAME in data and END_COMP_NAME in data:
latency = data[END_COMP_NAME]['time'] - data[BEGIN_COMP_NAME]['time']
mouse_wheel_latency.append(latency / 1000.0)
return mouse_wheel_latency
def ComputeTouchScrollLatency(touch_scroll_events):
""" Compute the touch scroll latency.
Touch scroll latency is the time from when the touch event is created to
when the scrolled page is buffer swapped.
Touch events on different platforms use different LatencyInfo components to
record their creation timestamp. On Aura, the creation time is kept in
'INPUT_EVENT_LATENCY_UI_COMPONENT' . On Android, the creation time is kept
in 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'.
"""
touch_scroll_latency = []
for event in touch_scroll_events:
data = event.args['data']
if END_COMP_NAME in data:
end_time = data[END_COMP_NAME]['time']
if UI_COMP_NAME in data and ORIGINAL_COMP_NAME in data:
raise ValueError('LatencyInfo has both UI and ORIGINAL component')
if UI_COMP_NAME in data:
latency = end_time - data[UI_COMP_NAME]['time']
touch_scroll_latency.append(latency / 1000.0)
elif ORIGINAL_COMP_NAME in data:
latency = end_time - data[ORIGINAL_COMP_NAME]['time']
touch_scroll_latency.append(latency / 1000.0)
return touch_scroll_latency
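# Illustrative sketch (not part of the original module): the latency helpers
# above only need objects whose args['data'] dict carries the relevant
# LatencyInfo components; times are in microseconds, results in milliseconds.
class _FakeLatencyEvent(object):
  def __init__(self, data):
    self.args = {'data': data}

def _ExampleMouseWheelLatency():
  event = _FakeLatencyEvent({BEGIN_COMP_NAME: {'time': 1000.0},
                             END_COMP_NAME: {'time': 6000.0}})
  return ComputeMouseWheelScrollLatency([event])  # [5.0]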
def HasRenderingStats(process):
""" Returns True if the process contains at least one
BenchmarkInstrumentation::*RenderingStats event with a frame.
"""
if not process:
return False
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::MainThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::ImplThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
return False
class RenderingStats(object):
def __init__(self, renderer_process, browser_process, timeline_ranges):
"""
Utility class for extracting rendering statistics from the timeline (or
other logging facilities), and providing them in a common format to classes
that compute benchmark metrics from this data.
Stats are lists of lists of numbers. The outer list stores one list per
timeline range.
All *_time values are measured in milliseconds.
"""
assert(len(timeline_ranges) > 0)
# Find the top level process with rendering stats (browser or renderer).
if HasRenderingStats(browser_process):
self.top_level_process = browser_process
else:
self.top_level_process = renderer_process
self.frame_timestamps = []
self.frame_times = []
self.paint_times = []
self.painted_pixel_counts = []
self.record_times = []
self.recorded_pixel_counts = []
self.rasterize_times = []
self.rasterized_pixel_counts = []
# End-to-end latency for MouseWheel scroll - from when mouse wheel event is
# generated to when the scrolled page is buffer swapped.
self.mouse_wheel_scroll_latency = []
# End-to-end latency for GestureScrollUpdate scroll - from when the touch
# event is generated to when the scrolled page is buffer swapped.
self.touch_scroll_latency = []
# End-to-end latency for JS touch handler scrolling - from when the touch
# event is generated to when the scrolled page is buffer swapped.
self.js_touch_scroll_latency = []
for timeline_range in timeline_ranges:
self.frame_timestamps.append([])
self.frame_times.append([])
self.paint_times.append([])
self.painted_pixel_counts.append([])
self.record_times.append([])
self.recorded_pixel_counts.append([])
self.rasterize_times.append([])
self.rasterized_pixel_counts.append([])
self.mouse_wheel_scroll_latency.append([])
self.touch_scroll_latency.append([])
self.js_touch_scroll_latency.append([])
if timeline_range.is_empty:
continue
self.initMainThreadStatsFromTimeline(timeline_range)
self.initImplThreadStatsFromTimeline(timeline_range)
self.initScrollLatencyStatsFromTimeline(browser_process, timeline_range)
def initScrollLatencyStatsFromTimeline(self, browser_process, timeline_range):
mouse_wheel_events = GetScrollInputLatencyEvents(
"MouseWheel", browser_process, timeline_range)
self.mouse_wheel_scroll_latency = ComputeMouseWheelScrollLatency(
mouse_wheel_events)
touch_scroll_events = GetScrollInputLatencyEvents(
"GestureScrollUpdate", browser_process, timeline_range)
self.touch_scroll_latency = ComputeTouchScrollLatency(touch_scroll_events)
js_touch_scroll_events = GetScrollInputLatencyEvents(
"TouchMove", browser_process, timeline_range)
self.js_touch_scroll_latency = ComputeTouchScrollLatency(
js_touch_scroll_events)
def initMainThreadStatsFromTimeline(self, timeline_range):
event_name = 'BenchmarkInstrumentation::MainThreadRenderingStats'
events = []
for event in self.top_level_process.IterAllSlicesOfName(event_name):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
if 'data' not in event.args:
continue
events.append(event)
events.sort(key=attrgetter('start'))
first_frame = True
for event in events:
frame_count = event.args['data']['frame_count']
if frame_count > 1:
raise ValueError('trace contains multi-frame render stats')
if frame_count == 1:
self.frame_timestamps[-1].append(
event.start)
if not first_frame:
self.frame_times[-1].append(round(self.frame_timestamps[-1][-1] -
self.frame_timestamps[-1][-2], 2))
first_frame = False
self.paint_times[-1].append(1000.0 *
event.args['data']['paint_time'])
self.painted_pixel_counts[-1].append(
event.args['data']['painted_pixel_count'])
self.record_times[-1].append(1000.0 *
event.args['data']['record_time'])
self.recorded_pixel_counts[-1].append(
event.args['data']['recorded_pixel_count'])
def initImplThreadStatsFromTimeline(self, timeline_range):
event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
events = []
for event in self.top_level_process.IterAllSlicesOfName(event_name):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
if 'data' not in event.args:
continue
events.append(event)
events.sort(key=attrgetter('start'))
first_frame = True
for event in events:
frame_count = event.args['data']['frame_count']
if frame_count > 1:
raise ValueError('trace contains multi-frame render stats')
if frame_count == 1:
self.frame_timestamps[-1].append(
event.start)
if not first_frame:
self.frame_times[-1].append(round(self.frame_timestamps[-1][-1] -
self.frame_timestamps[-1][-2], 2))
first_frame = False
self.rasterize_times[-1].append(1000.0 *
event.args['data']['rasterize_time'])
self.rasterized_pixel_counts[-1].append(
event.args['data']['rasterized_pixel_count'])
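# Minimal usage sketch (hypothetical process and timeline-range objects,
# matching the constructor signature above):
#
#   stats = RenderingStats(renderer_process, browser_process, timeline_ranges)
#   # stats.frame_times[i] then holds the frame times (in ms) measured within
#   # timeline_ranges[i].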
|
|
##############################################################################
# #
# This code performs image reconstruction using Mehrdad Soumekh's Spatial #
# Frequency interpolation method. It has essentially been copied from his #
# MATLAB algorithm for spotlight processing and translated to Python. The #
# parameters used in this file are based on the ones in the Carrera text #
# for the UHF setup in the range migration section. #
# #
##############################################################################
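# Processing outline of the script below (a summary of its numbered sections):
#   1. Simulate the spotlight SAR signal s(t, u) for a set of point targets.
#   2. Fast-time matched filtering against a reference chirp.
#   3. Slow-time baseband conversion / compression for the squint geometry.
#   4. Digital spotlighting and slow-time upsampling in the ku domain.
#   5. Optional slow-time Doppler subsampling when L > Y0.
#   6. 2D matched filtering followed by spatial-frequency interpolation from
#      (omega, ku) onto a uniform (kx, ky) grid.
#   7. Inverse 2D FFT to obtain the reconstructed image f(x, y).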
from numpy import *
import numpy as np
import ritsar.signal as sig
from scipy.fftpack import *
from ritsar.signal import *
import matplotlib.pylab as plt
from scipy.interpolate import interp1d
plt.set_cmap(plt.cm.Greys)
##########################################################
# PULSED SPOTLIGHT SAR SIMULATION AND RECONSTRUCTION #
##########################################################
cmap = plt.cm.Greys_r
cj=1j;
pi2=2*pi;
#
c=3e8; # propagation speed
f0=135.5e6/2; # baseband bandwidth is 2*f0
w0=pi2*f0;
fc=242.4e6; # carrier frequency
wc=pi2*fc;
lambda_min=c/(fc+f0); # Wavelength at highest frequency
lambda_max=c/(fc-f0); # Wavelength at lowest frequency
kc=(pi2*fc)/c; # wavenumber at carrier frequency
kmin=(pi2*(fc-f0))/c; # wavenumber at lowest frequency
kmax=(pi2*(fc+f0))/c; # wavenumber at highest frequency
#
Xc=1000.; # Range distance to center of target area
X0=575.3; # target area in range is within [Xc-X0,Xc+X0]
Yc=0.; # Cross-range distance to center of target area
Y0=442.9; # target area in cross-range is within
# [Yc-Y0,Yc+Y0]
# Case 1: L < Y0; requires zero-padding of SAR signal in synthetic
# aperture domain
#
L=760.8/2; # synthetic aperture is 2*L
# Case 2: L > Y0; slow-time Doppler subsampling of SAR signal spectrum
# reduces computation
#
# L=400; # synthetic aperture is 2*L
theta_c=arctan(1.0*Yc/Xc); # Squint angle
Rc=sqrt(Xc**2+Yc**2); # Squint radial range
L_min=max(Y0,L); # Zero-padded aperture is 2*L_min
#
Xcc=Xc/(cos(theta_c)**2); # redefine Xc by Xcc for squint processing
##############################################################
## u domain parameters and arrays for compressed SAR signal ##
##############################################################
#
duc=(Xcc*lambda_min)/(4*Y0); # sample spacing in aperture domain
# for compressed SAR signal
duc=duc/1.2; # 10 percent guard band; this guard band
# would not be sufficient for targets
# outside digital spotlight filter (use
# a larger guard band, i.e., PRF)
mc=2*ceil(L_min/duc); # number of samples on aperture
uc=duc*np.array([arange(-mc/2,mc/2)]).T; # synthetic aperture array
dkuc=pi2/(mc*duc); # sample spacing in ku domain
kuc=dkuc*np.array([arange(-mc/2,mc/2)]).T; # kuc array
#
dku=1.0*dkuc; # sample spacing in ku domain
##########################################################
## u domain parameters and arrays for SAR signal ##
##########################################################
#
if Yc-Y0-L < 0: # minimum aspect angle
theta_min=arctan(1.*(Yc-Y0-L)/(Xc-X0));
else:
theta_min=arctan(1.*(Yc-Y0-L)/(Xc+X0));
#end;
theta_max=arctan(1.*(Yc+Y0+L)/(Xc-X0)); # maximum aspect angle
#
du=pi/(kmax*(sin(theta_max)-\
sin(theta_min))); # sample spacing in aperture
# domain for SAR signal
du=du/1.4; # 20 percent guard band
m=2*ceil(pi/(du*dku)); # number of samples on aperture
du=pi2/(m*dku); # readjust du
u=du*np.array([arange(-m/2,m/2)]).T; # synthetic aperture array
ku=dku*np.array([arange(-m/2,m/2)]).T; # ku array
##########################################################
## Fast-time domain parameters and arrays ##
##########################################################
#
Tp=4e-6; # Chirp pulse duration
alpha=w0/Tp; # Chirp rate
wcm=wc-alpha*Tp; # Modified chirp carrier
#
if Yc-Y0-L < 0:
Rmin=Xc-X0;
else:
Rmin=sqrt((Xc-X0)**2+(Yc-Y0-L)**2);
#end;
Ts=(2/c)*Rmin; # start time of sampling
Rmax=sqrt((Xc+X0)**2+(Yc+Y0+L)**2);
Tf=(2/c)*Rmax+Tp; # end time of sampling
T=Tf-Ts; # fast-time interval of measurement
Ts=Ts-.1*T; # start slightly earlier (10% guard band)
Tf=Tf+.1*T; # end slightly later (10% guard band)
T=Tf-Ts;
Tmin=max(T,(4*X0)/(c*cos(theta_max))); # Minimum required T
#
dt=1/(4*f0); # Time domain sampling (guard band factor 2)
n=2*ceil((.5*Tmin)/dt); # number of time samples
t=Ts+arange(0,n)*dt; # time array for data acquisition
dw=pi2/(n*dt); # Frequency domain sampling
w=wc+dw*arange(-n/2,n/2); # Frequency array (centered at carrier)
k=w/c; # Wavenumber array
#
#############################################################
# Resolution for Broadside: (x,y) domain rotated by theta_c #
#############################################################
DX=1#c/(4*f0); # range resolution (broadside)
DY=1#(Xcc*lambda_max)/(4*L); # cross-range resolution (broadside)
#####################################################
## Parameters of Targets ##
#####################################################
#
ntarget=3; # number of targets
# Set ntarget=1 to see "clean" PSF of target at origin
# Try this with other targets
# xn: range; yn= cross-range; fn: reflectivity
xn=zeros(ntarget); yn=1.0*xn; fn=1.0*xn;
# Targets within digital spotlight filter
#
xn[1-1]=0.; yn[1-1]=0.; fn[1-1]=1.;
xn[2-1]=200.; yn[2-1]=0.; fn[2-1]=1.;
xn[3-1]=0.; yn[3-1]=-100.; fn[3-1]=1.;
#xn[4-1]=-.5*X0; yn[4-1]=.75*Y0; fn[4-1]=1.;
#xn[5-1]=-.5*X0+DX; yn[5-1]=.75*Y0+DY; fn[5-1]=1.;
# Targets outside digital spotlight filter
# (Run the code with and without these targets)
#
#xn[6-1]=-1.2*X0; yn[6-1]=.75*Y0; fn[6-1]=1.;
#xn[7-1]=.5*X0; yn[7-1]=1.25*Y0; fn[7-1]=1.;
#xn[8-1]=1.1*X0; yn[8-1]=-1.1*Y0; fn[8-1]=1.;
#xn[9-1]=-1.2*X0; yn[9-1]=-1.75*Y0; fn[9-1]=1.;
#####################################################
## SIMULATION ##
#####################################################
#
s=zeros([mc,n])+0j; # SAR signal array
#
for i in range(ntarget): # Loop for each target
for j in range(int(mc)):
td=t-2*sqrt((Xc+xn[i])**2+(Yc+yn[i]-uc[j])**2)/c;
s[j]+=fn[i]*exp(cj*wcm*td+cj*alpha*(td**2))*((td >= 0) & (td <= Tp) & \
(abs(uc[j]) <= L) & (t < Tf))
#end;
#
s=s*exp(-cj*wc*t); # Fast-time baseband conversion
# User may apply a slow-time domain window, e.g., power window, on
# simulated SAR signal array "s" here.
G=abs(s)
xg=np.max(np.max(G)); ng=np.min(np.min(G)); cg=255/(xg-ng);
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = (t.min()*1e6, t.max()*1e6, uc.min(), uc.max()), aspect = 'auto');
plt.xlabel('Fast-time t, $\mu$sec')
plt.ylabel('Synthetic Aperture (Slow-time) U, meters')
plt.title('Measured Spotlight SAR Signal')
#
td0=t-2*sqrt(Xc**2+Yc**2)/c;
s0=exp(cj*wcm*td0+cj*alpha*(td0**2))*((td0 >= 0) & (td0 <= Tp));
s0=s0*exp(-cj*wc*t); # Baseband reference fast-time signal
s=sig.ft(s)*(conj(sig.ft(s0))); # Fast-time matched filtering
#
G=abs(sig.ift(s));
xg=np.max(np.max(G)); ng=np.min(np.min(G)); cg=255/(xg-ng);
tm=(2*Rc/c)+dt*arange(-n/2,n/2); # fast-time array after matched filtering
plt.figure()
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = (tm.min()*1e6, tm.max()*1e6, uc.min(), uc.max()), aspect = 'auto');
plt.xlabel('Fast-time t, $\mu$sec')
plt.ylabel('Synthetic Aperture (Slow-time) U, meters')
plt.title('SAR Signal after Fast-time Matched Filtering')
#
#############################################
# Slow-time baseband conversion for squint #
#############################################
#
kus=2*kc*sin(theta_c); # Doppler frequency shift in ku
# domain due to squint
#
s=s*exp(-cj*kus*uc); # slow-time baseband conversion
fs=sig.ft(s, ax=0);
# Display aliased SAR spectrum
#
G=abs(fs);
xg=np.max(np.max(G)); ng=np.min(np.min(G)); cg=255/(xg-ng);
plt.figure()
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = ((k*c/pi2).min(), (k*c/pi2).max(), kuc.min(), kuc.max()), aspect = 'auto');
plt.xlabel('Fast-time Frequency, Hertz')
plt.ylabel('Synthetic Aperture (Slow-time) Frequency Ku, rad/m')
plt.title('Aliased Spotlight SAR Signal Spectrum')
#
#################################################################
## Digital Spotlighting and Bandwidth Expansion in ku Domain ##
## via Slow-time Compression and Decompression ##
#################################################################
#
s=s*exp(cj*kus*uc); # Original signal before baseband
# conversion for squint
cs=s*exp(cj*2*k*\
(ones([mc,n])*sqrt(Xc**2+(Yc-uc)**2))-cj*2*k*Rc);# compression
fcs=sig.ft(cs,ax=0); # F.T. of compressed signal w.r.t. u
#
G=abs(fcs);
xg=np.max(np.max(G)); ng=np.min(np.min(G)); cg=255/(xg-ng);
plt.figure()
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = ((k*c/pi2).min(), (k*c/pi2).max(), kuc.min(), kuc.max()), aspect = 'auto');
plt.xlabel('Fast-time Frequency, Hertz')
plt.ylabel('Synthetic Aperture (Slow-time) Frequency Ku, rad/m')
plt.title('Compressed Spotlight SAR Signal Spectrum')
#
fp=sig.ift(sig.ft(cs, ax=0)); # Narrow-bandwidth Polar Format Processed
# reconstruction
#
PH=arcsin(kuc/(2*kc)); # angular Doppler domain
R=(c*tm)/2; # range domain mapped from reference
# fast-time domain
#
# Full Aperture Digital-Spotlight Filter
#
W_d=((np.abs(R*cos(PH+theta_c)-Xc) < X0)*\
(np.abs(R*sin(PH+theta_c)-Yc) < Y0));
#
G=(abs(fp)/abs(fp).max()+.1*W_d);
xg=G.max(); ng=G.min(); cg=255/(xg-ng);
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = (((Rc/Xc)*(.5*c*tm-Rc)).min(), ((Rc/Xc)*(.5*c*tm-Rc)).max(), ((kuc*Rc)/(2*kc)).min(), ((kuc*Rc)/(2*kc)).max()), aspect = 'auto');
plt.xlabel('Range x, m')
plt.ylabel('Cross-range y, m')
plt.title('Polar Format SAR Reconstruction with Digital Spotlight Filter')
fd=fp*W_d; # Digital Spotlight Filtering
fcs=sig.ft(fd); # Transform to (omega,ku) domain
# Zero-padding in ku domain for slow-time upsampling
#
mz=m-mc; # number of zeros to pad in the ku domain
fcs=(m/mc)*np.vstack((zeros([mz/2,n]),fcs,zeros([mz/2,n])));
#
cs=sig.ift(fcs, ax=0); # Transform to (omega,u) domain
s = np.zeros(cs.shape)+0j
s=cs*exp(-cj*2*(k)*\
(sqrt(Xc**2+(Yc-u)**2))+cj*2*k*Rc);# decompression
#################################################################
# CAUTION #
# For TDC or backprojection, do not subsample in Doppler domain #
# and do not perform slow-time baseband conversion #
#################################################################
#
s_ds=1.0*s; # Save s(omega,u) array for TDC and
# backprojection algorithms
#
s=s*exp(-cj*kus*u); # Slow-time baseband conversion for squint
fs=sig.ft(s, ax=0); # Digitally-spotlighted SAR signal spectrum
#
G=abs(fs);
xg=G.max(); ng=G.min(); cg=255/(xg-ng);
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = ((k*c/pi2).min(), (k*c/pi2).max(), ku.min(), ku.max()), aspect = 'auto');
plt.xlabel('Fast-time Frequency, Hertz')
plt.ylabel('Synthetic Aperture (Slow-time) Frequency Ku, rad/m')
plt.title('Spotlight SAR Signal Spectrum after DS & Upsampling')
##########################################
## SLOW-TIME DOPPLER SUBSAMPLING ##
##########################################
#
if Y0 < L:
ny=2*ceil(1.2*Y0/du); # Number of samples in y domain
# 20 percent guard band
ms=floor(1.0*m/ny); # subsampling ratio
tt=floor(1.0*m/(2*ms));
I=np.arange(m/2-tt*ms,m/2+1+(tt-1)*ms,ms); # subsampled index in ku domain
I = np.array(I,dtype=int)
tt = 1
ny=int(I.size); # number of subsamples
fs=fs[I,:]; # subsampled SAR signal spectrum
ky=ku[I]; # subsampled ky array
dky=dku*ms; # ky domain sample spacing
else:
dky=dku;
ny=m;
ky=ku;
#end;
dy=pi2/(ny*dky); # y domain sample spacing
y=dy*np.array([arange(-ny/2,ny/2)]).T; # cross-range array
##########################################
## RECONSTRUCTION ##
##########################################
#
ky=np.tile(ky+kus,(1,n)); # ky array
kx=np.tile((4*k**2),[ny,1])-ky**2;
kx=sqrt(kx*(kx > 0)); # kx array
#
plt.figure()
plt.scatter(kx.flatten(), ky.flatten())
plt.xlabel('Spatial Frequency k_x, rad/m')
plt.ylabel('Spatial Frequency k_y, rad/m')
plt.title('Spotlight SAR Spatial Frequency Data Coverage')
#
kxmin=kx.min();
kxmax=kx.max();
dkx=pi/X0; # Nyquist sample spacing in kx domain
nx=2*ceil((.5*(kxmax-kxmin))/dkx); # Required number of
# samples in kx domain;
# This value will be increased slightly
# to avoid negative array index
#
###############################################################
### ###
### FIRST TWO OPTIONS FOR RECONSTRUCTION: ###
### ###
### 1. 2D Fourier Matched Filtering and Interpolation ###
### 2. Range Stacking ###
### ###
### Note: For "Range Stacking," make sure that the ###
### arrays nx, x, and kx are defined. ###
### ###
###############################################################
############################################################
### 2D FOURIER MATCHED FILTERING AND INTERPOLATION ###
############################################################
# Matched Filtering
#
fs0=(kx > 0)*exp(cj*kx*Xc+cj*ky*Yc+cj*.25*pi\
-cj*2*k*Rc); # reference signal complex conjugate
fsm=fs*fs0; # 2D Matched filtering
# Interpolation
#
iis=8; # number of neighbors (sidelobes) used for sinc interpolator
I=2*iis;
kxs=iis*dkx; # plus/minus size of interpolation neighborhood in KX domain
#
nx=nx+2*iis+4; # increase number of samples to avoid negative
# array index during interpolation in kx domain
KX=kxmin+arange(-iis-2,nx-iis-2)*dkx; # uniformly-spaced kx points where
# interpolation is done
kxc=KX[nx/2]; # carrier frequency in kx domain
KX=np.tile(KX, (ny,1));
#
F=zeros([ny,nx])+0j; # initialize F(kx,ky) array for interpolation
for i in range(int(ny)): # for each k loop
print(i) # print i to show that it is running
F[i]=interp1d(kx[i],fsm[i], kind='zero', bounds_error=False, fill_value=0)(KX[i]);
#end
#
# DISPLAY interpolated spatial frequency domain image F(kx,ky)
KX=KX[0,:]#.';
KY=ky[:,0];
G=abs(F)#';
xg=G.max(); ng=G.min(); cg=255/(xg-ng);
plt.figure()
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = (KX.min(), KX.max(), (KY+kus).min(), (KY+kus).max()), aspect = 'equal');
plt.xlabel('Spatial Frequency k_x, rad/m')
plt.ylabel('Spatial Frequency k_y, rad/m')
plt.title('Wavefront Spotlight SAR Reconstruction Spectrum')
#
f=sig.ift(sig.ift(F, ax = 0)); # Inverse 2D FFT for spatial domain image f(x,y)
#
dx=pi2/(nx*dkx); # range sample spacing in reconstructed image
x=dx*arange(-nx/2,nx/2); # range array
#
# Display SAR reconstructed image
G=abs(f)#';
xg=G.max(); ng=G.min(); cg=255/(xg-ng);
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = ((Xc+x).min(), (Xc+x).max(), (Yc+y).min(), (Yc+y).max()), aspect = 'equal');
plt.xlabel('Range X, meters')
plt.ylabel('Cross-range Y, meters')
plt.title('Wavefront Spotlight SAR Reconstruction')
#####################################################
## SAR Image Compression (for Spotlight System) ##
#####################################################
##
Fc=sig.ft(sig.ft(f*\
exp(cj*kxc*x+cj*2*kc*sin(theta_c)*y\
-cj*2*kc*sqrt(((Xc+x)**2)+((Yc+y)**2))), ax=0));
G=abs(Fc)#';
xg=G.max(); ng=G.min(); cg=255/(xg-ng)
plt.figure()
plt.imshow(256-cg*(G-ng)[::-1,:],
extent = (KX.min(), KX.max(), (KY+kus).min(), (KY+kus).max()), aspect = 'equal');
plt.xlabel('Spatial Frequency k_x, rad/m')
plt.ylabel('Spatial Frequency k_y, rad/m')
plt.title('Compressed Spotlight SAR Reconstruction Spectrum')
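# Minimal, self-contained sketch (not part of the reconstruction above) of the
# spatial-frequency mapping used in the interpolation step: for each ky row,
# samples known at kx = sqrt(4*k**2 - ky**2) are resampled onto a uniform KX
# grid. All numbers below are made up purely for illustration.
import numpy as _np
from scipy.interpolate import interp1d as _interp1d
_k = _np.linspace(4.8, 5.4, 64)         # fast-time wavenumbers (rad/m), illustrative
_ky = 1.5                               # one slow-time wavenumber value (rad/m)
_kx = _np.sqrt(4 * _k**2 - _ky**2)      # nonuniform kx samples along this ky row
_vals = _np.cos(_kx)                    # made-up samples along the row
_KX = _np.linspace(_kx.min(), _kx.max(), 64)  # uniform output kx grid
_row = _interp1d(_kx, _vals, kind='zero', bounds_error=False, fill_value=0)(_KX)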
|
|
# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HPE 3PAR Storage array.
This driver requires 3.1.3 firmware on the 3PAR array, using
the 4.x version of the hpe3parclient.
You will need to install the python hpe3parclient.
sudo pip install --upgrade "hpe3parclient>=4.0"
Set the following in the cinder.conf file to enable the
3PAR Fibre Channel Driver along with the required flags:
volume_driver=cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver
"""
try:
from hpe3parclient import exceptions as hpeexceptions
except ImportError:
hpeexceptions = None
from oslo_log import log as logging
from oslo_utils.excutils import save_and_reraise_exception
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
# EXISTENT_PATH error code returned from hpe3parclient
EXISTENT_PATH = 73
@interface.volumedriver
class HPE3PARFCDriver(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.MigrateVD,
driver.BaseVD):
"""OpenStack Fibre Channel driver to enable 3PAR storage array.
Version history:
.. code-block:: none
1.0 - Initial driver
1.1 - QoS, extend volume, multiple iscsi ports, remove domain,
session changes, faster clone, requires 3.1.2 MU2 firmware,
copy volume <--> Image.
1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored
the drivers to use the new APIs.
1.2.1 - Synchronized extend_volume method.
1.2.2 - Added try/finally around client login/logout.
1.2.3 - Added ability to add WWNs to host.
1.2.4 - Added metadata during attach/detach bug #1258033.
1.3.0 - Removed all SSH code. We rely on the hp3parclient now.
2.0.0 - Update hp3parclient API uses 3.0.x
2.0.2 - Add back-end assisted volume migrate
2.0.3 - Added initiator-target map for FC Zone Manager
2.0.4 - Added support for managing/unmanaging of volumes
2.0.5 - Only remove FC Zone on last volume detach
2.0.6 - Added support for volume retype
2.0.7 - Only one FC port is used when a single FC path
is present. bug #1360001
2.0.8 - Fixing missing login/logout around attach/detach bug #1367429
2.0.9 - Add support for pools with model update
2.0.10 - Migrate without losing type settings bug #1356608
2.0.11 - Removing locks bug #1381190
2.0.12 - Fix queryHost call to specify wwns bug #1398206
2.0.13 - Fix missing host name during attach bug #1398206
2.0.14 - Removed usage of host name cache #1398914
2.0.15 - Added support for updated detach_volume attachment.
2.0.16 - Added encrypted property to initialize_connection #1439917
2.0.17 - Improved VLUN creation and deletion logic. #1469816
2.0.18 - Changed initialize_connection to use getHostVLUNs. #1475064
2.0.19 - Adds consistency group support
2.0.20 - Update driver to use ABC metaclasses
2.0.21 - Added update_migrated_volume. bug # 1492023
3.0.0 - Rebranded HP to HPE.
3.0.1 - Remove db access for consistency groups
3.0.2 - Adds v2 managed replication support
3.0.3 - Adds v2 unmanaged replication support
3.0.4 - Adding manage/unmanage snapshot support
3.0.5 - Optimize array ID retrieval
3.0.6 - Update replication to version 2.1
3.0.7 - Remove metadata that tracks the instance ID. bug #1572665
3.0.8 - NSP feature, creating FC Vlun as match set instead of
host sees. bug #1577993
3.0.9 - Handling HTTP conflict 409, host WWN/iSCSI name already used
by another host, while creating 3PAR FC Host. bug #1597454
3.0.10 - Added Entry point tracing
3.0.11 - Handle manage and unmanage hosts present. bug #1648067
3.0.12 - Adds consistency group capability in generic volume groups.
"""
VERSION = "3.0.12"
# The name of the CI wiki page.
CI_WIKI_NAME = "HPE_Storage_CI"
def __init__(self, *args, **kwargs):
super(HPE3PARFCDriver, self).__init__(*args, **kwargs)
self._active_backend_id = kwargs.get('active_backend_id', None)
self.configuration.append_config_values(hpecommon.hpe3par_opts)
self.configuration.append_config_values(san.san_opts)
self.lookup_service = fczm_utils.create_lookup_service()
def _init_common(self):
return hpecommon.HPE3PARCommon(self.configuration,
self._active_backend_id)
def _login(self, timeout=None):
common = self._init_common()
# If replication is enabled and we cannot login, we do not want to
# raise an exception so a failover can still be executed.
try:
common.do_setup(None, timeout=timeout, stats=self._stats)
common.client_login()
except Exception:
if common._replication_enabled:
LOG.warning("The primary array is not reachable at this "
"time. Since replication is enabled, "
"listing replication targets and failing over "
"a volume can still be performed.")
pass
else:
raise
return common
def _logout(self, common):
# If replication is enabled and we do not have a client ID, we did not
# login, but can still failover. There is no need to logout.
if common.client is None and common._replication_enabled:
return
common.client_logout()
def _check_flags(self, common):
"""Sanity check to ensure we have required options set."""
required_flags = ['hpe3par_api_url', 'hpe3par_username',
'hpe3par_password',
'san_ip', 'san_login', 'san_password']
common.check_flags(self.configuration, required_flags)
def get_volume_stats(self, refresh=False):
common = self._login()
try:
self._stats = common.get_volume_stats(
refresh,
self.get_filter_function(),
self.get_goodness_function())
self._stats['storage_protocol'] = 'FC'
self._stats['driver_version'] = self.VERSION
backend_name = self.configuration.safe_get('volume_backend_name')
self._stats['volume_backend_name'] = (backend_name or
self.__class__.__name__)
return self._stats
finally:
self._logout(common)
def do_setup(self, context):
common = self._init_common()
common.do_setup(context)
self._check_flags(common)
common.check_for_setup_error()
def check_for_setup_error(self):
"""Setup errors are already checked for in do_setup so return pass."""
pass
@utils.trace
def create_volume(self, volume):
common = self._login()
try:
return common.create_volume(volume)
finally:
self._logout(common)
@utils.trace
def create_cloned_volume(self, volume, src_vref):
common = self._login()
try:
return common.create_cloned_volume(volume, src_vref)
finally:
self._logout(common)
@utils.trace
def delete_volume(self, volume):
common = self._login()
try:
common.delete_volume(volume)
finally:
self._logout(common)
@utils.trace
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
TODO: support using the size from the user.
"""
common = self._login()
try:
return common.create_volume_from_snapshot(volume, snapshot)
finally:
self._logout(common)
@utils.trace
def create_snapshot(self, snapshot):
common = self._login()
try:
common.create_snapshot(snapshot)
finally:
self._logout(common)
@utils.trace
def delete_snapshot(self, snapshot):
common = self._login()
try:
common.delete_snapshot(snapshot)
finally:
self._logout(common)
@utils.trace
@fczm_utils.add_fc_zone
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'encrypted': False,
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'encrypted': False,
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
}
}
Steps to export a volume on 3PAR
* Create a host on the 3par with the target wwn
* Create a VLUN for that HOST with the volume we want to export.
"""
common = self._login()
try:
# we have to make sure we have a host
host = self._create_host(common, volume, connector)
target_wwns, init_targ_map, numPaths = \
self._build_initiator_target_map(common, connector)
# check if a VLUN already exists for this host
existing_vlun = common.find_existing_vlun(volume, host)
vlun = None
if existing_vlun is None:
# now that we have a host, create the VLUN
nsp = None
lun_id = None
active_fc_port_list = common.get_active_fc_target_ports()
if self.lookup_service:
if not init_targ_map:
msg = _("Setup is incomplete. Device mapping "
"not found from FC network. "
"Cannot perform VLUN creation.")
LOG.error(msg)
raise exception.FCSanLookupServiceException(msg)
for target_wwn in target_wwns:
for port in active_fc_port_list:
if port['portWWN'].lower() == target_wwn.lower():
nsp = port['nsp']
vlun = common.create_vlun(volume,
host,
nsp,
lun_id=lun_id)
if lun_id is None:
lun_id = vlun['lun']
break
else:
init_targ_map.clear()
del target_wwns[:]
host_connected_nsp = []
for fcpath in host['FCPaths']:
if 'portPos' in fcpath:
host_connected_nsp.append(
common.build_nsp(fcpath['portPos']))
for port in active_fc_port_list:
if (
port['type'] == common.client.PORT_TYPE_HOST and
port['nsp'] in host_connected_nsp
):
nsp = port['nsp']
vlun = common.create_vlun(volume,
host,
nsp,
lun_id=lun_id)
target_wwns.append(port['portWWN'])
if vlun['remoteName'] in init_targ_map:
init_targ_map[vlun['remoteName']].append(
port['portWWN'])
else:
init_targ_map[vlun['remoteName']] = [
port['portWWN']]
if lun_id is None:
lun_id = vlun['lun']
if lun_id is None:
# New vlun creation failed
msg = _('No new vlun(s) were created')
LOG.error(msg)
raise exception.VolumeDriverException(msg)
else:
vlun = existing_vlun
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': vlun['lun'],
'target_discovered': True,
'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
encryption_key_id = volume.get('encryption_key_id', None)
info['data']['encrypted'] = encryption_key_id is not None
return info
finally:
self._logout(common)
@utils.trace
@fczm_utils.remove_fc_zone
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
common = self._login()
try:
hostname = common._safe_hostname(connector['host'])
common.terminate_connection(volume, hostname,
wwn=connector['wwpns'])
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
try:
common.client.getHostVLUNs(hostname)
except hpeexceptions.HTTPNotFound:
# No more exports for this host.
LOG.info("Need to remove FC Zone, building initiator "
"target map")
target_wwns, init_targ_map, _numPaths = \
self._build_initiator_target_map(common, connector)
info['data'] = {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}
return info
finally:
self._logout(common)
def _build_initiator_target_map(self, common, connector):
"""Build the target_wwns and the initiator target map."""
fc_ports = common.get_active_fc_target_ports()
all_target_wwns = []
target_wwns = []
init_targ_map = {}
numPaths = 0
for port in fc_ports:
all_target_wwns.append(port['portWWN'])
if self.lookup_service is not None:
# use FC san lookup to determine which NSPs to use
# for the new VLUN.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for _target in init_targ_map[initiator]:
numPaths += 1
target_wwns = list(set(target_wwns))
else:
initiator_wwns = connector['wwpns']
target_wwns = all_target_wwns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns
return target_wwns, init_targ_map, numPaths
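# Illustrative return value (hypothetical WWNs), showing the shape of the data:
#   target_wwns   -> ['20210002ac00383d', '20220002ac00383d']
#   init_targ_map -> {'10006016abcd0001': ['20210002ac00383d', '20220002ac00383d']}
#   numPaths      -> 2 (paths are only counted when a lookup service is configured)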
def _create_3par_fibrechan_host(self, common, hostname, wwns,
domain, persona_id):
"""Create a 3PAR host.
Create a 3PAR host, if there is already a host on the 3par using
the same wwn but with a different hostname, return the hostname
used by 3PAR.
"""
# first search for an existing host
host_found = None
hosts = common.client.queryHost(wwns=wwns)
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
host_found = hosts['members'][0]['name']
if host_found is not None:
return host_found
else:
persona_id = int(persona_id)
try:
common.client.createHost(hostname, FCWwns=wwns,
optional={'domain': domain,
'persona': persona_id})
except hpeexceptions.HTTPConflict as path_conflict:
msg = "Create FC host caught HTTP conflict code: %s"
LOG.exception(msg, path_conflict.get_code())
with save_and_reraise_exception(reraise=False) as ctxt:
if path_conflict.get_code() is EXISTENT_PATH:
# Handle exception : EXISTENT_PATH - host WWN/iSCSI
# name already used by another host
hosts = common.client.queryHost(wwns=wwns)
if hosts and hosts['members'] and (
'name' in hosts['members'][0]):
hostname = hosts['members'][0]['name']
else:
# re-raise last caught exception
ctxt.reraise = True
else:
# re-raise last caught exception
# for other HTTP conflict
ctxt.reraise = True
return hostname
def _modify_3par_fibrechan_host(self, common, hostname, wwn):
mod_request = {'pathOperation': common.client.HOST_EDIT_ADD,
'FCWWNs': wwn}
try:
common.client.modifyHost(hostname, mod_request)
except hpeexceptions.HTTPConflict as path_conflict:
msg = ("Modify FC Host %(hostname)s caught "
"HTTP conflict code: %(code)s")
LOG.exception(msg,
{'hostname': hostname,
'code': path_conflict.get_code()})
def _create_host(self, common, volume, connector):
"""Creates or modifies existing 3PAR host."""
host = None
hostname = common._safe_hostname(connector['host'])
cpg = common.get_cpg(volume, allowSnap=True)
domain = common.get_domain(cpg)
try:
host = common._get_3par_host(hostname)
# Check whether host with wwn of initiator present on 3par
hosts = common.client.queryHost(wwns=connector['wwpns'])
host, hostname = common._get_prioritized_host_on_3par(host,
hosts,
hostname)
except hpeexceptions.HTTPNotFound:
# get persona from the volume type extra specs
persona_id = common.get_persona_type(volume)
# host doesn't exist, we have to create it
hostname = self._create_3par_fibrechan_host(common,
hostname,
connector['wwpns'],
domain,
persona_id)
host = common._get_3par_host(hostname)
return host
else:
return self._add_new_wwn_to_host(common, host, connector['wwpns'])
def _add_new_wwn_to_host(self, common, host, wwns):
"""Add wwns to a host if one or more don't exist.
Identify if argument wwns contains any world wide names
not configured in the 3PAR host path. If any are found,
add them to the 3PAR host.
"""
# get the currently configured wwns
# from the host's FC paths
host_wwns = []
if 'FCPaths' in host:
for path in host['FCPaths']:
wwn = path.get('wwn', None)
if wwn is not None:
host_wwns.append(wwn.lower())
# lower case all wwns in the compare list
compare_wwns = [x.lower() for x in wwns]
# calculate wwns in compare list, but not in host_wwns list
new_wwns = list(set(compare_wwns).difference(host_wwns))
# if any wwns found that were not in host list,
# add them to the host
if (len(new_wwns) > 0):
self._modify_3par_fibrechan_host(common, host['name'], new_wwns)
host = common._get_3par_host(host['name'])
return host
def create_export(self, context, volume, connector):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
@utils.trace
def extend_volume(self, volume, new_size):
common = self._login()
try:
common.extend_volume(volume, new_size)
finally:
self._logout(common)
@utils.trace
def create_group(self, context, group):
common = self._login()
try:
return common.create_group(context, group)
finally:
self._logout(common)
@utils.trace
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
common = self._login()
try:
return common.create_group_from_src(
context, group, volumes, group_snapshot, snapshots,
source_group, source_vols)
finally:
self._logout(common)
@utils.trace
def delete_group(self, context, group, volumes):
common = self._login()
try:
return common.delete_group(context, group, volumes)
finally:
self._logout(common)
@utils.trace
def update_group(self, context, group, add_volumes=None,
remove_volumes=None):
common = self._login()
try:
return common.update_group(context, group, add_volumes,
remove_volumes)
finally:
self._logout(common)
@utils.trace
def create_group_snapshot(self, context, group_snapshot, snapshots):
common = self._login()
try:
return common.create_group_snapshot(context, group_snapshot,
snapshots)
finally:
self._logout(common)
@utils.trace
def delete_group_snapshot(self, context, group_snapshot, snapshots):
common = self._login()
try:
return common.delete_group_snapshot(context, group_snapshot,
snapshots)
finally:
self._logout(common)
@utils.trace
def manage_existing(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing(volume, existing_ref)
finally:
self._logout(common)
@utils.trace
def manage_existing_snapshot(self, snapshot, existing_ref):
common = self._login()
try:
return common.manage_existing_snapshot(snapshot, existing_ref)
finally:
self._logout(common)
@utils.trace
def manage_existing_get_size(self, volume, existing_ref):
common = self._login()
try:
return common.manage_existing_get_size(volume, existing_ref)
finally:
self._logout(common)
@utils.trace
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
common = self._login()
try:
return common.manage_existing_snapshot_get_size(snapshot,
existing_ref)
finally:
self._logout(common)
@utils.trace
def unmanage(self, volume):
common = self._login()
try:
common.unmanage(volume)
finally:
self._logout(common)
@utils.trace
def unmanage_snapshot(self, snapshot):
common = self._login()
try:
common.unmanage_snapshot(snapshot)
finally:
self._logout(common)
@utils.trace
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
common = self._login()
try:
return common.retype(volume, new_type, diff, host)
finally:
self._logout(common)
@utils.trace
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert volume to snapshot."""
common = self._login()
try:
common.revert_to_snapshot(volume, snapshot)
finally:
self._logout(common)
@utils.trace
def migrate_volume(self, context, volume, host):
if volume['status'] == 'in-use':
protocol = host['capabilities']['storage_protocol']
if protocol != 'FC':
LOG.debug("3PAR FC driver cannot migrate in-use volume "
"to a host with storage_protocol=%s.", protocol)
return False, None
common = self._login()
try:
return common.migrate_volume(volume, host)
finally:
self._logout(common)
@utils.trace
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Update the name of the migrated volume to it's new ID."""
common = self._login()
try:
return common.update_migrated_volume(context, volume, new_volume,
original_volume_status)
finally:
self._logout(common)
@utils.trace
def get_pool(self, volume):
common = self._login()
try:
return common.get_cpg(volume)
except hpeexceptions.HTTPNotFound:
reason = (_("Volume %s doesn't exist on array.") % volume)
LOG.error(reason)
raise exception.InvalidVolume(reason)
finally:
self._logout(common)
@utils.trace
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Force failover to a secondary replication target."""
common = self._login(timeout=30)
try:
# Update the active_backend_id in the driver and return it.
active_backend_id, volume_updates = common.failover_host(
context, volumes, secondary_id)
self._active_backend_id = active_backend_id
return active_backend_id, volume_updates, []
finally:
self._logout(common)
|
|
""" This module contains the logic for specifying and validating the top-level
DC/OS configuration from user arguments.
The data structure called 'entry' defines which validation checks should be
run, how arguments should be calculated, which arguments should have set
defaults, and which arguments must be user specified.
Notes:
validate_* functions: the arguments a validation function takes define the
arguments it is evaluated against. All validations are performed at once.
argument calculation functions: like validation functions, the arguments they
take will be pulled from the Source or user arguments. These functions can be
used for both 'default' and 'must'.
See gen.internals for more on how the nuts and bolts of this process work.
"""
import collections
import ipaddress
import json
import os
import socket
import textwrap
from math import floor
from subprocess import check_output
from urllib.parse import urlparse
import yaml
import gen.internals
import pkgpanda.exceptions
from pkgpanda import PackageId
from pkgpanda.util import hash_checkout
def type_str(value):
return type(value).__name__
def check_duplicates(items: list):
counter = collections.Counter(items)
duplicates = dict(filter(lambda x: x[1] > 1, counter.items()))
assert not duplicates, 'List cannot contain duplicates: {}'.format(
', '.join('{} appears {} times'.format(*item) for item in duplicates.items()))
def validate_true_false(val) -> None:
gen.internals.validate_one_of(val, ['true', 'false'])
def validate_int_in_range(value, low, high):
try:
int_value = int(value)
except ValueError as ex:
raise AssertionError('Must be an integer but got a {}: {}'.format(type_str(value), value)) from ex
# Only a lower bound
if high is None:
assert low <= int_value, 'Must be above {}'.format(low)
else:
assert low <= int_value <= high, 'Must be between {} and {} inclusive'.format(low, high)
def validate_json_list(value):
try:
items = json.loads(value)
except ValueError as ex:
raise AssertionError("Must be a JSON formatted list, but couldn't be parsed the given "
"value `{}` as one because of: {}".format(value, ex)) from ex
assert isinstance(items, list), "Must be a JSON list. Got a {}".format(type_str(items))
non_str = list(filter(lambda x: not isinstance(x, str), items))
assert not non_str, "Items in list must be strings, got invalid values: {}".format(
", ".join("{} type {}".format(elem, type_str(elem)) for elem in non_str))
return items
def validate_ipv4_addresses(ips: list):
def try_parse_ip(ip):
try:
return socket.inet_pton(socket.AF_INET, ip)
except OSError:
return None
invalid_ips = list(filter(lambda ip: try_parse_ip(ip) is None, ips))
assert not len(invalid_ips), 'Invalid IPv4 addresses in list: {}'.format(', '.join(invalid_ips))
def validate_url(url: str):
try:
urlparse(url)
except ValueError as ex:
raise AssertionError(
"Couldn't parse given value `{}` as an URL".format(url)
) from ex
def validate_ip_list(json_str: str):
nodes_list = validate_json_list(json_str)
check_duplicates(nodes_list)
validate_ipv4_addresses(nodes_list)
def validate_ip_port_list(json_str: str):
nodes_list = validate_json_list(json_str)
check_duplicates(nodes_list)
# Create a list of only ip addresses by splitting the port from the node. Use the
# resulting ip_list to validate that each entry is an IPv4 address. If a port was
# specified, validate that its value is between 1 and 65535.
ip_list = []
for node in nodes_list:
ip, separator, port = node.rpartition(':')
if not separator:
ip = node
else:
validate_int_in_range(port, 1, 65535)
ip_list.append(ip)
validate_ipv4_addresses(ip_list)
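# Illustrative inputs: '["10.10.0.1:2181", "10.10.0.2"]' passes (the port is
# optional), while '["10.10.0.1:70000"]' fails the 1-65535 port range check.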
def calculate_environment_variable(name):
value = os.getenv(name)
assert value is not None, "{} must be a set environment variable".format(name)
return value
def calculate_dcos_image_commit():
dcos_image_commit = os.getenv('DCOS_IMAGE_COMMIT', None)
if dcos_image_commit is None:
dcos_image_commit = check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
assert dcos_image_commit is not None, "Unable to set dcos_image_commit from teamcity or git."
return dcos_image_commit
def calculate_resolvers_str(resolvers):
# Validation because accidentally slicing a string instead of indexing a
# list of resolvers then finding out at cluster launch is painful.
resolvers = json.loads(resolvers)
return ",".join(resolvers)
def calculate_mesos_dns_resolvers_str(resolvers):
resolver_list = json.loads(resolvers)
# Mesos-DNS unfortunately requires completely different config parameters
# for saying "Don't resolve / reject non-Mesos-DNS requests" than "there are
# no upstream resolvers". As such, if resolvers are given, output that.
# Otherwise output the option externalOn which means "don't try resolving
# external queries / just fail fast without an error."
# This logic _should_ live in the Jinja template but it unfortunately can't
# because the "unset argument detection" in Jinja doesn't work around using
# jinja functions (the function names show up as unset arguments...).
# As such, generate the full JSON line and replace it in the manner given
# above.
if len(resolver_list) > 0:
return '"resolvers": ' + resolvers
else:
return '"externalOn": false'
def validate_mesos_log_retention_mb(mesos_log_retention_mb):
assert int(mesos_log_retention_mb) >= 1024, "Must retain at least 1024 MB of logs"
def validate_mesos_container_log_sink(mesos_container_log_sink):
assert mesos_container_log_sink in ['journald', 'logrotate', 'journald+logrotate'], \
"Container logs must go to 'journald', 'logrotate', or 'journald+logrotate'."
def calculate_mesos_log_retention_count(mesos_log_retention_mb):
# Determine how many 256 MB log chunks can be fit into the given size.
# We assume a 90% compression factor; logs are compressed after 2 rotations.
# We return the number of times the log can be rotated by logrotate;
# this is one less than the total number of log files retained.
return str(int(1 + (int(mesos_log_retention_mb) - 512) / 256 * 10))
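# Worked example: with the default mesos_log_retention_mb of 4000,
# 1 + (4000 - 512) / 256 * 10 = 137.25, so logrotate keeps 137 rotations.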
def calculate_mesos_log_directory_max_files(mesos_log_retention_mb):
# We allow some maximum number of temporary/random files in the
# Mesos log directory. This maximum takes into account the number
# of rotated logs that stay in the archive subdirectory.
return str(25 + int(calculate_mesos_log_retention_count(mesos_log_retention_mb)))
def calculate_ip_detect_contents(ip_detect_filename):
assert os.path.exists(ip_detect_filename), "ip-detect script `{}` must exist".format(ip_detect_filename)
return yaml.dump(open(ip_detect_filename, encoding='utf-8').read())
def calculate_ip_detect_public_contents(ip_detect_contents, ip_detect_public_filename):
if ip_detect_public_filename != '':
calculate_ip_detect_contents(ip_detect_public_filename)
return ip_detect_contents
def calculate_rexray_config_contents(rexray_config):
return yaml.dump(
# Assume block style YAML (not flow) for REX-Ray config.
yaml.dump(json.loads(rexray_config), default_flow_style=False)
)
def validate_json_dictionary(data):
# TODO(cmaloney): Pull validate_json() out.
try:
loaded = json.loads(data)
assert isinstance(loaded, dict), "Must be a JSON dictionary. Got a {}".format(type_str(loaded))
return loaded
except ValueError as ex:
raise AssertionError("Must be valid JSON. Got: {}".format(data)) from ex
def calculate_gen_resolvconf_search(dns_search):
if len(dns_search) > 0:
return "SEARCH=" + dns_search
else:
return ""
def calculate_mesos_hooks(dcos_remove_dockercfg_enable):
if dcos_remove_dockercfg_enable == 'true':
return "com_mesosphere_dcos_RemoverHook"
else:
return ""
def calculate_use_mesos_hooks(mesos_hooks):
if mesos_hooks == "":
return "false"
else:
return "true"
def validate_network_default_name(dcos_overlay_network_default_name, dcos_overlay_network):
try:
overlay_network = json.loads(dcos_overlay_network)
except ValueError as ex:
raise AssertionError("Provided input was not valid JSON: {}".format(dcos_overlay_network)) from ex
overlay_names = map(lambda overlay: overlay['name'], overlay_network['overlays'])
assert dcos_overlay_network_default_name in overlay_names, (
"Default overlay network name does not reference a defined overlay network: {}".format(
dcos_overlay_network_default_name))
def validate_dcos_overlay_network(dcos_overlay_network):
try:
overlay_network = json.loads(dcos_overlay_network)
except ValueError as ex:
raise AssertionError("Provided input was not valid JSON: {}".format(dcos_overlay_network)) from ex
# Check the VTEP IP, VTEP MAC keys are present in the overlay
# configuration
assert 'vtep_subnet' in overlay_network.keys(), (
'Missing "vtep_subnet" in overlay configuration {}'.format(overlay_network))
try:
ipaddress.ip_network(overlay_network['vtep_subnet'])
except ValueError as ex:
raise AssertionError(
"Incorrect value for vtep_subnet: {}."
" Only IPv4 values are allowed".format(overlay_network['vtep_subnet'])) from ex
assert 'vtep_mac_oui' in overlay_network.keys(), (
'Missing "vtep_mac_oui" in overlay configuration {}'.format(overlay_network))
assert 'overlays' in overlay_network.keys(), (
'Missing "overlays" in overlay configuration {}'.format(overlay_network))
assert len(overlay_network['overlays']) > 0, (
'We need at least one overlay network configuration {}'.format(overlay_network))
for overlay in overlay_network['overlays']:
assert (len(overlay['name']) <= 13), (
"Overlay name cannot exceed 13 characters:{}".format(overlay['name']))
try:
ipaddress.ip_network(overlay['subnet'])
except ValueError as ex:
raise AssertionError(
"Incorrect value for vtep_subnet {}."
" Only IPv4 values are allowed".format(overlay['subnet'])) from ex
def validate_num_masters(num_masters):
assert int(num_masters) in [1, 3, 5, 7, 9], "Must have 1, 3, 5, 7, or 9 masters. Found {}".format(num_masters)
def validate_bootstrap_url(bootstrap_url):
assert len(bootstrap_url) > 1, "Should be a url (http://example.com/bar or file:///path/to/local/cache)"
assert bootstrap_url[-1] != '/', "Must not end in a '/'"
def validate_channel_name(channel_name):
assert len(channel_name) > 1, "Must be more than 2 characters"
assert channel_name[0] != '/', "Must not start with a '/'"
assert channel_name[-1] != '/', "Must not end with a '/'"
def validate_dns_search(dns_search):
assert '\n' not in dns_search, "Newlines are not allowed"
assert ',' not in dns_search, "Commas are not allowed"
# resolv.conf requirements
assert len(dns_search) < 256, "Must be less than 256 characters long"
assert len(dns_search.split()) <= 6, "Must contain no more than 6 domains"
def validate_master_list(master_list):
return validate_ip_list(master_list)
def validate_resolvers(resolvers):
return validate_ip_port_list(resolvers)
def validate_mesos_dns_ip_sources(mesos_dns_ip_sources):
return validate_json_list(mesos_dns_ip_sources)
def calc_num_masters(master_list):
return str(len(json.loads(master_list)))
def calculate_config_id(dcos_image_commit, template_filenames, sources_id):
return hash_checkout({
"commit": dcos_image_commit,
"template_filenames": json.loads(template_filenames),
"sources_id": sources_id})
def calculate_config_package_ids(config_package_names, config_id):
def get_config_package_id(config_package_name):
pkg_id_str = "{}--setup_{}".format(config_package_name, config_id)
# validate the pkg_id_str generated is a valid PackageId
return pkg_id_str
return json.dumps(list(sorted(map(get_config_package_id, json.loads(config_package_names)))))
def calculate_cluster_packages(config_package_ids, package_ids):
return json.dumps(sorted(json.loads(config_package_ids) + json.loads(package_ids)))
def validate_cluster_packages(cluster_packages):
pkg_id_list = json.loads(cluster_packages)
for pkg_id in pkg_id_list:
try:
PackageId(pkg_id)
except pkgpanda.exceptions.ValidationError as ex:
raise AssertionError(str(ex)) from ex
def calculate_no_proxy(no_proxy):
user_proxy_config = validate_json_list(no_proxy)
return ",".join(['.mesos,.thisdcos.directory,.dcos.directory,.zk,127.0.0.1,localhost'] + user_proxy_config)
def validate_zk_hosts(exhibitor_zk_hosts):
# TODO(malnick) Add validation of IPv4 address and port to this
assert not exhibitor_zk_hosts.startswith('zk://'), "Must be of the form `host:port,host:port`; must not start with zk://"
def validate_zk_path(exhibitor_zk_path):
assert exhibitor_zk_path.startswith('/'), "Must be of the form /path/to/znode"
def calculate_exhibitor_static_ensemble(master_list):
masters = json.loads(master_list)
masters.sort()
return ','.join(['%d:%s' % (i + 1, m) for i, m in enumerate(masters)])
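# Illustrative result: master_list='["10.0.0.3", "10.0.0.1"]' yields
# '1:10.0.0.1,2:10.0.0.3' (masters are sorted before being numbered).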
def calculate_adminrouter_auth_enabled(oauth_enabled):
return oauth_enabled
def calculate_config_yaml(user_arguments):
return textwrap.indent(
yaml.dump(json.loads(user_arguments), default_style='|', default_flow_style=False, indent=2),
prefix=' ' * 3)
def calculate_mesos_isolation(enable_gpu_isolation):
isolators = ('cgroups/cpu,cgroups/mem,disk/du,network/cni,filesystem/linux,'
'docker/runtime,docker/volume,volume/sandbox_path,posix/rlimits,'
'namespaces/pid,com_mesosphere_MetricsIsolatorModule')
if enable_gpu_isolation == 'true':
isolators += ',cgroups/devices,gpu/nvidia'
return isolators
def validate_os_type(os_type):
gen.internals.validate_one_of(os_type, ['coreos', 'el7'])
def validate_bootstrap_tmp_dir(bootstrap_tmp_dir):
# Must be non_empty
assert bootstrap_tmp_dir, "Must not be empty"
# Should not start or end with `/`
assert bootstrap_tmp_dir[0] != '/' and bootstrap_tmp_dir[-1] != '/', \
"Must be an absolute path to a directory, although leave off the `/` at the beginning and end."
def calculate_minuteman_min_named_ip_erltuple(minuteman_min_named_ip):
return ip_to_erltuple(minuteman_min_named_ip)
def calculate_minuteman_max_named_ip_erltuple(minuteman_max_named_ip):
return ip_to_erltuple(minuteman_max_named_ip)
def ip_to_erltuple(ip):
return '{' + ip.replace('.', ',') + '}'
def validate_minuteman_min_named_ip(minuteman_min_named_ip):
validate_ipv4_addresses([minuteman_min_named_ip])
def validate_minuteman_max_named_ip(minuteman_max_named_ip):
validate_ipv4_addresses([minuteman_max_named_ip])
def calculate_docker_credentials_dcos_owned(cluster_docker_credentials):
if cluster_docker_credentials == "{}":
return "false"
else:
return "true"
def calculate_cluster_docker_credentials_path(cluster_docker_credentials_dcos_owned):
return {
'true': '/opt/mesosphere/etc/docker_credentials',
'false': '/etc/mesosphere/docker_credentials'
}[cluster_docker_credentials_dcos_owned]
def calculate_cluster_docker_registry_enabled(cluster_docker_registry_url):
return 'false' if cluster_docker_registry_url == '' else 'true'
def validate_cosmos_config(cosmos_config):
"""The schema for this configuration is.
{
"schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"staged_package_storage_uri": {
"type": "string"
},
"package_storage_uri": {
"type": "string"
}
}
}
"""
config = validate_json_dictionary(cosmos_config)
expects = ['staged_package_storage_uri', 'package_storage_uri']
found = list(filter(lambda value: value in config, expects))
if len(found) == 0:
# User didn't specify any configuration; nothing to do
pass
elif len(found) == 1:
# User specified one parameter but not the other; fail
raise AssertionError(
'cosmos_config must be a dictionary containing both {}, or must '
'be left empty. Found only {}'.format(' '.join(expects), found)
)
else:
# User specified both parameters; make sure they are URLs
for value in found:
validate_url(config[value])
def calculate_cosmos_staged_package_storage_uri_flag(cosmos_config):
config = validate_json_dictionary(cosmos_config)
if 'staged_package_storage_uri' in config:
return (
'-com.mesosphere.cosmos.stagedPackageStorageUri={}'.format(
config['staged_package_storage_uri']
)
)
else:
return ''
def calculate_cosmos_package_storage_uri_flag(cosmos_config):
config = validate_json_dictionary(cosmos_config)
if 'package_storage_uri' in config:
return (
'-com.mesosphere.cosmos.packageStorageUri={}'.format(
config['package_storage_uri']
)
)
else:
return ''
def calculate_set(parameter):
if parameter == '':
return 'false'
else:
return 'true'
def validate_exhibitor_storage_master_discovery(master_discovery, exhibitor_storage_backend):
if master_discovery != 'static':
assert exhibitor_storage_backend != 'static', "When master_discovery is not static, " \
"exhibitor_storage_backend must be non-static. Having a variable list of masters which " \
"are discovered by agents using the master_discovery method, but also having a static " \
"list of master ips fixed at install time, doesn't make sense. In particular, when " \
"master_discovery is `master_http_load_balancer`, exhibitor_storage_backend must not be static."
__dcos_overlay_network_default_name = 'dcos'
entry = {
'validate': [
validate_num_masters,
validate_bootstrap_url,
validate_channel_name,
validate_dns_search,
validate_master_list,
validate_resolvers,
validate_zk_hosts,
validate_zk_path,
validate_cluster_packages,
lambda oauth_enabled: validate_true_false(oauth_enabled),
lambda oauth_available: validate_true_false(oauth_available),
validate_mesos_dns_ip_sources,
validate_mesos_log_retention_mb,
lambda telemetry_enabled: validate_true_false(telemetry_enabled),
lambda master_dns_bindall: validate_true_false(master_dns_bindall),
validate_os_type,
validate_dcos_overlay_network,
lambda dcos_overlay_network_default_name, dcos_overlay_network:
validate_network_default_name(dcos_overlay_network_default_name, dcos_overlay_network),
lambda dcos_overlay_enable: validate_true_false(dcos_overlay_enable),
lambda dcos_overlay_mtu: validate_int_in_range(dcos_overlay_mtu, 552, None),
lambda dcos_overlay_config_attempts: validate_int_in_range(dcos_overlay_config_attempts, 0, 10),
lambda dcos_remove_dockercfg_enable: validate_true_false(dcos_remove_dockercfg_enable),
lambda rexray_config: validate_json_dictionary(rexray_config),
lambda check_time: validate_true_false(check_time),
lambda enable_gpu_isolation: validate_true_false(enable_gpu_isolation),
validate_minuteman_min_named_ip,
validate_minuteman_max_named_ip,
lambda cluster_docker_credentials_dcos_owned: validate_true_false(cluster_docker_credentials_dcos_owned),
lambda cluster_docker_credentials_enabled: validate_true_false(cluster_docker_credentials_enabled),
lambda cluster_docker_credentials_write_to_etc: validate_true_false(cluster_docker_credentials_write_to_etc),
lambda cluster_docker_credentials: validate_json_dictionary(cluster_docker_credentials),
lambda aws_masters_have_public_ip: validate_true_false(aws_masters_have_public_ip),
validate_exhibitor_storage_master_discovery,
validate_cosmos_config,
lambda enable_lb: validate_true_false(enable_lb)
],
'default': {
'bootstrap_tmp_dir': 'tmp',
'bootstrap_variant': lambda: calculate_environment_variable('BOOTSTRAP_VARIANT'),
'use_proxy': 'false',
'weights': '',
'adminrouter_auth_enabled': calculate_adminrouter_auth_enabled,
'oauth_enabled': 'true',
'oauth_available': 'true',
'telemetry_enabled': 'true',
'check_time': 'true',
'cluster_packages_json': lambda cluster_packages: cluster_packages,
'enable_lb': 'true',
'docker_remove_delay': '1hrs',
'docker_stop_timeout': '20secs',
'gc_delay': '2days',
'ip_detect_contents': calculate_ip_detect_contents,
'ip_detect_public_filename': '',
'ip_detect_public_contents': calculate_ip_detect_public_contents,
'dns_search': '',
'auth_cookie_secure_flag': 'false',
'master_dns_bindall': 'true',
'mesos_dns_ip_sources': '["host", "netinfo"]',
'master_external_loadbalancer': '',
'mesos_log_retention_mb': '4000',
'mesos_container_log_sink': 'logrotate',
'oauth_issuer_url': 'https://dcos.auth0.com/',
'oauth_client_id': '3yF5TOSzdlI45Q1xspxzeoGBe9fNxm9m',
'oauth_auth_redirector': 'https://auth.dcos.io',
'oauth_auth_host': 'https://dcos.auth0.com',
'ui_tracking': 'true',
'ui_banner': 'false',
'ui_banner_background_color': '#1E232F',
'ui_banner_foreground_color': '#FFFFFF',
'ui_banner_header_title': 'null',
'ui_banner_header_content': 'null',
'ui_banner_footer_content': 'null',
'ui_banner_image_path': 'null',
'ui_banner_dismissible': 'null',
'dcos_overlay_config_attempts': '4',
'dcos_overlay_mtu': '1420',
'dcos_overlay_enable': "true",
'dcos_overlay_network': json.dumps({
'vtep_subnet': '44.128.0.0/20',
'vtep_mac_oui': '70:B3:D5:00:00:00',
'overlays': [{
'name': __dcos_overlay_network_default_name,
'subnet': '9.0.0.0/8',
'prefix': 24
}]
}),
'dcos_overlay_network_default_name': __dcos_overlay_network_default_name,
'dcos_remove_dockercfg_enable': "false",
'minuteman_min_named_ip': '11.0.0.0',
'minuteman_max_named_ip': '11.255.255.255',
'no_proxy': '',
'rexray_config_preset': '',
'rexray_config': json.dumps({
# Disabled. REX-Ray will start but not register as a volume driver.
'rexray': {
'loglevel': 'info',
'modules': {
'default-admin': {
'host': 'tcp://127.0.0.1:61003'
},
'default-docker': {
'disabled': True
}
}
}
}),
'enable_gpu_isolation': 'true',
'cluster_docker_registry_url': '',
'cluster_docker_credentials_dcos_owned': calculate_docker_credentials_dcos_owned,
'cluster_docker_credentials_write_to_etc': 'false',
'cluster_docker_credentials_enabled': 'false',
'cluster_docker_credentials': "{}",
'cosmos_config': '{}'
},
'must': {
'custom_auth': 'false',
'master_quorum': lambda num_masters: str(floor(int(num_masters) / 2) + 1),
'resolvers_str': calculate_resolvers_str,
'dcos_image_commit': calulate_dcos_image_commit,
'mesos_dns_resolvers_str': calculate_mesos_dns_resolvers_str,
'mesos_log_retention_count': calculate_mesos_log_retention_count,
'mesos_log_directory_max_files': calculate_mesos_log_directory_max_files,
'dcos_version': '1.9-dev',
'dcos_gen_resolvconf_search_str': calculate_gen_resolvconf_search,
'curly_pound': '{#',
'config_package_ids': calculate_config_package_ids,
'cluster_packages': calculate_cluster_packages,
'config_id': calculate_config_id,
'exhibitor_static_ensemble': calculate_exhibitor_static_ensemble,
'ui_branding': 'false',
'ui_external_links': 'false',
'ui_networking': 'false',
'ui_organization': 'false',
'ui_telemetry_metadata': '{"openBuild": true}',
'minuteman_forward_metrics': 'false',
'minuteman_min_named_ip_erltuple': calculate_minuteman_min_named_ip_erltuple,
'minuteman_max_named_ip_erltuple': calculate_minuteman_max_named_ip_erltuple,
'mesos_isolation': calculate_mesos_isolation,
'config_yaml': calculate_config_yaml,
'mesos_hooks': calculate_mesos_hooks,
'use_mesos_hooks': calculate_use_mesos_hooks,
'rexray_config_contents': calculate_rexray_config_contents,
'no_proxy_final': calculate_no_proxy,
'cluster_docker_credentials_path': calculate_cluster_docker_credentials_path,
'cluster_docker_registry_enabled': calculate_cluster_docker_registry_enabled,
'has_master_external_loadbalancer':
lambda master_external_loadbalancer: calculate_set(master_external_loadbalancer),
'cosmos_staged_package_storage_uri_flag':
calculate_cosmos_staged_package_storage_uri_flag,
'cosmos_package_storage_uri_flag':
calculate_cosmos_package_storage_uri_flag
},
'conditional': {
'master_discovery': {
'master_http_loadbalancer': {},
'static': {
'must': {'num_masters': calc_num_masters}
}
},
'rexray_config_preset': {
'': {},
'aws': {
'must': {
'rexray_config': json.dumps({
# Use IAM Instance Profile for auth.
'rexray': {
'loglevel': 'info',
'modules': {
'default-admin': {
'host': 'tcp://127.0.0.1:61003'
}
},
'storageDrivers': ['ec2'],
'volume': {
'unmount': {
'ignoreusedcount': True
}
}
}
})
}
}
}
}
}
|
|
import warnings
from dateutil.parser import parse
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.import_manager import sa
from great_expectations.expectations.metrics.map_metric_provider import (
ColumnMapMetricProvider,
column_condition_partial,
)
class ColumnValuesBetween(ColumnMapMetricProvider):
condition_metric_name = "column_values.between"
condition_value_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
"parse_strings_as_datetimes",
"allow_cross_type_comparisons",
)
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(
cls,
column,
min_value=None,
max_value=None,
strict_min=None,
strict_max=None,
parse_strings_as_datetimes: bool = False,
allow_cross_type_comparisons=None,
**kwargs
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if parse_strings_as_datetimes:
warnings.warn(
"""The parameter "parse_strings_as_datetimes" is no longer supported and will be deprecated in a \
future release. Please update code accordingly.
""",
DeprecationWarning,
)
if min_value is not None:
try:
min_value = parse(min_value)
except TypeError:
pass
if max_value is not None:
try:
max_value = parse(max_value)
except TypeError:
pass
try:
temp_column = column.map(parse)
except TypeError:
temp_column = column
else:
temp_column = column
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
def is_between(val):
# TODO Might be worth explicitly defining comparisons between types (for example, between strings and ints).
# Ensure types can be compared since some types in Python 3 cannot be logically compared.
# print type(val), type(min_value), type(max_value), val, min_value, max_value
            if val is None:
return False
if min_value is not None and max_value is not None:
if allow_cross_type_comparisons:
try:
if strict_min and strict_max:
return (min_value < val) and (val < max_value)
elif strict_min:
return (min_value < val) and (val <= max_value)
elif strict_max:
return (min_value <= val) and (val < max_value)
else:
return (min_value <= val) and (val <= max_value)
except TypeError:
return False
else:
if (isinstance(val, str) != isinstance(min_value, str)) or (
isinstance(val, str) != isinstance(max_value, str)
):
raise TypeError(
"Column values, min_value, and max_value must either be None or of the same type."
)
if strict_min and strict_max:
return (min_value < val) and (val < max_value)
elif strict_min:
return (min_value < val) and (val <= max_value)
elif strict_max:
return (min_value <= val) and (val < max_value)
else:
return (min_value <= val) and (val <= max_value)
elif min_value is None and max_value is not None:
if allow_cross_type_comparisons:
try:
if strict_max:
return val < max_value
else:
return val <= max_value
except TypeError:
return False
else:
if isinstance(val, str) != isinstance(max_value, str):
raise TypeError(
"Column values, min_value, and max_value must either be None or of the same type."
)
if strict_max:
return val < max_value
else:
return val <= max_value
elif min_value is not None and max_value is None:
if allow_cross_type_comparisons:
try:
if strict_min:
return min_value < val
else:
return min_value <= val
except TypeError:
return False
else:
if isinstance(val, str) != isinstance(min_value, str):
raise TypeError(
"Column values, min_value, and max_value must either be None or of the same type."
)
if strict_min:
return min_value < val
else:
return min_value <= val
else:
return False
return temp_column.map(is_between)
@column_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
column,
min_value=None,
max_value=None,
strict_min=None,
strict_max=None,
parse_strings_as_datetimes: bool = False,
**kwargs
):
if parse_strings_as_datetimes:
warnings.warn(
"""The parameter "parse_strings_as_datetimes" is no longer supported and will be deprecated in a \
future release. Please update code accordingly.
""",
DeprecationWarning,
)
if min_value is not None:
try:
min_value = parse(min_value)
except TypeError:
pass
if max_value is not None:
try:
max_value = parse(max_value)
except TypeError:
pass
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if min_value is None:
if strict_max:
return column < max_value
else:
return column <= max_value
elif max_value is None:
if strict_min:
return min_value < column
else:
return min_value <= column
else:
if strict_min and strict_max:
return sa.and_(min_value < column, column < max_value)
elif strict_min:
return sa.and_(min_value < column, column <= max_value)
elif strict_max:
return sa.and_(min_value <= column, column < max_value)
else:
return sa.and_(min_value <= column, column <= max_value)
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(
cls,
column,
min_value=None,
max_value=None,
strict_min=None,
strict_max=None,
parse_strings_as_datetimes: bool = False,
**kwargs
):
if parse_strings_as_datetimes:
warnings.warn(
"""The parameter "parse_strings_as_datetimes" is no longer supported and will be deprecated in a \
future release. Please update code accordingly.
""",
DeprecationWarning,
)
if min_value is not None:
try:
min_value = parse(min_value)
except TypeError:
pass
if max_value is not None:
try:
max_value = parse(max_value)
except TypeError:
pass
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if min_value is None:
if strict_max:
return column < max_value
else:
return column <= max_value
elif max_value is None:
if strict_min:
return min_value < column
else:
return min_value <= column
else:
if strict_min and strict_max:
return (min_value < column) & (column < max_value)
elif strict_min:
return (min_value < column) & (column <= max_value)
elif strict_max:
return (min_value <= column) & (column < max_value)
else:
return (min_value <= column) & (column <= max_value)
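# Minimal standalone sketch of the boundary semantics implemented above; it does
# not go through the Great Expectations API. strict_min/strict_max switch the
# comparisons from inclusive to exclusive, and a missing bound is left open
# (the metric itself rejects the case where both bounds are None).
def _between_sketch(val, min_value=None, max_value=None,
                    strict_min=False, strict_max=False):
    if val is None:
        return False
    if min_value is not None and ((min_value >= val) if strict_min else (min_value > val)):
        return False
    if max_value is not None and ((val >= max_value) if strict_max else (val > max_value)):
        return False
    return True
# For example: _between_sketch(5, 1, 5) is True; _between_sketch(5, 1, 5, strict_max=True) is False.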
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class Action(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The action of virtual network rule.
"""
ALLOW = "Allow"
class ActionsRequired(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""A message indicating if changes on the service provider require any updates on the consumer.
"""
NONE = "None"
RECREATE = "Recreate"
class ActivationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The activation status of the connected registry.
"""
ACTIVE = "Active"
INACTIVE = "Inactive"
class AuditLogStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether audit logs are enabled on the connected registry.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class CertificateType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of certificate location.
"""
LOCAL_DIRECTORY = "LocalDirectory"
class ConnectedRegistryMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The mode of the connected registry resource that indicates the permissions of the registry.
"""
REGISTRY = "Registry"
MIRROR = "Mirror"
class ConnectionState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current connection state of the connected registry.
"""
ONLINE = "Online"
OFFLINE = "Offline"
SYNCING = "Syncing"
UNHEALTHY = "Unhealthy"
class ConnectionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The private link service connection status.
"""
APPROVED = "Approved"
PENDING = "Pending"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
class CreatedByType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class DefaultAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The default action of allow or deny when no other rules match.
"""
ALLOW = "Allow"
DENY = "Deny"
class EncryptionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether or not the encryption is enabled for container registry.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class ExportPolicyStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The value that indicates whether the policy is enabled or not.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class ImportMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""When Force, any existing target tags will be overwritten. When NoForce, any existing target
tags will fail the operation before any copying begins.
"""
NO_FORCE = "NoForce"
FORCE = "Force"
class LastModifiedByType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that last modified the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class LogLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The verbosity of logs persisted on the connected registry.
"""
DEBUG = "Debug"
INFORMATION = "Information"
WARNING = "Warning"
ERROR = "Error"
NONE = "None"
class NetworkRuleBypassOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Whether to allow trusted Azure services to access a network restricted registry.
"""
AZURE_SERVICES = "AzureServices"
NONE = "None"
class PasswordName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The password name.
"""
PASSWORD = "password"
PASSWORD2 = "password2"
class PipelineOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
OVERWRITE_TAGS = "OverwriteTags"
OVERWRITE_BLOBS = "OverwriteBlobs"
DELETE_SOURCE_BLOB_ON_SUCCESS = "DeleteSourceBlobOnSuccess"
CONTINUE_ON_ERRORS = "ContinueOnErrors"
class PipelineRunSourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the source.
"""
AZURE_STORAGE_BLOB = "AzureStorageBlob"
class PipelineRunTargetType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the target.
"""
AZURE_STORAGE_BLOB = "AzureStorageBlob"
class PipelineSourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of source for the import pipeline.
"""
AZURE_STORAGE_BLOB_CONTAINER = "AzureStorageBlobContainer"
class PolicyStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The value that indicates whether the policy is enabled or not.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class ProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Provisioning state of the resource.
"""
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
class PublicNetworkAccess(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Whether or not public network access is allowed for the container registry.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class RegistryUsageUnit(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The unit of measurement.
"""
COUNT = "Count"
BYTES = "Bytes"
class ResourceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The SKU name of the container registry. Required for registry creation.
"""
CLASSIC = "Classic"
BASIC = "Basic"
STANDARD = "Standard"
PREMIUM = "Premium"
class SkuTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The SKU tier based on the SKU name.
"""
CLASSIC = "Classic"
BASIC = "Basic"
STANDARD = "Standard"
PREMIUM = "Premium"
class TlsStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether HTTPS is enabled for the login server.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class TokenCertificateName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CERTIFICATE1 = "certificate1"
CERTIFICATE2 = "certificate2"
class TokenPasswordName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The password name "password1" or "password2"
"""
PASSWORD1 = "password1"
PASSWORD2 = "password2"
class TokenStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the token example enabled or disabled.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class TriggerStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current status of the source trigger.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class TrustPolicyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of trust policy.
"""
NOTARY = "Notary"
class WebhookAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
PUSH = "push"
DELETE = "delete"
QUARANTINE = "quarantine"
CHART_PUSH = "chart_push"
CHART_DELETE = "chart_delete"
class WebhookStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the webhook at the time the operation was called.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class ZoneRedundancy(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Whether or not zone redundancy is enabled for this container registry
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import contextlib
import multiprocessing
import multiprocessing.managers
import os
import platform
import random
import socket
import signal
import subprocess
import threading
import time
import traceback
from crossrunner.test import TestEntry, domain_socket_path
from crossrunner.report import ExecReporter, SummaryReporter
RESULT_TIMEOUT = 128
RESULT_ERROR = 64
class ExecutionContext(object):
def __init__(self, cmd, cwd, env, report):
self._log = multiprocessing.get_logger()
self.report = report
self.cmd = cmd
self.cwd = cwd
self.env = env
self.timer = None
self.expired = False
def _expire(self):
self._log.info('Timeout')
self.expired = True
self.kill()
def kill(self):
self._log.debug('Killing process : %d' % self.proc.pid)
if platform.system() != 'Windows':
try:
os.killpg(self.proc.pid, signal.SIGKILL)
except Exception as err:
self._log.info('Failed to kill process group : %s' % str(err))
try:
self.proc.kill()
except Exception as err:
self._log.info('Failed to kill process : %s' % str(err))
self.report.killed()
def _popen_args(self):
args = {
'cwd': self.cwd,
'env': self.env,
'stdout': self.report.out,
'stderr': subprocess.STDOUT,
}
        # make sure child processes don't remain after killing
if platform.system() == 'Windows':
DETACHED_PROCESS = 0x00000008
args.update(creationflags=DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP)
else:
args.update(preexec_fn=os.setsid)
return args
def start(self, timeout=0):
self._log.debug('COMMAND: %s', ' '.join(self.cmd))
self._log.debug('WORKDIR: %s', self.cwd)
self._log.debug('LOGFILE: %s', self.report.logpath)
self.report.begin()
self.proc = subprocess.Popen(self.cmd, **self._popen_args())
if timeout > 0:
self.timer = threading.Timer(timeout, self._expire)
self.timer.start()
return self._scoped()
@contextlib.contextmanager
def _scoped(self):
yield self
self._log.debug('Killing scoped process')
self.kill()
def wait(self):
self.proc.communicate()
if self.timer:
self.timer.cancel()
self.report.end(self.returncode)
@property
def returncode(self):
return self.proc.returncode if self.proc else None
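# Illustrative sketch, not part of crossrunner: the intended start()/wait() flow.
# The command and the report object here are made up; a real report comes from
# ExecReporter and must provide out, logpath, begin(), end() and killed().
def _example_execution_context(report):
    ctx = ExecutionContext(['sleep', '5'], cwd='.', env={}, report=report)
    with ctx.start(timeout=2):  # a timer calls _expire() -> kill() after 2 seconds
        ctx.wait()              # block until the child exits or is killed
    return RESULT_TIMEOUT if ctx.expired else ctx.returncode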
def exec_context(port, testdir, test, prog):
report = ExecReporter(testdir, test, prog)
prog.build_command(port)
return ExecutionContext(prog.command, prog.workdir, prog.env, report)
def run_test(testdir, test_dict, async_mode=True, max_retry=3):
try:
logger = multiprocessing.get_logger()
retry_count = 0
test = TestEntry(testdir, **test_dict)
while True:
if stop.is_set():
logger.debug('Skipping because shutting down')
return None
logger.debug('Start')
with PortAllocator.alloc_port_scoped(ports, test.socket) as port:
logger.debug('Start with port %d' % port)
sv = exec_context(port, testdir, test, test.server)
cl = exec_context(port, testdir, test, test.client)
logger.debug('Starting server')
with sv.start():
if test.delay > 0:
logger.debug('Delaying client for %.2f seconds' % test.delay)
time.sleep(test.delay)
cl_retry_count = 0
cl_max_retry = 10
cl_retry_wait = 0.5
while True:
logger.debug('Starting client')
cl.start(test.timeout)
logger.debug('Waiting client')
cl.wait()
if not cl.report.maybe_false_positive() or cl_retry_count >= cl_max_retry:
if cl_retry_count > 0 and cl_retry_count < cl_max_retry:
logger.warn('[%s]: Connected after %d retry (%.2f sec each)' % (test.server.name, cl_retry_count, cl_retry_wait))
break
logger.debug('Server may not be ready, waiting %.2f second...' % cl_retry_wait)
time.sleep(cl_retry_wait)
cl_retry_count += 1
if not sv.report.maybe_false_positive() or retry_count >= max_retry:
logger.debug('Finish')
return RESULT_TIMEOUT if cl.expired else cl.proc.returncode
logger.warn('[%s]: Detected socket bind failure, retrying...' % test.server.name)
retry_count += 1
except (KeyboardInterrupt, SystemExit):
logger.info('Interrupted execution')
        if not async_mode:
raise
stop.set()
return None
except Exception as ex:
logger.warn('Error while executing test : %s' % str(ex))
        if not async_mode:
raise
        logger.info(traceback.format_exc())
return RESULT_ERROR
class PortAllocator(object):
def __init__(self):
self._log = multiprocessing.get_logger()
self._lock = multiprocessing.Lock()
self._ports = set()
self._dom_ports = set()
self._last_alloc = 0
def _get_tcp_port(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[1]
self._lock.acquire()
try:
ok = port not in self._ports
if ok:
self._ports.add(port)
self._last_alloc = time.time()
finally:
self._lock.release()
sock.close()
return port if ok else self._get_tcp_port()
def _get_domain_port(self):
port = random.randint(1024, 65536)
self._lock.acquire()
try:
ok = port not in self._dom_ports
if ok:
self._dom_ports.add(port)
finally:
self._lock.release()
return port if ok else self._get_domain_port()
def alloc_port(self, socket_type):
if socket_type in ('domain', 'abstract'):
return self._get_domain_port()
else:
return self._get_tcp_port()
    # static method for inter-process invocation
@staticmethod
@contextlib.contextmanager
def alloc_port_scoped(allocator, socket_type):
port = allocator.alloc_port(socket_type)
yield port
allocator.free_port(socket_type, port)
def free_port(self, socket_type, port):
self._log.debug('free_port')
self._lock.acquire()
try:
if socket_type == 'domain':
self._dom_ports.remove(port)
path = domain_socket_path(port)
if os.path.exists(path):
os.remove(path)
elif socket_type == 'abstract':
self._dom_ports.remove(port)
else:
self._ports.remove(port)
except IOError as err:
self._log.info('Error while freeing port : %s' % str(err))
finally:
self._lock.release()
class NonAsyncResult(object):
def __init__(self, value):
self._value = value
def get(self, timeout=None):
return self._value
def wait(self, timeout=None):
pass
def ready(self):
return True
def successful(self):
return self._value == 0
class TestDispatcher(object):
def __init__(self, testdir, concurrency):
self._log = multiprocessing.get_logger()
self.testdir = testdir
# seems needed for python 2.x to handle keyboard interrupt
self._stop = multiprocessing.Event()
self._async = concurrency > 1
if not self._async:
self._pool = None
global stop
global ports
stop = self._stop
ports = PortAllocator()
else:
self._m = multiprocessing.managers.BaseManager()
self._m.register('ports', PortAllocator)
self._m.start()
self._pool = multiprocessing.Pool(concurrency, self._pool_init, (self._m.address,))
self._report = SummaryReporter(testdir, concurrency > 1)
self._log.debug(
'TestDispatcher started with %d concurrent jobs' % concurrency)
def _pool_init(self, address):
global stop
global m
global ports
stop = self._stop
m = multiprocessing.managers.BaseManager(address)
m.connect()
ports = m.ports()
def _dispatch_sync(self, test, cont):
r = run_test(self.testdir, test, False)
cont(r)
return NonAsyncResult(r)
def _dispatch_async(self, test, cont):
return self._pool.apply_async(func=run_test, args=(self.testdir, test,), callback=cont)
def dispatch(self, test):
index = self._report.add_test(test)
def cont(r):
if not self._stop.is_set():
self._log.debug('freeing port')
self._log.debug('adding result')
self._report.add_result(index, r, r == RESULT_TIMEOUT)
self._log.debug('finish continuation')
fn = self._dispatch_async if self._async else self._dispatch_sync
return fn(test, cont)
def wait(self):
if self._async:
self._pool.close()
self._pool.join()
self._m.shutdown()
return self._report.end()
def terminate(self):
self._stop.set()
if self._async:
self._pool.terminate()
self._pool.join()
self._m.shutdown()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLAlchemy storage backend."""
import hashlib
import sys
from sqlalchemy.orm import exc
from ripcord.common import exception
from ripcord.db.sqlalchemy import models
from ripcord.openstack.common.db import exception as db_exc
from ripcord.openstack.common.db.sqlalchemy import session as db_session
from ripcord.openstack.common import log as logging
from ripcord.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
get_session = db_session.get_session
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
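# Illustrative sketch, not part of the original backend: callers chain standard
# SQLAlchemy filters onto model_query(), for example to list only enabled
# domains of a project (column names taken from create_domain above).
def _example_enabled_domains(project_id):
    query = model_query(models.Domain).filter_by(
        project_id=project_id, disabled=False)
    return query.all()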
def create_domain(name, project_id, user_id, disabled=False):
"""Create a new domain."""
values = {
'disabled': disabled,
'name': name,
'project_id': project_id,
'user_id': user_id,
}
values['uuid'] = uuidutils.generate_uuid()
try:
res = _create_model(model=models.Domain(), values=values)
except db_exc.DBDuplicateEntry:
raise exception.DomainAlreadyExists(name=values['name'])
return res
def create_subscriber(
username, domain_id, password, user_id, project_id, description='',
disabled=False, email='', rpid=''):
"""Create a new subscriber."""
model = get_domain(uuid=domain_id)
values = {
'description': description,
'disabled': disabled,
'domain_id': domain_id,
'email_address': email,
'password': password,
'project_id': project_id,
'rpid': rpid,
'user_id': user_id,
'username': username,
}
values['ha1'] = hashlib.md5(
'%s:%s:%s' % (
values['username'], model['name'],
values['password'])).hexdigest()
values['ha1b'] = hashlib.md5(
'%s@%s:%s:%s' % (
values['username'], model['name'], model['name'],
values['password'])).hexdigest()
values['uuid'] = uuidutils.generate_uuid()
try:
res = _create_model(model=models.Subscriber(), values=values)
except db_exc.DBDuplicateEntry:
raise exception.SubscriberAlreadyExists(
username=values['username'], domain_id=model['name'])
return res
def delete_domain(uuid):
"""Delete a domain."""
res = _delete_model(model=models.Domain, uuid=uuid)
if res != 1:
raise exception.DomainNotFound(uuid=uuid)
def delete_subscriber(uuid):
"""Delete a subscriber."""
res = _delete_model(model=models.Subscriber, uuid=uuid)
if res != 1:
raise exception.SubscriberNotFound(uuid=uuid)
def get_domain(uuid):
"""Retrieve information about the given domain."""
try:
res = _get_model(model=models.Domain, uuid=uuid)
except exc.NoResultFound:
raise exception.DomainNotFound(uuid=uuid)
return res
def get_subscriber(uuid):
"""Retrieve information about the given subscriber."""
try:
res = _get_model(model=models.Subscriber, uuid=uuid)
except exc.NoResultFound:
raise exception.SubscriberNotFound(uuid=uuid)
return res
def list_domains(project_id):
"""Retrieve a list of domains."""
res = _list_model(model=models.Domain, project_id=project_id)
return res
def list_subscribers(project_id):
"""Retrieve a list of subscribers."""
res = _list_model(model=models.Subscriber, project_id=project_id)
return res
def update_domain(
uuid, name=None, disabled=None, project_id=None, user_id=None):
"""Update an existing domain."""
res = get_domain(uuid=uuid)
if disabled is not None:
res['disabled'] = disabled
if name is not None:
res['name'] = name
if project_id is not None:
res['project_id'] = project_id
if user_id is not None:
res['user_id'] = user_id
res.save()
return res
def update_subscriber(
uuid, description=None, disabled=None, domain_id=None, email=None,
password=None, project_id=None, rpid=None, user_id=None,
username=None):
"""Update an existing subscriber."""
res = get_subscriber(uuid=uuid)
if description is not None:
res['description'] = description
if disabled is not None:
res['disabled'] = disabled
if domain_id is not None:
res['domain_id'] = domain_id
if email is not None:
res['email_address'] = email
if password is not None:
res['password'] = password
if project_id is not None:
res['project_id'] = project_id
if rpid is not None:
res['rpid'] = rpid
if user_id is not None:
res['user_id'] = user_id
if username is not None:
res['username'] = username
model = get_domain(uuid=res['domain_id'])
res['ha1'] = hashlib.md5(
'%s:%s:%s' % (
res['username'], model['name'],
res['password'])).hexdigest()
res['ha1b'] = hashlib.md5(
'%s@%s:%s:%s' % (
res['username'], model['name'], model['name'],
res['password'])).hexdigest()
res.save()
return res
def get_default_quota_class():
rows = model_query(
models.QuotaClass).filter_by(class_name=_DEFAULT_QUOTA_NAME).all()
result = {
'class_name': _DEFAULT_QUOTA_NAME
}
for row in rows:
result[row.resource] = row.hard_limit
return result
def _create_model(model, values):
"""Create a new model."""
model.update(values)
model.save()
return model
def _delete_model(model, **kwargs):
session = get_session()
with session.begin():
query = model_query(
model, session=session
).filter_by(**kwargs)
count = query.delete()
return count
def _get_model(model, **kwargs):
"""Retrieve information about the given model."""
query = model_query(model).filter_by(**kwargs)
res = query.one()
return res
def _list_model(model, **kwargs):
"""Retrieve a list of the given model."""
query = model_query(model).filter_by(**kwargs)
return query.all()
|
|
from __future__ import division, print_function, unicode_literals
from __future__ import absolute_import
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2014-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: [email protected]
#
# pylint: disable=missing-docstring,protected-access
import math
import khmer
from screed.fasta import fasta_iter
from . import khmer_tst_utils as utils
from nose.tools import assert_raises
K = 20 # size of kmer
ERR_RATE = 0.01
N_UNIQUE = 3960
TRANSLATE = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}
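# Illustrative helper sketch (the tests below inline this logic): reverse
# complement a DNA k-mer with the TRANSLATE table, e.g. _revcomp("AAAC") == "GTTT".
def _revcomp(kmer):
    return "".join(TRANSLATE[c] for c in reversed(kmer))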
def teardown():
utils.cleanup()
def test_hll_add_python():
# test python code to count unique kmers using HyperLogLog.
# use the lower level add() method, which accepts anything,
    # and compare to an exact count of canonical k-mers kept in a set
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
counter = set()
for n, record in enumerate(fasta_iter(open(filename))):
sequence = record['sequence']
seq_len = len(sequence)
for n in range(0, seq_len + 1 - K):
kmer = sequence[n:n + K]
rc = "".join(TRANSLATE[c] for c in kmer[::-1])
hllcpp.add(kmer)
if rc in counter:
kmer = rc
counter.update([kmer])
n_unique = len(counter)
assert n_unique == N_UNIQUE
assert abs(1 - float(hllcpp.estimate_cardinality()) / N_UNIQUE) < ERR_RATE
def test_hll_consume_string():
# test c++ code to count unique kmers using HyperLogLog,
# using screed to feed each read to the counter.
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
n_consumed = 0
for n, record in enumerate(fasta_iter(open(filename)), 1):
n_consumed += hllcpp.consume_string(record['sequence'])
assert n == 99
assert n_consumed == 3960
assert abs(1 - float(hllcpp.estimate_cardinality()) / N_UNIQUE) < ERR_RATE
def test_hll_empty_fasta():
filename = utils.get_test_data('test-empty.fa')
hll = khmer.HLLCounter(ERR_RATE, K)
with assert_raises(IOError):
hll.consume_fasta(filename)
def test_hll_consume_fasta():
# test c++ code to count unique kmers using HyperLogLog
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
n, n_consumed = hllcpp.consume_fasta(filename)
assert n == 99
assert n_consumed == 3960
assert abs(1 - float(hllcpp.estimate_cardinality()) / N_UNIQUE) < ERR_RATE
def test_hll_consume_fasta_ep():
    # During estimation trigger the _Ep() method;
    # for this, all internal counter values need to be different from zero.
filename = utils.get_test_data('paired-mixed.fa')
hll = khmer.HLLCounter(0.36, 32)
n, n_consumed = hll.consume_fasta(filename)
assert all(c != 0 for c in hll.counters)
assert len(hll) == 236
assert n == 11
assert n_consumed == 575
def test_hll_consume_fasta_estimate_bias():
    # During estimation trigger the estimate_bias method;
    # for this, all internal counter values need to be different from zero,
    # and the cardinality should be small (if it is large we fall back to the
    # default case).
filename = utils.get_test_data("test-abund-read-3.fa")
hll = khmer.HLLCounter(0.36, K)
n, n_consumed = hll.consume_fasta(filename)
assert all(c != 0 for c in hll.counters)
assert len(hll) == 79
assert n == 21
assert n_consumed == 1176
def test_hll_len():
filename = utils.get_test_data('random-20-a.fa')
hllcpp = khmer.HLLCounter(ERR_RATE, K)
n, n_consumed = hllcpp.consume_fasta(filename)
assert n == 99
assert n_consumed == 3960
assert hllcpp.estimate_cardinality() == len(hllcpp)
def test_hll_empty():
hllcpp = khmer.HLLCounter(ERR_RATE, K)
assert len(hllcpp) == 0
def test_hll_readonly_alpha():
hllcpp = khmer.HLLCounter(ERR_RATE, K)
with assert_raises(AttributeError):
hllcpp.alpha = 5
def test_hll_cover_calc_alpha():
hllcpp = khmer.HLLCounter(0.36, K)
counters = hllcpp.counters
assert hllcpp.alpha == 0.673
assert len(counters) == 2 ** 4
hllcpp = khmer.HLLCounter(0.21, K)
counters = hllcpp.counters
assert hllcpp.alpha == 0.697
assert len(counters) == 2 ** 5
hllcpp = khmer.HLLCounter(0.16, K)
counters = hllcpp.counters
assert hllcpp.alpha == 0.709
assert len(counters) == 2 ** 6
def test_hll_invalid_base():
# this test should raise a ValueError,
# since there are invalid bases in read.
hllcpp = khmer.HLLCounter(ERR_RATE, 5)
with assert_raises(ValueError):
hllcpp.consume_string("ACGTTTCGNAATNNNNN")
def test_hll_invalid_error_rate():
# test if error_rate is a valid value
with assert_raises(ValueError):
hllcpp = khmer.HLLCounter(-0.01, K)
def test_hll_invalid_error_rate_max():
# test if error_rate is a valid value
with assert_raises(ValueError):
hllcpp = khmer.HLLCounter(0.367696, K)
def test_hll_error_rate_max():
# test if error_rate is a valid value
hllcpp = khmer.HLLCounter(0.367695, K)
assert len(hllcpp.counters) == 2 ** 4
def test_hll_invalid_error_rate_min():
# test if error_rate is a valid value
with assert_raises(ValueError):
hllcpp = khmer.HLLCounter(0.0040624, K)
def test_hll_error_rate_min():
# test if error_rate is a valid value
hllcpp = khmer.HLLCounter(0.0040625, K)
assert len(hllcpp.counters) == 2 ** 16
def test_hll_change_error_rate():
hllcpp = khmer.HLLCounter(0.0040625, K)
assert hllcpp.error_rate == 0.0040625
    # error rate is discrete; what we test here is whether an error rate of 1%
    # rounds to the appropriate value
hllcpp.error_rate = 0.01
assert hllcpp.error_rate == 0.008125
with assert_raises(TypeError):
del hllcpp.error_rate
with assert_raises(TypeError):
hllcpp.error_rate = 5
with assert_raises(ValueError):
hllcpp.error_rate = 2.5
with assert_raises(ValueError):
hllcpp.error_rate = -10.
# error rate can only be changed prior to first counting,
hllcpp.consume_string('AAACCACTTGTGCATGTCAGTGCAGTCAGT')
with assert_raises(AttributeError):
hllcpp.error_rate = 0.3
def test_hll_change_ksize():
hllcpp = khmer.HLLCounter(0.0040625, K)
assert hllcpp.ksize == K
hllcpp.ksize = 24
assert hllcpp.ksize == 24
hllcpp.ksize = 12
assert hllcpp.ksize == 12
with assert_raises(ValueError):
hllcpp.ksize = -20
with assert_raises(TypeError):
del hllcpp.ksize
with assert_raises(TypeError):
hllcpp.ksize = 33.4
    # ksize can only be changed prior to first counting,
hllcpp.consume_string('AAACCACTTGTGCATGTCAGTGCAGTCAGT')
with assert_raises(AttributeError):
hllcpp.ksize = 30
def test_hll_get_counters():
hll = khmer.HLLCounter(0.36, K)
counters = hll.counters
assert len(counters) == 2 ** 4
assert all(c == 0 for c in counters)
def test_hll_merge_1():
hll = khmer.HLLCounter(0.36, K)
hll2 = khmer.HLLCounter(0.36, K - 1)
try:
hll.merge(hll2)
assert 0, "previous statement should fail with a ValueError"
except ValueError as err:
print(str(err))
def test_hll_merge_2():
hll = khmer.HLLCounter(0.10, K)
hll2 = khmer.HLLCounter(0.36, K)
try:
hll.merge(hll2)
assert 0, "previous statement should fail with a ValueError"
except ValueError as err:
print(str(err))
def test_hll_merge_3():
hll = khmer.HLLCounter(0.36, 32)
hll2 = khmer.HLLCounter(0.36, 32)
filename = utils.get_test_data('paired-mixed.fa')
hll = khmer.HLLCounter(0.36, 32)
hll.consume_fasta(filename)
hll2 = khmer.HLLCounter(0.36, 32)
hll2.consume_fasta(filename)
assert len(hll) == 236
assert len(hll2) == 236
hll.merge(hll2)
assert len(hll) == 236
|
|
"""Class to manage the entities for a single platform."""
import asyncio
from homeassistant.const import DEVICE_DEFAULT_NAME
from homeassistant.core import callback, valid_entity_id, split_entity_id
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
from .event import async_track_time_interval, async_call_later
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 60
PLATFORM_NOT_READY_RETRIES = 10
class EntityPlatform:
"""Manage the entities for a single platform."""
def __init__(self, *, hass, logger, domain, platform_name, platform,
scan_interval, entity_namespace,
async_entities_added_callback):
"""Initialize the entity platform.
hass: HomeAssistant
logger: Logger
domain: str
platform_name: str
scan_interval: timedelta
parallel_updates: int
entity_namespace: str
async_entities_added_callback: @callback method
"""
self.hass = hass
self.logger = logger
self.domain = domain
self.platform_name = platform_name
self.platform = platform
self.scan_interval = scan_interval
self.entity_namespace = entity_namespace
self.async_entities_added_callback = async_entities_added_callback
self.config_entry = None
self.entities = {}
self._tasks = []
# Method to cancel the state change listener
self._async_unsub_polling = None
# Method to cancel the retry of setup
self._async_cancel_retry_setup = None
self._process_updates = asyncio.Lock(loop=hass.loop)
# Platform is None for the EntityComponent "catch-all" EntityPlatform
# which powers entity_component.add_entities
if platform is None:
self.parallel_updates = None
return
# Async platforms do all updates in parallel by default
if hasattr(platform, 'async_setup_platform'):
default_parallel_updates = 0
else:
default_parallel_updates = 1
parallel_updates = getattr(platform, 'PARALLEL_UPDATES',
default_parallel_updates)
if parallel_updates:
self.parallel_updates = asyncio.Semaphore(
parallel_updates, loop=hass.loop)
else:
self.parallel_updates = None
async def async_setup(self, platform_config, discovery_info=None):
"""Set up the platform from a config file."""
platform = self.platform
hass = self.hass
@callback
def async_create_setup_task():
"""Get task to set up platform."""
if getattr(platform, 'async_setup_platform', None):
return platform.async_setup_platform(
hass, platform_config,
self._async_schedule_add_entities, discovery_info
)
# This should not be replaced with hass.async_add_job because
# we don't want to track this task in case it blocks startup.
return hass.loop.run_in_executor(
None, platform.setup_platform, hass, platform_config,
self._schedule_add_entities, discovery_info
)
await self._async_setup_platform(async_create_setup_task)
async def async_setup_entry(self, config_entry):
"""Set up the platform from a config entry."""
# Store it so that we can save config entry ID in entity registry
self.config_entry = config_entry
platform = self.platform
@callback
def async_create_setup_task():
"""Get task to set up platform."""
return platform.async_setup_entry(
self.hass, config_entry, self._async_schedule_add_entities)
return await self._async_setup_platform(async_create_setup_task)
async def _async_setup_platform(self, async_create_setup_task, tries=0):
"""Set up a platform via config file or config entry.
async_create_setup_task creates a coroutine that sets up platform.
"""
logger = self.logger
hass = self.hass
full_name = '{}.{}'.format(self.domain, self.platform_name)
logger.info("Setting up %s", full_name)
warn_task = hass.loop.call_later(
SLOW_SETUP_WARNING, logger.warning,
"Setup of platform %s is taking over %s seconds.",
self.platform_name, SLOW_SETUP_WARNING)
try:
task = async_create_setup_task()
await asyncio.wait_for(
asyncio.shield(task, loop=hass.loop),
SLOW_SETUP_MAX_WAIT, loop=hass.loop)
# Block till all entities are done
if self._tasks:
pending = [task for task in self._tasks if not task.done()]
self._tasks.clear()
if pending:
await asyncio.wait(
pending, loop=self.hass.loop)
hass.config.components.add(full_name)
return True
except PlatformNotReady:
tries += 1
wait_time = min(tries, 6) * 30
logger.warning(
'Platform %s not ready yet. Retrying in %d seconds.',
self.platform_name, wait_time)
async def setup_again(now):
"""Run setup again."""
self._async_cancel_retry_setup = None
await self._async_setup_platform(
async_create_setup_task, tries)
self._async_cancel_retry_setup = \
async_call_later(hass, wait_time, setup_again)
return False
except asyncio.TimeoutError:
logger.error(
"Setup of platform %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer.",
self.platform_name, SLOW_SETUP_MAX_WAIT)
return False
except Exception: # pylint: disable=broad-except
logger.exception(
"Error while setting up platform %s", self.platform_name)
return False
finally:
warn_task.cancel()
def _schedule_add_entities(self, new_entities, update_before_add=False):
"""Schedule adding entities for a single platform, synchronously."""
run_callback_threadsafe(
self.hass.loop,
self._async_schedule_add_entities, list(new_entities),
update_before_add
).result()
@callback
def _async_schedule_add_entities(self, new_entities,
update_before_add=False):
"""Schedule adding entities for a single platform async."""
self._tasks.append(self.hass.async_add_job(
self.async_add_entities(
new_entities, update_before_add=update_before_add)
))
def add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
        # Warn about update_before_add=True outside tests, since it can deadlock
if update_before_add:
self.logger.warning(
"Call 'add_entities' with update_before_add=True "
"only inside tests or you can run into a deadlock!")
run_coroutine_threadsafe(
self.async_add_entities(list(new_entities), update_before_add),
self.hass.loop).result()
async def async_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform async.
This method must be run in the event loop.
"""
# handle empty list from component/platform
if not new_entities:
return
hass = self.hass
device_registry = await \
hass.helpers.device_registry.async_get_registry()
entity_registry = await \
hass.helpers.entity_registry.async_get_registry()
tasks = [
self._async_add_entity(entity, update_before_add,
entity_registry, device_registry)
for entity in new_entities]
# No entities for processing
if not tasks:
return
await asyncio.wait(tasks, loop=self.hass.loop)
self.async_entities_added_callback()
if self._async_unsub_polling is not None or \
not any(entity.should_poll for entity
in self.entities.values()):
return
self._async_unsub_polling = async_track_time_interval(
self.hass, self._update_entity_states, self.scan_interval
)
async def _async_add_entity(self, entity, update_before_add,
entity_registry, device_registry):
"""Add an entity to the platform."""
if entity is None:
raise ValueError('Entity cannot be None')
entity.hass = self.hass
entity.platform = self
entity.parallel_updates = self.parallel_updates
# Update properties before we generate the entity_id
if update_before_add:
try:
await entity.async_device_update(warning=False)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"%s: Error on device update!", self.platform_name)
return
suggested_object_id = None
# Get entity_id from unique ID registration
if entity.unique_id is not None:
if entity.entity_id is not None:
suggested_object_id = split_entity_id(entity.entity_id)[1]
else:
suggested_object_id = entity.name
if self.entity_namespace is not None:
suggested_object_id = '{} {}'.format(
self.entity_namespace, suggested_object_id)
if self.config_entry is not None:
config_entry_id = self.config_entry.entry_id
else:
config_entry_id = None
device_info = entity.device_info
device_id = None
if config_entry_id is not None and device_info is not None:
processed_dev_info = {
'config_entry_id': config_entry_id
}
for key in (
'connections',
'identifiers',
'manufacturer',
'model',
'name',
'sw_version',
'via_hub',
):
if key in device_info:
processed_dev_info[key] = device_info[key]
device = device_registry.async_get_or_create(
**processed_dev_info)
if device:
device_id = device.id
entry = entity_registry.async_get_or_create(
self.domain, self.platform_name, entity.unique_id,
suggested_object_id=suggested_object_id,
config_entry_id=config_entry_id,
device_id=device_id,
known_object_ids=self.entities.keys())
if entry.disabled:
self.logger.info(
"Not adding entity %s because it's disabled",
entry.name or entity.name or
'"{} {}"'.format(self.platform_name, entity.unique_id))
return
entity.entity_id = entry.entity_id
entity.registry_name = entry.name
entity.async_on_remove(entry.add_update_listener(entity))
# We won't generate an entity ID if the platform has already set one
# We will however make sure that platform cannot pick a registered ID
elif (entity.entity_id is not None and
entity_registry.async_is_registered(entity.entity_id)):
# If entity already registered, convert entity id to suggestion
suggested_object_id = split_entity_id(entity.entity_id)[1]
entity.entity_id = None
# Generate entity ID
if entity.entity_id is None:
suggested_object_id = \
suggested_object_id or entity.name or DEVICE_DEFAULT_NAME
if self.entity_namespace is not None:
suggested_object_id = '{} {}'.format(self.entity_namespace,
suggested_object_id)
entity.entity_id = entity_registry.async_generate_entity_id(
self.domain, suggested_object_id, self.entities.keys())
        # Make sure it is valid in case an entity set the value itself
if not valid_entity_id(entity.entity_id):
raise HomeAssistantError(
'Invalid entity id: {}'.format(entity.entity_id))
if (entity.entity_id in self.entities or
entity.entity_id in self.hass.states.async_entity_ids(
self.domain)):
msg = 'Entity id already exists: {}'.format(entity.entity_id)
if entity.unique_id is not None:
msg += '. Platform {} does not generate unique IDs'.format(
self.platform_name)
raise HomeAssistantError(msg)
entity_id = entity.entity_id
self.entities[entity_id] = entity
entity.async_on_remove(lambda: self.entities.pop(entity_id))
await entity.async_added_to_hass()
await entity.async_update_ha_state()
async def async_reset(self):
"""Remove all entities and reset data.
This method must be run in the event loop.
"""
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
if not self.entities:
return
tasks = [self.async_remove_entity(entity_id)
for entity_id in self.entities]
await asyncio.wait(tasks, loop=self.hass.loop)
if self._async_unsub_polling is not None:
self._async_unsub_polling()
self._async_unsub_polling = None
async def async_remove_entity(self, entity_id):
"""Remove entity id from platform."""
await self.entities[entity_id].async_remove()
# Clean up polling job if no longer needed
if (self._async_unsub_polling is not None and
not any(entity.should_poll for entity
in self.entities.values())):
self._async_unsub_polling()
self._async_unsub_polling = None
async def _update_entity_states(self, now):
"""Update the states of all the polling entities.
To protect from flooding the executor, we will update async entities
in parallel and other entities sequential.
This method must be run in the event loop.
"""
if self._process_updates.locked():
self.logger.warning(
"Updating %s %s took longer than the scheduled update "
"interval %s", self.platform_name, self.domain,
self.scan_interval)
return
async with self._process_updates:
tasks = []
for entity in self.entities.values():
if not entity.should_poll:
continue
tasks.append(entity.async_update_ha_state(True))
if tasks:
await asyncio.wait(tasks, loop=self.hass.loop)
|
|
#!/usr/bin/env python
#
# svnfsfs_tests.py: testing the 'svnfsfs' tool.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import os
import logging
import re
import shutil
import sys
import threading
import time
import gzip
logger = logging.getLogger()
# Our testing module
import svntest
from svntest.verify import SVNExpectedStdout, SVNExpectedStderr
from svntest.verify import SVNUnexpectedStderr
from svntest.verify import UnorderedOutput
from svntest.main import SVN_PROP_MERGEINFO
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
SkipDumpLoadCrossCheck = svntest.testcase.SkipDumpLoadCrossCheck_deco
Item = svntest.wc.StateItem
#----------------------------------------------------------------------
# How we currently test 'svnfsfs' --
#
# 'svnfsfs stats': Run this on a greek repo, then verify that the
# various sections are present. The section contents
# is matched against section-specific patterns.
#
# 'svnfsfs dump-index': Tested implicitly by the load-index test
#
# 'svnfsfs load-index': Create a greek repo but set shard to 2 and pack
# it so we can load into a packed shard with more
# than one revision to test ordering issues etc.
# r1 also contains a non-trivial number of items such
# that parser issues etc. have a chance to surface.
#
# The idea is to dump the index of the pack, mess with
# it to cover lots of UI guarantees but keep the
# semantics of the relevant bits. Then feed it back
# to load-index and verify that the result is still
# a complete, consistent etc. repo.
#
######################################################################
# Helper routines
def patch_format(repo_dir, shard_size):
"""Rewrite the format of the FSFS repository REPO_DIR so
that it would use sharding with SHARDS revisions per shard."""
format_path = os.path.join(repo_dir, "db", "format")
contents = open(format_path, 'rb').read()
processed_lines = []
for line in contents.split(b"\n"):
if line.startswith(b"layout "):
processed_lines.append(b"layout sharded %d" % shard_size)
else:
processed_lines.append(line)
new_contents = b"\n".join(processed_lines)
os.chmod(format_path, svntest.main.S_ALL_RW)
open(format_path, 'wb').write(new_contents)
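# Illustrative sketch, not part of the test suite: the only transformation
# patch_format() applies is to the "layout" line of db/format; for example, with
# shard_size=2 a line such as b"layout linear" becomes b"layout sharded 2",
# while every other line is kept unchanged.
def _example_patched_layout_line(line=b"layout linear", shard_size=2):
  if line.startswith(b"layout "):
    return b"layout sharded %d" % shard_size
  return line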
######################################################################
# Tests
#----------------------------------------------------------------------
@SkipUnless(svntest.main.is_fs_type_fsfs)
def test_stats(sbox):
"stats output"
sbox.build(create_wc=False)
exit_code, output, errput = \
svntest.actions.run_and_verify_svnfsfs(None, [], 'stats', sbox.repo_dir)
# split output into sections
sections = { }
last_line = ''
section_name = ''
section_contents = []
for line in output:
line = line.rstrip()
if line != '':
# If the first character is not a space, then LINE is a section header
if line[0] == ' ':
section_contents.append(line)
else:
# Store previous section
if section_name != '':
sections[section_name] = section_contents
# Is the output formatted nicely?
if last_line != '':
logger.warn("Error: no empty line before section '" + line + "'")
raise svntest.Failure
# start new section
section_name = line
section_contents = []
last_line = line
sections[section_name] = section_contents
# verify that these sections exist
sections_to_find = ['Reading revisions',
'Global statistics:',
'Noderev statistics:',
'Representation statistics:',
'Directory representation statistics:',
'File representation statistics:',
'Directory property representation statistics:',
'File property representation statistics:',
'Largest representations:',
'Extensions by number of representations:',
'Extensions by size of changed files:',
'Extensions by size of representations:',
'Histogram of expanded node sizes:',
'Histogram of representation sizes:',
'Histogram of file sizes:',
'Histogram of file representation sizes:',
'Histogram of file property sizes:',
'Histogram of file property representation sizes:',
'Histogram of directory sizes:',
'Histogram of directory representation sizes:',
'Histogram of directory property sizes:',
'Histogram of directory property representation sizes:']
  patterns_to_find = {
    'Reading revisions' : [r'\s+ 0[ 0-9]*'],
    'Global .*'         : [r'.*\d+ bytes in .*\d+ revisions',
                           r'.*\d+ bytes in .*\d+ changes',
                           r'.*\d+ bytes in .*\d+ node revision records',
                           r'.*\d+ bytes in .*\d+ representations',
                           r'.*\d+ bytes expanded representation size',
                           r'.*\d+ bytes with rep-sharing off' ],
    'Noderev .*'        : [r'.*\d+ bytes in .*\d+ nodes total',
                           r'.*\d+ bytes in .*\d+ directory noderevs',
                           r'.*\d+ bytes in .*\d+ file noderevs' ],
    'Representation .*' : [r'.*\d+ bytes in .*\d+ representations total',
                           r'.*\d+ bytes in .*\d+ directory representations',
                           r'.*\d+ bytes in .*\d+ file representations',
                           r'.*\d+ bytes in .*\d+ representations of added file nodes',
                           r'.*\d+ bytes in .*\d+ directory property representations',
                           r'.*\d+ bytes in .*\d+ file property representations',
                           r'.*\d+ average delta chain length',
                           r'.*\d+ bytes in header & footer overhead' ],
    '.* representation statistics:' :
                          [r'.*\d+ bytes in .*\d+ reps',
                           r'.*\d+ bytes in .*\d+ shared reps',
                           r'.*\d+ bytes expanded size',
                           r'.*\d+ bytes expanded shared size',
                           r'.*\d+ bytes with rep-sharing off',
                           r'.*\d+ shared references',
                           r'.*\d+ average delta chain length'],
    'Largest.*:'        : [r'.*\d+ r\d+ */\S*'],
    'Extensions by number .*:' :
                          [r'.*\d+ \( ?\d+%\) representations'],
    'Extensions by size .*:' :
                          [r'.*\d+ \( ?\d+%\) bytes'],
    'Histogram of .*:'  : [r'.*\d+ \.\. < \d+.*\d+ \( ?\d+%\) bytes in *\d+ \( ?\d+%\) items']
  }
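  # Purely illustrative (numbers made up): a line such as
  #   "      16 .. < 32        1234 ( 5%) bytes in     42 ( 3%) items"
  # is the kind of line the 'Histogram of .*:' pattern above is meant to match.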
# check that the output contains all sections
for section_name in sections_to_find:
    if section_name not in sections:
logger.warn("Error: section '" + section_name + "' not found")
raise svntest.Failure
# check section contents
for section_name in sections.keys():
patterns = []
# find the suitable patterns for the current section
for pattern in patterns_to_find.keys():
if re.match(pattern, section_name):
patterns = patterns_to_find[pattern]
        break
if len(patterns) == 0:
logger.warn("Error: unexpected section '" + section_name + "' found'")
logger.warn(sections[section_name])
raise svntest.Failure
# each line in the section must match one of the patterns
for line in sections[section_name]:
found = False
for pattern in patterns:
if re.match(pattern, line):
found = True
break
if not found:
logger.warn("Error: unexpected line '" + line + "' in section '"
+ section_name + "'")
logger.warn(sections[section_name])
raise svntest.Failure
#----------------------------------------------------------------------
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
@SkipUnless(svntest.main.is_fs_log_addressing)
def load_index_sharded(sbox):
"load-index in a packed repo"
# Configure two files per shard to trigger packing.
sbox.build(create_wc=False)
patch_format(sbox.repo_dir, shard_size=2)
expected_output = ["Packing revisions in shard 0...done.\n"]
svntest.actions.run_and_verify_svnadmin(expected_output, [],
"pack", sbox.repo_dir)
# Read P2L index using svnfsfs.
exit_code, items, errput = \
svntest.actions.run_and_verify_svnfsfs(None, [], "dump-index", "-r0",
sbox.repo_dir)
# load-index promises to deal with input that
#
# * uses the same encoding as the dump-index output
# * is not in ascending item offset order
# * contains lines with the full table header
  #  * contains invalid or incorrect data in the checksum column and beyond
# * starts with an item which does not belong to the first revision
# in the pack file
#
  # So, let's mess with the ITEMS list to put these promises to the test.
# not in ascending order
items.reverse()
# multiple headers (there is already one now at the bottom)
items.insert(0, " Start Length Type Revision Item Checksum\n")
# make columns have a variable size
# mess with the checksums
# add a junk column
# keep header lines as are
for i in range(0, len(items)):
if items[i].find("Start") == -1:
columns = items[i].split()
columns[5] = columns[5].replace('f','-').replace('0','9')
columns.append("junk")
items[i] = ' '.join(columns) + "\n"
# first entry shall be for rev 1, pack starts at rev 0, though
for i in range(0, len(items)):
if items[i].split()[3] == "1":
if i != 1:
items[i],items[1] = items[1],items[i]
break
  assert items[1].split()[3] == "1"
# The STDIN data must be binary.
items = svntest.main.ensure_list(map(str.encode, items))
# Reload the index
exit_code, output, errput = svntest.main.run_command_stdin(
svntest.main.svnfsfs_binary, [], 0, False, items,
"load-index", sbox.repo_dir)
# Run verify to see whether we broke anything.
expected_output = ["* Verifying metadata at revision 0 ...\n",
"* Verifying repository metadata ...\n",
"* Verified revision 0.\n",
"* Verified revision 1.\n"]
svntest.actions.run_and_verify_svnadmin(expected_output, [],
"verify", sbox.repo_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def test_stats_on_empty_repo(sbox):
"stats on empty repo shall not crash"
sbox.build(create_wc=False, empty=True)
exit_code, output, errput = \
svntest.actions.run_and_verify_svnfsfs(None, [], 'stats', sbox.repo_dir)
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
test_stats,
load_index_sharded,
test_stats_on_empty_repo,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
|
|
from splitwise.picture import Picture
from splitwise.balance import Balance
import splitwise.group as Group
class User(object):
""" Contains basic user data.
Attributes:
id(long, optional): ID of the user
first_name(str, optional): First name of the user
last_name(str, optional): Last name of the user
email(str, optional): Email of the user
registration_status(str, optional): Registration status of the user
picture(:obj:`splitwise.picture.Picture`, optional): Profile picture of the user
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing user object
"""
if data:
self.first_name = data["first_name"]
self.last_name = data["last_name"]
if 'id' in data:
self.id = data["id"]
else:
self.id = None
if 'email' in data:
self.email = data["email"]
else:
self.email = None
if 'registration_status' in data:
self.registration_status = data["registration_status"]
else:
self.registration_status = None
if 'picture' in data:
self.picture = Picture(data["picture"])
else:
self.picture = None
def getId(self):
""" Returns id of the user
Returns:
long: ID of the user
"""
return self.id
def getFirstName(self):
""" Returns first name of the user
Returns:
str: First name of the user
"""
return self.first_name
def getLastName(self):
""" Returns last name of the user
Returns:
str: Last name of the user
"""
return self.last_name
def getEmail(self):
""" Returns email of the user
Returns:
str: Email of the user
"""
return self.email
def getRegistrationStatus(self):
""" Returns registration status of the user
Returns:
str: Registration status of the user
"""
return self.registration_status
def getPicture(self):
""" Returns profile picture of the user
Returns:
:obj:`splitwise.picture.Picture`: Picture of the user
"""
return self.picture
def setFirstName(self, first_name):
""" Sets the first name of the user
        Args:
first_name(str): First name of the user
"""
self.first_name = first_name
def setLastName(self, last_name):
""" Sets the last name of the user
        Args:
last_name(str): Last name of the user
"""
self.last_name = last_name
def setEmail(self, email):
""" Sets the email of the user
        Args:
email(str): Email of the user
"""
self.email = email
def setId(self, id):
""" Sets the id of the user
        Args:
id(long): ID of the user
"""
self.id = id
def __getattr__(self, item):
return None
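# Note (illustrative): because __getattr__ above returns None for any missing
# attribute, reading a field that was never set does not raise AttributeError:
#   User().first_name is None            # True
#   User().registration_status is None   # also True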
class CurrentUser(User):
""" Represents the current logged in user.
Inherits: :class:`splitwise.user.User`
Attributes:
default_currency(str, optional): Default Currency
locale(str, optional): Locale
date_format(str, optional): Date format used by the user
default_group_id(long, optional): User's default group id
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing current user object
"""
User.__init__(self, data)
        if data:
            self.default_currency = data["default_currency"]
            self.locale = data["locale"]
            self.date_format = data["date_format"]
            self.default_group_id = data["default_group_id"]
def getDefaultCurrency(self):
""" Returns default currency of the user
Returns:
str: Default currency of the user
"""
return self.default_currency
def getLocale(self):
""" Returns locale of the user
Returns:
str: locale of the user
"""
return self.locale
def getDateFormat(self):
""" Returns Date format used by the user
Returns:
str: Date format used by the user
"""
return self.date_format
def getDefaultGroupId(self):
""" Returns default group id the user
Returns:
long: default group id the user
"""
return self.default_group_id
class Friend(User):
""" Represents a friend user.
Inherits: :class:`splitwise.user.User`
Attributes:
balances(:obj:`list` of :obj:`splitwise.balance.Balance`, optional): List of balances
groups(:obj:`list` of :obj:`splitwise.group.FriendGroup`, optional): List of groups
updated_at(str, optional): ISO 8601 Date time. The last updated date time of user
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing friend user object
"""
User.__init__(self, data)
if data:
if 'updated_at' in data:
self.updated_at = data["updated_at"]
else:
self.updated_at = None
self.balances = []
for balance in data["balance"]:
self.balances.append(Balance(balance))
self.groups = []
if "groups" in data:
for group in data["groups"]:
self.groups.append(Group.FriendGroup(group))
else:
self.groups = None
def getUpdatedAt(self):
""" Returns last updated date of the user
Returns:
str: last updated date of the user
"""
return self.updated_at
def getBalances(self):
""" Returns balances of the user
Returns:
:obj:`list` of :obj:`splitwise.balance.Balance`: List of balances
"""
return self.balances
def getGroups(self):
""" Returns balances of the user
Returns:
:obj:`list` of :obj:`splitwise.group.Group`: List of groups
"""
return self.groups
class ExpenseUser(User):
""" Represents a user in an expense.
Inherits: :class:`splitwise.user.User`
Attributes:
paid_share(str, optional): Paid share for the expense
owed_share(str, optional): Owed share for the expense
net_balance(str, optional): Net balance for the expense
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing user object
"""
if data:
User.__init__(self, data["user"])
self.paid_share = data["paid_share"]
self.owed_share = data["owed_share"]
self.net_balance = data["net_balance"]
def getPaidShare(self):
""" Returns paid share of the user
Returns:
str: paid share of the user
"""
return self.paid_share
def getOwedShare(self):
""" Returns owed share of the user
Returns:
str: owed share of the user
"""
return self.owed_share
def getNetBalance(self):
""" Returns net balance of the user
Returns:
str: net balance of the user
"""
return self.net_balance
def setPaidShare(self, paid_share):
""" Sets the paid share of the user
Args:
            paid_share(str): Paid share of the user
"""
self.paid_share = paid_share
def setOwedShare(self, owed_share):
""" Sets the owed share of the user
Args:
            owed_share(str): Owed share of the user
"""
self.owed_share = owed_share
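# A minimal usage sketch (all field values below are made up for illustration):
#   data = {"user": {"id": 1, "first_name": "Ada", "last_name": "Lovelace"},
#           "paid_share": "10.0", "owed_share": "5.0", "net_balance": "5.0"}
#   member = ExpenseUser(data)
#   member.getFirstName()   # 'Ada'
#   member.getNetBalance()  # '5.0'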
|
|
#!/usr/bin/env python3
import os
import time
import tomlkit
import shm
from conf.vehicle import VEHICLE
# The directory where the serial toml config files are located
SERIAL_CONF_DIR = '/home/software/cuauv/software/serial/seriald/conf'
# The name of the motor desires SHM group
MOTOR_DESIRES = 'motor_desires'
# The name of the ramp status SHM group
RAMP_STATUS = 'ramp_status'
# Suffix at the end of ramp status SHM variables
RAMP_SUFFIX = '_ramp'
# How fast to spin the thrusters (0 to 255)
TEST_MOTOR_SPEED = 30
# How long to spin the thrusters (in seconds)
TEST_DURATION = 2
# Used for colored output
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def colored(msg, color):
"""
Returns a string wrapped in provided color codes. See bcolors for available
colors.
"""
return '{}{}{}'.format(color, msg, bcolors.ENDC)
class Failure(Exception):
"""
A failure mode. Will display the message in red and quit.
"""
    def __init__(self, msg, extra_lines=None, *args, **kwargs):
        super().__init__(msg, *args, **kwargs)
        # avoid a shared mutable default; give each instance its own list
        self.extra_lines = list(extra_lines) if extra_lines else []
def extra(self, line):
"""
Add an extra line to the error output.
"""
self.extra_lines.append(line)
class TomlParseFailure(Failure):
"""
A failure in parsing a TOML file.
"""
pass
def load_serial_conf(fname):
"""
Load a TOML file from the serial daemon's conf directory. The directory is
specified by SERIAL_CONF_DIR; :fname: is just the name of the file, e.g.
'odysseus.toml'.
"""
full_path = os.path.join(SERIAL_CONF_DIR, fname)
if not os.path.isfile(full_path):
raise Failure("File '{}' does not exist on path '{}'"
.format(fname, SERIAL_CONF_DIR))
try:
with open(full_path) as f:
return tomlkit.parse(f.read())
except tomlkit.exceptions.ParseError as e:
raise TomlParseFailure(str(e))
def find_values(toml, predicate, type_filter=None, lst=None, path=''):
    """
    Perform a nested search in :toml: for all values of type :type_filter: (if
    provided) that satisfy :predicate:, appending entries to :lst: (a fresh
    list is created if none is given). :path: is used for recursively
    constructing paths. The collected entries are (path, value) tuples where
    the path is a dot-separated list of dictionary names; :lst: is also
    returned for convenience.
    """
    if lst is None:
        lst = []
    def handle(value, handle_path):
        if (type_filter is None or isinstance(value, type_filter)) \
                and predicate(value):
            # we add an extra dot at the beginning, so remove it
            lst.append((handle_path[1:], value))
for key, value in toml.items():
new_path = '{}.{}'.format(path, key)
handle(value, new_path)
if isinstance(value, dict):
# traverse dictionaries
find_values(value, predicate, type_filter=type_filter, lst=lst,
path=new_path)
if isinstance(value, list):
# traverse lists
for val in value:
                handle(val, new_path)
    return lst
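# A small illustrative run of find_values (assumed data, not a real serial
# conf):
#   hits = []
#   find_values({'a': {'b': 'motor_desires.port'}},
#               lambda s: s.startswith('motor_desires'),
#               type_filter=str, lst=hits)
#   # hits == [('a.b', 'motor_desires.port')]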
def deep_set(toml, path, value):
"""
Set a value inside a nested dictionary from a dotted key path, e.g.
'outer.inner.var'. Sets in the key at :path: in dict :toml: to :value:.
"""
segments = path.split('.')
# the last one is the final key
for segment in segments[:-1]:
assert segment in toml
toml = toml[segment]
toml[segments[-1]] = value
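# Illustrative deep_set example (assumed nesting):
#   conf = {'outer': {'inner': {'var': 1}}}
#   deep_set(conf, 'outer.inner.var', 2)
#   # conf == {'outer': {'inner': {'var': 2}}}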
def main():
# load main serial conf for the current vehicle
vehicle_conf = load_serial_conf('{}.toml'.format(VEHICLE))
    if 'includes' not in vehicle_conf or \
not isinstance(vehicle_conf['includes'], list):
raise Failure("Serial configuration for {} is missing 'includes' field"
.format(VEHICLE))
shm_map = {}
thruster_files = {}
# traverse and validate conf files
for include in vehicle_conf['includes']:
try:
board_conf = load_serial_conf(include)
except TomlParseFailure as e:
# we will ignore parse errors and just work with the files that
# parse; e.g. currently ports.toml does not have commas between
# items in the list, but we don't need ports.toml to re-map
# thrusters
print(colored("Error parsing file '{}', ignoring: {}"
.format(include, str(e)), bcolors.WARNING))
continue
motor_desires = []
ramp_status = []
find_values(board_conf, lambda s: s.startswith(MOTOR_DESIRES),
type_filter=str, lst=motor_desires)
find_values(board_conf, lambda s: s.startswith(RAMP_STATUS),
type_filter=str, lst=ramp_status)
if len(motor_desires) != len(ramp_status):
raise Failure("Invalid thruster conf: '{}', incompatible {} and {}"
.format(include, MOTOR_DESIRES, RAMP_STATUS))
ramp_status_map = {}
for path, shm_var in ramp_status:
# chop the ramp suffix at the end, i.e. the '_ramp'
motor_desire_shm_var = '{}{}'.format(MOTOR_DESIRES,
shm_var[len(RAMP_STATUS):
-len(RAMP_SUFFIX)])
ramp_status_map[motor_desire_shm_var] = path
for path, shm_var in motor_desires:
            if shm_var not in ramp_status_map:
raise Failure("Invalid thruster conf: '{}', missing {} for {}"
.format(include, RAMP_STATUS,
shm_var[len(MOTOR_DESIRES) + 1:]))
shm_map[shm_var] = (include, path, ramp_status_map[shm_var])
        if include not in thruster_files:
thruster_files[include] = board_conf
shm_motor_desires = getattr(shm, MOTOR_DESIRES)
shm_names = set([name for name, _ in shm_motor_desires._fields])
# chop 'motor_desires.' off of the front
conf_names = set(key[len(MOTOR_DESIRES) + 1:] for key in shm_map.keys())
if shm_names != conf_names:
e = Failure('Thruster mismatch between shm.{} and serial conf!'
.format(MOTOR_DESIRES))
e.extra('Found thrusters in these files: {}'
.format(list(thruster_files.keys())))
e.extra('SHM names: {}'.format(sorted(list(shm_names))))
e.extra('Serial conf names: {}'.format(sorted(list(conf_names))))
raise e
if len(shm_names) == 0:
e = Failure('No thrusters found!')
        e.extra('Searched files: {}'.format(list(vehicle_conf['includes'])))
raise e
# TODO check to make sure serial daemon is running
# check soft kill
if shm.switches.soft_kill.get():
msg = 'Soft kill enabled. Override? [yN] '
if input(colored(msg, bcolors.WARNING)).strip().lower() in ['y', 'yes']:
shm.switches.soft_kill.set(False)
else:
raise Failure('Cannot proceed while soft killed, aborting')
print()
print('=== Instructions ===')
print()
print('The thruster mapper will spin one thruster at a time.')
print('Enter the name of the thruster (or a uniquely-defining prefix) to '
'assign that thruster.')
print('Enter nothing to have the thruster spin again.')
print('Ctrl+C to quit.')
print()
print('Available thrusters:')
for shm_name in shm_names:
print(' {}'.format(shm_name))
print()
print(colored('Press ENTER to start, Ctrl+C to abort.', bcolors.OKBLUE))
# wait for user to press ENTER
input()
re_mapping = {} # map from old values to new values
already_mapped = set() # new values that we have already mapped
# spin thrusters and ask user for re-mappings
for shm_name in sorted(shm_names):
full_shm_name = '{}.{}'.format(MOTOR_DESIRES, shm_name)
fname = shm_map[full_shm_name][0]
# 'ECE var' is the name of the variable on the board
ece_var_name = shm_map[full_shm_name][1].split('.')[-1]
passed = False # True if we can advance to the next thruster
spin_again = True
while not passed:
if spin_again:
# spin thruster
print("{} '{}' > '{}' (currently mapped to {})"
.format(colored('Spinning thruster:', bcolors.OKGREEN),
fname, ece_var_name, shm_name))
motor_desire = getattr(shm_motor_desires, shm_name)
motor_desire.set(TEST_MOTOR_SPEED)
time.sleep(TEST_DURATION)
motor_desire.set(0)
spin_again = False
user_input = input('Thruster name: ').strip().lower()
if user_input == '':
spin_again = True
continue
else:
matches = set(filter(lambda name: name.startswith(user_input),
shm_names))
if len(matches) == 1:
match = tuple(matches)[0]
if match in already_mapped:
print(colored("Already mapped thruster '{}'"
.format(match), bcolors.WARNING))
else:
re_mapping[shm_name] = match
already_mapped.add(match)
passed = True
elif len(matches) == 0:
print(colored("No thruster found for '{}'"
.format(user_input), bcolors.WARNING))
else:
print(colored("Multiple matches found for '{}': {}"
.format(user_input, matches),
bcolors.WARNING))
# change mappings in TOML data structures
for prev_name, (fname, motor_desires_path, ramp_status_path) \
in shm_map.items():
chopped_prev_name = prev_name[len(MOTOR_DESIRES) + 1:]
assert chopped_prev_name in re_mapping
new_motor_desires_name = '{}.{}'.format(MOTOR_DESIRES,
re_mapping[chopped_prev_name])
new_ramp_status_name = '{}.{}{}'.format(RAMP_STATUS,
re_mapping[chopped_prev_name],
RAMP_SUFFIX)
# update motor desires
deep_set(thruster_files[fname], motor_desires_path,
new_motor_desires_name)
# update ramp status
deep_set(thruster_files[fname], ramp_status_path, new_ramp_status_name)
# give user option: either overwrite existing files or print out
print()
print('Thruster config files: {}'.format(list(thruster_files.keys())))
overwrite = input(
colored('Overwrite previous thruster configuration? [yN] ',
bcolors.WARNING)).strip().lower() in ['y', 'yes']
for fname, toml in thruster_files.items():
output = tomlkit.dumps(toml)
if overwrite:
with open(os.path.join(SERIAL_CONF_DIR, fname), 'w') as f:
f.write(output)
else:
print()
print(colored("Proposed contents of '{}' "
"[the file has not been changed]:".format(fname),
bcolors.OKBLUE))
print(output)
if overwrite:
print()
print(colored(
'Saved. Restart serial daemon for changes to take effect.',
bcolors.OKBLUE))
def cleanup(was_soft_killed):
shm_motor_desires = getattr(shm, MOTOR_DESIRES)
if was_soft_killed:
shm.switches.soft_kill.set(True)
# zero all motor desires in case we quit halfway through a thruster spin
for motor_desire, _ in shm_motor_desires._fields:
getattr(shm_motor_desires, motor_desire).set(0)
if __name__ == '__main__':
was_soft_killed = shm.switches.soft_kill.get()
try:
main()
except KeyboardInterrupt:
pass
except EOFError:
pass
except Failure as e:
print(colored(str(e), bcolors.FAIL))
for line in e.extra_lines:
print(line)
finally:
# always clean up
cleanup(was_soft_killed)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Simple REST server that takes commands in a JSON payload
Interface to the :py:class:`~luigi.scheduler.CentralPlannerScheduler` class.
See :doc:`/central_scheduler` for more info.
"""
#
# Description: Adds code to visualize how long each task runs before it
#              reaches its next status (failed or done).
#              At "{base_url}/tasklist", all completed (failed or done) tasks
#              are shown, and a user can select one specific task to see how
#              its running time has changed over time.
#              At "{base_url}/tasklist/{task_name}", a multi-bar graph shows
#              how the running time of the selected task (up to the next
#              failed or done status) has evolved across runs.
#
# Copyright 2015 Naver Corp.
# Author Yeseul Park ([email protected])
#
import atexit
import json
import logging
import mimetypes
import os
import posixpath
import signal
import sys
import datetime
import time
import pkg_resources
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.web
from luigi.scheduler import CentralPlannerScheduler
logger = logging.getLogger("luigi.server")
class RPCHandler(tornado.web.RequestHandler):
"""
Handle remote scheduling calls using rpc.RemoteSchedulerResponder.
"""
def initialize(self, scheduler):
self._scheduler = scheduler
def get(self, method):
payload = self.get_argument('data', default="{}")
arguments = json.loads(payload)
# TODO: we should probably denote all methods on the scheduler that are "API-level"
# versus internal methods. Right now you can do a REST method call to any method
# defined on the scheduler, which is pretty bad from a security point of view.
if hasattr(self._scheduler, method):
result = getattr(self._scheduler, method)(**arguments)
self.write({"response": result}) # wrap all json response in a dictionary
else:
self.send_error(404)
post = get
class BaseTaskHistoryHandler(tornado.web.RequestHandler):
def initialize(self, scheduler):
self._scheduler = scheduler
def get_template_path(self):
return pkg_resources.resource_filename(__name__, 'templates')
class AllRunHandler(BaseTaskHistoryHandler):
def get(self):
all_tasks = self._scheduler.task_history.find_all_runs()
tasknames = []
for task in all_tasks:
tasknames.append(task.name)
        # show all task names so that one can be selected
        # (all tasks are listed because the event history of a selected task
        # can span more than 24 hours)
self.render("menu.html", tasknames=tasknames)
class SelectedRunHandler(BaseTaskHistoryHandler):
def get(self, name):
tasks = {}
statusResults = {}
taskResults = []
        # get all tasks that have been updated
all_tasks = self._scheduler.task_history.find_all_runs()
# get events history for all tasks
all_tasks_event_history = self._scheduler.task_history.find_all_events()
for task in all_tasks:
task_seq = task.id
task_name = task.name
# build the dictionary, tasks with index: id, value: task_name
tasks[task_seq] = str(task_name)
for task in all_tasks_event_history:
# if the name of user-selected task is in tasks, get its task_id
if tasks.get(task.task_id) == str(name):
status = str(task.event_name)
if status not in statusResults:
statusResults[status] = []
# append the id, task_id, ts, y with 0, next_process with null
# for the status(running/failed/done) of the selected task
statusResults[status].append(({
'id': str(task.id), 'task_id': str(task.task_id),
'x': from_utc(str(task.ts)), 'y': 0, 'next_process': ''}))
# append the id, task_name, task_id, status, datetime, timestamp
# for the selected task
taskResults.append({
'id': str(task.id), 'taskName': str(name), 'task_id': str(task.task_id),
'status': str(task.event_name), 'datetime': str(task.ts),
'timestamp': from_utc(str(task.ts))})
statusResults = json.dumps(statusResults)
taskResults = json.dumps(taskResults)
statusResults = tornado.escape.xhtml_unescape(str(statusResults))
taskResults = tornado.escape.xhtml_unescape(str(taskResults))
self.render('history.html', name=name, statusResults=statusResults, taskResults=taskResults)
def from_utc(utcTime, fmt=None):
"""convert UTC time string to time.struct_time: change datetime.datetime to time, return time.struct_time type"""
if fmt is None:
try_formats = ["%Y-%m-%d %H:%M:%S.%f", "%Y-%m-%d %H:%M:%S"]
else:
try_formats = [fmt]
for fmt in try_formats:
try:
time_struct = datetime.datetime.strptime(utcTime, fmt)
except ValueError:
pass
else:
date = int(time.mktime(time_struct.timetuple()))
return date
else:
raise ValueError("No UTC format matches {}".format(utcTime))
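# Illustrative only: from_utc("2015-01-02 03:04:05") parses the string with one
# of the formats above and returns an integer timestamp computed by
# time.mktime(), i.e. interpreted in the server's local timezone.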
class RecentRunHandler(BaseTaskHistoryHandler):
def get(self):
tasks = self._scheduler.task_history.find_latest_runs()
self.render("recent.html", tasks=tasks)
class ByNameHandler(BaseTaskHistoryHandler):
def get(self, name):
tasks = self._scheduler.task_history.find_all_by_name(name)
self.render("recent.html", tasks=tasks)
class ByIdHandler(BaseTaskHistoryHandler):
def get(self, id):
task = self._scheduler.task_history.find_task_by_id(id)
self.render("show.html", task=task)
class ByParamsHandler(BaseTaskHistoryHandler):
def get(self, name):
payload = self.get_argument('data', default="{}")
arguments = json.loads(payload)
tasks = self._scheduler.task_history.find_all_by_parameters(name, session=None, **arguments)
self.render("recent.html", tasks=tasks)
class StaticFileHandler(tornado.web.RequestHandler):
def get(self, path):
# Path checking taken from Flask's safe_join function:
# https://github.com/mitsuhiko/flask/blob/1d55b8983/flask/helpers.py#L563-L587
path = posixpath.normpath(path)
if os.path.isabs(path) or path.startswith(".."):
return self.send_error(404)
extension = os.path.splitext(path)[1]
if extension in mimetypes.types_map:
self.set_header("Content-Type", mimetypes.types_map[extension])
data = pkg_resources.resource_string(__name__, os.path.join("static", path))
self.write(data)
class RootPathHandler(BaseTaskHistoryHandler):
def get(self):
self.redirect("/static/visualiser/index.html")
def app(scheduler):
settings = {"static_path": os.path.join(os.path.dirname(__file__), "static"), "unescape": tornado.escape.xhtml_unescape}
handlers = [
(r'/api/(.*)', RPCHandler, {"scheduler": scheduler}),
(r'/static/(.*)', StaticFileHandler),
(r'/', RootPathHandler, {'scheduler': scheduler}),
(r'/tasklist', AllRunHandler, {'scheduler': scheduler}),
(r'/tasklist/(.*?)', SelectedRunHandler, {'scheduler': scheduler}),
(r'/history', RecentRunHandler, {'scheduler': scheduler}),
(r'/history/by_name/(.*?)', ByNameHandler, {'scheduler': scheduler}),
(r'/history/by_id/(.*?)', ByIdHandler, {'scheduler': scheduler}),
(r'/history/by_params/(.*?)', ByParamsHandler, {'scheduler': scheduler})
]
api_app = tornado.web.Application(handlers, **settings)
return api_app
def _init_api(scheduler, responder=None, api_port=None, address=None, unix_socket=None):
if responder:
raise Exception('The "responder" argument is no longer supported')
api_app = app(scheduler)
if unix_socket is not None:
api_sockets = [tornado.netutil.bind_unix_socket(unix_socket)]
else:
api_sockets = tornado.netutil.bind_sockets(api_port, address=address)
server = tornado.httpserver.HTTPServer(api_app)
server.add_sockets(api_sockets)
# Return the bound socket names. Useful for connecting client in test scenarios.
return [s.getsockname() for s in api_sockets]
def run(api_port=8082, address=None, unix_socket=None, scheduler=None, responder=None):
"""
Runs one instance of the API server.
"""
if scheduler is None:
scheduler = CentralPlannerScheduler()
# load scheduler state
scheduler.load()
_init_api(
scheduler=scheduler,
responder=responder,
api_port=api_port,
address=address,
unix_socket=unix_socket,
)
# prune work DAG every 60 seconds
pruner = tornado.ioloop.PeriodicCallback(scheduler.prune, 60000)
pruner.start()
def shutdown_handler(signum, frame):
exit_handler()
sys.exit(0)
@atexit.register
def exit_handler():
logger.info("Scheduler instance shutting down")
scheduler.dump()
stop()
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)
if os.name == 'nt':
signal.signal(signal.SIGBREAK, shutdown_handler)
else:
signal.signal(signal.SIGQUIT, shutdown_handler)
logger.info("Scheduler starting up")
tornado.ioloop.IOLoop.instance().start()
def stop():
tornado.ioloop.IOLoop.instance().stop()
if __name__ == "__main__":
run()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from copy import deepcopy
from typing import Dict, List
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLBatchPredictOperator, AutoMLCreateDatasetOperator, AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator, AutoMLDeployModelOperator, AutoMLGetModelOperator, AutoMLImportDataOperator,
AutoMLListDatasetOperator, AutoMLPredictOperator, AutoMLTablesListColumnSpecsOperator,
AutoMLTablesListTableSpecsOperator, AutoMLTablesUpdateDatasetOperator, AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_DATASET_BUCKET = os.environ.get(
"GCP_AUTOML_DATASET_BUCKET", "gs://cloud-ml-tables-data/bank-marketing.csv"
)
TARGET = os.environ.get("GCP_AUTOML_TARGET", "Class")
# Example values
MODEL_ID = "TBL123456"
DATASET_ID = "TBL123456"
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"tables_model_metadata": {"train_budget_milli_node_hours": 1000},
}
# Example dataset
DATASET = {
"display_name": "test_set",
"tables_dataset_metadata": {"target_column_spec_id": ""},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_DATASET_BUCKET]}}
default_args = {"start_date": days_ago(1)}
extract_object_id = CloudAutoMLHook.extract_object_id
def get_target_column_spec(columns_specs: List[Dict], column_name: str) -> str:
"""
    Returns the column spec id for the column with the given display name.
"""
for column in columns_specs:
if column["displayName"] == column_name:
return extract_object_id(column)
return ""
# Example DAG to create dataset, train model_id and deploy it.
with models.DAG(
"example_create_and_deploy",
default_args=default_args,
schedule_interval=None, # Override to match your needs
user_defined_macros={
"get_target_column_spec": get_target_column_spec,
"target": TARGET,
"extract_object_id": extract_object_id,
},
tags=['example'],
) as create_deploy_dag:
# [START howto_operator_automl_create_dataset]
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task",
dataset=DATASET,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
dataset_id = (
"{{ task_instance.xcom_pull('create_dataset_task', key='dataset_id') }}"
)
# [END howto_operator_automl_create_dataset]
MODEL["dataset_id"] = dataset_id
# [START howto_operator_automl_import_data]
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
# [END howto_operator_automl_import_data]
# [START howto_operator_automl_specs]
list_tables_spec_task = AutoMLTablesListTableSpecsOperator(
task_id="list_tables_spec_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_automl_specs]
# [START howto_operator_automl_column_specs]
list_columns_spec_task = AutoMLTablesListColumnSpecsOperator(
task_id="list_columns_spec_task",
dataset_id=dataset_id,
table_spec_id="{{ extract_object_id(task_instance.xcom_pull('list_tables_spec_task')[0]) }}",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_automl_column_specs]
# [START howto_operator_automl_update_dataset]
update = deepcopy(DATASET)
update["name"] = '{{ task_instance.xcom_pull("create_dataset_task")["name"] }}'
update["tables_dataset_metadata"][ # type: ignore
"target_column_spec_id"
] = "{{ get_target_column_spec(task_instance.xcom_pull('list_columns_spec_task'), target) }}"
update_dataset_task = AutoMLTablesUpdateDatasetOperator(
task_id="update_dataset_task",
dataset=update,
location=GCP_AUTOML_LOCATION,
)
# [END howto_operator_automl_update_dataset]
# [START howto_operator_automl_create_model]
create_model_task = AutoMLTrainModelOperator(
task_id="create_model_task",
model=MODEL,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
model_id = "{{ task_instance.xcom_pull('create_model_task', key='model_id') }}"
# [END howto_operator_automl_create_model]
# [START howto_operator_automl_delete_model]
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_automl_delete_model]
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
(
create_dataset_task # noqa
>> import_dataset_task # noqa
>> list_tables_spec_task # noqa
>> list_columns_spec_task # noqa
>> update_dataset_task # noqa
>> create_model_task # noqa
>> delete_model_task # noqa
>> delete_datasets_task # noqa
)
# Example DAG for AutoML datasets operations
with models.DAG(
"example_automl_dataset",
default_args=default_args,
schedule_interval=None, # Override to match your needs
user_defined_macros={"extract_object_id": extract_object_id},
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task",
dataset=DATASET,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
dataset_id = (
'{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
)
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
list_tables_spec_task = AutoMLTablesListTableSpecsOperator(
task_id="list_tables_spec_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
list_columns_spec_task = AutoMLTablesListColumnSpecsOperator(
task_id="list_columns_spec_task",
dataset_id=dataset_id,
table_spec_id="{{ extract_object_id(task_instance.xcom_pull('list_tables_spec_task')[0]) }}",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [START howto_operator_list_dataset]
list_datasets_task = AutoMLListDatasetOperator(
task_id="list_datasets_task",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_list_dataset]
# [START howto_operator_delete_dataset]
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id="{{ task_instance.xcom_pull('list_datasets_task', key='dataset_id_list') | list }}",
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_delete_dataset]
(
create_dataset_task # noqa
>> import_dataset_task # noqa
>> list_tables_spec_task # noqa
>> list_columns_spec_task # noqa
>> list_datasets_task # noqa
>> delete_datasets_task # noqa
)
with models.DAG(
"example_gcp_get_deploy",
default_args=default_args,
schedule_interval=None, # Override to match your needs
) as get_deploy_dag:
# [START howto_operator_get_model]
get_model_task = AutoMLGetModelOperator(
task_id="get_model_task",
model_id=MODEL_ID,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_get_model]
# [START howto_operator_deploy_model]
deploy_model_task = AutoMLDeployModelOperator(
task_id="deploy_model_task",
model_id=MODEL_ID,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_deploy_model]
with models.DAG(
"example_gcp_predict",
default_args=default_args,
schedule_interval=None, # Override to match your needs
) as predict_dag:
# [START howto_operator_prediction]
predict_task = AutoMLPredictOperator(
task_id="predict_task",
model_id=MODEL_ID,
payload={}, # Add your own payload, the used model_id must be deployed
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_prediction]
# [START howto_operator_batch_prediction]
batch_predict_task = AutoMLBatchPredictOperator(
task_id="batch_predict_task",
model_id=MODEL_ID,
input_config={}, # Add your config
output_config={}, # Add your config
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_batch_prediction]
|
|
# pyOCD debugger
# Copyright (c) 2006-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import copy
from .memory_interface import MemoryInterface
from .memory_map import MemoryMap
class Target(MemoryInterface):
class State(Enum):
"""! @brief States a target processor can be in."""
## Core is executing code.
RUNNING = 1
## Core is halted in debug mode.
HALTED = 2
## Core is being held in reset.
RESET = 3
## Core is sleeping due to a wfi or wfe instruction.
SLEEPING = 4
## Core is locked up.
LOCKUP = 5
class SecurityState(Enum):
"""! @brief Security states for a processor with the Security extension."""
## PE is in the Non-secure state.
NONSECURE = 0
## PE is in the Secure state.
SECURE = 1
class ResetType(Enum):
"""! @brief Available reset methods."""
## Hardware reset via the nRESET signal.
HW = 1
## Software reset using the core's default software reset method.
SW = 2
## Software reset using the AIRCR.SYSRESETREQ bit.
SW_SYSRESETREQ = 3
## Software reset the entire system (alias of #SW_SYSRESETREQ).
SW_SYSTEM = SW_SYSRESETREQ
## Software reset using the AIRCR.VECTRESET bit.
#
# v6-M and v8-M targets do not support VECTRESET, so they will fall back to SW_EMULATED.
SW_VECTRESET = 4
## Software reset the core only (alias of #SW_VECTRESET).
SW_CORE = SW_VECTRESET
## Emulated software reset.
SW_EMULATED = 5
class BreakpointType(Enum):
"""! @brief Types of breakpoints."""
## Hardware breakpoint.
HW = 1
## Software breakpoint.
SW = 2
## Auto will select the best type given the address and available breakpoints.
AUTO = 3
class WatchpointType(Enum):
"""! @brief Types of watchpoints."""
## Watchpoint on read accesses.
READ = 1
## Watchpoint on write accesses.
WRITE = 2
## Watchpoint on either read or write accesses.
READ_WRITE = 3
class VectorCatch:
"""! Vector catch option masks.
These constants can be OR'd together to form any combination of vector catch settings.
"""
## Disable vector catch.
NONE = 0
## Trap on HardFault exception.
HARD_FAULT = (1 << 0)
## Trap on BusFault exception.
BUS_FAULT = (1 << 1)
## Trap on MemManage exception.
MEM_FAULT = (1 << 2)
## Trap on fault occurring during exception entry or exit.
INTERRUPT_ERR = (1 << 3)
## Trap on UsageFault exception caused by state information error, such as an undefined
# instruction exception.
STATE_ERR = (1 << 4)
## Trap on UsageFault exception caused by checking error, for example an alignment check error.
CHECK_ERR = (1 << 5)
## Trap on UsageFault exception caused by a failed access to a coprocessor.
COPROCESSOR_ERR = (1 << 6)
## Trap on local reset.
CORE_RESET = (1 << 7)
## Trap SecureFault.
SECURE_FAULT = (1 << 8)
ALL = (HARD_FAULT | BUS_FAULT | MEM_FAULT | INTERRUPT_ERR
| STATE_ERR | CHECK_ERR | COPROCESSOR_ERR | CORE_RESET
| SECURE_FAULT)
class Event(Enum):
"""! Target notification events."""
## Sent after completing the initialisation sequence.
POST_CONNECT = 1
## Sent prior to disconnecting cores and powering down the DP.
PRE_DISCONNECT = 2
## Sent prior to resume or step.
#
# Associated data is a RunType enum.
PRE_RUN = 3
## Sent after a resume or step operation.
#
# For resume, this event will be sent while the target is still running. Use a halt event
# to trap when the target stops running.
#
# Associated data is a RunType enum.
POST_RUN = 4
## Sent prior to a user-invoked halt.
#
# Associated data is a HaltReason enum, which will currently always be HaltReason.USER.
PRE_HALT = 5
## Sent after the target halts.
#
# Associated data is a HaltReason enum.
POST_HALT = 6
## Sent before executing a reset operation.
PRE_RESET = 7
## Sent after the target has been reset.
POST_RESET = 8
## Sent before programming target flash.
PRE_FLASH_PROGRAM = 9
## Sent after target flash has been reprogrammed.
POST_FLASH_PROGRAM = 10
class RunType(Enum):
"""! Run type for run notifications.
An enum of this type is set as the data attribute on PRE_RUN and POST_RUN notifications.
"""
## Target is being resumed.
RESUME = 1
## Target is being stepped one instruction.
STEP = 2
class HaltReason(Enum):
"""! Halt type for halt notifications.
        A value of this type is returned from Target.get_halt_reason(). It is also used as the data
attribute on PRE_HALT and POST_HALT notifications.
"""
## Target halted due to user action.
USER = 1
## Target halted because of a halt or step event.
DEBUG = 2
## Breakpoint event.
BREAKPOINT = 3
## DWT watchpoint event.
WATCHPOINT = 4
## Vector catch event.
VECTOR_CATCH = 5
## External debug request.
EXTERNAL = 6
## PMU event. v8.1-M only.
PMU = 7
def __init__(self, session, memory_map=None):
self._session = session
self._delegate = None
# Make a target-specific copy of the memory map. This is safe to do without locking
# because the memory map may not be mutated until target initialization.
self.memory_map = memory_map.clone() if memory_map else MemoryMap()
self._svd_location = None
self._svd_device = None
@property
def session(self):
return self._session
@property
def delegate(self):
return self._delegate
@delegate.setter
def delegate(self, the_delegate):
self._delegate = the_delegate
def delegate_implements(self, method_name):
return (self._delegate is not None) and (hasattr(self._delegate, method_name))
def call_delegate(self, method_name, *args, **kwargs):
if self.delegate_implements(method_name):
return getattr(self._delegate, method_name)(*args, **kwargs)
else:
# The default action is always taken if None is returned.
return None
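    # Delegate-hook sketch (hypothetical delegate and method name, shown only
    # to illustrate call_delegate):
    #   class MyDelegate:
    #       def will_reset(self, target, reset_type):
    #           pass  # customise pre-reset behaviour here
    #   target.delegate = MyDelegate()
    #   target.call_delegate('will_reset', target, Target.ResetType.SW)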
@property
def svd_device(self):
return self._svd_device
@property
def supported_security_states(self):
raise NotImplementedError()
@property
def core_registers(self):
raise NotImplementedError()
def is_locked(self):
return False
def create_init_sequence(self):
raise NotImplementedError()
def init(self):
raise NotImplementedError()
def disconnect(self, resume=True):
pass
def flush(self):
self.session.probe.flush()
def halt(self):
raise NotImplementedError()
def step(self, disable_interrupts=True, start=0, end=0):
raise NotImplementedError()
def resume(self):
raise NotImplementedError()
def mass_erase(self):
raise NotImplementedError()
def read_core_register(self, id):
raise NotImplementedError()
def write_core_register(self, id, data):
raise NotImplementedError()
def read_core_register_raw(self, reg):
raise NotImplementedError()
def read_core_registers_raw(self, reg_list):
raise NotImplementedError()
def write_core_register_raw(self, reg, data):
raise NotImplementedError()
def write_core_registers_raw(self, reg_list, data_list):
raise NotImplementedError()
def find_breakpoint(self, addr):
raise NotImplementedError()
def set_breakpoint(self, addr, type=BreakpointType.AUTO):
raise NotImplementedError()
def get_breakpoint_type(self, addr):
raise NotImplementedError()
def remove_breakpoint(self, addr):
raise NotImplementedError()
def set_watchpoint(self, addr, size, type):
raise NotImplementedError()
def remove_watchpoint(self, addr, size, type):
raise NotImplementedError()
def reset(self, reset_type=None):
raise NotImplementedError()
def reset_and_halt(self, reset_type=None):
raise NotImplementedError()
def get_state(self):
raise NotImplementedError()
def get_security_state(self):
raise NotImplementedError()
def get_halt_reason(self):
raise NotImplementedError()
@property
def run_token(self):
return 0
def is_running(self):
return self.get_state() == Target.State.RUNNING
def is_halted(self):
return self.get_state() == Target.State.HALTED
def get_memory_map(self):
return self.memory_map
def set_vector_catch(self, enableMask):
raise NotImplementedError()
def get_vector_catch(self):
raise NotImplementedError()
def get_target_context(self, core=None):
raise NotImplementedError()
|
|
"""Library for creating sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn import linear
from tensorflow.models.rnn import rnn
from tensorflow.models.rnn import rnn_cell
def rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: if not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x cell.output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x cell.input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing generated outputs.
states: The state of each cell in each time-step. This is a list with
length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with tf.variable_scope(scope or "rnn_decoder"):
states = [initial_state]
outputs = []
prev = None
for i in xrange(len(decoder_inputs)):
inp = decoder_inputs[i]
if loop_function is not None and prev is not None:
with tf.variable_scope("loop_function", reuse=True):
# We do not propagate gradients over the loop function.
inp = tf.stop_gradient(loop_function(prev, i))
if i > 0:
tf.get_variable_scope().reuse_variables()
output, new_state = cell(inp, states[-1])
outputs.append(output)
states.append(new_state)
if loop_function is not None:
prev = tf.stop_gradient(output)
return outputs, states
def basic_rnn_seq2seq(
encoder_inputs, decoder_inputs, cell, dtype=tf.float32, scope=None):
"""Basic RNN sequence-to-sequence model.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell type, but don't share parameters.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope(scope or "basic_rnn_seq2seq"):
_, enc_states = rnn.rnn(cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, enc_states[-1], cell)
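# Minimal usage sketch (cell size and sequence length are assumed, not part of
# the original library):
#   cell = rnn_cell.BasicLSTMCell(128)
#   enc = [tf.placeholder(tf.float32, [None, 128]) for _ in range(5)]
#   dec = [tf.placeholder(tf.float32, [None, 128]) for _ in range(5)]
#   outputs, states = basic_rnn_seq2seq(enc, dec, cell)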
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
loop_function=None, dtype=tf.float32, scope=None):
"""RNN sequence-to-sequence model with tied encoder and decoder parameters.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell and share parameters.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: if not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol), see rnn_decoder for details.
dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope("combined_tied_rnn_seq2seq"):
scope = scope or "tied_rnn_seq2seq"
_, enc_states = rnn.rnn(
cell, encoder_inputs, dtype=dtype, scope=scope)
tf.get_variable_scope().reuse_variables()
return rnn_decoder(decoder_inputs, enc_states[-1], cell,
loop_function=loop_function, scope=scope)
def embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols,
output_projection=None, feed_previous=False,
scope=None):
"""RNN decoder with embedding and a pure-decoding option.
Args:
decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: integer, how many symbols come into the embedding.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each fed
      previous output will first be multiplied by W and have B added.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
If False, decoder_inputs are used as given (the standard decoder case).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = tf.convert_to_tensor(output_projection[0], dtype=tf.float32)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = tf.convert_to_tensor(output_projection[1], dtype=tf.float32)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with tf.variable_scope(scope or "embedding_rnn_decoder"):
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])
def extract_argmax_and_embed(prev, _):
"""Loop_function that extracts the symbol from prev and embeds it."""
if output_projection is not None:
prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
loop_function = None
if feed_previous:
loop_function = extract_argmax_and_embed
emb_inp = [tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]
return rnn_decoder(emb_inp, initial_state, cell,
loop_function=loop_function)
def embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
output_projection=None, feed_previous=False,
dtype=tf.float32, scope=None):
"""Embedding RNN sequence-to-sequence model.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
cell.input_size]). Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: integer; number of symbols on the encoder side.
num_decoder_symbols: integer; number of symbols on the decoder side.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and have B added.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_seq2seq"
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope(scope or "embedding_rnn_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
_, encoder_states = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
return embedding_rnn_decoder(decoder_inputs, encoder_states[-1], cell,
num_decoder_symbols, output_projection,
feed_previous)
else: # If feed_previous is a Tensor, we construct 2 graphs and use cond.
outputs1, states1 = embedding_rnn_decoder(
decoder_inputs, encoder_states[-1], cell, num_decoder_symbols,
output_projection, True)
tf.get_variable_scope().reuse_variables()
outputs2, states2 = embedding_rnn_decoder(
decoder_inputs, encoder_states[-1], cell, num_decoder_symbols,
output_projection, False)
outputs = tf.control_flow_ops.cond(feed_previous,
lambda: outputs1, lambda: outputs2)
states = tf.control_flow_ops.cond(feed_previous,
lambda: states1, lambda: states2)
return outputs, states
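# A minimal usage sketch, not part of the original library: it assumes a GRU
# cell of size 64, toy vocabularies of 1000 symbols, and 8 time-steps; the
# helper name and all sizes are illustrative only. Defining (without calling)
# the helper adds no ops to the graph.
def _example_embedding_rnn_seq2seq():
  cell = rnn_cell.GRUCell(64)
  # One 1D batch-sized int32 Tensor of symbol ids per time-step.
  encoder_inputs = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(8)]
  decoder_inputs = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(8)]
  return embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
                               num_encoder_symbols=1000,
                               num_decoder_symbols=1000)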
def embedding_tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_symbols, output_projection=None,
feed_previous=False, dtype=tf.float32,
scope=None):
"""Embedding RNN sequence-to-sequence model with tied (shared) parameters.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_symbols x cell.input_size]). Then it runs an RNN to encode embedded
encoder_inputs into a state vector. Next, it embeds decoder_inputs using
  the same embedding. Then it runs an RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
    encoder_inputs: a list of 1D batch-sized int32-Tensors (encoder symbol ids).
    decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder symbol ids).
cell: rnn_cell.RNNCell defining the cell function and size.
num_symbols: integer; number of symbols for both encoder and decoder.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and have B added.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the initial RNN states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_tied_rnn_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
      shape [batch_size x num_symbols] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = tf.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with tf.variable_scope(scope or "embedding_tied_rnn_seq2seq"):
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])
emb_encoder_inputs = [tf.nn.embedding_lookup(embedding, x)
for x in encoder_inputs]
emb_decoder_inputs = [tf.nn.embedding_lookup(embedding, x)
for x in decoder_inputs]
def extract_argmax_and_embed(prev, _):
"""Loop_function that extracts the symbol from prev and embeds it."""
if output_projection is not None:
prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_symbols)
if isinstance(feed_previous, bool):
loop_function = extract_argmax_and_embed if feed_previous else None
return tied_rnn_seq2seq(emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=loop_function, dtype=dtype)
else: # If feed_previous is a Tensor, we construct 2 graphs and use cond.
outputs1, states1 = tied_rnn_seq2seq(
emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=extract_argmax_and_embed, dtype=dtype)
tf.get_variable_scope().reuse_variables()
outputs2, states2 = tied_rnn_seq2seq(
emb_encoder_inputs, emb_decoder_inputs, cell, dtype=dtype)
outputs = tf.control_flow_ops.cond(feed_previous,
lambda: outputs1, lambda: outputs2)
states = tf.control_flow_ops.cond(feed_previous,
lambda: states1, lambda: states2)
return outputs, states
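# A minimal sketch of the tied variant, not part of the original library: the
# encoder and decoder share one vocabulary and one embedding, so a single list
# of symbol-id placeholders can feed both sides. All sizes are illustrative.
def _example_embedding_tied_rnn_seq2seq():
  cell = rnn_cell.GRUCell(32)
  symbol_ids = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(6)]
  return embedding_tied_rnn_seq2seq(symbol_ids, symbol_ids, cell,
                                    num_symbols=500)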
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
output_size=None, num_heads=1, loop_function=None,
dtype=tf.float32, scope=None):
"""RNN decoder with attention for the sequence-to-sequence model.
Args:
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function and size.
output_size: size of the output vectors; if None, we use cell.output_size.
num_heads: number of attention heads that read from attention_states.
loop_function: if not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x cell.output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x cell.input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors of shape
[batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either i-th decoder_inputs or
loop_function(output {i-1}, i)) as follows. First, we run the cell
on a combination of the input and previous attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, or shapes
of attention_states are not set.
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
  # Both the attention length (dim 1) and size (dim 2) must be statically known.
  if not attention_states.get_shape()[1:3].is_fully_defined():
raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
% attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with tf.variable_scope(scope or "attention_decoder"):
batch_size = tf.shape(decoder_inputs[0])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = tf.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size])
hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(tf.get_variable("AttnV_%d" % a, [attention_vec_size]))
states = [initial_state]
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = [] # Results of attention reads will be stored here.
for a in xrange(num_heads):
with tf.variable_scope("Attention_%d" % a):
y = linear.linear(query, attention_vec_size, True)
y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
# Attention mask is a softmax of v^T * tanh(...).
s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
a = tf.nn.softmax(s)
# Now calculate the attention-weighted vector d.
d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
[1, 2])
ds.append(tf.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = tf.pack([batch_size, attn_size])
attns = [tf.zeros(batch_attn_size, dtype=dtype)
for _ in xrange(num_heads)]
for a in attns: # Ensure the second shape of attention vectors is set.
a.set_shape([None, attn_size])
for i in xrange(len(decoder_inputs)):
if i > 0:
tf.get_variable_scope().reuse_variables()
inp = decoder_inputs[i]
# If loop_function is set, we use it instead of decoder_inputs.
if loop_function is not None and prev is not None:
with tf.variable_scope("loop_function", reuse=True):
inp = tf.stop_gradient(loop_function(prev, i))
# Merge input and previous attentions into one vector of the right size.
x = linear.linear([inp] + attns, cell.input_size, True)
# Run the RNN.
cell_output, new_state = cell(x, states[-1])
states.append(new_state)
# Run the attention mechanism.
attns = attention(new_state)
with tf.variable_scope("AttnOutputProjection"):
output = linear.linear([cell_output] + attns, output_size, True)
if loop_function is not None:
# We do not propagate gradients over the loop function.
prev = tf.stop_gradient(output)
outputs.append(output)
return outputs, states
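# A minimal sketch, not part of the original library: attention_decoder expects
# already-embedded decoder inputs, an initial state, and a 3D block of
# attendable encoder states. The static sizes below (cell of 32, 7 attendable
# positions, 5 decoding steps) are illustrative assumptions.
def _example_attention_decoder():
  cell = rnn_cell.GRUCell(32)
  decoder_inputs = [tf.placeholder(tf.float32, shape=[None, 32])
                    for _ in xrange(5)]
  initial_state = tf.placeholder(tf.float32, shape=[None, cell.state_size])
  attention_states = tf.placeholder(tf.float32, shape=[None, 7, 32])
  return attention_decoder(decoder_inputs, initial_state, attention_states,
                           cell)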
def embedding_attention_decoder(decoder_inputs, initial_state, attention_states,
cell, num_symbols, num_heads=1,
output_size=None, output_projection=None,
feed_previous=False, dtype=tf.float32,
scope=None):
"""RNN decoder with embedding and attention and a pure-decoding option.
Args:
decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: integer, how many symbols come into the embedding.
num_heads: number of attention heads that read from attention_states.
output_size: size of the output vectors; if None, use cell.output_size.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
      output will first be multiplied by W and have B added.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
        next = embedding_lookup(embedding, argmax(previous_output)).
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the RNN initial states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when output_projection has the wrong shape.
"""
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_weights = tf.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with tf.variable_scope(scope or "embedding_attention_decoder"):
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])
def extract_argmax_and_embed(prev, _):
"""Loop_function that extracts the symbol from prev and embeds it."""
if output_projection is not None:
prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
return emb_prev
loop_function = None
if feed_previous:
loop_function = extract_argmax_and_embed
emb_inp = [tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]
return attention_decoder(
emb_inp, initial_state, attention_states, cell, output_size=output_size,
num_heads=num_heads, loop_function=loop_function)
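# A minimal sketch, not part of the original library: unlike attention_decoder
# above, this variant takes raw int32 symbol ids and embeds them internally.
# The vocabulary of 200 symbols and the other sizes are illustrative
# assumptions.
def _example_embedding_attention_decoder():
  cell = rnn_cell.GRUCell(32)
  decoder_inputs = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(5)]
  initial_state = tf.placeholder(tf.float32, shape=[None, cell.state_size])
  attention_states = tf.placeholder(tf.float32, shape=[None, 7, 32])
  return embedding_attention_decoder(decoder_inputs, initial_state,
                                     attention_states, cell, num_symbols=200)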
def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
num_heads=1, output_projection=None,
feed_previous=False, dtype=tf.float32,
scope=None):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
  cell.input_size]). Then it runs an attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Args:
    encoder_inputs: a list of 1D batch-sized int32-Tensors (encoder symbol ids).
    decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder symbol ids).
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: integer; number of symbols on the encoder side.
num_decoder_symbols: integer; number of symbols on the decoder side.
num_heads: number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
      fed previous output will first be multiplied by W and have B added.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope(scope or "embedding_attention_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
encoder_outputs, encoder_states = rnn.rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [tf.reshape(e, [-1, 1, cell.output_size])
for e in encoder_outputs]
attention_states = tf.concat(1, top_states)
# Decoder.
output_size = None
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs, encoder_states[-1], attention_states, cell,
num_decoder_symbols, num_heads, output_size, output_projection,
feed_previous)
else: # If feed_previous is a Tensor, we construct 2 graphs and use cond.
outputs1, states1 = embedding_attention_decoder(
decoder_inputs, encoder_states[-1], attention_states, cell,
num_decoder_symbols, num_heads, output_size, output_projection, True)
tf.get_variable_scope().reuse_variables()
outputs2, states2 = embedding_attention_decoder(
decoder_inputs, encoder_states[-1], attention_states, cell,
num_decoder_symbols, num_heads, output_size, output_projection, False)
outputs = tf.control_flow_ops.cond(feed_previous,
lambda: outputs1, lambda: outputs2)
states = tf.control_flow_ops.cond(feed_previous,
lambda: states1, lambda: states2)
return outputs, states
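# A minimal end-to-end sketch, not part of the original library: symbol ids go
# in on both sides and one [batch_size x num_decoder_symbols] output comes out
# per decoder step. The vocabulary and cell sizes are illustrative assumptions.
def _example_embedding_attention_seq2seq():
  cell = rnn_cell.GRUCell(64)
  encoder_inputs = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(8)]
  decoder_inputs = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(8)]
  return embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
                                     num_encoder_symbols=1000,
                                     num_decoder_symbols=1000)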
def sequence_loss_by_example(logits, targets, weights, num_decoder_symbols,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: list of 1D batch-sized int32-Tensors of the same length as logits.
weights: list of 1D batch-sized float-Tensors of the same length as logits.
num_decoder_symbols: integer, number of decoder symbols (output classes).
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: the log-perplexity for each sequence.
Raises:
ValueError: if len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with tf.op_scope(logits + targets + weights, name,
"sequence_loss_by_example"):
batch_size = tf.shape(targets[0])[0]
log_perp_list = []
length = batch_size * num_decoder_symbols
for i in xrange(len(logits)):
if softmax_loss_function is None:
# TODO(lukaszkaiser): There is no SparseCrossEntropy in TensorFlow, so
# we need to first cast targets into a dense representation, and as
# SparseToDense does not accept batched inputs, we need to do this by
# re-indexing and re-sizing. When TensorFlow adds SparseCrossEntropy,
# rewrite this method.
indices = targets[i] + num_decoder_symbols * tf.range(batch_size)
with tf.device("/cpu:0"): # Sparse-to-dense must happen on CPU for now.
dense = tf.sparse_to_dense(indices, tf.expand_dims(length, 0), 1.0,
0.0)
target = tf.reshape(dense, [-1, num_decoder_symbols])
crossent = tf.nn.softmax_cross_entropy_with_logits(
logits[i], target, name="SequenceLoss/CrossEntropy{0}".format(i))
else:
crossent = softmax_loss_function(logits[i], targets[i])
log_perp_list.append(crossent * weights[i])
log_perps = tf.add_n(log_perp_list)
if average_across_timesteps:
total_size = tf.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
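# A minimal sketch, not part of the original library: per-sequence
# log-perplexity for a 3-step decoder with 10 output classes. The placeholder
# shapes mirror the Args description; all sizes are illustrative assumptions.
def _example_sequence_loss_by_example():
  logits = [tf.placeholder(tf.float32, shape=[None, 10]) for _ in xrange(3)]
  targets = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(3)]
  weights = [tf.placeholder(tf.float32, shape=[None]) for _ in xrange(3)]
  # Returns a 1D batch-sized Tensor; nothing is averaged across the batch.
  return sequence_loss_by_example(logits, targets, weights,
                                  num_decoder_symbols=10)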
def sequence_loss(logits, targets, weights, num_decoder_symbols,
average_across_timesteps=True, average_across_batch=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
    logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: list of 1D batch-sized int32-Tensors of the same length as logits.
weights: list of 1D batch-sized float-Tensors of the same length as logits.
num_decoder_symbols: integer, number of decoder symbols (output classes).
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: the average log-perplexity per symbol (weighted).
Raises:
ValueError: if len(logits) is different from len(targets) or len(weights).
"""
with tf.op_scope(logits + targets + weights, name, "sequence_loss"):
cost = tf.reduce_sum(sequence_loss_by_example(
logits, targets, weights, num_decoder_symbols,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = tf.shape(targets[0])[0]
return cost / tf.cast(batch_size, tf.float32)
else:
return cost
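# A minimal sketch, not part of the original library: the batch-collapsed loss
# reduces the per-example losses above to one scalar, which is what a training
# step would typically minimize. Shapes mirror the previous sketch.
def _example_sequence_loss():
  logits = [tf.placeholder(tf.float32, shape=[None, 10]) for _ in xrange(3)]
  targets = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(3)]
  weights = [tf.placeholder(tf.float32, shape=[None]) for _ in xrange(3)]
  return sequence_loss(logits, targets, weights, num_decoder_symbols=10)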
def model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,
buckets, num_decoder_symbols, seq2seq,
softmax_loss_function=None, name=None):
"""Create a sequence-to-sequence model with support for bucketing.
The seq2seq argument is a function that defines a sequence-to-sequence model,
e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))
Args:
encoder_inputs: a list of Tensors to feed the encoder; first seq2seq input.
decoder_inputs: a list of Tensors to feed the decoder; second seq2seq input.
targets: a list of 1D batch-sized int32-Tensors (desired output sequence).
weights: list of 1D batch-sized float-Tensors to weight the targets.
buckets: a list of pairs of (input size, output size) for each bucket.
num_decoder_symbols: integer, number of decoder symbols (output classes).
    seq2seq: a sequence-to-sequence model function; it takes 2 inputs that
agree with encoder_inputs and decoder_inputs, and returns a pair
consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: optional name for this operation, defaults to "model_with_buckets".
Returns:
outputs: The outputs for each bucket. Its j'th element consists of a list
of 2D Tensors of shape [batch_size x num_decoder_symbols] (j'th outputs).
losses: List of scalar Tensors, representing losses for each bucket.
Raises:
    ValueError: if length of encoder_inputs, targets, or weights is smaller
than the largest (last) bucket.
"""
if len(encoder_inputs) < buckets[-1][0]:
raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
"st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last"
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last"
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = encoder_inputs + decoder_inputs + targets + weights
losses = []
outputs = []
with tf.op_scope(all_inputs, name, "model_with_buckets"):
for j in xrange(len(buckets)):
if j > 0:
tf.get_variable_scope().reuse_variables()
bucket_encoder_inputs = [encoder_inputs[i]
for i in xrange(buckets[j][0])]
bucket_decoder_inputs = [decoder_inputs[i]
for i in xrange(buckets[j][1])]
bucket_outputs, _ = seq2seq(bucket_encoder_inputs,
bucket_decoder_inputs)
outputs.append(bucket_outputs)
bucket_targets = [targets[i] for i in xrange(buckets[j][1])]
bucket_weights = [weights[i] for i in xrange(buckets[j][1])]
losses.append(sequence_loss(
outputs[-1], bucket_targets, bucket_weights, num_decoder_symbols,
softmax_loss_function=softmax_loss_function))
return outputs, losses
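# A minimal sketch, not part of the original library: two toy buckets served by
# one embedding_rnn_seq2seq closure. Enough placeholders are created to cover
# the largest bucket, as the length checks above require; all sizes (buckets,
# vocabulary of 100, cell of 32) are illustrative assumptions.
def _example_model_with_buckets():
  buckets = [(4, 4), (8, 8)]
  cell = rnn_cell.GRUCell(32)
  encoder_inputs = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(8)]
  decoder_inputs = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(8)]
  targets = [tf.placeholder(tf.int32, shape=[None]) for _ in xrange(8)]
  weights = [tf.placeholder(tf.float32, shape=[None]) for _ in xrange(8)]
  seq2seq_f = lambda x, y: embedding_rnn_seq2seq(x, y, cell, 100, 100)
  # Variables are created for the first bucket and reused for the second.
  return model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,
                            buckets, 100, seq2seq_f)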